diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 00000000000..b6bc14420c7 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,3 @@ +# Migrated code style to Black +eabf877cbb86b281fdd37a3fa3cc0edf9b8eb874 +321463922724b225988e517da54a18bad90bc316 diff --git a/.github/PULL_REQUEST_TEMPLATE b/.github/PULL_REQUEST_TEMPLATE index 9d2462105cc..b6af6eaf962 100644 --- a/.github/PULL_REQUEST_TEMPLATE +++ b/.github/PULL_REQUEST_TEMPLATE @@ -4,13 +4,13 @@ Lines should be wrapped at about 72 characters. Please also update the CIME documentation, if necessary, in doc/source/rst and indicate below if you need to have the gh-pages html regenerated.] -Test suite: -Test baseline: -Test namelist changes: +Test suite: +Test baseline: +Test namelist changes: Test status: [bit for bit, roundoff, climate changing] Fixes [CIME Github issue #] -User interface changes?: +User interface changes?: Update gh-pages html (Y/N)?: diff --git a/.github/workflows/srt.yml b/.github/workflows/srt.yml index 459fbaf4cf9..bec8fb952a2 100644 --- a/.github/workflows/srt.yml +++ b/.github/workflows/srt.yml @@ -12,6 +12,27 @@ on: # A workflow run is made up of one or more jobs that can run sequentially or in parallel jobs: + pre-commit: + runs-on: ubuntu-latest + timeout-minutes: 2 + steps: + - name: Checkout code + uses: actions/checkout@v2 + - name: Set up python + uses: actions/setup-python@v2 + with: + python-version: 3.9 + # Official action is deprecated in favor of pre-commit.ci + # Should evaluate switching or just running manually. + # - name: Runs pre-commit action + # # Do not run if using act tooling (https://github.com/nektos/act) + # if: ${{ !env.ACT }} + # uses: pre-commit/action@v2.0.3 + - name: Runs pre-commit + run: | + pip install pre-commit + pre-commit run -a + # This workflow contains a single job called "build" build: # The type of runner that the job will run on @@ -41,6 +62,12 @@ jobs: - uses: actions/checkout@v2 + - name: ccs_config checkout + uses: actions/checkout@v2 + with: + repository: ESMCI/ccs_config_cesm + path: ccs_config + - name: share checkout uses: actions/checkout@v2 with: @@ -142,11 +169,12 @@ jobs: pwd mkdir -p $HOME/cesm/scratch mkdir -p $HOME/cesm/inputdata - cd $HOME/work/cime/cime/scripts/tests + cd $HOME/work/cime/cime export NETCDF=$HOME/netcdf-fortran export PATH=$NETCDF/bin:$PATH export LD_LIBRARY_PATH=$NETCDF/lib:$HOME/pnetcdf/lib:$LD_LIBRARY_PATH - ./scripts_regression_tests.py --no-fortran-run --compiler gnu --mpilib openmpi --machine ubuntu-latest + python -m pip install pytest pytest-cov + pytest -vvv --no-fortran-run --compiler gnu --mpilib openmpi --machine ubuntu-latest # the following can be used by developers to login to the github server in case of errors # see https://github.com/marketplace/actions/debugging-with-tmate for further details diff --git a/.github/workflows/srt_nuopc.yml b/.github/workflows/srt_nuopc.yml index b9d5485a517..18219cc93f0 100644 --- a/.github/workflows/srt_nuopc.yml +++ b/.github/workflows/srt_nuopc.yml @@ -32,7 +32,7 @@ jobs: NETCDF_C_PATH: /usr NETCDF_FORTRAN_PATH: ${HOME}/netcdf-fortran PNETCDF_PATH: ${HOME}/pnetcdf - ESMF_VERSION: ESMF_8_2_0_beta_snapshot_14 + ESMF_VERSION: ESMF_8_3_0_beta_snapshot_05 CIME_MODEL: cesm # Steps represent a sequence of tasks that will be executed as part of the job @@ -41,6 +41,12 @@ jobs: - uses: actions/checkout@v2 + - name: ccs_config checkout + uses: actions/checkout@v2 + with: + repository: ESMCI/ccs_config_cesm + path: ccs_config + - name: share
checkout uses: actions/checkout@v2 with: @@ -180,12 +186,13 @@ jobs: run: | mkdir -p $HOME/cesm/scratch mkdir -p $HOME/cesm/inputdata - cd $HOME/work/cime/cime/scripts/tests + cd $HOME/work/cime/cime export NETCDF=$HOME/netcdf-fortran export PATH=$NETCDF/bin:$PATH export LD_LIBRARY_PATH=$NETCDF/lib:$HOME/pnetcdf/lib:$LD_LIBRARY_PATH export ESMFMKFILE=$HOME/ESMF/lib/libg/Linux.gfortran.64.openmpi.default/esmf.mk - ./scripts_regression_tests.py --no-fortran-run --compiler gnu --mpilib openmpi --machine ubuntu-latest + python -m pip install pytest pytest-cov + pytest -vvv --no-fortran-run --compiler gnu --mpilib openmpi --machine ubuntu-latest # the following can be used by developers to login to the github server in case of errors diff --git a/.gitignore b/.gitignore index b5853a232de..58405be16e1 100644 --- a/.gitignore +++ b/.gitignore @@ -25,4 +25,3 @@ scripts/Tools/JENKINS* components libraries share - diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000000..3fb2cba4a0c --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,23 @@ +exclude: ^utils/.*$ + +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.0.1 + hooks: + - id: check-xml + files: config/ + - id: end-of-file-fixer + - id: trailing-whitespace + - repo: https://github.com/psf/black + rev: 21.10b0 + hooks: + - id: black + files: scripts/lib/CIME + - repo: https://github.com/PyCQA/pylint + rev: v2.11.1 + hooks: + - id: pylint + args: + - --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import,fixme,broad-except,bare-except,eval-used,exec-used,global-statement,logging-format-interpolation,no-name-in-module,arguments-renamed,unspecified-encoding,protected-access,import-error + files: scripts/lib/CIME + exclude: scripts/lib/CIME/tests diff --git a/CIME/BuildTools/cmakemacroswriter.py b/CIME/BuildTools/cmakemacroswriter.py index 89668299398..2bdfdd1e81e 100644 --- a/CIME/BuildTools/cmakemacroswriter.py +++ b/CIME/BuildTools/cmakemacroswriter.py @@ -12,6 +12,7 @@ from CIME.BuildTools.macrowriterbase import MacroWriterBase from CIME.XML.standard_module_setup import * + logger = logging.getLogger(__name__) @@ -64,9 +65,13 @@ def shell_command_strings(self, command): # counter so that we get a different value next time. 
var_name = "CIME_TEMP_SHELL" + str(self._var_num) self._var_num += 1 - set_up = "execute_process(COMMAND " + command + \ - " OUTPUT_VARIABLE " + var_name + \ - " OUTPUT_STRIP_TRAILING_WHITESPACE)" + set_up = ( + "execute_process(COMMAND " + + command + + " OUTPUT_VARIABLE " + + var_name + + " OUTPUT_STRIP_TRAILING_WHITESPACE)" + ) tear_down = "unset(" + var_name + ")" return (set_up, "${" + var_name + "}", tear_down) diff --git a/CIME/BuildTools/configure.py b/CIME/BuildTools/configure.py index adf76374fe4..64f57d94d9e 100644 --- a/CIME/BuildTools/configure.py +++ b/CIME/BuildTools/configure.py @@ -19,12 +19,27 @@ from CIME.utils import expect, safe_copy from CIME.XML.compilers import Compilers from CIME.XML.env_mach_specific import EnvMachSpecific +from CIME.XML.files import Files + +import shutil logger = logging.getLogger(__name__) -def configure(machobj, output_dir, macros_format, compiler, mpilib, debug, - comp_interface, sysos, unit_testing=False, noenv=False, threaded=False, - extra_machines_dir=None): + +def configure( + machobj, + output_dir, + macros_format, + compiler, + mpilib, + debug, + comp_interface, + sysos, + unit_testing=False, + noenv=False, + threaded=False, + extra_machines_dir=None, +): """Add Macros, Depends, and env_mach_specific files to a directory. Arguments: @@ -41,16 +56,60 @@ def configure(machobj, output_dir, macros_format, compiler, mpilib, debug, searched for a config_compilers.xml file. """ # Macros generation. - suffixes = {'Makefile': 'make', 'CMake': 'cmake'} - macro_maker = Compilers(machobj, compiler=compiler, mpilib=mpilib, - extra_machines_dir=extra_machines_dir) + suffixes = {"Makefile": "make", "CMake": "cmake"} + + new_cmake_macros_dir = Files(comp_interface=comp_interface).get_value( + "CMAKE_MACROS_DIR" + ) + macro_maker = None for form in macros_format: - out_file_name = os.path.join(output_dir,"Macros."+suffixes[form]) - macro_maker.write_macros_file(macros_file=out_file_name, output_format=suffixes[form]) - copy_depends_files(machobj.get_machine_name(), machobj.machines_dir, output_dir, compiler) - generate_env_mach_specific(output_dir, machobj, compiler, mpilib, - debug, comp_interface, sysos, unit_testing, threaded, noenv=noenv) + if ( + form == "CMake" + and new_cmake_macros_dir is not None + and os.path.exists(new_cmake_macros_dir) + and not "CIME_NO_CMAKE_MACRO" in os.environ + ): + if not os.path.isfile(os.path.join(output_dir, "Macros.cmake")): + safe_copy( + os.path.join(new_cmake_macros_dir, "Macros.cmake"), output_dir + ) + if not os.path.exists(os.path.join(output_dir, "cmake_macros")): + shutil.copytree( + new_cmake_macros_dir, os.path.join(output_dir, "cmake_macros") + ) + + else: + logger.warning("Using deprecated CIME makefile generators") + if macro_maker is None: + macro_maker = Compilers( + machobj, + compiler=compiler, + mpilib=mpilib, + extra_machines_dir=extra_machines_dir, + ) + + out_file_name = os.path.join(output_dir, "Macros." 
+ suffixes[form]) + macro_maker.write_macros_file( + macros_file=out_file_name, output_format=suffixes[form] + ) + + copy_depends_files( + machobj.get_machine_name(), machobj.machines_dir, output_dir, compiler + ) + generate_env_mach_specific( + output_dir, + machobj, + compiler, + mpilib, + debug, + comp_interface, + sysos, + unit_testing, + threaded, + noenv=noenv, + ) + def copy_depends_files(machine_name, machines_dir, output_dir, compiler): """ @@ -74,21 +133,40 @@ def copy_depends_files(machine_name, machines_dir, output_dir, compiler): if not os.path.exists(outputdfile): safe_copy(dfile, outputdfile) -class FakeCase(object): +class FakeCase(object): def __init__(self, compiler, mpilib, debug, comp_interface): # PIO_VERSION is needed to parse config_machines.xml but isn't otherwise used # by FakeCase - self._vals = {"COMPILER":compiler, "MPILIB":mpilib, "DEBUG":debug, - "COMP_INTERFACE":comp_interface, "PIO_VERSION":2, - "SMP_PRESENT":False} + self._vals = { + "COMPILER": compiler, + "MPILIB": mpilib, + "DEBUG": debug, + "COMP_INTERFACE": comp_interface, + "PIO_VERSION": 2, + "SMP_PRESENT": False, + } def get_value(self, attrib): - expect(attrib in self._vals, "FakeCase does not support getting value of '%s'" % attrib) + expect( + attrib in self._vals, + "FakeCase does not support getting value of '%s'" % attrib, + ) return self._vals[attrib] -def generate_env_mach_specific(output_dir, machobj, compiler, mpilib, debug, - comp_interface, sysos, unit_testing, threaded, noenv=False): + +def generate_env_mach_specific( + output_dir, + machobj, + compiler, + mpilib, + debug, + comp_interface, + sysos, + unit_testing, + threaded, + noenv=False, +): """ env_mach_specific generation. """ @@ -97,8 +175,13 @@ def generate_env_mach_specific(output_dir, machobj, compiler, mpilib, debug, logger.warning("{} already exists, delete to replace".format(ems_path)) return - ems_file = EnvMachSpecific(output_dir, unit_testing=unit_testing, standalone_configure=True) - ems_file.populate(machobj,attributes={"mpilib":mpilib,"compiler":compiler,"threaded":threaded}) + ems_file = EnvMachSpecific( + output_dir, unit_testing=unit_testing, standalone_configure=True + ) + ems_file.populate( + machobj, + attributes={"mpilib": mpilib, "compiler": compiler, "threaded": threaded}, + ) ems_file.write() if noenv: @@ -106,11 +189,11 @@ def generate_env_mach_specific(output_dir, machobj, compiler, mpilib, debug, fake_case = FakeCase(compiler, mpilib, debug, comp_interface) ems_file.load_env(fake_case) - for shell in ('sh', 'csh'): + for shell in ("sh", "csh"): ems_file.make_env_mach_specific_file(shell, fake_case, output_dir=output_dir) shell_path = os.path.join(output_dir, ".env_mach_specific." 
+ shell) - with open(shell_path, 'a') as shell_file: - if shell == 'sh': + with open(shell_path, "a") as shell_file: + if shell == "sh": shell_file.write("\nexport COMPILER={}\n".format(compiler)) shell_file.write("export MPILIB={}\n".format(mpilib)) shell_file.write("export DEBUG={}\n".format(repr(debug).upper())) diff --git a/CIME/BuildTools/macroconditiontree.py b/CIME/BuildTools/macroconditiontree.py index 4a7026b88bf..fd724d01132 100644 --- a/CIME/BuildTools/macroconditiontree.py +++ b/CIME/BuildTools/macroconditiontree.py @@ -1,7 +1,9 @@ from CIME.XML.standard_module_setup import * + logger = logging.getLogger(__name__) -class MacroConditionTree(object): # pylint: disable=too-many-instance-attributes + +class MacroConditionTree(object): # pylint: disable=too-many-instance-attributes """Tree containing the various possible settings of a specific macro. @@ -48,11 +50,12 @@ def __init__(self, name, settings): for setting in settings: if not setting.do_append: self._do_append = False - assert len(settings) == 1, \ - "Internal error in macros: An ambiguity was " \ - "found after the ambiguity check was complete, " \ - "or there is a mixture of appending and initial " \ + assert len(settings) == 1, ( + "Internal error in macros: An ambiguity was " + "found after the ambiguity check was complete, " + "or there is a mixture of appending and initial " "settings in the condition tree." + ) self._assignments.append((name, setting.value)) self._set_up += setting.set_up self._tear_down += setting.tear_down @@ -73,8 +76,7 @@ def __init__(self, name, settings): partition[cond_val] = [setting] branches = dict() for cond_val in partition: - branches[cond_val] = \ - MacroConditionTree(name, partition[cond_val]) + branches[cond_val] = MacroConditionTree(name, partition[cond_val]) self._branches = branches # pylint shouldn't concern itself with the way that we access other, since @@ -88,10 +90,11 @@ def merge(self, other): """ if self._is_leaf: if other._is_leaf: - assert self._do_append == other._do_append, \ - "Internal error in macros: Tried to merge an " \ - "appending tree with a tree containing initial "\ + assert self._do_append == other._do_append, ( + "Internal error in macros: Tried to merge an " + "appending tree with a tree containing initial " "settings." + ) # If both are leaves, just merge the values. self._assignments += other._assignments self._set_up += other._set_up @@ -121,11 +124,13 @@ def merge(self, other): # their sets of branches. 
for (cond_val, other_branch) in other._branches.items(): if cond_val in self._branches: - self._branches[cond_val] = \ - self._branches[cond_val].merge(other_branch) + self._branches[cond_val] = self._branches[cond_val].merge( + other_branch + ) else: self._branches[cond_val] = other_branch return self + # pylint:enable=protected-access def write_out(self, writer): @@ -159,6 +164,7 @@ def write_out(self, writer): self._branches[cond_val].write_out(writer) writer.end_ifeq() + def merge_optional_trees(tree, big_tree): """Merge two MacroConditionTrees when one or both objects may be `None`.""" if tree is not None: diff --git a/CIME/BuildTools/macrowriterbase.py b/CIME/BuildTools/macrowriterbase.py index 6acf14d487e..0bd81c25b71 100644 --- a/CIME/BuildTools/macrowriterbase.py +++ b/CIME/BuildTools/macrowriterbase.py @@ -17,6 +17,7 @@ logger = logging.getLogger(__name__) + def _get_components(value): """ >>> value = '-something ${shell ${NETCDF_PATH}/bin/nf-config --flibs} -lblas -llapack' @@ -34,7 +35,7 @@ def _get_components(value): curr_comp = "" idx = 0 while idx < len(value): - if value[idx:idx+8] == "${shell ": + if value[idx : idx + 8] == "${shell ": if curr_comp: components.append((False, curr_comp.strip())) curr_comp = "" @@ -70,6 +71,7 @@ def _get_components(value): return components + @add_metaclass(ABCMeta) class MacroWriterBase(object): @@ -117,7 +119,7 @@ def __init__(self, output): def indent_string(self): """Return an appropriate number of spaces for the indent.""" - return ' ' * self._indent_num + return " " * self._indent_num def indent_left(self): """Decrease the amount of line indent.""" @@ -132,7 +134,7 @@ def write_line(self, line): A trailing newline is added, whether or not the input has one. """ - self.output.write(u"{}{}\n".format(self.indent_string(), line)) + self.output.write("{}{}\n".format(self.indent_string(), line)) @abstractmethod def environment_variable_string(self, name): diff --git a/CIME/BuildTools/makemacroswriter.py b/CIME/BuildTools/makemacroswriter.py index 7cc4b38748c..c6e53987edc 100644 --- a/CIME/BuildTools/makemacroswriter.py +++ b/CIME/BuildTools/makemacroswriter.py @@ -9,11 +9,13 @@ from CIME.BuildTools.macrowriterbase import MacroWriterBase from CIME.XML.standard_module_setup import * -#logger = logging.getLogger(__name__) + +# logger = logging.getLogger(__name__) # This is not the most useful check. # pylint: disable=invalid-name + class MakeMacroWriter(MacroWriterBase): """Macro writer for the Makefile format. diff --git a/CIME/BuildTools/possiblevalues.py b/CIME/BuildTools/possiblevalues.py index 837c702824c..b8a11ae6c2b 100644 --- a/CIME/BuildTools/possiblevalues.py +++ b/CIME/BuildTools/possiblevalues.py @@ -3,6 +3,7 @@ logger = logging.getLogger(__name__) + class PossibleValues(object): """Holds a list of settings for a single "Macros" variable. @@ -111,16 +112,19 @@ def ambiguity_check(self): This function raises an error if an ambiguity is found. """ - for i in range(len(self.settings)-1): - for j in range(i+1, len(self.settings)): + for i in range(len(self.settings) - 1): + for j in range(i + 1, len(self.settings)): if self._specificities[i] != self._specificities[j]: continue other = self.settings[j] - expect(not self.settings[i].is_ambiguous_with(other), - "Variable "+self.name+" is set ambiguously in " - "config_compilers.xml. 
Check the file for these " - "conflicting settings: \n1: {}\n2: {}".format( - self.settings[i].conditions, other.conditions)) + expect( + not self.settings[i].is_ambiguous_with(other), + "Variable " + self.name + " is set ambiguously in " + "config_compilers.xml. Check the file for these " + "conflicting settings: \n1: {}\n2: {}".format( + self.settings[i].conditions, other.conditions + ), + ) def dependencies(self): """Returns a set of names of variables needed to set this variable.""" @@ -148,9 +152,11 @@ def to_cond_trees(self): # Build trees, starting from the least specific and working up. normal_trees = {} for specificity in specificities: - settings_for_tree = [self.settings[i] - for i in range(len(self.settings)) - if self._specificities[i] == specificity] + settings_for_tree = [ + self.settings[i] + for i in range(len(self.settings)) + if self._specificities[i] == specificity + ] normal_trees[specificity] = MacroConditionTree(self.name, settings_for_tree) if self.append_settings: append_tree = MacroConditionTree(self.name, self.append_settings) diff --git a/CIME/BuildTools/valuesetting.py b/CIME/BuildTools/valuesetting.py index 706e8fb01f4..31af679f046 100644 --- a/CIME/BuildTools/valuesetting.py +++ b/CIME/BuildTools/valuesetting.py @@ -2,6 +2,7 @@ logger = logging.getLogger(__name__) + class ValueSetting(object): """Holds data about how a value can be assigned to a variable. @@ -28,7 +29,9 @@ class ValueSetting(object): has_special_case """ - def __init__(self, value, do_append, conditions, set_up, tear_down): # pylint: disable=too-many-arguments + def __init__( + self, value, do_append, conditions, set_up, tear_down + ): # pylint: disable=too-many-arguments """Create a ValueSetting object by specifying all its data.""" self.value = value self.do_append = do_append diff --git a/CIME/Servers/__init__.py b/CIME/Servers/__init__.py index 8d22604875c..5cd14c0f89d 100644 --- a/CIME/Servers/__init__.py +++ b/CIME/Servers/__init__.py @@ -1,5 +1,6 @@ -#pylint: disable=import-error +# pylint: disable=import-error from distutils.spawn import find_executable + has_gftp = find_executable("globus-url-copy") has_svn = find_executable("svn") has_wget = find_executable("wget") diff --git a/CIME/Servers/ftp.py b/CIME/Servers/ftp.py index 2f8dbf158e7..329aafbeef1 100644 --- a/CIME/Servers/ftp.py +++ b/CIME/Servers/ftp.py @@ -13,30 +13,38 @@ # I think that multiple inheritence would be useful here, but I couldnt make it work # in a py2/3 compatible way. 
class FTP(GenericServer): - def __init__(self, address, user='', passwd='', server=None): + def __init__(self, address, user="", passwd="", server=None): if not user: - user = '' + user = "" if not passwd: - passwd = '' - expect(server," Must call via ftp_login function") - root_address = address.split('/', 1)[1] + passwd = "" + expect(server, " Must call via ftp_login function") + root_address = address.split("/", 1)[1] self.ftp = server self._ftp_server = address stat = self.ftp.login(user, passwd) logger.debug("login stat {}".format(stat)) if "Login successful" not in stat: - logging.warning("FAIL: Could not login to ftp server {}\n error {}".format(address, stat)) + logging.warning( + "FAIL: Could not login to ftp server {}\n error {}".format( + address, stat + ) + ) return None stat = self.ftp.cwd(root_address) - logger.debug("cwd {} stat {}".format(root_address,stat)) + logger.debug("cwd {} stat {}".format(root_address, stat)) if "Directory successfully changed" not in stat: - logging.warning("FAIL: Could not cd to server root directory {}\n error {}".format(root_address, stat)) + logging.warning( + "FAIL: Could not cd to server root directory {}\n error {}".format( + root_address, stat + ) + ) return None @classmethod - def ftp_login(cls, address, user='', passwd=''): - ftp_server, root_address = address.split('/', 1) + def ftp_login(cls, address, user="", passwd=""): + ftp_server, root_address = address.split("/", 1) logger.info("server address {} root path {}".format(ftp_server, root_address)) try: with Timeout(60): @@ -65,22 +73,29 @@ def fileexists(self, rel_path): if rel_path not in stat: if not stat or not stat[0].startswith(rel_path): - logging.warning("FAIL: File {} not found.\nerror {}".format(rel_path, stat)) + logging.warning( + "FAIL: File {} not found.\nerror {}".format(rel_path, stat) + ) return False return True def getfile(self, rel_path, full_path): try: - stat = self.ftp.retrbinary('RETR {}'.format(rel_path), open(full_path, "wb").write) + stat = self.ftp.retrbinary( + "RETR {}".format(rel_path), open(full_path, "wb").write + ) except all_ftp_errors: if os.path.isfile(full_path): os.remove(full_path) logger.warning("ERROR from ftp server, trying next server") return False - if (stat != '226 Transfer complete.'): - logging.warning("FAIL: Failed to retreve file '{}' from FTP repo '{}' stat={}\n". 
- format(rel_path, self._ftp_server, stat)) + if stat != "226 Transfer complete.": + logging.warning( + "FAIL: Failed to retrieve file '{}' from FTP repo '{}' stat={}\n".format( + rel_path, self._ftp_server, stat + ) + ) return False return True @@ -92,4 +107,4 @@ def getdirectory(self, rel_path, full_path): return False for _file in stat: - self.getfile(_file, full_path+os.sep+os.path.basename(_file)) + self.getfile(_file, full_path + os.sep + os.path.basename(_file)) diff --git a/CIME/Servers/generic_server.py b/CIME/Servers/generic_server.py index 6cebb46bba5..537df181324 100644 --- a/CIME/Servers/generic_server.py +++ b/CIME/Servers/generic_server.py @@ -7,17 +7,21 @@ from CIME.XML.standard_module_setup import * from socket import _GLOBAL_DEFAULT_TIMEOUT + logger = logging.getLogger(__name__) + class GenericServer(object): - def __init__(self, host=' ',user=' ', passwd=' ', acct=' ', timeout=_GLOBAL_DEFAULT_TIMEOUT): + def __init__( + self, host=" ", user=" ", passwd=" ", acct=" ", timeout=_GLOBAL_DEFAULT_TIMEOUT + ): raise NotImplementedError def fileexists(self, rel_path): - ''' Returns True if rel_path exists on server ''' + """Returns True if rel_path exists on server""" raise NotImplementedError def getfile(self, rel_path, full_path): - ''' Get file from rel_path on server and place in location full_path on client - fail if full_path already exists on client, return True if successful ''' + """Get file from rel_path on server and place in location full_path on client + fail if full_path already exists on client, return True if successful""" raise NotImplementedError diff --git a/CIME/Servers/gftp.py b/CIME/Servers/gftp.py index b99c9f3049f..f23943b583a 100644 --- a/CIME/Servers/gftp.py +++ b/CIME/Servers/gftp.py @@ -8,31 +8,52 @@ logger = logging.getLogger(__name__) + class GridFTP(GenericServer): - def __init__(self, address, user='', passwd=''): + def __init__(self, address, user="", passwd=""): self._root_address = address def fileexists(self, rel_path): - stat,out,err = run_cmd("globus-url-copy -list {}".format(os.path.join(self._root_address, os.path.dirname(rel_path))+os.sep)) + stat, out, err = run_cmd( + "globus-url-copy -list {}".format( + os.path.join(self._root_address, os.path.dirname(rel_path)) + os.sep + ) + ) if stat or os.path.basename(rel_path) not in out: - logging.warning("FAIL: File {} not found.\nstat={} error={}".format(rel_path, stat, err)) + logging.warning( + "FAIL: File {} not found.\nstat={} error={}".format(rel_path, stat, err) + ) return False return True def getfile(self, rel_path, full_path): - stat, _,err = run_cmd("globus-url-copy -v {} file://{}".format(os.path.join(self._root_address, rel_path), full_path)) + stat, _, err = run_cmd( + "globus-url-copy -v {} file://{}".format( + os.path.join(self._root_address, rel_path), full_path + ) + ) - if (stat != 0): - logging.warning("FAIL: GridFTP repo '{}' does not have file '{}' error={}\n".
- format(self._root_address,rel_path, err)) + if stat != 0: + logging.warning( + "FAIL: GridFTP repo '{}' does not have file '{}' error={}\n".format( + self._root_address, rel_path, err + ) + ) return False return True def getdirectory(self, rel_path, full_path): - stat, _,err = run_cmd("globus-url-copy -v -r {}{} file://{}{}".format(os.path.join(self._root_address, rel_path), os.sep, full_path, os.sep)) + stat, _, err = run_cmd( + "globus-url-copy -v -r {}{} file://{}{}".format( + os.path.join(self._root_address, rel_path), os.sep, full_path, os.sep + ) + ) - if (stat != 0): - logging.warning("FAIL: GridFTP repo '{}' does not have directory '{}' error={}\n". - format(self._root_address,rel_path, err)) + if stat != 0: + logging.warning( + "FAIL: GridFTP repo '{}' does not have directory '{}' error={}\n".format( + self._root_address, rel_path, err + ) + ) return False return True diff --git a/CIME/Servers/svn.py b/CIME/Servers/svn.py index 3b8dd18ea83..7e06ab310d4 100644 --- a/CIME/Servers/svn.py +++ b/CIME/Servers/svn.py @@ -7,9 +7,10 @@ logger = logging.getLogger(__name__) + class SVN(GenericServer): - def __init__(self, address, user='', passwd=''): - self._args = '' + def __init__(self, address, user="", passwd=""): + self._args = "" if user: self._args += "--username {}".format(user) if passwd: @@ -17,20 +18,35 @@ def __init__(self, address, user='', passwd=''): self._svn_loc = address - err = run_cmd("svn --non-interactive --trust-server-cert {} ls {}".format(self._args, address))[0] + err = run_cmd( + "svn --non-interactive --trust-server-cert {} ls {}".format( + self._args, address + ) + )[0] if err != 0: logging.warning( -""" + """ Could not connect to svn repo '{0}' This is most likely either a credential, proxy, or network issue . -To check connection and store your credential run 'svn ls {0}' and permanently store your password""".format(address)) +To check connection and store your credential run 'svn ls {0}' and permanently store your password""".format( + address + ) + ) return None def fileexists(self, rel_path): full_url = os.path.join(self._svn_loc, rel_path) - stat, out, err = run_cmd("svn --non-interactive --trust-server-cert {} ls {}".format(self._args, full_url)) - if (stat != 0): - logging.warning("FAIL: SVN repo '{}' does not have file '{}'\nReason:{}\n{}\n".format(self._svn_loc, full_url, out, err)) + stat, out, err = run_cmd( + "svn --non-interactive --trust-server-cert {} ls {}".format( + self._args, full_url + ) + ) + if stat != 0: + logging.warning( + "FAIL: SVN repo '{}' does not have file '{}'\nReason:{}\n{}\n".format( + self._svn_loc, full_url, out, err + ) + ) return False return True @@ -38,10 +54,17 @@ def getfile(self, rel_path, full_path): if not rel_path: return False full_url = os.path.join(self._svn_loc, rel_path) - stat, output, errput = \ - run_cmd("svn --non-interactive --trust-server-cert {} export {} {}".format(self._args, full_url, full_path)) - if (stat != 0): - logging.warning("svn export failed with output: {} and errput {}\n".format(output, errput)) + stat, output, errput = run_cmd( + "svn --non-interactive --trust-server-cert {} export {} {}".format( + self._args, full_url, full_path + ) + ) + if stat != 0: + logging.warning( + "svn export failed with output: {} and errput {}\n".format( + output, errput + ) + ) return False else: logging.info("SUCCESS\n") @@ -49,10 +72,17 @@ def getfile(self, rel_path, full_path): def getdirectory(self, rel_path, full_path): full_url = os.path.join(self._svn_loc, rel_path) - stat, output, errput = \ - 
run_cmd("svn --non-interactive --trust-server-cert {} export --force {} {}".format(self._args, full_url, full_path)) - if (stat != 0): - logging.warning("svn export failed with output: {} and errput {}\n".format(output, errput)) + stat, output, errput = run_cmd( + "svn --non-interactive --trust-server-cert {} export --force {} {}".format( + self._args, full_url, full_path + ) + ) + if stat != 0: + logging.warning( + "svn export failed with output: {} and errput {}\n".format( + output, errput + ) + ) return False else: logging.info("SUCCESS\n") diff --git a/CIME/Servers/wget.py b/CIME/Servers/wget.py index 410928ea90b..8ee3b66e81e 100644 --- a/CIME/Servers/wget.py +++ b/CIME/Servers/wget.py @@ -4,12 +4,13 @@ # pylint: disable=super-init-not-called from CIME.XML.standard_module_setup import * from CIME.Servers.generic_server import GenericServer + logger = logging.getLogger(__name__) class WGET(GenericServer): - def __init__(self, address, user='', passwd=''): - self._args = '--no-check-certificate ' + def __init__(self, address, user="", passwd=""): + self._args = "--no-check-certificate " if user: self._args += "--user {} ".format(user) if passwd: @@ -17,8 +18,8 @@ def __init__(self, address, user='', passwd=''): self._server_loc = address @classmethod - def wget_login(cls, address, user='', passwd=''): - args = '--no-check-certificate ' + def wget_login(cls, address, user="", passwd=""): + args = "--no-check-certificate " if user: args += "--user {} ".format(user) if passwd: @@ -27,30 +28,46 @@ def wget_login(cls, address, user='', passwd=''): try: err = run_cmd("wget {} --spider {}".format(args, address), timeout=60)[0] except: - logger.warning("Could not connect to repo '{0}'\nThis is most likely either a proxy, or network issue .".format(address)) + logger.warning( + "Could not connect to repo '{0}'\nThis is most likely either a proxy, or network issue .".format( + address + ) + ) return None if err: - logger.warning("Could not connect to repo '{0}'\nThis is most likely either a proxy, or network issue .".format(address)) + logger.warning( + "Could not connect to repo '{0}'\nThis is most likely either a proxy, or network issue .".format( + address + ) + ) return None - return cls(address, user=user, passwd=passwd) def fileexists(self, rel_path): full_url = os.path.join(self._server_loc, rel_path) stat, out, err = run_cmd("wget {} --spider {}".format(self._args, full_url)) - if (stat != 0): - logging.warning("FAIL: Repo '{}' does not have file '{}'\nReason:{}\n{}\n".format(self._server_loc, full_url, out, err)) + if stat != 0: + logging.warning( + "FAIL: Repo '{}' does not have file '{}'\nReason:{}\n{}\n".format( + self._server_loc, full_url, out, err + ) + ) return False return True def getfile(self, rel_path, full_path): full_url = os.path.join(self._server_loc, rel_path) - stat, output, errput = \ - run_cmd("wget {} {} -nc --output-document {}".format(self._args, full_url, full_path)) - if (stat != 0): - logging.warning("wget failed with output: {} and errput {}\n".format(output, errput)) + stat, output, errput = run_cmd( + "wget {} {} -nc --output-document {}".format( + self._args, full_url, full_path + ) + ) + if stat != 0: + logging.warning( + "wget failed with output: {} and errput {}\n".format(output, errput) + ) # wget puts an empty file if it fails. 
try: os.remove(full_path) @@ -63,12 +80,16 @@ def getfile(self, rel_path, full_path): def getdirectory(self, rel_path, full_path): full_url = os.path.join(self._server_loc, rel_path) - stat, output, errput = \ - run_cmd("wget {} {} -r -N --no-directories ".format(self._args, full_url+os.sep), from_dir=full_path) + stat, output, errput = run_cmd( + "wget {} {} -r -N --no-directories ".format(self._args, full_url + os.sep), + from_dir=full_path, + ) logger.debug(output) logger.debug(errput) - if (stat != 0): - logging.warning("wget failed with output: {} and errput {}\n".format(output, errput)) + if stat != 0: + logging.warning( + "wget failed with output: {} and errput {}\n".format(output, errput) + ) # wget puts an empty file if it fails. try: os.remove(full_path) diff --git a/CIME/SystemTests/dae.py b/CIME/SystemTests/dae.py index bc4a9a24e5b..f9bdd4d8e01 100644 --- a/CIME/SystemTests/dae.py +++ b/CIME/SystemTests/dae.py @@ -17,7 +17,7 @@ ############################################################################### class DAE(SystemTestsCompareTwo): -############################################################################### + ############################################################################### """ Implementation of the CIME data assimilation test: Compares standard run with a run broken into two data assimilation cycles. @@ -29,16 +29,19 @@ class DAE(SystemTestsCompareTwo): ########################################################################### def __init__(self, case): - ########################################################################### - SystemTestsCompareTwo.__init__(self, case, - separate_builds=False, - run_two_suffix='da', - run_one_description='no data assimilation', - run_two_description='data assimilation') + ########################################################################### + SystemTestsCompareTwo.__init__( + self, + case, + separate_builds=False, + run_two_suffix="da", + run_one_description="no data assimilation", + run_two_description="data assimilation", + ) ########################################################################### def _case_one_setup(self): - ########################################################################### + ########################################################################### # Even though there may be test mods turning on data assimilation, # case1 is the control so turn it off self._case.set_value("DATA_ASSIMILATION_SCRIPT", "") @@ -46,15 +49,21 @@ def _case_one_setup(self): ########################################################################### def _case_two_setup(self): - ########################################################################### + ########################################################################### # Allow testmods to set an assimilation script if len(self._case.get_value("DATA_ASSIMILATION_SCRIPT")) == 0: # We need to find the scripts/data_assimilation directory # LIB_DIR should be our parent dir da_dir = os.path.join(os.path.dirname(sms.LIB_DIR), "data_assimilation") - expect(os.path.isdir(da_dir), "ERROR: da_dir, '{}', does not exist".format(da_dir)) + expect( + os.path.isdir(da_dir), + "ERROR: da_dir, '{}', does not exist".format(da_dir), + ) da_file = os.path.join(da_dir, "da_no_data_mod.sh") - expect(os.path.isfile(da_file), "ERROR: da_file, '{}', does not exist".format(da_file)) + expect( + os.path.isfile(da_file), + "ERROR: da_file, '{}', does not exist".format(da_file), + ) # Set up two data assimilation cycles each half of the full run 
self._case.set_value("DATA_ASSIMILATION_SCRIPT", da_file) @@ -64,20 +73,25 @@ def _case_two_setup(self): da_cycles = 2 self._case.set_value("DATA_ASSIMILATION_CYCLES", da_cycles) stopn = self._case.get_value("STOP_N") - expect((stopn % da_cycles) == 0, "ERROR: DAE test with {0} cycles requires that STOP_N be divisible by {0}".format(da_cycles)) + expect( + (stopn % da_cycles) == 0, + "ERROR: DAE test with {0} cycles requires that STOP_N be divisible by {0}".format( + da_cycles + ), + ) stopn = int(stopn / da_cycles) self._case.set_value("STOP_N", stopn) self._case.flush() ########################################################################### - def run_phase(self): # pylint: disable=arguments-differ - ########################################################################### + def run_phase(self): # pylint: disable=arguments-differ + ########################################################################### # Clean up any da.log files in case this is a re-run. self._activate_case2() case_root = self._get_caseroot2() rundir2 = self._case.get_value("RUNDIR") - da_files = glob.glob(os.path.join(rundir2, 'da.log.*')) + da_files = glob.glob(os.path.join(rundir2, "da.log.*")) for file_ in da_files: os.remove(file_) # End for @@ -98,20 +112,24 @@ def run_phase(self): # pylint: disable=arguments-differ # Do some checks on the data assimilation 'output' from case2 self._activate_case2() - da_files = glob.glob(os.path.join(rundir2, 'da.log.*')) + da_files = glob.glob(os.path.join(rundir2, "da.log.*")) if da_files is None: logger = logging.getLogger(__name__) - path = os.path.join(case_root, 'da.log.*') + path = os.path.join(case_root, "da.log.*") logger.warning("No DA files in {}".format(path)) da_cycles = self._case.get_value("DATA_ASSIMILATION_CYCLES") - expect((da_files is not None) and (len(da_files) == da_cycles), - "ERROR: There were {:d} DA cycles in run but {:d} DA files were found".format(da_cycles, len(da_files) if da_files is not None else 0)) + expect( + (da_files is not None) and (len(da_files) == da_cycles), + "ERROR: There were {:d} DA cycles in run but {:d} DA files were found".format( + da_cycles, len(da_files) if da_files is not None else 0 + ), + ) da_files.sort() cycle_num = 0 compset = self._case.get_value("COMPSET") # Special case for DWAV so we can make sure other variables are set - is_dwav = '_DWAV' in compset + is_dwav = "_DWAV" in compset for fname in da_files: found_caseroot = False found_cycle = False @@ -126,7 +144,9 @@ def run_phase(self): # pylint: disable=arguments-differ if comp == "ESP": continue elif self._case.get_value("DATA_ASSIMILATION_{}".format(comp)): - expected_init = expected_init + self._case.get_value("NINST_{}".format(comp)) + expected_init = expected_init + self._case.get_value( + "NINST_{}".format(comp) + ) # Adjust expected initial run and post-DA numbers if cycle_num == 0: @@ -138,31 +158,55 @@ def run_phase(self): # pylint: disable=arguments-differ with gzip.open(fname, "r") as dfile: for bline in dfile: line = bline.decode("utf-8") - expect(not 'ERROR' in line, "ERROR, error line {} found in {}".format(line, fname)) - if 'caseroot' in line[0:8]: + expect( + not "ERROR" in line, + "ERROR, error line {} found in {}".format(line, fname), + ) + if "caseroot" in line[0:8]: found_caseroot = True - elif 'cycle' in line[0:5]: + elif "cycle" in line[0:5]: found_cycle = True - expect(int(line[7:]) == cycle_num, - "ERROR: Wrong cycle ({:d}) found in {} (expected {:d})".format(int(line[7:]), fname, cycle_num)) - elif 'resume signal' in line: + expect( 
+ int(line[7:]) == cycle_num, + "ERROR: Wrong cycle ({:d}) found in {} (expected {:d})".format( + int(line[7:]), fname, cycle_num + ), + ) + elif "resume signal" in line: found_signal = found_signal + 1 - expect('Post-DA resume signal found' in line[0:27], - "ERROR: bad post-DA message found in {}".format(fname)) - elif 'Initial run' in line: + expect( + "Post-DA resume signal found" in line[0:27], + "ERROR: bad post-DA message found in {}".format(fname), + ) + elif "Initial run" in line: found_init = found_init + 1 - expect('Initial run signal found' in line[0:24], - "ERROR: bad Initial run message found in {}".format(fname)) + expect( + "Initial run signal found" in line[0:24], + "ERROR: bad Initial run message found in {}".format(fname), + ) else: - expect(False, "ERROR: Unrecognized line ('{}') found in {}".format(line, fname)) + expect( + False, + "ERROR: Unrecognized line ('{}') found in {}".format( + line, fname + ), + ) # End for expect(found_caseroot, "ERROR: No caseroot found in {}".format(fname)) expect(found_cycle, "ERROR: No cycle found in {}".format(fname)) - expect(found_signal == expected_signal, - "ERROR: Expected {} post-DA resume signal message(s), {} found in {}".format(expected_signal, found_signal, fname)) - expect(found_init == expected_init, - "ERROR: Expected {} Initial run message(s), {} found in {}".format(expected_init, found_init, fname)) + expect( + found_signal == expected_signal, + "ERROR: Expected {} post-DA resume signal message(s), {} found in {}".format( + expected_signal, found_signal, fname + ), + ) + expect( + found_init == expected_init, + "ERROR: Expected {} Initial run message(s), {} found in {}".format( + expected_init, found_init, fname + ), + ) # End with cycle_num = cycle_num + 1 # End for diff --git a/CIME/SystemTests/eri.py b/CIME/SystemTests/eri.py index 4fe818620e2..0bcf4466646 100644 --- a/CIME/SystemTests/eri.py +++ b/CIME/SystemTests/eri.py @@ -9,13 +9,15 @@ logger = logging.getLogger(__name__) + def _get_rest_date(archive_root): - restdir = os.path.join(archive_root,"rest") + restdir = os.path.join(archive_root, "rest") # get all entries in the directory w/ stats entries = (os.path.join(restdir, fn) for fn in os.listdir(restdir)) entries = ((os.stat(path), path) for path in entries) - entries = sorted((stat[ST_CTIME], path) - for stat, path in entries if S_ISDIR(stat[ST_MODE])) + entries = sorted( + (stat[ST_CTIME], path) for stat, path in entries if S_ISDIR(stat[ST_MODE]) + ) last_dir = os.path.basename(entries[-1][1]) ref_sec = last_dir[-5:] ref_date = last_dir[:10] @@ -34,8 +36,8 @@ def _helper(dout_sr, refdate, refsec, rundir): for item in glob.glob("{}/*rpointer*".format(rest_path)): safe_copy(item, rundir) -class ERI(SystemTestsCommon): +class ERI(SystemTestsCommon): def __init__(self, case): """ initialize an object interface to the ERI system test @@ -47,7 +49,7 @@ def run_phase(self): caseroot = self._case.get_value("CASEROOT") clone1_path = "{}.ref1".format(caseroot) clone2_path = "{}.ref2".format(caseroot) - #self._case.set_value("CHECK_TIMING", False) + # self._case.set_value("CHECK_TIMING", False) # # clone the main case to create ref1 and ref2 cases @@ -56,7 +58,10 @@ def run_phase(self): if os.path.exists(clone_path): shutil.rmtree(clone_path) - clone1, clone2 = [self._case.create_clone(clone_path, keepexe=True) for clone_path in [clone1_path, clone2_path]] + clone1, clone2 = [ + self._case.create_clone(clone_path, keepexe=True) + for clone_path in [clone1_path, clone2_path] + ] orig_case = self._case orig_casevar = 
orig_case.get_value("CASE") # @@ -67,7 +72,7 @@ def run_phase(self): run_startdate = self._case.get_value("RUN_STARTDATE") start_tod = self._case.get_value("START_TOD") if start_tod == 0: - start_tod="00000" + start_tod = "00000" stop_n1 = int(stop_n / 6) rest_n1 = stop_n1 @@ -75,11 +80,15 @@ def run_phase(self): stop_n2 = stop_n - stop_n1 rest_n2 = int(stop_n2 / 2 + 1) - hist_n = stop_n2 + hist_n = stop_n2 - start_1_year, start_1_month, start_1_day = [int(item) for item in start_1.split("-")] + start_1_year, start_1_month, start_1_day = [ + int(item) for item in start_1.split("-") + ] start_2_year = start_1_year + 2 - start_2 = "{:04d}-{:02d}-{:02d}".format(start_2_year, start_1_month, start_1_day) + start_2 = "{:04d}-{:02d}-{:02d}".format( + start_2_year, start_1_month, start_1_day + ) stop_n3 = stop_n2 - rest_n2 rest_n3 = int(stop_n3 / 2 + 1) @@ -98,7 +107,11 @@ def run_phase(self): os.chdir(clone1_path) self._set_active_case(clone1) - logger.info("ref1 startup: doing a {} {} startup run from {} and {} seconds".format(stop_n1, stop_option, start_1, start_tod)) + logger.info( + "ref1 startup: doing a {} {} startup run from {} and {} seconds".format( + stop_n1, stop_option, start_1, start_tod + ) + ) logger.info(" writing restarts at {} {}".format(rest_n1, stop_option)) logger.info(" short term archiving is on ") @@ -141,25 +154,31 @@ def run_phase(self): # Set startdate to start2, set ref date based on ref1 restart refdate_2, refsec_2 = _get_rest_date(dout_sr1) - logger.info("ref2 hybrid: doing a {} {} startup hybrid run".format(stop_n2, stop_option)) - logger.info(" starting from {} and using ref1 {} and {} seconds".format(start_2, refdate_2, refsec_2)) + logger.info( + "ref2 hybrid: doing a {} {} startup hybrid run".format(stop_n2, stop_option) + ) + logger.info( + " starting from {} and using ref1 {} and {} seconds".format( + start_2, refdate_2, refsec_2 + ) + ) logger.info(" writing restarts at {} {}".format(rest_n2, stop_option)) logger.info(" short term archiving is on ") # setup ref2 case with clone2: - clone2.set_value("RUN_TYPE", "hybrid") + clone2.set_value("RUN_TYPE", "hybrid") clone2.set_value("RUN_STARTDATE", start_2) - clone2.set_value("RUN_REFCASE", "{}.ref1".format(orig_casevar)) - clone2.set_value("RUN_REFDATE", refdate_2) - clone2.set_value("RUN_REFTOD", refsec_2) - clone2.set_value("GET_REFCASE", False) - clone2.set_value("CONTINUE_RUN", False) - clone2.set_value("STOP_N", stop_n2) - clone2.set_value("REST_OPTION", stop_option) - clone2.set_value("REST_N", rest_n2) - clone2.set_value("HIST_OPTION", stop_option) - clone2.set_value("HIST_N", hist_n) + clone2.set_value("RUN_REFCASE", "{}.ref1".format(orig_casevar)) + clone2.set_value("RUN_REFDATE", refdate_2) + clone2.set_value("RUN_REFTOD", refsec_2) + clone2.set_value("GET_REFCASE", False) + clone2.set_value("CONTINUE_RUN", False) + clone2.set_value("STOP_N", stop_n2) + clone2.set_value("REST_OPTION", stop_option) + clone2.set_value("REST_N", rest_n2) + clone2.set_value("HIST_OPTION", stop_option) + clone2.set_value("HIST_N", hist_n) rundir2 = clone2.get_value("RUNDIR") dout_sr2 = clone2.get_value("DOUT_S_ROOT") @@ -182,24 +201,29 @@ def run_phase(self): self._set_active_case(orig_case) refdate_3, refsec_3 = _get_rest_date(dout_sr2) - logger.info("branch: doing a {} {} branch".format(stop_n3, stop_option)) - logger.info(" starting from ref2 {} and {} seconds restarts".format(refdate_3, refsec_3)) + logger.info( + " starting from ref2 {} and {} seconds restarts".format( + refdate_3, refsec_3 + ) + ) logger.info(" writing 
restarts at {} {}".format(rest_n3, stop_option)) logger.info(" short term archiving is off") - self._case.set_value("RUN_TYPE" , "branch") - self._case.set_value("RUN_REFCASE" , "{}.ref2".format(self._case.get_value("CASE"))) - self._case.set_value("RUN_REFDATE" , refdate_3) - self._case.set_value("RUN_REFTOD" , refsec_3) - self._case.set_value("GET_REFCASE" , False) - self._case.set_value("CONTINUE_RUN" , False) - self._case.set_value("STOP_N" , stop_n3) - self._case.set_value("REST_OPTION" , stop_option) - self._case.set_value("REST_N" , rest_n3) - self._case.set_value("HIST_OPTION" , stop_option) - self._case.set_value("HIST_N" , stop_n2) - self._case.set_value("DOUT_S" , False) + self._case.set_value("RUN_TYPE", "branch") + self._case.set_value( + "RUN_REFCASE", "{}.ref2".format(self._case.get_value("CASE")) + ) + self._case.set_value("RUN_REFDATE", refdate_3) + self._case.set_value("RUN_REFTOD", refsec_3) + self._case.set_value("GET_REFCASE", False) + self._case.set_value("CONTINUE_RUN", False) + self._case.set_value("STOP_N", stop_n3) + self._case.set_value("REST_OPTION", stop_option) + self._case.set_value("REST_N", rest_n3) + self._case.set_value("HIST_OPTION", stop_option) + self._case.set_value("HIST_N", stop_n2) + self._case.set_value("DOUT_S", False) self._case.flush() rundir = self._case.get_value("RUNDIR") @@ -209,7 +233,7 @@ def run_phase(self): _helper(dout_sr2, refdate_3, refsec_3, rundir) # link the hybrid history files from ref2 to the run dir for comparison - for item in glob.iglob("%s/*.hybrid"%rundir2): + for item in glob.iglob("%s/*.hybrid" % rundir2): newfile = "{}".format(item.replace(".ref2", "")) newfile = os.path.basename(newfile) dst = os.path.join(rundir, newfile) @@ -226,14 +250,18 @@ def run_phase(self): # do a restart continue from (3a) (short term archiving off) # - logger.info("branch restart: doing a {} {} continue restart test".format(stop_n4, stop_option)) - - self._case.set_value("CONTINUE_RUN", True) - self._case.set_value("STOP_N", stop_n4) - self._case.set_value("REST_OPTION", "never") - self._case.set_value("DOUT_S", False) - self._case.set_value("HIST_OPTION", stop_option) - self._case.set_value("HIST_N", hist_n) + logger.info( + "branch restart: doing a {} {} continue restart test".format( + stop_n4, stop_option + ) + ) + + self._case.set_value("CONTINUE_RUN", True) + self._case.set_value("STOP_N", stop_n4) + self._case.set_value("REST_OPTION", "never") + self._case.set_value("DOUT_S", False) + self._case.set_value("HIST_OPTION", stop_option) + self._case.set_value("HIST_N", hist_n) self._case.flush() # do the restart run (short term archiving is off) diff --git a/CIME/SystemTests/erio.py b/CIME/SystemTests/erio.py index 25306b7e4fe..f9de01a8b27 100644 --- a/CIME/SystemTests/erio.py +++ b/CIME/SystemTests/erio.py @@ -8,8 +8,8 @@ logger = logging.getLogger(__name__) -class ERIO(SystemTestsCommon): +class ERIO(SystemTestsCommon): def __init__(self, case): """ initialize an object interface to file env_test.xml in the case directory @@ -24,7 +24,7 @@ def _full_run(self, pio_type): expect(self._stop_n > 0, "Bad STOP_N: {:d}".format(self._stop_n)) # Move to config_tests.xml once that's ready - rest_n = int(self._stop_n/2) + 1 + rest_n = int(self._stop_n / 2) + 1 self._case.set_value("REST_N", rest_n) self._case.set_value("REST_OPTION", stop_option) self._case.set_value("HIST_N", self._stop_n) @@ -32,22 +32,37 @@ def _full_run(self, pio_type): self._case.set_value("CONTINUE_RUN", False) self._case.flush() - expect(self._stop_n > 2, "ERROR: stop_n 
value {:d} too short".format(self._stop_n)) - logger.info("doing an {0} {1} initial test with restart file at {2} {1} with pio type {3}".format(str(self._stop_n), stop_option, str(rest_n), pio_type)) + expect( + self._stop_n > 2, "ERROR: stop_n value {:d} too short".format(self._stop_n) + ) + logger.info( + "doing an {0} {1} initial test with restart file at {2} {1} with pio type {3}".format( + str(self._stop_n), stop_option, str(rest_n), pio_type + ) + ) self.run_indv(suffix=pio_type) def _restart_run(self, pio_type, other_pio_type): stop_option = self._case.get_value("STOP_OPTION") - rest_n = int(self._stop_n/2) + 1 + rest_n = int(self._stop_n / 2) + 1 stop_new = self._stop_n - rest_n - expect(stop_new > 0, "ERROR: stop_n value {:d} too short {:d} {:d}".format(stop_new,self._stop_n,rest_n)) + expect( + stop_new > 0, + "ERROR: stop_n value {:d} too short {:d} {:d}".format( + stop_new, self._stop_n, rest_n + ), + ) self._case.set_value("STOP_N", stop_new) self._case.set_value("CONTINUE_RUN", True) - self._case.set_value("REST_OPTION","never") + self._case.set_value("REST_OPTION", "never") self._case.flush() - logger.info("doing an {} {} restart test with {} against {}".format(str(stop_new), stop_option, pio_type, other_pio_type)) + logger.info( + "doing an {} {} restart test with {} against {}".format( + str(stop_new), stop_option, pio_type, other_pio_type + ) + ) suffix = "{}.{}".format(other_pio_type, pio_type) self.run_indv(suffix=suffix) @@ -61,7 +76,7 @@ def run_phase(self): if pio_type1 != "default" and pio_type1 != "nothing": self._case.set_value("PIO_TYPENAME", pio_type1) self._full_run(pio_type1) - for pio_type2 in self._pio_types[idx+1:]: + for pio_type2 in self._pio_types[idx + 1 :]: if pio_type2 != "default" and pio_type2 != "nothing": self._case.set_value("PIO_TYPENAME", pio_type2) self._restart_run(pio_type2, pio_type1) diff --git a/CIME/SystemTests/erp.py b/CIME/SystemTests/erp.py index f80c82ce7ff..8f347fe6eee 100644 --- a/CIME/SystemTests/erp.py +++ b/CIME/SystemTests/erp.py @@ -13,29 +13,32 @@ logger = logging.getLogger(__name__) -class ERP(RestartTest): +class ERP(RestartTest): def __init__(self, case): """ initialize a test object """ - RestartTest.__init__(self, case, - separate_builds = True, - run_two_suffix = 'rest', - run_one_description = 'initial', - run_two_description = 'restart') + RestartTest.__init__( + self, + case, + separate_builds=True, + run_two_suffix="rest", + run_one_description="initial", + run_two_description="restart", + ) def _case_two_setup(self): # halve the number of tasks and threads for comp in self._case.get_values("COMP_CLASSES"): - ntasks = self._case1.get_value("NTASKS_{}".format(comp)) - nthreads = self._case1.get_value("NTHRDS_{}".format(comp)) - rootpe = self._case1.get_value("ROOTPE_{}".format(comp)) - if ( nthreads > 1 ): - self._case.set_value("NTHRDS_{}".format(comp), int(nthreads/2)) - if ( ntasks > 1 ): - self._case.set_value("NTASKS_{}".format(comp), int(ntasks/2)) - self._case.set_value("ROOTPE_{}".format(comp), int(rootpe/2)) + ntasks = self._case1.get_value("NTASKS_{}".format(comp)) + nthreads = self._case1.get_value("NTHRDS_{}".format(comp)) + rootpe = self._case1.get_value("ROOTPE_{}".format(comp)) + if nthreads > 1: + self._case.set_value("NTHRDS_{}".format(comp), int(nthreads / 2)) + if ntasks > 1: + self._case.set_value("NTASKS_{}".format(comp), int(ntasks / 2)) + self._case.set_value("ROOTPE_{}".format(comp), int(rootpe / 2)) RestartTest._case_two_setup(self) self._case.case_setup(test_mode=True, reset=True) diff --git 
a/CIME/SystemTests/err.py b/CIME/SystemTests/err.py index 9c0cd885af7..4dd79a85aae 100644 --- a/CIME/SystemTests/err.py +++ b/CIME/SystemTests/err.py @@ -9,17 +9,20 @@ logger = logging.getLogger(__name__) + class ERR(RestartTest): - def __init__(self, case): # pylint: disable=super-init-not-called + def __init__(self, case): # pylint: disable=super-init-not-called """ initialize an object interface to the ERR system test """ - super(ERR, self).__init__(case, - separate_builds = False, - run_two_suffix = 'rest', - run_one_description = 'initial', - run_two_description = 'restart', - multisubmit = True) + super(ERR, self).__init__( + case, + separate_builds=False, + run_two_suffix="rest", + run_one_description="initial", + run_two_description="restart", + multisubmit=True, + ) def _case_one_setup(self): super(ERR, self)._case_one_setup() @@ -31,19 +34,23 @@ def _case_two_setup(self): def _case_two_custom_prerun_action(self): dout_s_root = self._case1.get_value("DOUT_S_ROOT") - rest_root = os.path.abspath(os.path.join(dout_s_root,"rest")) + rest_root = os.path.abspath(os.path.join(dout_s_root, "rest")) restart_list = ls_sorted_by_mtime(rest_root) expect(len(restart_list) >= 1, "No restart files found in {}".format(rest_root)) - self._case.restore_from_archive(rest_dir= - os.path.join(rest_root, restart_list[0])) + self._case.restore_from_archive( + rest_dir=os.path.join(rest_root, restart_list[0]) + ) def _case_two_custom_postrun_action(self): # Link back to original case1 name # This is needed so that the necessary files are present for # baseline comparison and generation, # since some of them may have been moved to the archive directory - for case_file in glob.iglob(os.path.join(self._case1.get_value("RUNDIR"), - "*.nc.{}".format(self._run_one_suffix))): - orig_file = case_file[:-(1+len(self._run_one_suffix))] + for case_file in glob.iglob( + os.path.join( + self._case1.get_value("RUNDIR"), "*.nc.{}".format(self._run_one_suffix) + ) + ): + orig_file = case_file[: -(1 + len(self._run_one_suffix))] if not os.path.isfile(orig_file): safe_copy(case_file, orig_file) diff --git a/CIME/SystemTests/erri.py b/CIME/SystemTests/erri.py index 6fd79b3e1fa..8cec2b149ce 100644 --- a/CIME/SystemTests/erri.py +++ b/CIME/SystemTests/erri.py @@ -10,8 +10,8 @@ logger = logging.getLogger(__name__) -class ERRI(ERR): +class ERRI(ERR): def __init__(self, case): """ initialize an object interface to the ERU system test @@ -20,10 +20,10 @@ def __init__(self, case): def _case_two_custom_postrun_action(self): rundir = self._case.get_value("RUNDIR") - for logname_gz in glob.glob(os.path.join(rundir, '*.log*.gz')): + for logname_gz in glob.glob(os.path.join(rundir, "*.log*.gz")): # gzipped logfile names are of the form $LOGNAME.gz # Removing the last three characters restores the original name logname = logname_gz[:-3] - with gzip.open(logname_gz, 'rb') as f_in, open(logname, 'w') as f_out: + with gzip.open(logname_gz, "rb") as f_in, open(logname, "w") as f_out: shutil.copyfileobj(f_in, f_out) os.remove(logname_gz) diff --git a/CIME/SystemTests/ers.py b/CIME/SystemTests/ers.py index 17dc50c77c3..a5c393af95c 100644 --- a/CIME/SystemTests/ers.py +++ b/CIME/SystemTests/ers.py @@ -6,8 +6,8 @@ logger = logging.getLogger(__name__) -class ERS(SystemTestsCommon): +class ERS(SystemTestsCommon): def __init__(self, case): """ initialize an object interface to the ERS system test @@ -15,30 +15,39 @@ def __init__(self, case): SystemTestsCommon.__init__(self, case) def _ers_first_phase(self): - stop_n = 
self._case.get_value("STOP_N") + stop_n = self._case.get_value("STOP_N") stop_option = self._case.get_value("STOP_OPTION") - rest_n = self._case.get_value("REST_N") + rest_n = self._case.get_value("REST_N") expect(stop_n > 0, "Bad STOP_N: {:d}".format(stop_n)) expect(stop_n > 2, "ERROR: stop_n value {:d} too short".format(stop_n)) - logger.info("doing an {0} {1} initial test with restart file at {2} {1}".format(str(stop_n), stop_option, str(rest_n))) + logger.info( + "doing an {0} {1} initial test with restart file at {2} {1}".format( + str(stop_n), stop_option, str(rest_n) + ) + ) self.run_indv() def _ers_second_phase(self): - stop_n = self._case.get_value("STOP_N") + stop_n = self._case.get_value("STOP_N") stop_option = self._case.get_value("STOP_OPTION") - rest_n = int(stop_n/2 + 1) + rest_n = int(stop_n / 2 + 1) stop_new = stop_n - rest_n - expect(stop_new > 0, "ERROR: stop_n value {:d} too short {:d} {:d}".format(stop_new,stop_n,rest_n)) + expect( + stop_new > 0, + "ERROR: stop_n value {:d} too short {:d} {:d}".format( + stop_new, stop_n, rest_n + ), + ) self._case.set_value("HIST_N", stop_n) self._case.set_value("STOP_N", stop_new) self._case.set_value("CONTINUE_RUN", True) - self._case.set_value("REST_OPTION","never") + self._case.set_value("REST_OPTION", "never") self._case.flush() logger.info("doing an {} {} restart test".format(str(stop_new), stop_option)) - self._skip_pnl=False + self._skip_pnl = False self.run_indv(suffix="rest") # Compare restart file diff --git a/CIME/SystemTests/ers2.py b/CIME/SystemTests/ers2.py index fbfc185f98f..e65f703e36e 100644 --- a/CIME/SystemTests/ers2.py +++ b/CIME/SystemTests/ers2.py @@ -6,8 +6,8 @@ logger = logging.getLogger(__name__) -class ERS2(SystemTestsCommon): +class ERS2(SystemTestsCommon): def __init__(self, case): """ initialize an object interface to the ERS2 system test @@ -15,38 +15,46 @@ def __init__(self, case): SystemTestsCommon.__init__(self, case) def _ers2_first_phase(self): - stop_n = self._case.get_value("STOP_N") + stop_n = self._case.get_value("STOP_N") stop_option = self._case.get_value("STOP_OPTION") - rest_n = self._case.get_value("REST_N") + rest_n = self._case.get_value("REST_N") # Don't need restarts for first run - self._case.set_value("REST_OPTION","never") + self._case.set_value("REST_OPTION", "never") expect(stop_n > 0, "Bad STOP_N: {:d}".format(stop_n)) expect(stop_n > 2, "ERROR: stop_n value {:d} too short".format(stop_n)) - logger.info("doing an {0} {1} initial test with restart file at {2} {1}".format(str(stop_n), stop_option, str(rest_n))) + logger.info( + "doing an {0} {1} initial test with restart file at {2} {1}".format( + str(stop_n), stop_option, str(rest_n) + ) + ) self.run_indv() def _ers2_second_phase(self): - stop_n = self._case.get_value("STOP_N") + stop_n = self._case.get_value("STOP_N") stop_option = self._case.get_value("STOP_OPTION") - rest_n = stop_n/2 + 1 + rest_n = int(stop_n / 2 + 1) stop_new = rest_n - self._case.set_value("REST_OPTION",stop_option) + self._case.set_value("REST_OPTION", stop_option) self._case.set_value("STOP_N", stop_new) self._case.flush() - logger.info("doing first part {} {} restart test".format(str(stop_new), stop_option)) + logger.info( + "doing first part {} {} restart test".format(str(stop_new), stop_option) + ) self.run_indv(suffix="intermediate") - stop_new = stop_n - rest_n + stop_new = int(stop_n - rest_n) self._case.set_value("STOP_N", stop_new) self._case.set_value("CONTINUE_RUN", True) - self._case.set_value("REST_OPTION","never") + 
self._case.set_value("REST_OPTION", "never") - logger.info("doing second part {} {} restart test".format(str(stop_new), stop_option)) + logger.info( + "doing second part {} {} restart test".format(str(stop_new), stop_option) + ) self.run_indv(suffix="rest") # Compare restart file diff --git a/CIME/SystemTests/ert.py b/CIME/SystemTests/ert.py index 18664c58cc7..36366395190 100644 --- a/CIME/SystemTests/ert.py +++ b/CIME/SystemTests/ert.py @@ -8,8 +8,8 @@ logger = logging.getLogger(__name__) -class ERT(SystemTestsCommon): +class ERT(SystemTestsCommon): def __init__(self, case): """ initialize an object interface to the ERT system test @@ -36,7 +36,7 @@ def _ert_second_phase(self): self._case.set_value("STOP_N", 1) self._case.set_value("CONTINUE_RUN", True) - self._case.set_value("REST_OPTION","never") + self._case.set_value("REST_OPTION", "never") self._case.flush() logger.info("doing an 1 month restart test with no restart files") diff --git a/CIME/SystemTests/funit.py b/CIME/SystemTests/funit.py index f1c084eba06..ab075957aa6 100644 --- a/CIME/SystemTests/funit.py +++ b/CIME/SystemTests/funit.py @@ -10,8 +10,8 @@ logger = logging.getLogger(__name__) -class FUNIT(SystemTestsCommon): +class FUNIT(SystemTestsCommon): def __init__(self, case): """ initialize an object interface to the FUNIT system test @@ -21,7 +21,7 @@ def __init__(self, case): def build_phase(self, sharedlib_only=False, model_only=False): if not sharedlib_only: - exeroot = self._case.get_value("EXEROOT") + exeroot = self._case.get_value("EXEROOT") logfile = os.path.join(exeroot, "funit.bldlog") with open(logfile, "w") as fd: fd.write("No-op\n") @@ -36,18 +36,34 @@ def get_test_spec_dir(self): def run_phase(self): - rundir = self._case.get_value("RUNDIR") - exeroot = self._case.get_value("EXEROOT") - mach = self._case.get_value("MACH") + rundir = self._case.get_value("RUNDIR") + exeroot = self._case.get_value("EXEROOT") + mach = self._case.get_value("MACH") log = os.path.join(rundir, "funit.log") if os.path.exists(log): os.remove(log) test_spec_dir = self.get_test_spec_dir() - unit_test_tool = os.path.abspath(os.path.join(get_cime_root(),"scripts","fortran_unit_testing","run_tests.py")) - args = "--build-dir {} --test-spec-dir {} --machine {}".format(exeroot, test_spec_dir, mach) - stat = run_cmd("{} {} >& funit.log".format(unit_test_tool, args), from_dir=rundir)[0] + unit_test_tool = os.path.abspath( + os.path.join( + get_cime_root(), "scripts", "fortran_unit_testing", "run_tests.py" + ) + ) + args = "--build-dir {} --test-spec-dir {} --machine {}".format( + exeroot, test_spec_dir, mach + ) + + # BUG(wjs, 2022-01-07, ESMCI/CIME#4136) For now, these Fortran unit tests only + # work with the old config_compilers.xml-based configuration + my_env = os.environ.copy() + my_env["CIME_NO_CMAKE_MACRO"] = "ON" + + stat = run_cmd( + "{} {} >& funit.log".format(unit_test_tool, args), + from_dir=rundir, + env=my_env, + )[0] append_testlog(open(os.path.join(rundir, "funit.log"), "r").read()) diff --git a/CIME/SystemTests/homme.py b/CIME/SystemTests/homme.py index f52039b74b7..6161c2e46be 100644 --- a/CIME/SystemTests/homme.py +++ b/CIME/SystemTests/homme.py @@ -1,7 +1,7 @@ from CIME.SystemTests.hommebaseclass import HommeBase -class HOMME(HommeBase): - def __init__(self,case): - HommeBase.__init__(self,case) - self.cmakesuffix='' +class HOMME(HommeBase): + def __init__(self, case): + HommeBase.__init__(self, case) + self.cmakesuffix = "" diff --git a/CIME/SystemTests/hommebaseclass.py b/CIME/SystemTests/hommebaseclass.py index 
77a0660a305..5c29fce7533 100644 --- a/CIME/SystemTests/hommebaseclass.py +++ b/CIME/SystemTests/hommebaseclass.py @@ -12,30 +12,30 @@ logger = logging.getLogger(__name__) -class HommeBase(SystemTestsCommon): +class HommeBase(SystemTestsCommon): def __init__(self, case): """ initialize an object interface to the SMS system test """ SystemTestsCommon.__init__(self, case) case.load_env() - self.csnd = 'not defined' + self.csnd = "not defined" self.cmakesuffix = self.csnd def build_phase(self, sharedlib_only=False, model_only=False): if not sharedlib_only: # Build HOMME - srcroot = self._case.get_value("SRCROOT") - mach = self._case.get_value("MACH") - procs = self._case.get_value("TOTALPES") - exeroot = self._case.get_value("EXEROOT") + srcroot = self._case.get_value("SRCROOT") + mach = self._case.get_value("MACH") + procs = self._case.get_value("TOTALPES") + exeroot = self._case.get_value("EXEROOT") baseline = self._case.get_value("BASELINE_ROOT") - basecmp = self._case.get_value("BASECMP_CASE") - compare = self._case.get_value("COMPARE_BASELINE") - gmake = self._case.get_value("GMAKE") - gmake_j = self._case.get_value("GMAKE_J") - cprnc = self._case.get_value("CCSM_CPRNC") + basecmp = self._case.get_value("BASECMP_CASE") + compare = self._case.get_value("COMPARE_BASELINE") + gmake = self._case.get_value("GMAKE") + gmake_j = self._case.get_value("GMAKE_J") + cprnc = self._case.get_value("CCSM_CPRNC") if compare: basename = basecmp @@ -44,25 +44,41 @@ def build_phase(self, sharedlib_only=False, model_only=False): basename = "" baselinedir = exeroot - expect(self.cmakesuffix != self.csnd, - "ERROR in hommebaseclass: Must have cmakesuffix set up") - - cmake_cmd = "cmake -C {0}/components/homme/cmake/machineFiles/{1}{6}.cmake -DUSE_NUM_PROCS={2} {0}/components/homme -DHOMME_BASELINE_DIR={3}/{4} -DCPRNC_DIR={5}/..".format(srcroot, mach, procs, baselinedir, basename, cprnc, self.cmakesuffix) - - run_cmd_no_fail(cmake_cmd, arg_stdout="homme.bldlog", combine_output=True, from_dir=exeroot) - run_cmd_no_fail("{} -j{} VERBOSE=1 test-execs".format(gmake, gmake_j), arg_stdout="homme.bldlog", combine_output=True, from_dir=exeroot) - - post_build(self._case, [os.path.join(exeroot, "homme.bldlog")], build_complete=True) + expect( + self.cmakesuffix != self.csnd, + "ERROR in hommebaseclass: Must have cmakesuffix set up", + ) + + cmake_cmd = "cmake -C {0}/components/homme/cmake/machineFiles/{1}{6}.cmake -DUSE_NUM_PROCS={2} {0}/components/homme -DHOMME_BASELINE_DIR={3}/{4} -DCPRNC_DIR={5}/..".format( + srcroot, mach, procs, baselinedir, basename, cprnc, self.cmakesuffix + ) + + run_cmd_no_fail( + cmake_cmd, + arg_stdout="homme.bldlog", + combine_output=True, + from_dir=exeroot, + ) + run_cmd_no_fail( + "{} -j{} VERBOSE=1 test-execs".format(gmake, gmake_j), + arg_stdout="homme.bldlog", + combine_output=True, + from_dir=exeroot, + ) + + post_build( + self._case, [os.path.join(exeroot, "homme.bldlog")], build_complete=True + ) def run_phase(self): - rundir = self._case.get_value("RUNDIR") - exeroot = self._case.get_value("EXEROOT") + rundir = self._case.get_value("RUNDIR") + exeroot = self._case.get_value("EXEROOT") baseline = self._case.get_value("BASELINE_ROOT") - compare = self._case.get_value("COMPARE_BASELINE") + compare = self._case.get_value("COMPARE_BASELINE") generate = self._case.get_value("GENERATE_BASELINE") - basegen = self._case.get_value("BASEGEN_CASE") - gmake = self._case.get_value("GMAKE") + basegen = self._case.get_value("BASEGEN_CASE") + gmake = self._case.get_value("GMAKE") log = 
os.path.join(rundir, "homme.log") if os.path.exists(log): @@ -70,21 +86,45 @@ def run_phase(self): if generate: full_baseline_dir = os.path.join(baseline, basegen, "tests", "baseline") - stat = run_cmd("{} -j 4 baseline".format(gmake), arg_stdout=log, combine_output=True, from_dir=exeroot)[0] + stat = run_cmd( + "{} -j 4 baseline".format(gmake), + arg_stdout=log, + combine_output=True, + from_dir=exeroot, + )[0] if stat == 0: if os.path.isdir(full_baseline_dir): shutil.rmtree(full_baseline_dir) with SharedArea(): - dir_util.copy_tree(os.path.join(exeroot, "tests", "baseline"), full_baseline_dir, preserve_mode=False) + dir_util.copy_tree( + os.path.join(exeroot, "tests", "baseline"), + full_baseline_dir, + preserve_mode=False, + ) elif compare: - stat = run_cmd("{} -j 4 check".format(gmake), arg_stdout=log, combine_output=True, from_dir=exeroot)[0] + stat = run_cmd( + "{} -j 4 check".format(gmake), + arg_stdout=log, + combine_output=True, + from_dir=exeroot, + )[0] else: - stat = run_cmd("{} -j 4 baseline".format(gmake), arg_stdout=log, combine_output=True, from_dir=exeroot)[0] + stat = run_cmd( + "{} -j 4 baseline".format(gmake), + arg_stdout=log, + combine_output=True, + from_dir=exeroot, + )[0] if stat == 0: - stat = run_cmd("{} -j 4 check".format(gmake), arg_stdout=log, combine_output=True, from_dir=exeroot)[0] + stat = run_cmd( + "{} -j 4 check".format(gmake), + arg_stdout=log, + combine_output=True, + from_dir=exeroot, + )[0] # Add homme.log output to TestStatus.log so that it can # appear on the dashboard. Otherwise, the TestStatus.log diff --git a/CIME/SystemTests/hommebfb.py b/CIME/SystemTests/hommebfb.py index 35f07b2fc8c..7cd6b370222 100644 --- a/CIME/SystemTests/hommebfb.py +++ b/CIME/SystemTests/hommebfb.py @@ -1,7 +1,7 @@ from CIME.SystemTests.hommebaseclass import HommeBase -class HOMMEBFB(HommeBase): - def __init__(self,case): - HommeBase.__init__(self,case) - self.cmakesuffix='-bfb' +class HOMMEBFB(HommeBase): + def __init__(self, case): + HommeBase.__init__(self, case) + self.cmakesuffix = "-bfb" diff --git a/CIME/SystemTests/icp.py b/CIME/SystemTests/icp.py index 86195c193e4..f0e3988774c 100644 --- a/CIME/SystemTests/icp.py +++ b/CIME/SystemTests/icp.py @@ -4,8 +4,8 @@ from CIME.XML.standard_module_setup import * from CIME.SystemTests.system_tests_common import SystemTestsCommon -class ICP(SystemTestsCommon): +class ICP(SystemTestsCommon): def __init__(self, case): """ initialize an object interface to file env_test.xml in the case directory @@ -16,10 +16,10 @@ def build_phase(self, sharedlib_only=False, model_only=False): self._case.set_value("CICE_AUTO_DECOMP", "false") def run_phase(self): - self._case.set_value("CONTINUE_RUN",False) - self._case.set_value("REST_OPTION","none") - self._case.set_value("HIST_OPTION","$STOP_OPTION") - self._case.set_value("HIST_N","$STOP_N") + self._case.set_value("CONTINUE_RUN", False) + self._case.set_value("REST_OPTION", "none") + self._case.set_value("HIST_OPTION", "$STOP_OPTION") + self._case.set_value("HIST_N", "$STOP_N") self._case.flush() self.run_indv(self) diff --git a/CIME/SystemTests/irt.py b/CIME/SystemTests/irt.py index 68dd744d896..adda8b235ff 100644 --- a/CIME/SystemTests/irt.py +++ b/CIME/SystemTests/irt.py @@ -7,6 +7,7 @@ (3) Recover first interim restart to the case2 run directory (4) Start case2 from restart and run to the end of case1 (5) compare results. +(6) this test does not save or compare history files in baselines. 
""" @@ -16,24 +17,28 @@ logger = logging.getLogger(__name__) -class IRT(RestartTest): +class IRT(RestartTest): def __init__(self, case): - RestartTest.__init__(self, case, - separate_builds=False, - run_two_suffix = 'restart', - run_one_description = 'initial', - run_two_description = 'restart', - multisubmit = False) + RestartTest.__init__( + self, + case, + separate_builds=False, + run_two_suffix="restart", + run_one_description="initial", + run_two_description="restart", + multisubmit=False, + ) self._skip_pnl = False def _case_one_custom_postrun_action(self): self._case.case_st_archive() # Since preview namelist is run before _case_two_prerun_action, we need to do this here. dout_s_root = self._case1.get_value("DOUT_S_ROOT") - restart_list = ls_sorted_by_mtime(os.path.join(dout_s_root,"rest")) + restart_list = ls_sorted_by_mtime(os.path.join(dout_s_root, "rest")) logger.info("Restart directory list is {}".format(restart_list)) - expect(len(restart_list) >=2,"Expected at least two restart directories") + expect(len(restart_list) >= 2, "Expected at least two restart directories") # Get the older of the two restart directories - self._case2.restore_from_archive(rest_dir=os.path.abspath( - os.path.join(dout_s_root, "rest", restart_list[0]))) + self._case2.restore_from_archive( + rest_dir=os.path.abspath(os.path.join(dout_s_root, "rest", restart_list[0])) + ) diff --git a/CIME/SystemTests/ldsta.py b/CIME/SystemTests/ldsta.py index 442780b7a32..f7a4a2b4729 100644 --- a/CIME/SystemTests/ldsta.py +++ b/CIME/SystemTests/ldsta.py @@ -19,15 +19,17 @@ # datetime objects can't be used anywhere else def _date_to_datetime(date_obj): - return datetime.datetime(year = date_obj.year(), - month = date_obj.month(), - day = date_obj.day(), - hour = date_obj.hour(), - minute = date_obj.minute(), - second = date_obj.second()) + return datetime.datetime( + year=date_obj.year(), + month=date_obj.month(), + day=date_obj.day(), + hour=date_obj.hour(), + minute=date_obj.minute(), + second=date_obj.second(), + ) -class LDSTA(SystemTestsCommon): +class LDSTA(SystemTestsCommon): def __init__(self, case): """ initialize an object interface to the SMS system test @@ -35,35 +37,43 @@ def __init__(self, case): SystemTestsCommon.__init__(self, case) def run_phase(self): - archive_dir = self._case.get_value('DOUT_S_ROOT') + archive_dir = self._case.get_value("DOUT_S_ROOT") if os.path.isdir(archive_dir): shutil.rmtree(archive_dir) self.run_indv() # finished running, so all archive files should exist - start_date = _date_to_datetime(get_file_date(self._case.get_value('RUN_STARTDATE'))) - rest_dir = os.path.join(archive_dir, 'rest') + start_date = _date_to_datetime( + get_file_date(self._case.get_value("RUN_STARTDATE")) + ) + rest_dir = os.path.join(archive_dir, "rest") delta_day = datetime.timedelta(1) current_date = start_date + delta_day next_datecheck = current_date - days_left = self._case.get_value('STOP_N') + days_left = self._case.get_value("STOP_N") final_date = start_date + delta_day * days_left while current_date < final_date: - logger.info('Testing archiving with last date: {}'.format(current_date)) - current_date_str = '{:04}-{:02}-{:02}'.format(current_date.year, - current_date.month, - current_date.day) + logger.info("Testing archiving with last date: {}".format(current_date)) + current_date_str = "{:04}-{:02}-{:02}".format( + current_date.year, current_date.month, current_date.day + ) self._case.case_st_archive(last_date_str=current_date_str, copy_only=False) - archive_dates = 
[_date_to_datetime(get_file_date(fname)) - for fname in glob.glob(os.path.join(rest_dir, '*'))] + archive_dates = [ + _date_to_datetime(get_file_date(fname)) + for fname in glob.glob(os.path.join(rest_dir, "*")) + ] while next_datecheck <= current_date: - expect(next_datecheck in archive_dates, - 'Not all dates generated and/or archived: ' - + '{} is missing'.format(next_datecheck)) + expect( + next_datecheck in archive_dates, + "Not all dates generated and/or archived: " + + "{} is missing".format(next_datecheck), + ) next_datecheck += delta_day for date in archive_dates: - expect(date <= current_date, - 'Archived date greater than specified by last-date: ' - + '{}'.format(date)) + expect( + date <= current_date, + "Archived date greater than specified by last-date: " + + "{}".format(date), + ) num_days = random.randint(1, min(3, days_left)) days_left -= num_days current_date += num_days * delta_day diff --git a/CIME/SystemTests/mcc.py b/CIME/SystemTests/mcc.py index 96ab0c28ce0..4d47bf2c318 100644 --- a/CIME/SystemTests/mcc.py +++ b/CIME/SystemTests/mcc.py @@ -11,20 +11,22 @@ class MCC(SystemTestsCompareTwo): - def __init__(self, case): self._comp_classes = [] self._test_instances = 3 - SystemTestsCompareTwo.__init__(self, case, - separate_builds = True, - run_two_suffix = 'single_instance', - run_two_description = 'single instance', - run_one_description = 'multi driver') + SystemTestsCompareTwo.__init__( + self, + case, + separate_builds=True, + run_two_suffix="single_instance", + run_two_description="single instance", + run_one_description="multi driver", + ) def _case_one_setup(self): # The multicoupler case will increase the number of tasks by the # number of requested couplers. - self._case.set_value("MULTI_DRIVER",True) + self._case.set_value("MULTI_DRIVER", True) self._case.set_value("NINST", self._test_instances) def _case_two_setup(self): diff --git a/CIME/SystemTests/mvk.py b/CIME/SystemTests/mvk.py index 4adfd639dd3..9dc279fdc36 100644 --- a/CIME/SystemTests/mvk.py +++ b/CIME/SystemTests/mvk.py @@ -28,7 +28,6 @@ class MVK(SystemTestsCommon): - def __init__(self, case): """ initialize an object interface to the MVK test @@ -40,8 +39,10 @@ def __init__(self, case): else: self.component = "cam" - if self._case.get_value("RESUBMIT") == 0 \ - and self._case.get_value("GENERATE_BASELINE") is False: + if ( + self._case.get_value("RESUBMIT") == 0 + and self._case.get_value("GENERATE_BASELINE") is False + ): self._case.set_value("COMPARE_BASELINE", True) else: self._case.set_value("COMPARE_BASELINE", False) @@ -50,26 +51,28 @@ def build_phase(self, sharedlib_only=False, model_only=False): # Only want this to happen once. It will impact the sharedlib build # so it has to happen there. 
if not model_only: - logging.warning('Starting to build multi-instance exe') + logging.warning("Starting to build multi-instance exe") for comp in self._case.get_values("COMP_CLASSES"): - self._case.set_value('NTHRDS_{}'.format(comp), 1) + self._case.set_value("NTHRDS_{}".format(comp), 1) ntasks = self._case.get_value("NTASKS_{}".format(comp)) - self._case.set_value('NTASKS_{}'.format(comp), ntasks * NINST) - if comp != 'CPL': - self._case.set_value('NINST_{}'.format(comp), NINST) + self._case.set_value("NTASKS_{}".format(comp), ntasks * NINST) + if comp != "CPL": + self._case.set_value("NINST_{}".format(comp), NINST) self._case.flush() case_setup(self._case, test_mode=False, reset=True) for iinst in range(1, NINST + 1): - with open('user_nl_{}_{:04d}'.format(self.component, iinst), 'w') as nl_atm_file: - nl_atm_file.write('new_random = .true.\n') - nl_atm_file.write('pertlim = 1.0e-10\n') - nl_atm_file.write('seed_custom = {}\n'.format(iinst)) - nl_atm_file.write('seed_clock = .true.\n') + with open( + "user_nl_{}_{:04d}".format(self.component, iinst), "w" + ) as nl_atm_file: + nl_atm_file.write("new_random = .true.\n") + nl_atm_file.write("pertlim = 1.0e-10\n") + nl_atm_file.write("seed_custom = {}\n".format(iinst)) + nl_atm_file.write("seed_clock = .true.\n") self.build_indv(sharedlib_only=sharedlib_only, model_only=model_only) @@ -80,18 +83,22 @@ def _generate_baseline(self): super(MVK, self)._generate_baseline() with CIME.utils.SharedArea(): - basegen_dir = os.path.join(self._case.get_value("BASELINE_ROOT"), - self._case.get_value("BASEGEN_CASE")) + basegen_dir = os.path.join( + self._case.get_value("BASELINE_ROOT"), + self._case.get_value("BASEGEN_CASE"), + ) rundir = self._case.get_value("RUNDIR") ref_case = self._case.get_value("RUN_REFCASE") env_archive = self._case.get_env("archive") - hists = env_archive.get_all_hist_files(self._case.get_value("CASE"), self.component, rundir, ref_case=ref_case) + hists = env_archive.get_all_hist_files( + self._case.get_value("CASE"), self.component, rundir, ref_case=ref_case + ) logger.debug("MVK additional baseline files: {}".format(hists)) - hists = [os.path.join(rundir,hist) for hist in hists] + hists = [os.path.join(rundir, hist) for hist in hists] for hist in hists: - basename = hist[hist.rfind(self.component):] + basename = hist[hist.rfind(self.component) :] baseline = os.path.join(basegen_dir, basename) if os.path.exists(baseline): os.remove(baseline) @@ -104,19 +111,23 @@ def _compare_baseline(self): # This is here because the comparison is run for each submission # and we only want to compare once the whole run is finished. We # need to return a pass here to continue the submission process. 
- self._test_status.set_status(CIME.test_status.BASELINE_PHASE, - CIME.test_status.TEST_PASS_STATUS) + self._test_status.set_status( + CIME.test_status.BASELINE_PHASE, CIME.test_status.TEST_PASS_STATUS + ) return - self._test_status.set_status(CIME.test_status.BASELINE_PHASE, - CIME.test_status.TEST_FAIL_STATUS) + self._test_status.set_status( + CIME.test_status.BASELINE_PHASE, CIME.test_status.TEST_FAIL_STATUS + ) run_dir = self._case.get_value("RUNDIR") case_name = self._case.get_value("CASE") - base_dir = os.path.join(self._case.get_value("BASELINE_ROOT"), - self._case.get_value("BASECMP_CASE")) + base_dir = os.path.join( + self._case.get_value("BASELINE_ROOT"), + self._case.get_value("BASECMP_CASE"), + ) - test_name = "{}".format(case_name.split('.')[-1]) + test_name = "{}".format(case_name.split(".")[-1]) evv_config = { test_name: { "module": os.path.join(evv_lib_dir, "extensions", "ks.py"), @@ -131,25 +142,31 @@ def _compare_baseline(self): } } - json_file = os.path.join(run_dir, '.'.join([case_name, 'json'])) - with open(json_file, 'w') as config_file: + json_file = os.path.join(run_dir, ".".join([case_name, "json"])) + with open(json_file, "w") as config_file: json.dump(evv_config, config_file, indent=4) - evv_out_dir = os.path.join(run_dir, '.'.join([case_name, 'evv'])) - evv(['-e', json_file, '-o', evv_out_dir]) + evv_out_dir = os.path.join(run_dir, ".".join([case_name, "evv"])) + evv(["-e", json_file, "-o", evv_out_dir]) - with open(os.path.join(evv_out_dir, 'index.json')) as evv_f: + with open(os.path.join(evv_out_dir, "index.json")) as evv_f: evv_status = json.load(evv_f) comments = "" - for evv_elem in evv_status['Data']['Elements']: - if evv_elem['Type'] == 'ValSummary' \ - and evv_elem['TableTitle'] == 'Kolmogorov-Smirnov test': - comments = "; ".join("{}: {}".format(key, val) for key, val - in evv_elem['Data'][test_name][''].items()) - if evv_elem['Data'][test_name]['']['Test status'].lower() == 'pass': - self._test_status.set_status(CIME.test_status.BASELINE_PHASE, - CIME.test_status.TEST_PASS_STATUS) + for evv_elem in evv_status["Data"]["Elements"]: + if ( + evv_elem["Type"] == "ValSummary" + and evv_elem["TableTitle"] == "Kolmogorov-Smirnov test" + ): + comments = "; ".join( + "{}: {}".format(key, val) + for key, val in evv_elem["Data"][test_name][""].items() + ) + if evv_elem["Data"][test_name][""]["Test status"].lower() == "pass": + self._test_status.set_status( + CIME.test_status.BASELINE_PHASE, + CIME.test_status.TEST_PASS_STATUS, + ) break status = self._test_status.get_status(CIME.test_status.BASELINE_PHASE) @@ -159,20 +176,34 @@ def _compare_baseline(self): urlroot = CIME.utils.get_urlroot(mach_obj) if htmlroot is not None: with CIME.utils.SharedArea(): - dir_util.copy_tree(evv_out_dir, os.path.join(htmlroot, 'evv', case_name), preserve_mode=False) + dir_util.copy_tree( + evv_out_dir, + os.path.join(htmlroot, "evv", case_name), + preserve_mode=False, + ) if urlroot is None: urlroot = "[{}_URL]".format(mach_name.capitalize()) viewing = "{}/evv/{}/index.html".format(urlroot, case_name) else: - viewing = "{}\n" \ - " EVV viewing instructions can be found at: " \ - " https://github.com/E3SM-Project/E3SM/blob/master/cime/scripts/" \ - "climate_reproducibility/README.md#test-passfail-and-extended-output" \ - "".format(evv_out_dir) - - comments = "{} {} for test '{}'.\n" \ - " {}\n" \ - " EVV results can be viewed at:\n" \ - " {}".format(CIME.test_status.BASELINE_PHASE, status, test_name, comments, viewing) + viewing = ( + "{}\n" + " EVV viewing instructions can 
be found at: " + " https://github.com/E3SM-Project/E3SM/blob/master/cime/scripts/" + "climate_reproducibility/README.md#test-passfail-and-extended-output" + "".format(evv_out_dir) + ) + + comments = ( + "{} {} for test '{}'.\n" + " {}\n" + " EVV results can be viewed at:\n" + " {}".format( + CIME.test_status.BASELINE_PHASE, + status, + test_name, + comments, + viewing, + ) + ) CIME.utils.append_testlog(comments, self._orig_caseroot) diff --git a/CIME/SystemTests/nck.py b/CIME/SystemTests/nck.py index 4f6358b8311..af0a2d0c5e6 100644 --- a/CIME/SystemTests/nck.py +++ b/CIME/SystemTests/nck.py @@ -13,15 +13,18 @@ logger = logging.getLogger(__name__) -class NCK(SystemTestsCompareTwo): +class NCK(SystemTestsCompareTwo): def __init__(self, case): self._comp_classes = [] - SystemTestsCompareTwo.__init__(self, case, - separate_builds = True, - run_two_suffix = 'multiinst', - run_one_description = 'one instance', - run_two_description = 'two instances') + SystemTestsCompareTwo.__init__( + self, + case, + separate_builds=True, + run_two_suffix="multiinst", + run_one_description="one instance", + run_two_description="two instances", + ) def _common_setup(self): # We start by halving the number of tasks for both cases. This ensures @@ -34,28 +37,27 @@ def _common_setup(self): self._comp_classes.remove("CPL") for comp in self._comp_classes: ntasks = self._case.get_value("NTASKS_{}".format(comp)) - if ( ntasks > 1 ): - self._case.set_value("NTASKS_{}".format(comp), int(ntasks/2)) + if ntasks > 1: + self._case.set_value("NTASKS_{}".format(comp), int(ntasks / 2)) # the following assures that both cases use the same number of total tasks rootpe = self._case.get_value("ROOTPE_{}".format(comp)) - if ( rootpe > 1 ): - self._case.set_value("ROOTPE_{}".format(comp), int(rootpe+ntasks/2)) + if rootpe > 1: + self._case.set_value("ROOTPE_{}".format(comp), int(rootpe + ntasks / 2)) def _case_one_setup(self): for comp in self._comp_classes: self._case.set_value("NINST_{}".format(comp), 1) - def _case_two_setup(self): for comp in self._comp_classes: - if (comp == "ESP"): + if comp == "ESP": self._case.set_value("NINST_{}".format(comp), 1) else: self._case.set_value("NINST_{}".format(comp), 2) ntasks = self._case.get_value("NTASKS_{}".format(comp)) rootpe = self._case.get_value("ROOTPE_{}".format(comp)) - if ( rootpe > 1 ): - self._case.set_value("ROOTPE_{}".format(comp), int(rootpe-ntasks)) - self._case.set_value("NTASKS_{}".format(comp), ntasks*2) + if rootpe > 1: + self._case.set_value("ROOTPE_{}".format(comp), int(rootpe - ntasks)) + self._case.set_value("NTASKS_{}".format(comp), ntasks * 2) self._case.case_setup(test_mode=True, reset=True) diff --git a/CIME/SystemTests/ncr.py b/CIME/SystemTests/ncr.py index 5dc34e02a22..a1cc7d3bad5 100644 --- a/CIME/SystemTests/ncr.py +++ b/CIME/SystemTests/ncr.py @@ -13,17 +13,20 @@ logger = logging.getLogger(__name__) -class NCR(SystemTestsCompareTwo): +class NCR(SystemTestsCompareTwo): def __init__(self, case): """ initialize an NCR test """ - SystemTestsCompareTwo.__init__(self, case, - separate_builds = True, - run_two_suffix = "singleinst", - run_one_description = "two instances, each with the same number of tasks", - run_two_description = "default build") + SystemTestsCompareTwo.__init__( + self, + case, + separate_builds=True, + run_two_suffix="singleinst", + run_one_description="two instances, each with the same number of tasks", + run_two_description="default build", + ) def _comp_classes(self): # Return the components which we need to set things for diff --git 
a/CIME/SystemTests/nodefail.py b/CIME/SystemTests/nodefail.py index e040b564225..c770fc292bc 100644 --- a/CIME/SystemTests/nodefail.py +++ b/CIME/SystemTests/nodefail.py @@ -7,8 +7,8 @@ logger = logging.getLogger(__name__) -class NODEFAIL(ERS): +class NODEFAIL(ERS): def __init__(self, case): """ initialize an object interface to the ERS system test @@ -16,7 +16,7 @@ def __init__(self, case): ERS.__init__(self, case) self._fail_sentinel = os.path.join(case.get_value("RUNDIR"), "FAIL_SENTINEL") - self._fail_str = case.get_value("NODE_FAIL_REGEX") + self._fail_str = case.get_value("NODE_FAIL_REGEX") def _restart_fake_phase(self): # Swap out model.exe for one that emits node failures @@ -27,8 +27,7 @@ def _restart_fake_phase(self): logname = "drv" else: logname = "cpl" - fake_exe = \ -"""#!/bin/bash + fake_exe = """#!/bin/bash fail_sentinel={0} cpl_log={1}/{4}.log.$LID @@ -48,7 +47,9 @@ def _restart_fake_phase(self): echo Insta pass echo SUCCESSFUL TERMINATION > $cpl_log fi -""".format(self._fail_sentinel, rundir, get_model(), self._fail_str, logname) +""".format( + self._fail_sentinel, rundir, get_model(), self._fail_str, logname + ) fake_exe_file = os.path.join(exeroot, "fake.sh") with open(fake_exe_file, "w") as fd: diff --git a/CIME/SystemTests/pea.py b/CIME/SystemTests/pea.py index cb3d3f9344c..b20c3abd4e7 100644 --- a/CIME/SystemTests/pea.py +++ b/CIME/SystemTests/pea.py @@ -12,14 +12,17 @@ logger = logging.getLogger(__name__) -class PEA(SystemTestsCompareTwo): +class PEA(SystemTestsCompareTwo): def __init__(self, case): - SystemTestsCompareTwo.__init__(self, case, - separate_builds = True, - run_two_suffix = 'mpi-serial', - run_one_description = 'default mpi library', - run_two_description = 'mpi-serial') + SystemTestsCompareTwo.__init__( + self, + case, + separate_builds=True, + run_two_suffix="mpi-serial", + run_one_description="default mpi library", + run_two_description="mpi-serial", + ) def _common_setup(self): for comp in self._case.get_values("COMP_CLASSES"): @@ -34,11 +37,13 @@ def _case_two_setup(self): mach_name = self._case.get_value("MACH") mach_obj = Machines(machine=mach_name) if mach_obj.is_valid_MPIlib("mpi-serial"): - self._case.set_value("MPILIB","mpi-serial") + self._case.set_value("MPILIB", "mpi-serial") else: - logger.warning("mpi-serial is not supported on machine '{}', " - "so we have to fall back to default MPI and " - "therefore very little is being tested".format(mach_name)) + logger.warning( + "mpi-serial is not supported on machine '{}', " + "so we have to fall back to default MPI and " + "therefore very little is being tested".format(mach_name) + ) if os.path.isfile("Macros"): os.remove("Macros") diff --git a/CIME/SystemTests/pem.py b/CIME/SystemTests/pem.py index 34bb67ae8c1..798417b254a 100644 --- a/CIME/SystemTests/pem.py +++ b/CIME/SystemTests/pem.py @@ -13,14 +13,17 @@ logger = logging.getLogger(__name__) -class PEM(SystemTestsCompareTwo): +class PEM(SystemTestsCompareTwo): def __init__(self, case): - SystemTestsCompareTwo.__init__(self, case, - separate_builds = True, - run_two_suffix = 'modpes', - run_one_description = 'default pe counts', - run_two_description = 'halved pe counts') + SystemTestsCompareTwo.__init__( + self, + case, + separate_builds=True, + run_two_suffix="modpes", + run_one_description="default pe counts", + run_two_description="halved pe counts", + ) def _case_one_setup(self): pass @@ -28,6 +31,6 @@ def _case_one_setup(self): def _case_two_setup(self): for comp in self._case.get_values("COMP_CLASSES"): ntasks = 
self._case.get_value("NTASKS_{}".format(comp)) - if ( ntasks > 1 ): - self._case.set_value("NTASKS_{}".format(comp), int(ntasks/2)) + if ntasks > 1: + self._case.set_value("NTASKS_{}".format(comp), int(ntasks / 2)) self._case.case_setup(test_mode=True, reset=True) diff --git a/CIME/SystemTests/pet.py b/CIME/SystemTests/pet.py index 17f1fbe8c8e..fcf108bd28c 100644 --- a/CIME/SystemTests/pet.py +++ b/CIME/SystemTests/pet.py @@ -11,18 +11,21 @@ logger = logging.getLogger(__name__) -class PET(SystemTestsCompareTwo): +class PET(SystemTestsCompareTwo): def __init__(self, case): """ initialize a test object """ - SystemTestsCompareTwo.__init__(self, case, - separate_builds = False, - multisubmit=True, - run_two_suffix = 'single_thread', - run_one_description = 'default threading', - run_two_description = 'threads set to 1') + SystemTestsCompareTwo.__init__( + self, + case, + separate_builds=False, + multisubmit=True, + run_two_suffix="single_thread", + run_one_description="default threading", + run_two_description="threads set to 1", + ) def _case_one_setup(self): # first make sure that all components have threaded settings @@ -32,9 +35,8 @@ def _case_one_setup(self): # Need to redo case_setup because we may have changed the number of threads - def _case_two_setup(self): - #Do a run with all threads set to 1 + # Do a run with all threads set to 1 for comp in self._case.get_values("COMP_CLASSES"): self._case.set_value("NTHRDS_{}".format(comp), 1) diff --git a/CIME/SystemTests/pfs.py b/CIME/SystemTests/pfs.py index 37f090e6a90..32bdbe08002 100644 --- a/CIME/SystemTests/pfs.py +++ b/CIME/SystemTests/pfs.py @@ -9,8 +9,8 @@ logger = logging.getLogger(__name__) -class PFS(SystemTestsCommon): +class PFS(SystemTestsCommon): def __init__(self, case): """ initialize an object interface to the PFS system test diff --git a/CIME/SystemTests/pgn.py b/CIME/SystemTests/pgn.py index 6450230d9ac..bef4dafdcdb 100644 --- a/CIME/SystemTests/pgn.py +++ b/CIME/SystemTests/pgn.py @@ -37,18 +37,19 @@ logger = logging.getLogger(__name__) NUMBER_INITIAL_CONDITIONS = 6 -PERTURBATIONS = OrderedDict([('woprt', 0.0), - ('posprt', 1.0e-14), - ('negprt', -1.0e-14), - ]) -FCLD_NC = 'cam.h0.cloud.nc' -INIT_COND_FILE_TEMPLATE = \ - "20210915.v2.ne4_oQU240.F2010.{}.{}.0002-{:02d}-01-00000.nc" -INSTANCE_FILE_TEMPLATE = '{}{}_{:04d}.h0.0001-01-01-00000{}.nc' +PERTURBATIONS = OrderedDict( + [ + ("woprt", 0.0), + ("posprt", 1.0e-14), + ("negprt", -1.0e-14), + ] +) +FCLD_NC = "cam.h0.cloud.nc" +INIT_COND_FILE_TEMPLATE = "20210915.v2.ne4_oQU240.F2010.{}.{}.0002-{:02d}-01-00000.nc" +INSTANCE_FILE_TEMPLATE = "{}{}_{:04d}.h0.0001-01-01-00000{}.nc" class PGN(SystemTestsCommon): - def __init__(self, case): """ initialize an object interface to the PGN test @@ -67,7 +68,7 @@ def __init__(self, case): def build_phase(self, sharedlib_only=False, model_only=False): ninst = NUMBER_INITIAL_CONDITIONS * len(PERTURBATIONS) - logger.debug('PGN_INFO: number of instance: '+str(ninst)) + logger.debug("PGN_INFO: number of instance: " + str(ninst)) default_ninst = self._case.get_value("NINST_ATM") @@ -76,16 +77,17 @@ def build_phase(self, sharedlib_only=False, model_only=False): # so it has to happen here. 
if not model_only: # Lay all of the components out concurrently - logger.debug("PGN_INFO: Updating NINST for multi-instance in " - "env_mach_pes.xml") - for comp in ['ATM', 'OCN', 'WAV', 'GLC', 'ICE', 'ROF', 'LND']: + logger.debug( + "PGN_INFO: Updating NINST for multi-instance in " "env_mach_pes.xml" + ) + for comp in ["ATM", "OCN", "WAV", "GLC", "ICE", "ROF", "LND"]: ntasks = self._case.get_value("NTASKS_{}".format(comp)) self._case.set_value("ROOTPE_{}".format(comp), 0) - self._case.set_value("NINST_{}".format(comp), ninst) - self._case.set_value("NTASKS_{}".format(comp), ntasks*ninst) + self._case.set_value("NINST_{}".format(comp), ninst) + self._case.set_value("NTASKS_{}".format(comp), ntasks * ninst) self._case.set_value("ROOTPE_CPL", 0) - self._case.set_value("NTASKS_CPL", ntasks*ninst) + self._case.set_value("NTASKS_CPL", ntasks * ninst) self._case.flush() case_setup(self._case, test_mode=False, reset=True) @@ -98,11 +100,18 @@ def build_phase(self, sharedlib_only=False, model_only=False): iinst = 1 for icond in range(1, NUMBER_INITIAL_CONDITIONS + 1): - fatm_in = os.path.join(csmdata_atm, INIT_COND_FILE_TEMPLATE.format(self.atmmodIC, 'i', icond)) - flnd_in = os.path.join(csmdata_lnd, INIT_COND_FILE_TEMPLATE.format(self.lndmodIC, 'r', icond)) + fatm_in = os.path.join( + csmdata_atm, INIT_COND_FILE_TEMPLATE.format(self.atmmodIC, "i", icond) + ) + flnd_in = os.path.join( + csmdata_lnd, INIT_COND_FILE_TEMPLATE.format(self.lndmodIC, "r", icond) + ) for iprt in PERTURBATIONS.values(): - with open('user_nl_{}_{:04d}'.format(self.atmmod, iinst), 'w') as atmnlfile, \ - open('user_nl_{}_{:04d}'.format(self.lndmod, iinst), 'w') as lndnlfile: + with open( + "user_nl_{}_{:04d}".format(self.atmmod, iinst), "w" + ) as atmnlfile, open( + "user_nl_{}_{:04d}".format(self.lndmod, iinst), "w" + ) as lndnlfile: atmnlfile.write("ncdata = '{}' \n".format(fatm_in)) lndnlfile.write("finidat = '{}' \n".format(flnd_in)) @@ -128,12 +137,14 @@ def get_var_list(self): Get variable list for pergro specific output vars """ rundir = self._case.get_value("RUNDIR") - prg_fname = 'pergro_ptend_names.txt' + prg_fname = "pergro_ptend_names.txt" var_file = os.path.join(rundir, prg_fname) - CIME.utils.expect(os.path.isfile(var_file), - "File {} does not exist in: {}".format(prg_fname, rundir)) + CIME.utils.expect( + os.path.isfile(var_file), + "File {} does not exist in: {}".format(prg_fname, rundir), + ) - with open(var_file, 'r') as fvar: + with open(var_file, "r") as fvar: var_list = fvar.readlines() return list(map(str.strip, var_list)) @@ -145,19 +156,22 @@ def _compare_baseline(self): cloud """ with self._test_status: - self._test_status.set_status(CIME.test_status.BASELINE_PHASE, - CIME.test_status.TEST_FAIL_STATUS) + self._test_status.set_status( + CIME.test_status.BASELINE_PHASE, CIME.test_status.TEST_FAIL_STATUS + ) logger.debug("PGN_INFO:BASELINE COMPARISON STARTS") run_dir = self._case.get_value("RUNDIR") case_name = self._case.get_value("CASE") - base_dir = os.path.join(self._case.get_value("BASELINE_ROOT"), - self._case.get_value("BASECMP_CASE")) + base_dir = os.path.join( + self._case.get_value("BASELINE_ROOT"), + self._case.get_value("BASECMP_CASE"), + ) var_list = self.get_var_list() - test_name = "{}".format(case_name.split('.')[-1]) + test_name = "{}".format(case_name.split(".")[-1]) evv_config = { test_name: { "module": os.path.join(evv_lib_dir, "extensions", "pg.py"), @@ -177,25 +191,31 @@ def _compare_baseline(self): } } - json_file = os.path.join(run_dir, '.'.join([case_name, 'json'])) - with 
open(json_file, 'w') as config_file: + json_file = os.path.join(run_dir, ".".join([case_name, "json"])) + with open(json_file, "w") as config_file: json.dump(evv_config, config_file, indent=4) - evv_out_dir = os.path.join(run_dir, '.'.join([case_name, 'evv'])) - evv(['-e', json_file, '-o', evv_out_dir]) + evv_out_dir = os.path.join(run_dir, ".".join([case_name, "evv"])) + evv(["-e", json_file, "-o", evv_out_dir]) - with open(os.path.join(evv_out_dir, 'index.json'), 'r') as evv_f: + with open(os.path.join(evv_out_dir, "index.json"), "r") as evv_f: evv_status = json.load(evv_f) comments = "" - for evv_elem in evv_status['Data']['Elements']: - if evv_elem['Type'] == 'ValSummary' \ - and evv_elem['TableTitle'] == 'Perturbation growth test': - comments = "; ".join("{}: {}".format(key, val) for key, val - in evv_elem['Data'][test_name][''].items()) - if evv_elem['Data'][test_name]['']['Test status'].lower() == 'pass': - self._test_status.set_status(CIME.test_status.BASELINE_PHASE, - CIME.test_status.TEST_PASS_STATUS) + for evv_elem in evv_status["Data"]["Elements"]: + if ( + evv_elem["Type"] == "ValSummary" + and evv_elem["TableTitle"] == "Perturbation growth test" + ): + comments = "; ".join( + "{}: {}".format(key, val) + for key, val in evv_elem["Data"][test_name][""].items() + ) + if evv_elem["Data"][test_name][""]["Test status"].lower() == "pass": + self._test_status.set_status( + CIME.test_status.BASELINE_PHASE, + CIME.test_status.TEST_PASS_STATUS, + ) break status = self._test_status.get_status(CIME.test_status.BASELINE_PHASE) @@ -205,21 +225,35 @@ def _compare_baseline(self): urlroot = CIME.utils.get_urlroot(mach_obj) if htmlroot is not None: with CIME.utils.SharedArea(): - dir_util.copy_tree(evv_out_dir, os.path.join(htmlroot, 'evv', case_name), preserve_mode=False) + dir_util.copy_tree( + evv_out_dir, + os.path.join(htmlroot, "evv", case_name), + preserve_mode=False, + ) if urlroot is None: urlroot = "[{}_URL]".format(mach_name.capitalize()) viewing = "{}/evv/{}/index.html".format(urlroot, case_name) else: - viewing = "{}\n" \ - " EVV viewing instructions can be found at: " \ - " https://github.com/E3SM-Project/E3SM/blob/master/cime/scripts/" \ - "climate_reproducibility/README.md#test-passfail-and-extended-output" \ - "".format(evv_out_dir) - - comments = "{} {} for test '{}'.\n" \ - " {}\n" \ - " EVV results can be viewed at:\n" \ - " {}".format(CIME.test_status.BASELINE_PHASE, status, test_name, comments, viewing) + viewing = ( + "{}\n" + " EVV viewing instructions can be found at: " + " https://github.com/E3SM-Project/E3SM/blob/master/cime/scripts/" + "climate_reproducibility/README.md#test-passfail-and-extended-output" + "".format(evv_out_dir) + ) + + comments = ( + "{} {} for test '{}'.\n" + " {}\n" + " EVV results can be viewed at:\n" + " {}".format( + CIME.test_status.BASELINE_PHASE, + status, + test_name, + comments, + viewing, + ) + ) CIME.utils.append_testlog(comments, self._orig_caseroot) @@ -235,28 +269,41 @@ def run_phase(self): logger.debug("PGN_INFO: Case name is:{}".format(casename)) for icond in range(NUMBER_INITIAL_CONDITIONS): - for iprt, (prt_name, prt_value) in enumerate(PERTURBATIONS.items()): + for iprt, ( + prt_name, + prt_value, # pylint: disable=unused-variable + ) in enumerate(PERTURBATIONS.items()): iinst = pg._sub2instance(icond, iprt, len(PERTURBATIONS)) - fname = os.path.join(rundir, INSTANCE_FILE_TEMPLATE.format(casename + '.', self.atmmod, iinst, '')) - renamed_fname = re.sub(r'\.nc$', '_{}.nc'.format(prt_name), fname) + fname = 
os.path.join( + rundir, + INSTANCE_FILE_TEMPLATE.format( + casename + ".", self.atmmod, iinst, "" + ), + ) + renamed_fname = re.sub(r"\.nc$", "_{}.nc".format(prt_name), fname) logger.debug("PGN_INFO: fname to rename:{}".format(fname)) logger.debug("PGN_INFO: Renamed file:{}".format(renamed_fname)) try: shutil.move(fname, renamed_fname) except IOError: - CIME.utils.expect(os.path.isfile(renamed_fname), - "ERROR: File {} does not exist".format(renamed_fname)) - logger.debug("PGN_INFO: Renamed file already exists:" - "{}".format(renamed_fname)) + CIME.utils.expect( + os.path.isfile(renamed_fname), + "ERROR: File {} does not exist".format(renamed_fname), + ) + logger.debug( + "PGN_INFO: Renamed file already exists:" + "{}".format(renamed_fname) + ) logger.debug("PGN_INFO: RUN PHASE ENDS") def _generate_baseline(self): super(PGN, self)._generate_baseline() - basegen_dir = os.path.join(self._case.get_value("BASELINE_ROOT"), - self._case.get_value("BASEGEN_CASE")) + basegen_dir = os.path.join( + self._case.get_value("BASELINE_ROOT"), self._case.get_value("BASEGEN_CASE") + ) rundir = self._case.get_value("RUNDIR") casename = self._case.get_value("CASE") @@ -268,23 +315,41 @@ def _generate_baseline(self): for icond in range(NUMBER_INITIAL_CONDITIONS): prt_rmse = {} for iprt, prt_name in enumerate(PERTURBATIONS): - if prt_name == 'woprt': + if prt_name == "woprt": continue iinst_ctrl = pg._sub2instance(icond, 0, nprt) - ifile_ctrl = os.path.join(rundir, - INSTANCE_FILE_TEMPLATE.format(casename + '.', self.atmmod, iinst_ctrl, '_woprt')) + ifile_ctrl = os.path.join( + rundir, + INSTANCE_FILE_TEMPLATE.format( + casename + ".", self.atmmod, iinst_ctrl, "_woprt" + ), + ) iinst_test = pg._sub2instance(icond, iprt, nprt) - ifile_test = os.path.join(rundir, - INSTANCE_FILE_TEMPLATE.format(casename + '.', self.atmmod, iinst_test, '_' + prt_name)) - - prt_rmse[prt_name] = pg.variables_rmse(ifile_test, ifile_ctrl, var_list, 't_') + ifile_test = os.path.join( + rundir, + INSTANCE_FILE_TEMPLATE.format( + casename + ".", self.atmmod, iinst_test, "_" + prt_name + ), + ) + + prt_rmse[prt_name] = pg.variables_rmse( + ifile_test, ifile_ctrl, var_list, "t_" + ) rmse_prototype[icond] = pd.concat(prt_rmse) rmse = pd.concat(rmse_prototype) - cld_rmse = np.reshape(rmse.RMSE.values, (NUMBER_INITIAL_CONDITIONS, nprt - 1, nvar)) - - pg.rmse_writer(os.path.join(rundir, FCLD_NC), - cld_rmse, list(PERTURBATIONS.keys()), var_list, INIT_COND_FILE_TEMPLATE, "cam") + cld_rmse = np.reshape( + rmse.RMSE.values, (NUMBER_INITIAL_CONDITIONS, nprt - 1, nvar) + ) + + pg.rmse_writer( + os.path.join(rundir, FCLD_NC), + cld_rmse, + list(PERTURBATIONS.keys()), + var_list, + INIT_COND_FILE_TEMPLATE, + "cam", + ) logger.debug("PGN_INFO:copy:{} to {}".format(FCLD_NC, basegen_dir)) shutil.copy(os.path.join(rundir, FCLD_NC), basegen_dir) diff --git a/CIME/SystemTests/pre.py b/CIME/SystemTests/pre.py index da002c4f639..54512a00660 100644 --- a/CIME/SystemTests/pre.py +++ b/CIME/SystemTests/pre.py @@ -16,7 +16,7 @@ ############################################################################### class PRE(SystemTestsCompareTwo): -############################################################################### + ############################################################################### """ Implementation of the CIME pause/resume test: Tests having driver 'pause' (write cpl and/or other restart file(s)) and 'resume' @@ -26,23 +26,27 @@ class PRE(SystemTestsCompareTwo): ########################################################################### def 
__init__(self, case): - ########################################################################### - SystemTestsCompareTwo.__init__(self, case, - separate_builds=False, - run_two_suffix='pr', - run_one_description='no pause/resume', - run_two_description='pause/resume') - self._stopopt = '' - self._stopn = 0 + ########################################################################### + SystemTestsCompareTwo.__init__( + self, + case, + separate_builds=False, + run_two_suffix="pr", + run_one_description="no pause/resume", + run_two_description="pause/resume", + ) + self._stopopt = "" + self._stopn = 0 self._cprnc_exe = None + ########################################################################### def _case_one_setup(self): - ########################################################################### + ########################################################################### pass ########################################################################### def _case_two_setup(self): - ########################################################################### + ########################################################################### # Set up a pause/resume run stopopt = self._case1.get_value("STOP_OPTION") stopn = self._case1.get_value("STOP_N") @@ -67,10 +71,10 @@ def _case_two_setup(self): self._case.flush() ########################################################################### - def run_phase(self): # pylint: disable=arguments-differ - ########################################################################### + def run_phase(self): # pylint: disable=arguments-differ + ########################################################################### self._activate_case2() - should_match = (self._case.get_value("DESP_MODE") == "NOCHANGE") + should_match = self._case.get_value("DESP_MODE") == "NOCHANGE" SystemTestsCompareTwo.run_phase(self, success_change=not should_match) # Look for expected coupler restart files logger = logging.getLogger(__name__) @@ -93,30 +97,49 @@ def run_phase(self): # pylint: disable=arguments-differ else: ninst = self._case.get_value("NINST_{}".format(comp)) - comp_name = self._case.get_value('COMP_{}'.format(comp)) - for index in range(1,ninst+1): + comp_name = self._case.get_value("COMP_{}".format(comp)) + for index in range(1, ninst + 1): if ninst == 1: - rname = '*.{}.r.*'.format(comp_name) + rname = "*.{}.r.*".format(comp_name) else: - rname = '*.{}_{:04d}.r.*'.format(comp_name, index) + rname = "*.{}_{:04d}.r.*".format(comp_name, index) restart_files_1 = glob.glob(os.path.join(rundir1, rname)) - expect((len(restart_files_1) > 0), "No case1 restart files for {}".format(comp)) + expect( + (len(restart_files_1) > 0), + "No case1 restart files for {}".format(comp), + ) restart_files_2 = glob.glob(os.path.join(rundir2, rname)) - expect((len(restart_files_2) > len(restart_files_1)), - "No pause (restart) files found in case2 for {}".format(comp)) + expect( + (len(restart_files_2) > len(restart_files_1)), + "No pause (restart) files found in case2 for {}".format(comp), + ) # Do cprnc of restart files. 
rfile1 = restart_files_1[len(restart_files_1) - 1] # rfile2 has to match rfile1 (same time string) parts = os.path.basename(rfile1).split(".") - glob_str = "*.{}".format(".".join(parts[len(parts)-4:])) + glob_str = "*.{}".format(".".join(parts[len(parts) - 4 :])) restart_files_2 = glob.glob(os.path.join(rundir2, glob_str)) - expect((len(restart_files_2) == 1), - "Missing case2 restart file, {}", glob_str) + expect( + (len(restart_files_2) == 1), + "Missing case2 restart file, {}".format(glob_str), + ) rfile2 = restart_files_2[0] - ok = cprnc(comp, rfile1, rfile2, self._case, rundir2, cprnc_exe=self._cprnc_exe)[0] - logger.warning("CPRNC result for {}: {}".format(os.path.basename(rfile1), "PASS" if (ok == should_match) else "FAIL")) + ok = cprnc( + comp, rfile1, rfile2, self._case, rundir2, cprnc_exe=self._cprnc_exe + )[0] + logger.warning( + "CPRNC result for {}: {}".format( + os.path.basename(rfile1), + "PASS" if (ok == should_match) else "FAIL", + ) + ) compare_ok = compare_ok and (should_match == ok) - expect(compare_ok, - "Not all restart files {}".format("matched" if should_match else "failed to match")) + expect( + compare_ok, + "Not all restart files {}".format( + "matched" if should_match else "failed to match" + ), + ) diff --git a/CIME/SystemTests/rep.py b/CIME/SystemTests/rep.py index a3cef73fbfc..60f6b473ea9 100644 --- a/CIME/SystemTests/rep.py +++ b/CIME/SystemTests/rep.py @@ -6,12 +6,12 @@ from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo -class REP(SystemTestsCompareTwo): +class REP(SystemTestsCompareTwo): def __init__(self, case): - SystemTestsCompareTwo.__init__(self, case, - separate_builds = False, - run_two_suffix = 'rep2') + SystemTestsCompareTwo.__init__( + self, case, separate_builds=False, run_two_suffix="rep2" + ) def _case_one_setup(self): pass
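REP above is the smallest SystemTestsCompareTwo subclass in this set: the base class runs the case twice, tags each run's history files with a per-run suffix, and compares the two sets. The sketch below is a toy illustration of that hook pattern, not the real CIME base class; every name ending in Sketch is invented for the example:

class CompareTwoSketch:
    """Toy stand-in for the SystemTestsCompareTwo hook pattern."""

    def __init__(self, run_two_suffix="test"):
        self._run_one_suffix = "base"
        self._run_two_suffix = run_two_suffix

    def _case_one_setup(self):
        # Concrete tests override this to tweak the first run.
        pass

    def _case_two_setup(self):
        # ...and this one to tweak the second run.
        pass

    def run(self):
        self._case_one_setup()
        print("run one -> history tagged '.{}'".format(self._run_one_suffix))
        self._case_two_setup()
        print("run two -> history tagged '.{}'".format(self._run_two_suffix))
        # The real base class would now compare the two tagged sets (cprnc).


class REPSketch(CompareTwoSketch):
    """Mirrors REP: two unmodified runs whose history must match bit for bit."""

    def __init__(self):
        CompareTwoSketch.__init__(self, run_two_suffix="rep2")


REPSketch().run()

In the real classes the per-run setup hooks are exactly what RestartTest below overrides to shorten the second run and restart it from the first.

diff --git a/CIME/SystemTests/restart_tests.py b/CIME/SystemTests/restart_tests.py index cce4ca79241..31d1be32181 100644 --- a/CIME/SystemTests/restart_tests.py +++ b/CIME/SystemTests/restart_tests.py @@ -8,33 +8,43 @@ logger = logging.getLogger(__name__) -class RestartTest(SystemTestsCompareTwo): - - def __init__(self, case, - separate_builds, - run_two_suffix = 'restart', - run_one_description = 'initial', - run_two_description = 'restart', - multisubmit = False): - SystemTestsCompareTwo.__init__(self, case, - separate_builds, - run_two_suffix = run_two_suffix, - run_one_description = run_one_description, - run_two_description = run_two_description, - multisubmit = multisubmit) +class RestartTest(SystemTestsCompareTwo): + def __init__( + self, + case, + separate_builds, + run_two_suffix="restart", + run_one_description="initial", + run_two_description="restart", + multisubmit=False, + ): + SystemTestsCompareTwo.__init__( + self, + case, + separate_builds, + run_two_suffix=run_two_suffix, + run_one_description=run_one_description, + run_two_description=run_two_description, + multisubmit=multisubmit, + ) def _case_one_setup(self): stop_n = self._case1.get_value("STOP_N") - expect(stop_n >= 3,"STOP_N must be at least 3, STOP_N = {}".format(stop_n)) + expect(stop_n >= 3, "STOP_N must be at least 3, STOP_N = {}".format(stop_n)) def _case_two_setup(self): rest_n = self._case1.get_value("REST_N") stop_n = self._case1.get_value("STOP_N") stop_new = stop_n - rest_n - expect(stop_new > 0, "ERROR: stop_n value {:d} too short {:d} {:d}".format(stop_new,stop_n,rest_n)) + expect( + stop_new > 0, + "ERROR: stop_n value {:d} too short {:d} {:d}".format( + stop_new, stop_n, rest_n + ), + ) # hist_n is set to the 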
stop_n value of case1 self._case.set_value("HIST_N", stop_n) self._case.set_value("STOP_N", stop_new) - self._case.set_value("CONTINUE_RUN",True) + self._case.set_value("CONTINUE_RUN", True) self._case.set_value("REST_OPTION", "never") diff --git a/CIME/SystemTests/seq.py b/CIME/SystemTests/seq.py index 716215db567..0a51d50d283 100644 --- a/CIME/SystemTests/seq.py +++ b/CIME/SystemTests/seq.py @@ -6,18 +6,20 @@ logger = logging.getLogger(__name__) -class SEQ(SystemTestsCompareTwo): +class SEQ(SystemTestsCompareTwo): def __init__(self, case): """ initialize an object interface to file env_test.xml in the case directory """ - SystemTestsCompareTwo.__init__(self, - case, - separate_builds=True, - run_two_suffix="seq", - run_one_description = "base", - run_two_description = "sequence") + SystemTestsCompareTwo.__init__( + self, + case, + separate_builds=True, + run_two_suffix="seq", + run_one_description="base", + run_two_description="sequence", + ) def _case_one_setup(self): pass @@ -32,7 +34,7 @@ def _case_two_setup(self): self._case.set_value("ROOTPE_{}".format(comp), 0) else: totalpes = self._case.get_value("TOTALPES") - newntasks = max(1, totalpes//len(comp_classes)) + newntasks = max(1, totalpes // len(comp_classes)) rootpe = newntasks for comp in comp_classes: diff --git a/CIME/SystemTests/sms.py b/CIME/SystemTests/sms.py index 5c5aaafd44f..09722caa3d5 100644 --- a/CIME/SystemTests/sms.py +++ b/CIME/SystemTests/sms.py @@ -8,8 +8,8 @@ logger = logging.getLogger(__name__) -class SMS(SystemTestsCommon): +class SMS(SystemTestsCommon): def __init__(self, case): """ initialize an object interface to the SMS system test diff --git a/CIME/SystemTests/system_tests_common.py b/CIME/SystemTests/system_tests_common.py index 7c1e26c62e1..20aa5a7431b 100644 --- a/CIME/SystemTests/system_tests_common.py +++ b/CIME/SystemTests/system_tests_common.py @@ -4,10 +4,25 @@ from CIME.XML.standard_module_setup import * from CIME.XML.env_run import EnvRun from CIME.XML.env_test import EnvTest -from CIME.utils import append_testlog, get_model, safe_copy, get_timestamp, CIMEError, expect, get_current_commit, SharedArea +from CIME.utils import ( + append_testlog, + get_model, + safe_copy, + get_timestamp, + CIMEError, + expect, + get_current_commit, + SharedArea, +) from CIME.test_status import * -from CIME.hist_utils import copy_histfiles, compare_test, generate_teststatus, \ - compare_baseline, get_ts_synopsis, generate_baseline +from CIME.hist_utils import ( + copy_histfiles, + compare_test, + generate_teststatus, + compare_baseline, + get_ts_synopsis, + generate_baseline, +) from CIME.provenance import save_test_time, get_test_success from CIME.locked_files import LOCKED_DIR, lock_file, is_locked import CIME.build as build @@ -16,8 +31,8 @@ logger = logging.getLogger(__name__) -class SystemTestsCommon(object): +class SystemTestsCommon(object): def __init__(self, case, expected=None): """ initialize a CIME system test object, if the locked env_run.orig.xml @@ -34,9 +49,11 @@ def __init__(self, case, expected=None): self._init_environment(caseroot) self._init_locked_files(caseroot, expected) self._skip_pnl = False - self._cpllog = "drv" if self._case.get_value("COMP_INTERFACE")=="nuopc" else "cpl" - self._ninja = False - self._dry_run = False + self._cpllog = ( + "drv" if self._case.get_value("COMP_INTERFACE") == "nuopc" else "cpl" + ) + self._ninja = False + self._dry_run = False self._user_separate_builds = False def _init_environment(self, caseroot): @@ -65,26 +82,38 @@ def _resetup_case(self, phase, 
reset=False): # We never want to re-setup if we're doing the resubmitted run phase_status = self._test_status.get_status(phase) phase_comment = self._test_status.get_comment(phase) - rerunning = (phase_status != TEST_PEND_STATUS or - phase_comment == TEST_RERUN_COMMENT) + rerunning = ( + phase_status != TEST_PEND_STATUS or phase_comment == TEST_RERUN_COMMENT + ) if reset or (self._case.get_value("IS_FIRST_RUN") and rerunning): - logging.warning("Resetting case due to detected re-run of phase {}".format(phase)) + logging.warning( + "Resetting case due to detected re-run of phase {}".format(phase) + ) self._case.set_initial_test_values() self._case.case_setup(reset=True, test_mode=True) - def build(self, sharedlib_only=False, model_only=False, ninja=False, dry_run=False, separate_builds=False): + def build( + self, + sharedlib_only=False, + model_only=False, + ninja=False, + dry_run=False, + separate_builds=False, + ): """ Do NOT override this method, this method is the framework that controls the build phase. build_phase is the extension point that subclasses should use. """ success = True - self._ninja = ninja - self._dry_run = dry_run + self._ninja = ninja + self._dry_run = dry_run self._user_separate_builds = separate_builds - for phase_name, phase_bool in [(SHAREDLIB_BUILD_PHASE, not model_only), - (MODEL_BUILD_PHASE, not sharedlib_only)]: + for phase_name, phase_bool in [ + (SHAREDLIB_BUILD_PHASE, not model_only), + (MODEL_BUILD_PHASE, not sharedlib_only), + ]: if phase_bool: self._resetup_case(phase_name) with self._test_status: @@ -92,16 +121,20 @@ def build(self, sharedlib_only=False, model_only=False, ninja=False, dry_run=Fal start_time = time.time() try: - self.build_phase(sharedlib_only=(phase_name==SHAREDLIB_BUILD_PHASE), - model_only=(phase_name==MODEL_BUILD_PHASE)) - except BaseException as e: # We want KeyboardInterrupts to generate FAIL status + self.build_phase( + sharedlib_only=(phase_name == SHAREDLIB_BUILD_PHASE), + model_only=(phase_name == MODEL_BUILD_PHASE), + ) + except BaseException as e: # We want KeyboardInterrupts to generate FAIL status success = False if isinstance(e, CIMEError): # Don't want to print stacktrace for a build failure since that # is not a CIME/infrastructure problem. 
excmsg = str(e) else: - excmsg = "Exception during build:\n{}\n{}".format(str(e), traceback.format_exc()) + excmsg = "Exception during build:\n{}\n{}".format( + str(e), traceback.format_exc() + ) append_testlog(excmsg, self._orig_caseroot) raise @@ -109,7 +142,11 @@ def build(self, sharedlib_only=False, model_only=False, ninja=False, dry_run=Fal finally: time_taken = time.time() - start_time with self._test_status: - self._test_status.set_status(phase_name, TEST_PASS_STATUS if success else TEST_FAIL_STATUS, comments=("time={:d}".format(int(time_taken)))) + self._test_status.set_status( + phase_name, + TEST_PASS_STATUS if success else TEST_FAIL_STATUS, + comments=("time={:d}".format(int(time_taken))), + ) return success @@ -127,11 +164,17 @@ def build_indv(self, sharedlib_only=False, model_only=False): """ Perform an individual build """ - model = self._case.get_value('MODEL') - build.case_build(self._caseroot, case=self._case, - sharedlib_only=sharedlib_only, model_only=model_only, - save_build_provenance=not model=='cesm', - ninja=self._ninja, dry_run=self._dry_run, separate_builds=self._user_separate_builds) + model = self._case.get_value("MODEL") + build.case_build( + self._caseroot, + case=self._case, + sharedlib_only=sharedlib_only, + model_only=model_only, + save_build_provenance=not model == "cesm", + ninja=self._ninja, + dry_run=self._dry_run, + separate_builds=self._user_separate_builds, + ) logger.info("build_indv complete") def clean_build(self, comps=None): @@ -170,26 +213,30 @@ def run(self, skip_pnl=False): if self._case.get_value("COMPARE_BASELINE"): if do_baseline_ops: - self._phase_modifying_call(BASELINE_PHASE, self._compare_baseline) - self._phase_modifying_call(MEMCOMP_PHASE, self._compare_memory) - self._phase_modifying_call(THROUGHPUT_PHASE, self._compare_throughput) + self._phase_modifying_call(BASELINE_PHASE, self._compare_baseline) + self._phase_modifying_call(MEMCOMP_PHASE, self._compare_memory) + self._phase_modifying_call( + THROUGHPUT_PHASE, self._compare_throughput + ) else: with self._test_status: - self._test_status.set_status(BASELINE_PHASE, TEST_PEND_STATUS) - self._test_status.set_status(MEMCOMP_PHASE, TEST_PEND_STATUS) + self._test_status.set_status(BASELINE_PHASE, TEST_PEND_STATUS) + self._test_status.set_status(MEMCOMP_PHASE, TEST_PEND_STATUS) self._test_status.set_status(THROUGHPUT_PHASE, TEST_PEND_STATUS) - self._phase_modifying_call(MEMLEAK_PHASE, self._check_for_memleak) + self._phase_modifying_call(MEMLEAK_PHASE, self._check_for_memleak) self._phase_modifying_call(STARCHIVE_PHASE, self._st_archive_case_test) - except BaseException as e: # We want KeyboardInterrupts to generate FAIL status + except BaseException as e: # We want KeyboardInterrupts to generate FAIL status success = False if isinstance(e, CIMEError): # Don't want to print stacktrace for a model failure since that # is not a CIME/infrastructure problem. 
excmsg = str(e) else: - excmsg = "Exception during run:\n{}\n{}".format(str(e), traceback.format_exc()) + excmsg = "Exception during run:\n{}\n{}".format( + str(e), traceback.format_exc() + ) append_testlog(excmsg, self._orig_caseroot) raise @@ -199,41 +246,82 @@ def run(self, skip_pnl=False): time_taken = time.time() - start_time status = TEST_PASS_STATUS if success else TEST_FAIL_STATUS with self._test_status: - self._test_status.set_status(RUN_PHASE, status, comments=("time={:d}".format(int(time_taken)))) + self._test_status.set_status( + RUN_PHASE, status, comments=("time={:d}".format(int(time_taken))) + ) if get_model() == "e3sm": # If run phase worked, remember the time it took in order to improve later walltime ests baseline_root = self._case.get_value("BASELINE_ROOT") if success: srcroot = self._case.get_value("SRCROOT") - save_test_time(baseline_root, self._casebaseid, time_taken, get_current_commit(repo=srcroot)) + save_test_time( + baseline_root, + self._casebaseid, + time_taken, + get_current_commit(repo=srcroot), + ) # If overall things did not pass, offer the user some insight into what might have broken things - overall_status = self._test_status.get_overall_test_status(ignore_namelists=True)[0] + overall_status = self._test_status.get_overall_test_status( + ignore_namelists=True + )[0] if overall_status != TEST_PASS_STATUS: srcroot = self._case.get_value("SRCROOT") - worked_before, last_pass, last_fail_transition = \ - get_test_success(baseline_root, srcroot, self._casebaseid) + worked_before, last_pass, last_fail_transition = get_test_success( + baseline_root, srcroot, self._casebaseid + ) if worked_before: if last_pass is not None: # commits between last_pass and now broke things - stat, out, err = run_cmd("git rev-list --first-parent {}..{}".format(last_pass, "HEAD"), from_dir=srcroot) + stat, out, err = run_cmd( + "git rev-list --first-parent {}..{}".format( + last_pass, "HEAD" + ), + from_dir=srcroot, + ) if stat == 0: - append_testlog("NEW FAIL: Potentially broken merges:\n{}".format(out), self._orig_caseroot) + append_testlog( + "NEW FAIL: Potentially broken merges:\n{}".format( + out + ), + self._orig_caseroot, + ) else: - logger.warning("Unable to list potentially broken merges: {}\n{}".format(out, err)) + logger.warning( + "Unable to list potentially broken merges: {}\n{}".format( + out, err + ) + ) else: if last_pass is not None and last_fail_transition is not None: # commits between last_pass and last_fail_transition broke things - stat, out, err = run_cmd("git rev-list --first-parent {}..{}".format(last_pass, last_fail_transition), from_dir=srcroot) + stat, out, err = run_cmd( + "git rev-list --first-parent {}..{}".format( + last_pass, last_fail_transition + ), + from_dir=srcroot, + ) if stat == 0: - append_testlog("OLD FAIL: Potentially broken merges:\n{}".format(out), self._orig_caseroot) + append_testlog( + "OLD FAIL: Potentially broken merges:\n{}".format( + out + ), + self._orig_caseroot, + ) else: - logger.warning("Unable to list potentially broken merges: {}\n{}".format(out, err)) + logger.warning( + "Unable to list potentially broken merges: {}\n{}".format( + out, err + ) + ) if get_model() == "cesm" and self._case.get_value("GENERATE_BASELINE"): - baseline_dir = os.path.join(self._case.get_value("BASELINE_ROOT"), self._case.get_value("BASEGEN_CASE")) + baseline_dir = os.path.join( + self._case.get_value("BASELINE_ROOT"), + self._case.get_value("BASEGEN_CASE"), + ) generate_teststatus(self._caseroot, baseline_dir) # We return success if the run phase 
worked; memleaks, diffs will NOT be taken into account @@ -267,26 +355,26 @@ def run_indv(self, suffix="base", st_archive=False, submit_resubmits=None): """ Perform an individual run. Raises an EXCEPTION on fail. """ - stop_n = self._case.get_value("STOP_N") + stop_n = self._case.get_value("STOP_N") stop_option = self._case.get_value("STOP_OPTION") - run_type = self._case.get_value("RUN_TYPE") - rundir = self._case.get_value("RUNDIR") + run_type = self._case.get_value("RUN_TYPE") + rundir = self._case.get_value("RUNDIR") if submit_resubmits is None: do_resub = self._case.get_value("BATCH_SYSTEM") != "none" else: do_resub = submit_resubmits # remove any cprnc output leftover from previous runs - for compout in glob.iglob(os.path.join(rundir,"*.cprnc.out")): + for compout in glob.iglob(os.path.join(rundir, "*.cprnc.out")): os.remove(compout) - infostr = "doing an {:d} {} {} test".format(stop_n, stop_option, run_type) + infostr = "doing an {:d} {} {} test".format(stop_n, stop_option, run_type) rest_option = self._case.get_value("REST_OPTION") if rest_option == "none" or rest_option == "never": infostr += ", no restarts written" else: - rest_n = self._case.get_value("REST_N") + rest_n = self._case.get_value("REST_N") infostr += ", with restarts every {:d} {}".format(rest_n, rest_option) logger.info(infostr) @@ -304,26 +392,28 @@ def run_indv(self, suffix="base", st_archive=False, submit_resubmits=None): def _coupler_log_indicates_run_complete(self): newestcpllogfiles = self._get_latest_cpl_logs() - logger.debug("Latest Coupler log file(s) {}" .format(newestcpllogfiles)) + logger.debug("Latest Coupler log file(s) {}".format(newestcpllogfiles)) # Exception is raised if the file is not compressed allgood = len(newestcpllogfiles) for cpllog in newestcpllogfiles: try: - if CIME.six.b("SUCCESSFUL TERMINATION") in gzip.open(cpllog, 'rb').read(): + if CIME.six.b("SUCCESSFUL TERMINATION") in gzip.open(cpllog, "rb").read(): allgood = allgood - 1 - except Exception as e: # Probably want to be more specific here + except Exception as e: # Probably want to be more specific here msg = e.__str__() - logger.info("{} is not compressed, assuming run failed {}".format(cpllog, msg)) + logger.info( + "{} is not compressed, assuming run failed {}".format(cpllog, msg) + ) - return allgood==0 + return allgood == 0 def _component_compare_copy(self, suffix): comments = copy_histfiles(self._case, suffix) append_testlog(comments, self._orig_caseroot) def _log_cprnc_output_tail(self, filename_pattern, prepend=None): - rundir = self._case.get_value('RUNDIR') + rundir = self._case.get_value("RUNDIR") glob_pattern = "{}/{}".format(rundir, filename_pattern) @@ -332,7 +422,7 @@ def _log_cprnc_output_tail(self, filename_pattern, prepend=None): for output in cprnc_logs: with open(output) as fin: cprnc_log_tail = fin.readlines()[-20:] - + cprnc_log_tail.insert(0, "tail -n20 {}\n\n".format(output)) if prepend is not None: @@ -340,9 +430,9 @@ def _log_cprnc_output_tail(self, filename_pattern, prepend=None): append_testlog("".join(cprnc_log_tail), self._orig_caseroot) - def _component_compare_test(self, suffix1, suffix2, - success_change=False, - ignore_fieldlist_diffs=False): + def _component_compare_test( + self, suffix1, suffix2, success_change=False, ignore_fieldlist_diffs=False + ): """ Return value is not generally checked, but is provided in case a custom run case needs indirection based on success. 
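The comparison below files its result under a phase name built from both suffixes. A hypothetical, self-contained illustration of that naming and of the `success_change` inversion, assuming COMPARE_PHASE is the string "COMPARE" from CIME.test_status:

```python
COMPARE_PHASE = "COMPARE"  # assumed value from CIME.test_status

def compare_outcome(suffix1, suffix2, identical, success_change=False):
    # success_change=True means the test PASSes when the two runs differ
    success = (not identical) if success_change else identical
    phase_name = "{}_{}_{}".format(COMPARE_PHASE, suffix1, suffix2)
    return phase_name, "PASS" if success else "FAIL"

print(compare_outcome("base", "rest", identical=True))
# ('COMPARE_base_rest', 'PASS')
print(compare_outcome("base", "modified", identical=True, success_change=True))
# ('COMPARE_base_modified', 'FAIL')
```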
@@ -352,8 +442,9 @@ def _component_compare_test(self, suffix1, suffix2, diagnostic fields that are missing from the other case), treat the two cases as identical. """ - success, comments = self._do_compare_test(suffix1, suffix2, - ignore_fieldlist_diffs=ignore_fieldlist_diffs) + success, comments = self._do_compare_test( + suffix1, suffix2, ignore_fieldlist_diffs=ignore_fieldlist_diffs + ) if success_change: success = not success @@ -366,7 +457,9 @@ def _component_compare_test(self, suffix1, suffix2, status = TEST_PASS_STATUS if success else TEST_FAIL_STATUS with self._test_status: - self._test_status.set_status("{}_{}_{}".format(COMPARE_PHASE, suffix1, suffix2), status) + self._test_status.set_status( + "{}_{}_{}".format(COMPARE_PHASE, suffix1, suffix2), status + ) return success def _do_compare_test(self, suffix1, suffix2, ignore_fieldlist_diffs=False): @@ -374,8 +467,9 @@ def _do_compare_test(self, suffix1, suffix2, ignore_fieldlist_diffs=False): Wraps the call to compare_test to facilitate replacement in unit tests """ - return compare_test(self._case, suffix1, suffix2, - ignore_fieldlist_diffs=ignore_fieldlist_diffs) + return compare_test( + self._case, suffix1, suffix2, ignore_fieldlist_diffs=ignore_fieldlist_diffs + ) def _st_archive_case_test(self): result = self._case.test_env_archive() @@ -391,15 +485,17 @@ def _get_mem_usage(self, cpllog): increases. """ memlist = [] - meminfo = re.compile(r".*model date =\s+(\w+).*memory =\s+(\d+\.?\d+).*highwater") + meminfo = re.compile( + r".*model date =\s+(\w+).*memory =\s+(\d+\.?\d+).*highwater" + ) if cpllog is not None and os.path.isfile(cpllog): - if '.gz' == cpllog[-3:]: + if ".gz" == cpllog[-3:]: fopen = gzip.open else: fopen = open with fopen(cpllog, "rb") as f: for line in f: - m = meminfo.match(line.decode('utf-8')) + m = meminfo.match(line.decode("utf-8")) if m: memlist.append((float(m.group(1)), float(m.group(2)))) # Remove the last mem record, it's sometimes artificially high @@ -414,8 +510,8 @@ def _get_throughput(self, cpllog): """ if cpllog is not None and os.path.isfile(cpllog): with gzip.open(cpllog, "rb") as f: - cpltext = f.read().decode('utf-8') - m = re.search(r"# simulated years / cmp-day =\s+(\d+\.\d+)\s",cpltext) + cpltext = f.read().decode("utf-8") + m = re.search(r"# simulated years / cmp-day =\s+(\d+\.\d+)\s", cpltext) if m: return float(m.group(1)) return None @@ -427,15 +523,19 @@ def _phase_modifying_call(self, phase, function): """ try: function() - except Exception as e: # Do NOT want to catch KeyboardInterrupt + except Exception as e: # Do NOT want to catch KeyboardInterrupt msg = e.__str__() - excmsg = "Exception during {}:\n{}\n{}".format(phase, msg, traceback.format_exc()) + excmsg = "Exception during {}:\n{}\n{}".format( + phase, msg, traceback.format_exc() + ) logger.warning(excmsg) append_testlog(excmsg, self._orig_caseroot) with self._test_status: - self._test_status.set_status(phase, TEST_FAIL_STATUS, comments="exception") + self._test_status.set_status( + phase, TEST_FAIL_STATUS, comments="exception" + ) def _check_for_memleak(self): """ @@ -447,28 +547,42 @@ def _check_for_memleak(self): for cpllog in latestcpllogs: memlist = self._get_mem_usage(cpllog) - if len(memlist)<3: - self._test_status.set_status(MEMLEAK_PHASE, TEST_PASS_STATUS, comments="insuffiencient data for memleak test") + if len(memlist) < 3: + self._test_status.set_status( + MEMLEAK_PHASE, + TEST_PASS_STATUS, + comments="insufficient data for memleak test", + ) else: finaldate = int(memlist[-1][0]) - originaldate = int(memlist[1][0]) # skip first day mem record, it can be too low while initializing + originaldate = int( + memlist[1][0] + ) # skip first day mem record, it can be too low while initializing finalmem = float(memlist[-1][1]) originalmem = float(memlist[1][1]) memdiff = -1 if originalmem > 0: - memdiff = (finalmem - originalmem)/originalmem + memdiff = (finalmem - originalmem) / originalmem tolerance = self._case.get_value("TEST_MEMLEAK_TOLERANCE") if tolerance is None: tolerance = 0.1 expect(tolerance > 0.0, "Bad value for memleak tolerance in test") if memdiff < 0: - self._test_status.set_status(MEMLEAK_PHASE, TEST_PASS_STATUS, comments="insuffiencient data for memleak test") + self._test_status.set_status( + MEMLEAK_PHASE, + TEST_PASS_STATUS, + comments="data for memleak test is insufficient", + ) elif memdiff < tolerance: self._test_status.set_status(MEMLEAK_PHASE, TEST_PASS_STATUS) else: - comment = "memleak detected, memory went from {:f} to {:f} in {:d} days".format(originalmem, finalmem, finaldate-originaldate) + comment = "memleak detected, memory went from {:f} to {:f} in {:d} days".format( + originalmem, finalmem, finaldate - originaldate + ) append_testlog(comment, self._orig_caseroot) - self._test_status.set_status(MEMLEAK_PHASE, TEST_FAIL_STATUS, comments=comment) + self._test_status.set_status( + MEMLEAK_PHASE, TEST_FAIL_STATUS, comments=comment + ) def compare_env_run(self, expected=None): """ @@ -476,14 +590,22 @@ def compare_env_run(self, expected=None): """ components = self._case.get_values("COMP_CLASSES") f1obj = self._case.get_env("run") - f2obj = EnvRun(self._caseroot, os.path.join(LOCKED_DIR, "env_run.orig.xml"), components=components) + f2obj = EnvRun( + self._caseroot, + os.path.join(LOCKED_DIR, "env_run.orig.xml"), + components=components, + ) diffs = f1obj.compare_xml(f2obj) for key in diffs.keys(): if expected is not None and key in expected: logging.warning(" Resetting {} for test".format(key)) f1obj.set_value(key, f2obj.get_value(key, resolved=False)) else: - print("WARNING: Found difference in test {}: case: {} original value {}".format(key, diffs[key][0], diffs[key][1])) + print( + "WARNING: Found difference in test {}: case: {} original value {}".format( + key, diffs[key][0], diffs[key][1] + ) + ) return False return True @@ -492,12 +614,14 @@ def _get_latest_cpl_logs(self): find and return the latest cpl log file in the run directory """ coupler_log_path = self._case.get_value("RUNDIR") - cpllogs = glob.glob(os.path.join(coupler_log_path, '{}*.log.*'.format(self._cpllog))) + cpllogs = glob.glob( + os.path.join(coupler_log_path, "{}*.log.*".format(self._cpllog)) + ) lastcpllogs = [] if cpllogs: lastcpllogs.append(max(cpllogs, key=os.path.getctime)) basename = os.path.basename(lastcpllogs[0]) - suffix = basename.split('.',1)[1] + suffix = basename.split(".", 1)[1] for log in cpllogs: if log in lastcpllogs: continue @@ -511,39 +635,55 @@ def _compare_memory(self): with self._test_status: # compare memory usage to baseline baseline_name = self._case.get_value("BASECMP_CASE") - basecmp_dir = os.path.join(self._case.get_value("BASELINE_ROOT"), baseline_name) + basecmp_dir = os.path.join( + self._case.get_value("BASELINE_ROOT"), baseline_name + ) newestcpllogfiles = self._get_latest_cpl_logs() if len(newestcpllogfiles) > 0: memlist = self._get_mem_usage(newestcpllogfiles[0]) for cpllog in newestcpllogfiles: - m = re.search(r"/({}.*.log).*.gz".format(self._cpllog),cpllog) + m = re.search(r"/({}.*.log).*.gz".format(self._cpllog), cpllog) if m is not None: -
baselog = os.path.join(basecmp_dir, m.group(1))+".gz" + baselog = os.path.join(basecmp_dir, m.group(1)) + ".gz" if baselog is None or not os.path.isfile(baselog): # for backward compatibility - baselog = os.path.join(basecmp_dir, self._cpllog+".log") + baselog = os.path.join(basecmp_dir, self._cpllog + ".log") if os.path.isfile(baselog) and len(memlist) > 3: blmem = self._get_mem_usage(baselog) blmem = 0 if blmem == [] else blmem[-1][1] curmem = memlist[-1][1] - diff = 0.0 if blmem == 0 else (curmem-blmem)/blmem - if diff < 0.1 and self._test_status.get_status(MEMCOMP_PHASE) is None: + diff = 0.0 if blmem == 0 else (curmem - blmem) / blmem + tolerance = self._case.get_value("TEST_MEMLEAK_TOLERANCE") + if tolerance is None: + tolerance = 0.1 + if ( + diff < tolerance + and self._test_status.get_status(MEMCOMP_PHASE) is None + ): self._test_status.set_status(MEMCOMP_PHASE, TEST_PASS_STATUS) - elif self._test_status.get_status(MEMCOMP_PHASE) != TEST_FAIL_STATUS: - comment = "Error: Memory usage increase > 10% from baseline" - self._test_status.set_status(MEMCOMP_PHASE, TEST_FAIL_STATUS, comments=comment) + elif ( + self._test_status.get_status(MEMCOMP_PHASE) != TEST_FAIL_STATUS + ): + comment = "Error: Memory usage increase >{:d}% from baseline's {:f} to {:f}".format( + int(tolerance * 100), blmem, curmem + ) + self._test_status.set_status( + MEMCOMP_PHASE, TEST_FAIL_STATUS, comments=comment + ) append_testlog(comment, self._orig_caseroot) def _compare_throughput(self): with self._test_status: # compare memory usage to baseline baseline_name = self._case.get_value("BASECMP_CASE") - basecmp_dir = os.path.join(self._case.get_value("BASELINE_ROOT"), baseline_name) + basecmp_dir = os.path.join( + self._case.get_value("BASELINE_ROOT"), baseline_name + ) newestcpllogfiles = self._get_latest_cpl_logs() for cpllog in newestcpllogfiles: m = re.search(r"/({}.*.log).*.gz".format(self._cpllog), cpllog) if m is not None: - baselog = os.path.join(basecmp_dir, m.group(1))+".gz" + baselog = os.path.join(basecmp_dir, m.group(1)) + ".gz" if baselog is None or not os.path.isfile(baselog): # for backward compatibility baselog = os.path.join(basecmp_dir, self._cpllog) @@ -552,20 +692,37 @@ def _compare_throughput(self): # compare throughput to baseline current = self._get_throughput(cpllog) baseline = self._get_throughput(baselog) - #comparing ypd so bigger is better + # comparing ypd so bigger is better if baseline is not None and current is not None: - diff = (baseline - current)/baseline + diff = (baseline - current) / baseline tolerance = self._case.get_value("TEST_TPUT_TOLERANCE") if tolerance is None: tolerance = 0.1 - expect(tolerance > 0.0, "Bad value for throughput tolerance in test") - comment = "TPUTCOMP: Computation time changed by {:.2f}% relative to baseline".format(diff*100) + expect( + tolerance > 0.0, + "Bad value for throughput tolerance in test", + ) + comment = "TPUTCOMP: Computation time changed by {:.2f}% relative to baseline".format( + diff * 100 + ) append_testlog(comment, self._orig_caseroot) - if diff < tolerance and self._test_status.get_status(THROUGHPUT_PHASE) is None: - self._test_status.set_status(THROUGHPUT_PHASE, TEST_PASS_STATUS) - elif self._test_status.get_status(THROUGHPUT_PHASE) != TEST_FAIL_STATUS: - comment = "Error: TPUTCOMP: Computation time increase > {:d}% from baseline".format(int(tolerance*100)) - self._test_status.set_status(THROUGHPUT_PHASE, TEST_FAIL_STATUS, comments=comment) + if ( + diff < tolerance + and self._test_status.get_status(THROUGHPUT_PHASE) is None + 
): + self._test_status.set_status( + THROUGHPUT_PHASE, TEST_PASS_STATUS + ) + elif ( + self._test_status.get_status(THROUGHPUT_PHASE) + != TEST_FAIL_STATUS + ): + comment = "Error: TPUTCOMP: Computation time increase > {:d}% from baseline".format( + int(tolerance * 100) + ) + self._test_status.set_status( + THROUGHPUT_PHASE, TEST_FAIL_STATUS, comments=comment + ) append_testlog(comment, self._orig_caseroot) def _compare_baseline(self): @@ -584,7 +741,9 @@ def _compare_baseline(self): status = TEST_PASS_STATUS if success else TEST_FAIL_STATUS baseline_name = self._case.get_value("BASECMP_CASE") - ts_comments = os.path.dirname(baseline_name) + ": " + get_ts_synopsis(comments) + ts_comments = ( + os.path.dirname(baseline_name) + ": " + get_ts_synopsis(comments) + ) self._test_status.set_status(BASELINE_PHASE, status, comments=ts_comments) def _generate_baseline(self): @@ -597,18 +756,27 @@ def _generate_baseline(self): append_testlog(comments, self._orig_caseroot) status = TEST_PASS_STATUS if success else TEST_FAIL_STATUS baseline_name = self._case.get_value("BASEGEN_CASE") - self._test_status.set_status(GENERATE_PHASE, status, comments=os.path.dirname(baseline_name)) - basegen_dir = os.path.join(self._case.get_value("BASELINE_ROOT"), self._case.get_value("BASEGEN_CASE")) + self._test_status.set_status( + GENERATE_PHASE, status, comments=os.path.dirname(baseline_name) + ) + basegen_dir = os.path.join( + self._case.get_value("BASELINE_ROOT"), + self._case.get_value("BASEGEN_CASE"), + ) # copy latest cpl log to baseline # drop the date so that the name is generic newestcpllogfiles = self._get_latest_cpl_logs() with SharedArea(): for cpllog in newestcpllogfiles: - m = re.search(r"/({}.*.log).*.gz".format(self._cpllog),cpllog) + m = re.search(r"/({}.*.log).*.gz".format(self._cpllog), cpllog) if m is not None: - baselog = os.path.join(basegen_dir, m.group(1))+".gz" - safe_copy(cpllog, - os.path.join(basegen_dir,baselog), preserve_meta=False) + baselog = os.path.join(basegen_dir, m.group(1)) + ".gz" + safe_copy( + cpllog, + os.path.join(basegen_dir, baselog), + preserve_meta=False, + ) + class FakeTest(SystemTestsCommon): """ @@ -618,8 +786,9 @@ class FakeTest(SystemTestsCommon): have names beginnig with "TEST" this is so that the find_system_test in utils.py will work with these classes. 
""" + def __init__(self, case, expected=None): - super(FakeTest, self).__init__(case,expected=expected) + super(FakeTest, self).__init__(case, expected=expected) self._script = None self._requires_exe = False self._original_exe = self._case.get_value("run_exe") @@ -635,14 +804,16 @@ def _resetup_case(self, phase, reset=False): def build_phase(self, sharedlib_only=False, model_only=False): if self._requires_exe: - super(FakeTest, self).build_phase(sharedlib_only=sharedlib_only, model_only=model_only) + super(FakeTest, self).build_phase( + sharedlib_only=sharedlib_only, model_only=model_only + ) if not sharedlib_only: exeroot = self._case.get_value("EXEROOT") modelexe = os.path.join(exeroot, "fake.exe") - self._case.set_value("run_exe",modelexe) + self._case.set_value("run_exe", modelexe) - with open(modelexe, 'w') as f: + with open(modelexe, "w") as f: f.write("#!/bin/bash\n") f.write(self._script) @@ -651,31 +822,41 @@ def build_phase(self, sharedlib_only=False, model_only=False): if not self._requires_exe: build.post_build(self._case, [], build_complete=True) else: - expect(os.path.exists(modelexe),"Could not find expected file {}".format(modelexe)) - logger.info("FakeTest build_phase complete {} {}".format(modelexe, self._requires_exe)) + expect( + os.path.exists(modelexe), + "Could not find expected file {}".format(modelexe), + ) + logger.info( + "FakeTest build_phase complete {} {}".format( + modelexe, self._requires_exe + ) + ) def run_indv(self, suffix="base", st_archive=False, submit_resubmits=None): mpilib = self._case.get_value("MPILIB") # This flag is needed by mpt to run a script under mpiexec if mpilib == "mpt": os.environ["MPI_SHEPHERD"] = "true" - super(FakeTest, self).run_indv(suffix, st_archive=st_archive, submit_resubmits=submit_resubmits) + super(FakeTest, self).run_indv( + suffix, st_archive=st_archive, submit_resubmits=submit_resubmits + ) -class TESTRUNPASS(FakeTest): +class TESTRUNPASS(FakeTest): def build_phase(self, sharedlib_only=False, model_only=False): rundir = self._case.get_value("RUNDIR") cimeroot = self._case.get_value("CIMEROOT") case = self._case.get_value("CASE") - script = \ -""" + script = """ echo Insta pass echo SUCCESSFUL TERMINATION > {rundir}/{log}.log.$LID cp {root}/scripts/tests/cpl.hi1.nc.test {rundir}/{case}.cpl.hi.0.nc -""".format(rundir=rundir, log=self._cpllog, root=cimeroot, case=case) +""".format( + rundir=rundir, log=self._cpllog, root=cimeroot, case=case + ) self._set_script(script) - FakeTest.build_phase(self, - sharedlib_only=sharedlib_only, model_only=model_only) + FakeTest.build_phase(self, sharedlib_only=sharedlib_only, model_only=model_only) + class TESTRUNDIFF(FakeTest): """ @@ -685,12 +866,12 @@ class TESTRUNDIFF(FakeTest): 3) Re-run the same test from step 1 but do a baseline comparison instead of generation 3.a) This should give you a DIFF """ + def build_phase(self, sharedlib_only=False, model_only=False): rundir = self._case.get_value("RUNDIR") cimeroot = self._case.get_value("CIMEROOT") case = self._case.get_value("CASE") - script = \ -""" + script = """ echo Insta pass echo SUCCESSFUL TERMINATION > {rundir}/{log}.log.$LID if [ -z "$TESTRUNDIFF_ALTERNATE" ]; then @@ -698,43 +879,46 @@ def build_phase(self, sharedlib_only=False, model_only=False): else cp {root}/scripts/tests/cpl.hi2.nc.test {rundir}/{case}.cpl.hi.0.nc fi -""".format(rundir=rundir, log=self._cpllog, root=cimeroot, case=case) +""".format( + rundir=rundir, log=self._cpllog, root=cimeroot, case=case + ) self._set_script(script) - FakeTest.build_phase(self, - 
sharedlib_only=sharedlib_only, model_only=model_only) + FakeTest.build_phase(self, sharedlib_only=sharedlib_only, model_only=model_only) + class TESTRUNDIFFRESUBMIT(TESTRUNDIFF): pass -class TESTTESTDIFF(FakeTest): +class TESTTESTDIFF(FakeTest): def build_phase(self, sharedlib_only=False, model_only=False): rundir = self._case.get_value("RUNDIR") cimeroot = self._case.get_value("CIMEROOT") case = self._case.get_value("CASE") - script = \ -""" + script = """ echo Insta pass echo SUCCESSFUL TERMINATION > {rundir}/{log}.log.$LID cp {root}/scripts/tests/cpl.hi1.nc.test {rundir}/{case}.cpl.hi.0.nc cp {root}/scripts/tests/cpl.hi2.nc.test {rundir}/{case}.cpl.hi.0.nc.rest -""".format(rundir=rundir, log=self._cpllog, root=cimeroot, case=case) +""".format( + rundir=rundir, log=self._cpllog, root=cimeroot, case=case + ) self._set_script(script) - super(TESTTESTDIFF, self).build_phase(sharedlib_only=sharedlib_only, - model_only=model_only) + super(TESTTESTDIFF, self).build_phase( + sharedlib_only=sharedlib_only, model_only=model_only + ) def run_phase(self): super(TESTTESTDIFF, self).run_phase() self._component_compare_test("base", "rest") -class TESTRUNFAIL(FakeTest): +class TESTRUNFAIL(FakeTest): def build_phase(self, sharedlib_only=False, model_only=False): rundir = self._case.get_value("RUNDIR") cimeroot = self._case.get_value("CIMEROOT") case = self._case.get_value("CASE") - script = \ -""" + script = """ if [ -z "$TESTRUNFAIL_PASS" ]; then echo Insta fail echo model failed > {rundir}/{log}.log.$LID @@ -744,10 +928,12 @@ def build_phase(self, sharedlib_only=False, model_only=False): echo SUCCESSFUL TERMINATION > {rundir}/{log}.log.$LID cp {root}/scripts/tests/cpl.hi1.nc.test {rundir}/{case}.cpl.hi.0.nc fi -""".format(rundir=rundir, log=self._cpllog, root=cimeroot, case=case) +""".format( + rundir=rundir, log=self._cpllog, root=cimeroot, case=case + ) self._set_script(script) - FakeTest.build_phase(self, - sharedlib_only=sharedlib_only, model_only=model_only) + FakeTest.build_phase(self, sharedlib_only=sharedlib_only, model_only=model_only) + class TESTRUNFAILRESET(TESTRUNFAIL): """This fake test can fail for two reasons: @@ -765,56 +951,60 @@ def run_indv(self, suffix="base", st_archive=False, submit_resubmits=None): env_test = EnvTest(self._get_caseroot()) stop_n = self._case.get_value("STOP_N") stop_n_test = int(env_test.get_test_parameter("STOP_N")) - expect(stop_n == stop_n_test, "Expect STOP_N to match original ({} != {})".format( - stop_n, stop_n_test)) + expect( + stop_n == stop_n_test, + "Expect STOP_N to match original ({} != {})".format(stop_n, stop_n_test), + ) # Now modify STOP_N so that an error will be generated if it isn't reset properly # upon a rerun self._case.set_value("STOP_N", stop_n + 1) - super(TESTRUNFAILRESET, self).run_indv(suffix=suffix, - st_archive=st_archive, - submit_resubmits=submit_resubmits) + super(TESTRUNFAILRESET, self).run_indv( + suffix=suffix, st_archive=st_archive, submit_resubmits=submit_resubmits + ) -class TESTRUNFAILEXC(TESTRUNPASS): +class TESTRUNFAILEXC(TESTRUNPASS): def run_phase(self): raise RuntimeError("Exception from run_phase") -class TESTRUNSTARCFAIL(TESTRUNPASS): +class TESTRUNSTARCFAIL(TESTRUNPASS): def _st_archive_case_test(self): raise RuntimeError("Exception from st archive") -class TESTBUILDFAIL(TESTRUNPASS): +class TESTBUILDFAIL(TESTRUNPASS): def build_phase(self, sharedlib_only=False, model_only=False): if "TESTBUILDFAIL_PASS" in os.environ: TESTRUNPASS.build_phase(self, sharedlib_only, model_only) else: - if (not sharedlib_only): + if 
not sharedlib_only: blddir = self._case.get_value("EXEROOT") - bldlog = os.path.join(blddir, "{}.bldlog.{}".format(get_model(), get_timestamp("%y%m%d-%H%M%S"))) + bldlog = os.path.join( + blddir, + "{}.bldlog.{}".format(get_model(), get_timestamp("%y%m%d-%H%M%S")), + ) with open(bldlog, "w") as fd: fd.write("BUILD FAIL: Intentional fail for testing infrastructure") expect(False, "BUILD FAIL: Intentional fail for testing infrastructure") -class TESTBUILDFAILEXC(FakeTest): +class TESTBUILDFAILEXC(FakeTest): def __init__(self, case): FakeTest.__init__(self, case) raise RuntimeError("Exception from init") -class TESTRUNUSERXMLCHANGE(FakeTest): +class TESTRUNUSERXMLCHANGE(FakeTest): def build_phase(self, sharedlib_only=False, model_only=False): caseroot = self._case.get_value("CASEROOT") modelexe = self._case.get_value("run_exe") new_stop_n = self._case.get_value("STOP_N") * 2 - script = \ -""" + script = """ cd {caseroot} ./xmlchange --file env_test.xml STOP_N={stopn} ./xmlchange RESUBMIT=1,STOP_N={stopn},CONTINUE_RUN=FALSE,RESUBMIT_SETS_CONTINUE_RUN=FALSE @@ -823,59 +1013,65 @@ def build_phase(self, sharedlib_only=False, model_only=False): cd {caseroot} ./xmlchange run_exe={modelexe} sleep 5 -""".format(originalexe=self._original_exe, caseroot=caseroot, modelexe=modelexe, stopn=str(new_stop_n)) +""".format( + originalexe=self._original_exe, + caseroot=caseroot, + modelexe=modelexe, + stopn=str(new_stop_n), + ) self._set_script(script, requires_exe=True) - FakeTest.build_phase(self, - sharedlib_only=sharedlib_only, model_only=model_only) + FakeTest.build_phase(self, sharedlib_only=sharedlib_only, model_only=model_only) def run_phase(self): self.run_indv(submit_resubmits=True) -class TESTRUNSLOWPASS(FakeTest): +class TESTRUNSLOWPASS(FakeTest): def build_phase(self, sharedlib_only=False, model_only=False): rundir = self._case.get_value("RUNDIR") cimeroot = self._case.get_value("CIMEROOT") case = self._case.get_value("CASE") - script = \ -""" + script = """ sleep 300 echo Slow pass echo SUCCESSFUL TERMINATION > {rundir}/{log}.log.$LID cp {root}/scripts/tests/cpl.hi1.nc.test {rundir}/{case}.cpl.hi.0.nc -""".format(rundir=rundir, log=self._cpllog, root=cimeroot, case=case) +""".format( + rundir=rundir, log=self._cpllog, root=cimeroot, case=case + ) self._set_script(script) - FakeTest.build_phase(self, - sharedlib_only=sharedlib_only, model_only=model_only) + FakeTest.build_phase(self, sharedlib_only=sharedlib_only, model_only=model_only) + class TESTMEMLEAKFAIL(FakeTest): def build_phase(self, sharedlib_only=False, model_only=False): rundir = self._case.get_value("RUNDIR") cimeroot = self._case.get_value("CIMEROOT") case = self._case.get_value("CASE") - testfile = os.path.join(cimeroot,"scripts","tests","cpl.log.failmemleak.gz") - script = \ -""" + testfile = os.path.join(cimeroot, "scripts", "tests", "cpl.log.failmemleak.gz") + script = """ echo Insta pass gunzip -c {testfile} > {rundir}/{log}.log.$LID cp {root}/scripts/tests/cpl.hi1.nc.test {rundir}/{case}.cpl.hi.0.nc -""".format(testfile=testfile, rundir=rundir, log=self._cpllog, root=cimeroot, case=case) +""".format( + testfile=testfile, rundir=rundir, log=self._cpllog, root=cimeroot, case=case + ) self._set_script(script) - FakeTest.build_phase(self, - sharedlib_only=sharedlib_only, model_only=model_only) + FakeTest.build_phase(self, sharedlib_only=sharedlib_only, model_only=model_only) + class TESTMEMLEAKPASS(FakeTest): def build_phase(self, sharedlib_only=False, model_only=False): rundir = self._case.get_value("RUNDIR") cimeroot = 
self._case.get_value("CIMEROOT") case = self._case.get_value("CASE") - testfile = os.path.join(cimeroot,"scripts","tests","cpl.log.passmemleak.gz") - script = \ -""" + testfile = os.path.join(cimeroot, "scripts", "tests", "cpl.log.passmemleak.gz") + script = """ echo Insta pass gunzip -c {testfile} > {rundir}/{log}.log.$LID cp {root}/scripts/tests/cpl.hi1.nc.test {rundir}/{case}.cpl.hi.0.nc -""".format(testfile=testfile, rundir=rundir, log=self._cpllog, root=cimeroot, case=case) +""".format( + testfile=testfile, rundir=rundir, log=self._cpllog, root=cimeroot, case=case + ) self._set_script(script) - FakeTest.build_phase(self, - sharedlib_only=sharedlib_only, model_only=model_only) + FakeTest.build_phase(self, sharedlib_only=sharedlib_only, model_only=model_only) diff --git a/CIME/SystemTests/system_tests_compare_two.py b/CIME/SystemTests/system_tests_compare_two.py index 1d41c9667a0..c7f4b70e28e 100644 --- a/CIME/SystemTests/system_tests_compare_two.py +++ b/CIME/SystemTests/system_tests_compare_two.py @@ -54,16 +54,18 @@ logger = logging.getLogger(__name__) -class SystemTestsCompareTwo(SystemTestsCommon): - def __init__(self, - case, - separate_builds = False, - run_two_suffix = 'test', - run_one_description = '', - run_two_description = '', - multisubmit = False, - ignore_fieldlist_diffs = False): +class SystemTestsCompareTwo(SystemTestsCommon): + def __init__( + self, + case, + separate_builds=False, + run_two_suffix="test", + run_one_description="", + run_two_description="", + multisubmit=False, + ignore_fieldlist_diffs=False, + ): """ Initialize a SystemTestsCompareTwo object. Individual test cases that inherit from SystemTestsCompareTwo MUST call this __init__ method. @@ -105,10 +107,12 @@ def __init__(self, # be set in the call to the constructor just like run_two_suffix # currently is. Or, if these tools are rewritten to work without any # suffix, then run_one_suffix can be removed entirely. 
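A small sketch of the suffix convention that comment describes, with an invented history-file name: run one's output is saved with the fixed `base` suffix, run two's with the caller-supplied suffix, and the compare step pairs them up.

```python
run_one_suffix = "base"            # fixed by the framework
run_two_suffix = "test".rstrip()   # as passed by the inheriting test

# The framework rejects identical suffixes, since the comparison would
# otherwise pair a file with itself (see the expect() call below).
assert run_two_suffix != run_one_suffix

hist = "mycase.cpl.hi.0001-01-01-00000.nc"   # invented file name
print("{}.{}".format(hist, run_one_suffix))  # mycase.cpl.hi.0001-01-01-00000.nc.base
print("{}.{}".format(hist, run_two_suffix))  # mycase.cpl.hi.0001-01-01-00000.nc.test
```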
- self._run_one_suffix = 'base' + self._run_one_suffix = "base" self._run_two_suffix = run_two_suffix.rstrip() - expect(self._run_two_suffix != self._run_one_suffix, - "ERROR: Must have different suffixes for run one and run two") + expect( + self._run_two_suffix != self._run_one_suffix, + "ERROR: Must have different suffixes for run one and run two", + ) self._run_one_description = run_one_description self._run_two_description = run_two_description @@ -125,7 +129,9 @@ def __init__(self, self._setup_cases_if_not_yet_done() - self._multisubmit = multisubmit and self._case1.get_value("BATCH_SYSTEM") != "none" + self._multisubmit = ( + multisubmit and self._case1.get_value("BATCH_SYSTEM") != "none" + ) # ======================================================================== # Methods that MUST be implemented by specific tests that inherit from this @@ -205,8 +211,9 @@ def build_phase(self, sharedlib_only=False, model_only=False): if get_model() != "e3sm": # We need to turn off this change for E3SM because it breaks # the MPAS build system - self._case2.set_value("SHAREDLIBROOT", - self._case1.get_value("SHAREDLIBROOT")) + self._case2.set_value( + "SHAREDLIBROOT", self._case1.get_value("SHAREDLIBROOT") + ) self.build_indv(sharedlib_only=sharedlib_only, model_only=model_only) else: @@ -216,11 +223,13 @@ def build_phase(self, sharedlib_only=False, model_only=False): # valid value for this build, update case2 to reflect this change for comp in self._case1.get_values("COMP_CLASSES"): comp_pio_typename = "{}_PIO_TYPENAME".format(comp) - self._case2.set_value(comp_pio_typename, self._case1.get_value(comp_pio_typename)) + self._case2.set_value( + comp_pio_typename, self._case1.get_value(comp_pio_typename) + ) # The following is needed when _case_two_setup has a case_setup call # despite sharing the build (e.g., to change NTHRDS) - self._case2.set_value("BUILD_COMPLETE",True) + self._case2.set_value("BUILD_COMPLETE", True) def run_phase(self, success_change=False): # pylint: disable=arguments-differ """ @@ -230,41 +239,44 @@ def run_phase(self, success_change=False): # pylint: disable=arguments-differ is_first_run = self._case1.get_value("IS_FIRST_RUN") compare_phase_name = "{}_{}_{}".format( - COMPARE_PHASE, - self._run_one_suffix, - self._run_two_suffix) + COMPARE_PHASE, self._run_one_suffix, self._run_two_suffix + ) # On a batch system with a multisubmit test "RESUBMIT" is used to track # which phase is being ran. By the end of the test it equals 0. If the - # the test fails in a way where the RUN_PHASE is PEND then "RESUBMIT" - # does not get reset to 1 on a rerun and the first phase is skiped - # causing the COMPARE_PHASE to fail. This ensures that "RESUBMIT" will + # test fails in a way where the RUN_PHASE is PEND then "RESUBMIT" + # does not get reset to 1 on a rerun and the first phase is skipped + # causing the COMPARE_PHASE to fail. This ensures that "RESUBMIT" will # get reset if the test state is not correct for a rerun.
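A condensed sketch of the rerun guard just described, with invented values; the real check appears in run_phase() below.

```python
is_first_run = True   # IS_FIRST_RUN, cleared by case_submit.py
multisubmit = True    # batch system present and the test submits twice
resubmit = 0          # RESUBMIT left at 0 by a failed earlier attempt

if is_first_run and multisubmit and resubmit == 0:
    # Without a re-setup, phase one would be skipped on the rerun and the
    # pending COMPARE phase would fail for lack of a "base" run.
    print("re-setup case so RESUBMIT returns to 1 before phase one")
```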
# NOTE: "IS_FIRST_RUN" is reset in "case_submit.py" - if (is_first_run and - self._multisubmit and - self._case1.get_value("RESUBMIT") == 0): + if ( + is_first_run + and self._multisubmit + and self._case1.get_value("RESUBMIT") == 0 + ): self._resetup_case(RUN_PHASE, reset=True) - first_phase = self._case1.get_value("RESUBMIT") == 1 # Only relevant for multi-submit tests + first_phase = ( + self._case1.get_value("RESUBMIT") == 1 + ) # Only relevant for multi-submit tests run_type = self._case1.get_value("RUN_TYPE") - logger.info("_multisubmit {} first phase {}".format(self._multisubmit, first_phase)) + logger.info( + "_multisubmit {} first phase {}".format(self._multisubmit, first_phase) + ) # First run if not self._multisubmit or first_phase: - logger.info('Doing first run: ' + self._run_one_description) + logger.info("Doing first run: " + self._run_one_description) # Add a PENDing compare phase so that we'll notice if the second part of compare two # doesn't run. with self._test_status: - self._test_status.set_status( - compare_phase_name, - TEST_PEND_STATUS) + self._test_status.set_status(compare_phase_name, TEST_PEND_STATUS) self._activate_case1() self._case_one_custom_prerun_action() - self.run_indv(suffix = self._run_one_suffix) + self.run_indv(suffix=self._run_one_suffix) self._case_one_custom_postrun_action() # Second run @@ -274,7 +286,7 @@ def run_phase(self, success_change=False): # pylint: disable=arguments-differ # not a with statement, so it's not in a writeable state, so we need to use a with # statement here to put it in a writeable state. with self._case2: - logger.info('Doing second run: ' + self._run_two_description) + logger.info("Doing second run: " + self._run_two_description) self._activate_case2() # This assures that case two namelists are populated self._skip_pnl = False @@ -283,15 +295,18 @@ def run_phase(self, success_change=False): # pylint: disable=arguments-differ self._case2.check_case() self._case_two_custom_prerun_action() - self.run_indv(suffix = self._run_two_suffix) + self.run_indv(suffix=self._run_two_suffix) self._case_two_custom_postrun_action() # Compare results # Case1 is the "main" case, and we need to do the comparisons from there self._activate_case1() self._link_to_case2_output() - self._component_compare_test(self._run_one_suffix, self._run_two_suffix, - success_change=success_change, - ignore_fieldlist_diffs=self._ignore_fieldlist_diffs) + self._component_compare_test( + self._run_one_suffix, + self._run_two_suffix, + success_change=success_change, + ignore_fieldlist_diffs=self._ignore_fieldlist_diffs, + ) def copy_case1_restarts_to_case2(self): """ @@ -303,9 +318,11 @@ def copy_case1_restarts_to_case2(self): files. """ rundir2 = self._case2.get_value("RUNDIR") - self._case1.archive_last_restarts(archive_restdir = rundir2, - rundir=self._case1.get_value("RUNDIR"), - link_to_restart_files = True) + self._case1.archive_last_restarts( + archive_restdir=rundir2, + rundir=self._case1.get_value("RUNDIR"), + link_to_restart_files=True, + ) # ======================================================================== # Private methods @@ -336,8 +353,11 @@ def _get_output_root2(self): # $CIME_OUTPUT_ROOT/$CASE/ is not accidentally shared between # case1 and case2. (Currently nothing is placed here, but this # helps prevent future problems.) 
- output_root2 = os.path.join(self._case1.get_value("CIME_OUTPUT_ROOT"), - self._case1.get_value("CASE"), "case2_output_root") + output_root2 = os.path.join( + self._case1.get_value("CIME_OUTPUT_ROOT"), + self._case1.get_value("CASE"), + "case2_output_root", + ) return output_root2 def _get_case2_exeroot(self): @@ -402,10 +422,11 @@ def _setup_cases_if_not_yet_done(self): try: self._case2 = self._case1.create_clone( self._caseroot2, - keepexe = not self._separate_builds, - cime_output_root = self._get_output_root2(), - exeroot = self._get_case2_exeroot(), - rundir = self._get_case2_rundir()) + keepexe=not self._separate_builds, + cime_output_root=self._get_output_root2(), + exeroot=self._get_case2_exeroot(), + rundir=self._get_case2_rundir(), + ) self._write_info_to_case2_output_root() self._setup_cases() except BaseException: @@ -420,10 +441,12 @@ def _setup_cases_if_not_yet_done(self): if os.path.isdir(self._caseroot2): shutil.rmtree(self._caseroot2) self._activate_case1() - logger.warning("WARNING: Test case setup failed. Case2 has been removed, " - "but the main case may be in an inconsistent state. " - "If you want to rerun this test, you should create " - "a new test rather than trying to rerun this one.") + logger.warning( + "WARNING: Test case setup failed. Case2 has been removed, " + "but the main case may be in an inconsistent state. " + "If you want to rerun this test, you should create " + "a new test rather than trying to rerun this one." + ) raise def _case_from_existing_caseroot(self, caseroot): @@ -469,10 +492,16 @@ def _write_info_to_case2_output_root(self): try: with open(readme_path, "w") as fd: fd.write("This directory is typically empty.\n\n") - fd.write("case2's run dir is here: {}\n\n".format( - self._case2.get_value("RUNDIR"))) - fd.write("case2's bld dir is here: {}\n".format( - self._case2.get_value("EXEROOT"))) + fd.write( + "case2's run dir is here: {}\n\n".format( + self._case2.get_value("RUNDIR") + ) + ) + fd.write( + "case2's bld dir is here: {}\n".format( + self._case2.get_value("EXEROOT") + ) + ) except IOError: # It's not a big deal if we can't write the README file # (e.g., because the directory doesn't exist or isn't @@ -542,14 +571,13 @@ def _link_to_case2_output(self): rundir2 = self._case2.get_value("RUNDIR") run2suffix = self._run_two_suffix - pattern = '{}*.nc.{}'.format(casename2, run2suffix) + pattern = "{}*.nc.{}".format(casename2, run2suffix) case2_files = glob.glob(os.path.join(rundir2, pattern)) for one_file in case2_files: file_basename = os.path.basename(one_file) modified_basename = file_basename.replace(casename2, casename1, 1) one_link = os.path.join(rundir1, modified_basename) - if (os.path.islink(one_link) and - os.readlink(one_link) == one_file): + if os.path.islink(one_link) and os.readlink(one_link) == one_file: # Link is already set up correctly: do nothing # (os.symlink raises an exception if you try to replace an # existing file) diff --git a/CIME/SystemTests/test_utils/user_nl_utils.py b/CIME/SystemTests/test_utils/user_nl_utils.py index f91b832b943..eab45921c95 100644 --- a/CIME/SystemTests/test_utils/user_nl_utils.py +++ b/CIME/SystemTests/test_utils/user_nl_utils.py @@ -5,6 +5,7 @@ import os import glob + def append_to_user_nl_files(caseroot, component, contents): """ Append the string given by 'contents' to the end of each user_nl file for @@ -30,11 +31,12 @@ def append_to_user_nl_files(caseroot, component, contents): files = _get_list_of_user_nl_files(caseroot, component) if len(files) == 0: - raise RuntimeError('No user_nl 
files found for component ' + component) + raise RuntimeError("No user_nl files found for component " + component) for one_file in files: - with open(one_file, 'a') as user_nl_file: - user_nl_file.write('\n' + contents + '\n') + with open(one_file, "a") as user_nl_file: + user_nl_file.write("\n" + contents + "\n") + def _get_list_of_user_nl_files(path, component): """Get a list of all user_nl files in the current path for the component @@ -45,7 +47,7 @@ def _get_list_of_user_nl_files(path, component): The list of returned files gives their full path. """ - file_pattern = 'user_nl_' + component + '*' + file_pattern = "user_nl_" + component + "*" file_list = glob.glob(os.path.join(path, file_pattern)) return file_list diff --git a/CIME/SystemTests/tsc.py b/CIME/SystemTests/tsc.py index 4de8a8c0d8d..3065de0cd3a 100644 --- a/CIME/SystemTests/tsc.py +++ b/CIME/SystemTests/tsc.py @@ -32,9 +32,19 @@ SIM_LENGTH = 600 # seconds OUT_FREQ = 10 # seconds INSPECT_AT = [300, 450, 600] # seconds -INIT_COND_FILE_TEMPLATE = \ - "20210915.v2.ne4_oQU240.F2010.{}.{}.0002-{:02d}-01-00000.nc" -VAR_LIST = ["T", "Q", "V", "CLDLIQ", "CLDICE", "NUMLIQ", "NUMICE", "num_a1", "num_a2", "num_a3"] +INIT_COND_FILE_TEMPLATE = "20210915.v2.ne4_oQU240.F2010.{}.{}.0002-{:02d}-01-00000.nc" +VAR_LIST = [ + "T", + "Q", + "V", + "CLDLIQ", + "CLDICE", + "NUMLIQ", + "NUMICE", + "num_a1", + "num_a2", + "num_a3", +] P_THRESHOLD = 0.005 @@ -60,10 +70,10 @@ def build_phase(self, sharedlib_only=False, model_only=False): # so it has to happen there. if not model_only: logging.warning("Starting to build multi-instance exe") - for comp in ['ATM', 'OCN', 'WAV', 'GLC', 'ICE', 'ROF', 'LND']: + for comp in ["ATM", "OCN", "WAV", "GLC", "ICE", "ROF", "LND"]: ntasks = self._case.get_value("NTASKS_{}".format(comp)) self._case.set_value("ROOTPE_{}".format(comp), 0) - self._case.set_value("NINST_{}".format(comp), NINST) + self._case.set_value("NINST_{}".format(comp), NINST) self._case.set_value("NTASKS_{}".format(comp), ntasks * NINST) self._case.set_value("ROOTPE_CPL", 0) @@ -94,12 +104,21 @@ def _run_with_specified_dtime(self, dtime=2): csmdata_lnd = os.path.join(csmdata_root, "lnd/clm2/initdata/ne4_oQU240_v2_init") nstep_output = OUT_FREQ // dtime - for iinst in range(1, NINST+1): - with open(f'user_nl_{self.atmmod}_'+str(iinst).zfill(4), 'w') as atmnlfile, \ - open(f'user_nl_{self.lndmod}_'+str(iinst).zfill(4), 'w') as lndnlfile: - - fatm_in = os.path.join(csmdata_atm, INIT_COND_FILE_TEMPLATE.format(self.atmmodIC, 'i', iinst)) - flnd_in = os.path.join(csmdata_lnd, INIT_COND_FILE_TEMPLATE.format(self.lndmodIC, 'r', iinst)) + for iinst in range(1, NINST + 1): + with open( + f"user_nl_{self.atmmod}_" + str(iinst).zfill(4), "w" + ) as atmnlfile, open( + f"user_nl_{self.lndmod}_" + str(iinst).zfill(4), "w" + ) as lndnlfile: + + fatm_in = os.path.join( + csmdata_atm, + INIT_COND_FILE_TEMPLATE.format(self.atmmodIC, "i", iinst), + ) + flnd_in = os.path.join( + csmdata_lnd, + INIT_COND_FILE_TEMPLATE.format(self.lndmodIC, "r", iinst), + ) atmnlfile.write("ncdata = '{}' \n".format(fatm_in)) lndnlfile.write("finidat = '{}' \n".format(flnd_in)) @@ -115,9 +134,11 @@ def _run_with_specified_dtime(self, dtime=2): atmnlfile.write("mfilt = 1 \n") atmnlfile.write("ndens = 1 \n") atmnlfile.write("empty_htapes = .true. 
\n") - atmnlfile.write("fincl1 = 'PS','U','LANDFRAC',{} \n".format( - ''.join(["'{}',".format(s) for s in VAR_LIST])[:-1] - )) + atmnlfile.write( + "fincl1 = 'PS','U','LANDFRAC',{} \n".format( + "".join(["'{}',".format(s) for s in VAR_LIST])[:-1] + ) + ) # Force rebuild namelists self._skip_pnl = False @@ -134,15 +155,18 @@ def run_phase(self): def _compare_baseline(self): with self._test_status as ts: - ts.set_status(CIME.test_status.BASELINE_PHASE, - CIME.test_status.TEST_FAIL_STATUS) + ts.set_status( + CIME.test_status.BASELINE_PHASE, CIME.test_status.TEST_FAIL_STATUS + ) run_dir = self._case.get_value("RUNDIR") case_name = self._case.get_value("CASE") - base_dir = os.path.join(self._case.get_value("BASELINE_ROOT"), - self._case.get_value("BASECMP_CASE")) + base_dir = os.path.join( + self._case.get_value("BASELINE_ROOT"), + self._case.get_value("BASECMP_CASE"), + ) - test_name = "{}".format(case_name.split('.')[-1]) + test_name = "{}".format(case_name.split(".")[-1]) evv_config = { test_name: { "module": os.path.join(evv_lib_dir, "extensions", "tsc.py"), @@ -158,25 +182,31 @@ def _compare_baseline(self): } } - json_file = os.path.join(run_dir, '.'.join([case_name, 'json'])) - with open(json_file, 'w') as config_file: + json_file = os.path.join(run_dir, ".".join([case_name, "json"])) + with open(json_file, "w") as config_file: json.dump(evv_config, config_file, indent=4) - evv_out_dir = os.path.join(run_dir, '.'.join([case_name, 'evv'])) - evv(['-e', json_file, '-o', evv_out_dir]) + evv_out_dir = os.path.join(run_dir, ".".join([case_name, "evv"])) + evv(["-e", json_file, "-o", evv_out_dir]) - with open(os.path.join(evv_out_dir, 'index.json'), 'r') as evv_f: + with open(os.path.join(evv_out_dir, "index.json"), "r") as evv_f: evv_status = json.load(evv_f) comments = "" - for evv_elem in evv_status['Data']['Elements']: - if evv_elem['Type'] == 'ValSummary' \ - and evv_elem['TableTitle'] == 'Time step convergence test': - comments = "; ".join("{}: {}".format(key, val) for key, val - in evv_elem['Data'][test_name][''].items()) - if evv_elem['Data'][test_name]['']['Test status'].lower() == 'pass': - self._test_status.set_status(CIME.test_status.BASELINE_PHASE, - CIME.test_status.TEST_PASS_STATUS) + for evv_elem in evv_status["Data"]["Elements"]: + if ( + evv_elem["Type"] == "ValSummary" + and evv_elem["TableTitle"] == "Time step convergence test" + ): + comments = "; ".join( + "{}: {}".format(key, val) + for key, val in evv_elem["Data"][test_name][""].items() + ) + if evv_elem["Data"][test_name][""]["Test status"].lower() == "pass": + self._test_status.set_status( + CIME.test_status.BASELINE_PHASE, + CIME.test_status.TEST_PASS_STATUS, + ) break status = self._test_status.get_status(CIME.test_status.BASELINE_PHASE) @@ -186,21 +216,35 @@ def _compare_baseline(self): urlroot = CIME.utils.get_urlroot(mach_obj) if htmlroot is not None: with CIME.utils.SharedArea(): - dir_util.copy_tree(evv_out_dir, os.path.join(htmlroot, 'evv', case_name), preserve_mode=False) + dir_util.copy_tree( + evv_out_dir, + os.path.join(htmlroot, "evv", case_name), + preserve_mode=False, + ) if urlroot is None: urlroot = "[{}_URL]".format(mach_name.capitalize()) viewing = "{}/evv/{}/index.html".format(urlroot, case_name) else: - viewing = "{}\n" \ - " EVV viewing instructions can be found at: " \ - " https://github.com/E3SM-Project/E3SM/blob/master/cime/scripts/" \ - "climate_reproducibility/README.md#test-passfail-and-extended-output" \ - "".format(evv_out_dir) - - comments = "{} {} for test '{}'.\n" \ - " {}\n" \ - 
" EVV results can be viewed at:\n" \ - " {}".format(CIME.test_status.BASELINE_PHASE, status, test_name, comments, viewing) + viewing = ( + "{}\n" + " EVV viewing instructions can be found at: " + " https://github.com/E3SM-Project/E3SM/blob/master/cime/scripts/" + "climate_reproducibility/README.md#test-passfail-and-extended-output" + "".format(evv_out_dir) + ) + + comments = ( + "{} {} for test '{}'.\n" + " {}\n" + " EVV results can be viewed at:\n" + " {}".format( + CIME.test_status.BASELINE_PHASE, + status, + test_name, + comments, + viewing, + ) + ) CIME.utils.append_testlog(comments, self._orig_caseroot) @@ -208,18 +252,26 @@ def _generate_baseline(self): super(TSC, self)._generate_baseline() with CIME.utils.SharedArea(): - basegen_dir = os.path.join(self._case.get_value("BASELINE_ROOT"), - self._case.get_value("BASEGEN_CASE")) + basegen_dir = os.path.join( + self._case.get_value("BASELINE_ROOT"), + self._case.get_value("BASEGEN_CASE"), + ) rundir = self._case.get_value("RUNDIR") ref_case = self._case.get_value("RUN_REFCASE") env_archive = self._case.get_env("archive") - hists = env_archive.get_all_hist_files(self._case.get_value("CASE"), self.atmmod, rundir, r'DT\d*', ref_case=ref_case) - hists = [os.path.join(rundir,hist) for hist in hists] + hists = env_archive.get_all_hist_files( + self._case.get_value("CASE"), + self.atmmod, + rundir, + r"DT\d*", + ref_case=ref_case, + ) + hists = [os.path.join(rundir, hist) for hist in hists] logger.debug("TSC additional baseline files: {}".format(hists)) for hist in hists: - basename = hist[hist.rfind(self.atmmod):] + basename = hist[hist.rfind(self.atmmod) :] baseline = os.path.join(basegen_dir, basename) if os.path.exists(baseline): os.remove(baseline) diff --git a/CIME/Tools/archive_metadata b/CIME/Tools/archive_metadata index 9b919daee98..afe56826f0e 100755 --- a/CIME/Tools/archive_metadata +++ b/CIME/Tools/archive_metadata @@ -22,70 +22,117 @@ import sys from string import Template from standard_script_setup import * -from CIME.case import Case -from CIME.utils import is_last_process_complete +from CIME.case import Case +from CIME.utils import is_last_process_complete + # six is for py2/py3 compatibility -from six.moves import configparser, urllib +from six.moves import configparser, urllib # define global constants logger = logging.getLogger(__name__) -_svn_expdb_url = 'https://svn-cesm2-expdb.cgd.ucar.edu' -_exp_types = ['CMIP6', 'production', 'tuning','lens','C1','C2','C3','C4','C5'] -_xml_vars = ['CASE', 'COMPILER', 'COMPSET', 'CONTINUE_RUN', 'DOUT_S', 'DOUT_S_ROOT', - 'GRID', 'MACH', 'MPILIB', 'MODEL', 'MODEL_VERSION', 'REST_N', 'REST_OPTION', - 'RUNDIR', 'RUN_REFCASE', 'RUN_REFDATE', 'RUN_STARTDATE', 'RUN_TYPE', - 'STOP_N', 'STOP_OPTION', 'USER'] -_run_vars = ['JOB_QUEUE', 'JOB_WALLCLOCK_TIME', 'PROJECT'] -_archive_list = ['Buildconf', 'CaseDocs', 'CaseStatus', 'LockedFiles', - 'Macros.make', 'README.case', 'SourceMods', 'software_environment.txt'] -_call_template = Template('in "$function" - Ignoring SVN repo update\n' - 'SVN error executing command "$cmd". \n' - '$error: $strerror') -_copy_template = Template('in "$function" - Unable to copy "$source" to "$dest"' - '$error: $strerror') -_svn_error_template = Template('in "$function" - SVN client unavailable\n' - 'SVN error executing command "$cmd". 
\n' - '$error: $strerror') -_ignore_patterns = ['*.pyc', '^.git', 'tmp', '.svn', '*~'] -_pp_xml_vars = {'atm' : 'ATMDIAG_test_path_climo', - 'glc' : '', - 'lnd' : 'LNDDIAG_PTMPDIR_1', - 'ice' : 'ICEDIAG_PATH_CLIMO_CONT', - 'ocn' : 'OCNDIAG_TAVGDIR', - 'rof' : '', - 'timeseries' : 'TIMESERIES_OUTPUT_ROOTDIR', - 'xconform' : 'CONFORM_OUTPUT_DIR'} -_pp_diag_vars = {'atm' : ['ATMDIAG_test_first_yr', 'ATMDIAG_test_nyrs'], - 'ice' : ['ICEDIAG_BEGYR_CONT', 'ICEDIAG_ENDYR_CONT', 'ICEDIAG_YRS_TO_AVG'], - 'lnd' : ['LNDDIAG_clim_first_yr_1', 'LNDDIAG_clim_num_yrs_1', - 'LNDDIAG_trends_first_yr_1', 'LNDDIAG_trends_num_yrs_1'], - 'ocn' : ['OCNDIAG_YEAR0', 'OCNDIAG_YEAR1', - 'OCNDIAG_TSERIES_YEAR0', 'OCNDIAG_TSERIES_YEAR1']} -_pp_tseries_comps = ['atm', 'glc', 'ice', 'lnd', 'ocn', 'rof'] +_svn_expdb_url = "https://svn-cesm2-expdb.cgd.ucar.edu" +_exp_types = ["CMIP6", "production", "tuning", "lens", "C1", "C2", "C3", "C4", "C5"] +_xml_vars = [ + "CASE", + "COMPILER", + "COMPSET", + "CONTINUE_RUN", + "DOUT_S", + "DOUT_S_ROOT", + "GRID", + "MACH", + "MPILIB", + "MODEL", + "MODEL_VERSION", + "REST_N", + "REST_OPTION", + "RUNDIR", + "RUN_REFCASE", + "RUN_REFDATE", + "RUN_STARTDATE", + "RUN_TYPE", + "STOP_N", + "STOP_OPTION", + "USER", +] +_run_vars = ["JOB_QUEUE", "JOB_WALLCLOCK_TIME", "PROJECT"] +_archive_list = [ + "Buildconf", + "CaseDocs", + "CaseStatus", + "LockedFiles", + "Macros.make", + "README.case", + "SourceMods", + "software_environment.txt", +] +_call_template = Template( + 'in "$function" - Ignoring SVN repo update\n' + 'SVN error executing command "$cmd". \n' + "$error: $strerror" +) +_copy_template = Template( + 'in "$function" - Unable to copy "$source" to "$dest"' "$error: $strerror" +) +_svn_error_template = Template( + 'in "$function" - SVN client unavailable\n' + 'SVN error executing command "$cmd". 
\n' + "$error: $strerror" +) +_ignore_patterns = ["*.pyc", "^.git", "tmp", ".svn", "*~"] +_pp_xml_vars = { + "atm": "ATMDIAG_test_path_climo", + "glc": "", + "lnd": "LNDDIAG_PTMPDIR_1", + "ice": "ICEDIAG_PATH_CLIMO_CONT", + "ocn": "OCNDIAG_TAVGDIR", + "rof": "", + "timeseries": "TIMESERIES_OUTPUT_ROOTDIR", + "xconform": "CONFORM_OUTPUT_DIR", +} +_pp_diag_vars = { + "atm": ["ATMDIAG_test_first_yr", "ATMDIAG_test_nyrs"], + "ice": ["ICEDIAG_BEGYR_CONT", "ICEDIAG_ENDYR_CONT", "ICEDIAG_YRS_TO_AVG"], + "lnd": [ + "LNDDIAG_clim_first_yr_1", + "LNDDIAG_clim_num_yrs_1", + "LNDDIAG_trends_first_yr_1", + "LNDDIAG_trends_num_yrs_1", + ], + "ocn": [ + "OCNDIAG_YEAR0", + "OCNDIAG_YEAR1", + "OCNDIAG_TSERIES_YEAR0", + "OCNDIAG_TSERIES_YEAR1", + ], +} +_pp_tseries_comps = ["atm", "glc", "ice", "lnd", "ocn", "rof"] # setting the ssl context to avoid issues with CGD certificates -_context = ssl._create_unverified_context() # pylint:disable=protected-access +_context = ssl._create_unverified_context() # pylint:disable=protected-access # ------------------------------------------------------------------------------- class PasswordPromptAction(argparse.Action): -# ------------------------------------------------------------------------------- - """ SVN developer's password class handler - """ + # ------------------------------------------------------------------------------- + """SVN developer's password class handler""" # pylint: disable=redefined-builtin - def __init__(self, - option_strings=None, - dest=None, - default=None, - required=False, - nargs=0, - help=None): + def __init__( + self, + option_strings=None, + dest=None, + default=None, + required=False, + nargs=0, + help=None, + ): super(PasswordPromptAction, self).__init__( option_strings=option_strings, dest=dest, default=default, required=required, nargs=nargs, - help=help) + help=help, + ) def __call__(self, parser, args, values, option_string=None): # check if ~/.subversion/cmip6.conf exists @@ -95,24 +142,25 @@ class PasswordPromptAction(argparse.Action): # read the .cmip6.conf file config = configparser.SafeConfigParser() config.read(conf_path) - password = config.get('svn', 'password') + password = config.get("svn", "password") else: password = getpass.getpass() setattr(args, self.dest, password) + # --------------------------------------------------------------------- def basic_authorization(user, password): -# --------------------------------------------------------------------- - """ Basic authentication encoding - """ + # --------------------------------------------------------------------- + """Basic authentication encoding""" sauth = user + ":" + password return "Basic " + sauth.encode("base64").rstrip() + # --------------------------------------------------------------------- class SVNException(Exception): -# --------------------------------------------------------------------- - """ SVN command exception handler - """ + # --------------------------------------------------------------------- + """SVN command exception handler""" + def __init__(self, value): super(SVNException, self).__init__(value) self.value = value @@ -120,94 +168,153 @@ class SVNException(Exception): def __str__(self): return repr(self.value) + # ------------------------------------------------------------------------------- def commandline_options(args): -# ------------------------------------------------------------------------------- - """ Process the command line arguments. 
- """ + # ------------------------------------------------------------------------------- + """Process the command line arguments.""" parser = argparse.ArgumentParser( - description='Query and parse the caseroot files to gather metadata information' \ - ' that can be posted to the CESM experiments database.' \ - ' ' \ - ' CMIP6 experiment case names must be reserved already in the' \ - ' experiment database. Please see:' \ - ' https://csesgweb.cgd.ucar.edu/expdb2.0 for details.') + description="Query and parse the caseroot files to gather metadata information" + " that can be posted to the CESM experiments database." + " " + " CMIP6 experiment case names must be reserved already in the" + " experiment database. Please see:" + " https://csesgweb.cgd.ucar.edu/expdb2.0 for details." + ) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument('--user', dest='user', type=str, default=None, required=True, - help='User name for SVN CESM developer access (required)') - - parser.add_argument('--password', dest='password', action=PasswordPromptAction, - default='', required=True, - help='Password for SVN CESM developer access (required)') - - parser.add_argument('--caseroot', nargs=1, required=False, - help='Fully quailfied path to case root directory (optional). ' \ - 'Defaults to current working directory.') - - parser.add_argument('--workdir', nargs=1, required=False, - help='Fully quailfied path to directory for storing intermediate ' \ - 'case files. A sub-directory called ' \ - 'archive_temp_dir is created, populated ' \ - 'with case files, and posted to the CESM experiments database and ' \ - 'SVN repository at URL "{0}". ' \ - 'This argument can be used to archive a caseroot when the user ' \ - 'does not have write permission in the caseroot (optional). ' \ - 'Defaults to current working directory.'.format(_svn_expdb_url)) - - parser.add_argument('--expType', dest='expType', nargs=1, required=True, choices=_exp_types, - help='Experiment type. For CMIP6 experiments, the case must already ' \ - 'exist in the experiments database at URL ' \ - ' "http://csegweb.cgd.ucar.edu/expdb2.0" (required). ' \ - 'Must be one of "{0}"'.format(_exp_types)) - - parser.add_argument('--title', nargs=1, required=False, default=None, - help='Title of experiment (optional).') - - parser.add_argument('--ignore-logs', dest='ignore_logs', action='store_true', - help='Ignore updating the SVN repository with the caseroot/logs files. ' \ - 'The experiments database will be updated (optional).') - - parser.add_argument('--ignore-timing', dest='ignore_timing', action='store_true', - help='Ignore updating the the SVN repository with caseroot/timing files.' \ - 'The experiments database will be updated (optional).') - - parser.add_argument('--ignore-repo-update', dest='ignore_repo_update', action='store_true', - help='Ignore updating the SVN repository with all the caseroot files. ' \ - 'The experiments database will be updated (optional).') - - parser.add_argument('--add-files', dest='user_add_files', required=False, - help='Comma-separated list with no spaces of files or directories to be ' \ - 'added to the SVN repository. 
These are in addition to the default added ' \
- 'caseroot files and directories: '\
- '"{0}, *.xml, user_nl_*" (optional).'.format(_archive_list))
-
- parser.add_argument('--dryrun', action='store_true',
- help='Parse settings and print what actions will be taken but ' \
- 'do not execute the action (optional).')
-
- parser.add_argument('--query_cmip6', nargs=2, required=False,
- help='Query the experiments database global attributes ' \
- 'for specified CMIP6 casename as argument 1. ' \
- 'Writes a json formatted output file, specified by argument 2, ' \
- 'to subdir archive_files (optional).')
-
- parser.add_argument('--test-post', dest='test_post', action='store_true',
- help='Post metadata to the test expdb2.0 web application server ' \
- 'at URL "http://csegwebdev.cgd.ucar.edu/expdb2.0". ' \
- 'No --test-post argument defaults to posting metadata to the ' \
- 'production expdb2.0 web application server '\
- 'at URL "http://csegweb.cgd.ucar.edu/expdb2.0" (optional).')
+ parser.add_argument(
+ "--user",
+ dest="user",
+ type=str,
+ default=None,
+ required=True,
+ help="User name for SVN CESM developer access (required)",
+ )
+
+ parser.add_argument(
+ "--password",
+ dest="password",
+ action=PasswordPromptAction,
+ default="",
+ required=True,
+ help="Password for SVN CESM developer access (required)",
+ )
+
+ parser.add_argument(
+ "--caseroot",
+ nargs=1,
+ required=False,
+ help="Fully qualified path to case root directory (optional). "
+ "Defaults to current working directory.",
+ )
+
+ parser.add_argument(
+ "--workdir",
+ nargs=1,
+ required=False,
+ help="Fully qualified path to directory for storing intermediate "
+ "case files. A sub-directory called "
+ "archive_temp_dir is created, populated "
+ "with case files, and posted to the CESM experiments database and "
+ 'SVN repository at URL "{0}". '
+ "This argument can be used to archive a caseroot when the user "
+ "does not have write permission in the caseroot (optional). "
+ "Defaults to current working directory.".format(_svn_expdb_url),
+ )
+
+ parser.add_argument(
+ "--expType",
+ dest="expType",
+ nargs=1,
+ required=True,
+ choices=_exp_types,
+ help="Experiment type. For CMIP6 experiments, the case must already "
+ "exist in the experiments database at URL "
+ ' "http://csegweb.cgd.ucar.edu/expdb2.0" (required). '
+ 'Must be one of "{0}"'.format(_exp_types),
+ )
+
+ parser.add_argument(
+ "--title",
+ nargs=1,
+ required=False,
+ default=None,
+ help="Title of experiment (optional).",
+ )
+
+ parser.add_argument(
+ "--ignore-logs",
+ dest="ignore_logs",
+ action="store_true",
+ help="Ignore updating the SVN repository with the caseroot/logs files. "
+ "The experiments database will be updated (optional).",
+ )
+
+ parser.add_argument(
+ "--ignore-timing",
+ dest="ignore_timing",
+ action="store_true",
+ help="Ignore updating the SVN repository with caseroot/timing files. "
+ "The experiments database will be updated (optional).",
+ )
+
+ parser.add_argument(
+ "--ignore-repo-update",
+ dest="ignore_repo_update",
+ action="store_true",
+ help="Ignore updating the SVN repository with all the caseroot files. "
+ "The experiments database will be updated (optional).",
+ )
+
+ parser.add_argument(
+ "--add-files",
+ dest="user_add_files",
+ required=False,
+ help="Comma-separated list with no spaces of files or directories to be "
+ "added to the SVN repository. 
These are in addition to the default added " + "caseroot files and directories: " + '"{0}, *.xml, user_nl_*" (optional).'.format(_archive_list), + ) + + parser.add_argument( + "--dryrun", + action="store_true", + help="Parse settings and print what actions will be taken but " + "do not execute the action (optional).", + ) + + parser.add_argument( + "--query_cmip6", + nargs=2, + required=False, + help="Query the experiments database global attributes " + "for specified CMIP6 casename as argument 1. " + "Writes a json formatted output file, specified by argument 2, " + "to subdir archive_files (optional).", + ) + + parser.add_argument( + "--test-post", + dest="test_post", + action="store_true", + help="Post metadata to the test expdb2.0 web application server " + 'at URL "http://csegwebdev.cgd.ucar.edu/expdb2.0". ' + "No --test-post argument defaults to posting metadata to the " + "production expdb2.0 web application server " + 'at URL "http://csegweb.cgd.ucar.edu/expdb2.0" (optional).', + ) opts = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return opts + # --------------------------------------------------------------------- def get_case_vars(case_dict, case): -# --------------------------------------------------------------------- - """ get_case_vars + # --------------------------------------------------------------------- + """get_case_vars loop through the global list of XML vars and get the values from the case object into a case dictionary @@ -215,34 +322,35 @@ def get_case_vars(case_dict, case): case_dict (dict) - case dictionary to store XML variables case (object) - case object """ - logger.debug('get_case_vars') + logger.debug("get_case_vars") for xml_id in _xml_vars: case_dict[xml_id] = case.get_value(xml_id, resolved=True, subgroup=None) for xml_id in _run_vars: - case_dict[xml_id] = case.get_value(xml_id, resolved=True, subgroup='case.run') + case_dict[xml_id] = case.get_value(xml_id, resolved=True, subgroup="case.run") return case_dict + # --------------------------------------------------------------------- def get_disk_usage(path): -# --------------------------------------------------------------------- + # --------------------------------------------------------------------- """get_disk_usage return the total disk usage in bytes for a given path. Arguments: path - path to start """ - logger.debug('get_disk_usage') + logger.debug("get_disk_usage") total_size = 0 cwd = os.getcwd() if os.path.exists(path): os.chdir(path) - cmd = ['du', '--summarize', '--block-size=1'] + cmd = ["du", "--summarize", "--block-size=1"] try: total_size = subprocess.check_output(cmd) - total_size = total_size.replace('\t.\n', '') + total_size = total_size.replace("\t.\n", "") except subprocess.CalledProcessError: msg = "Error executing command = '{0}'".format(cmd) logger.warning(msg) @@ -252,23 +360,24 @@ def get_disk_usage(path): # --------------------------------------------------------------------- def get_ocn_disk_usage(path): -# --------------------------------------------------------------------- + # --------------------------------------------------------------------- """get_ocn_disk_usage return the total disk usage in bytes for a given path. 
Arguments: path - path to start """ - logger.debug('get_ocn_disk_usage') + logger.debug("get_ocn_disk_usage") total_size = 0 paths = glob.glob(path) for path in paths: total_size += get_disk_usage(path) return int(total_size) + # --------------------------------------------------------------------- def get_pp_path(pp_dir, process): -# --------------------------------------------------------------------- + # --------------------------------------------------------------------- """get_pp_path return the XML path for process @@ -276,311 +385,369 @@ def get_pp_path(pp_dir, process): pp_dir - path to postprocess directory process - process name """ - logger.debug('get_pp_path') + logger.debug("get_pp_path") cwd = os.getcwd() os.chdir(pp_dir) - pp_path_var = '' - if process == 'timeseries': - pp_path_var = _pp_xml_vars['timeseries'] - elif process == 'xconform': - pp_path_var = _pp_xml_vars['xconform'] + pp_path_var = "" + if process == "timeseries": + pp_path_var = _pp_xml_vars["timeseries"] + elif process == "xconform": + pp_path_var = _pp_xml_vars["xconform"] - cmd = ['./pp_config', '--get', pp_path_var, '--value'] + cmd = ["./pp_config", "--get", pp_path_var, "--value"] try: pp_path = subprocess.check_output(cmd) except subprocess.CalledProcessError: msg = "Error executing command = '{0}'".format(cmd) logger.warning(msg) - if (len(pp_path) > 2): + if len(pp_path) > 2: pp_path = pp_path.rstrip() else: - pp_path = '' + pp_path = "" os.chdir(cwd) return pp_path + # --------------------------------------------------------------------- def get_diag_dates(comp, pp_dir): -# --------------------------------------------------------------------- - """ get_diag_dates + # --------------------------------------------------------------------- + """get_diag_dates Query the postprocessing env_diags_[comp].xml file to get the model diag dates for the given component. 
""" - logger.debug('get_diag_dates') + logger.debug("get_diag_dates") cwd = os.getcwd() os.chdir(pp_dir) - model_dates = '' + model_dates = "" pp_vars = _pp_diag_vars.get(comp) for pp_var in pp_vars: - cmd = ['./pp_config', '--get', pp_var, '--value'] + cmd = ["./pp_config", "--get", pp_var, "--value"] try: pp_value = subprocess.check_output(cmd) except subprocess.CalledProcessError: msg = "Error executing command = '{0}'".format(cmd) logger.warning(msg) - tmp_dates = '{0} = {1}'.format(pp_var, pp_value) + tmp_dates = "{0} = {1}".format(pp_var, pp_value) model_dates = model_dates + tmp_dates os.chdir(cwd) return model_dates + # --------------------------------------------------------------------- def get_pp_status(case_dict): -# --------------------------------------------------------------------- - """ get_pp_status + # --------------------------------------------------------------------- + """get_pp_status Parse the postprocessing log files looking for status information Arguments: case_dict (dict) - case dictionary to store XML variables """ - logger.debug('get_pp_status') + logger.debug("get_pp_status") # initialize status variables msg_avg = dict() msg_diags = dict() - diag_comps = ['atm', 'ice', 'lnd', 'ocn'] - - pp_dir = os.path.join(case_dict['CASEROOT'], 'postprocess') - pp_log_dir = os.path.join(case_dict['CASEROOT'], 'postprocess', 'logs') - - msg_avg['atm'] = "COMPLETED SUCCESSFULLY" - msg_diags['atm'] = "Successfully completed generating atmosphere diagnostics" - case_dict['atm_avg_dates'] = case_dict['atm_diag_dates'] = get_diag_dates('atm', pp_dir) - - msg_avg['ice'] = "Successfully completed generating ice climatology averages" - msg_diags['ice'] = "Successfully completed generating ice diagnostics" - case_dict['ice_avg_dates'] = case_dict['ice_diag_dates'] = get_diag_dates('ice', pp_dir) - - msg_avg['lnd'] = "COMPLETED SUCCESSFULLY" - msg_diags['lnd'] = "Successfully completed generating land diagnostics" - case_dict['lnd_avg_dates'] = case_dict['lnd_diag_dates'] = get_diag_dates('lnd', pp_dir) - - msg_avg['ocn'] = "Successfully completed generating ocean climatology averages" - msg_diags['ocn'] = "Successfully completed generating ocean diagnostics" - case_dict['ocn_avg_dates'] = case_dict['ocn_diag_dates'] = get_diag_dates('ocn', pp_dir) - + diag_comps = ["atm", "ice", "lnd", "ocn"] + + pp_dir = os.path.join(case_dict["CASEROOT"], "postprocess") + pp_log_dir = os.path.join(case_dict["CASEROOT"], "postprocess", "logs") + + msg_avg["atm"] = "COMPLETED SUCCESSFULLY" + msg_diags["atm"] = "Successfully completed generating atmosphere diagnostics" + case_dict["atm_avg_dates"] = case_dict["atm_diag_dates"] = get_diag_dates( + "atm", pp_dir + ) + + msg_avg["ice"] = "Successfully completed generating ice climatology averages" + msg_diags["ice"] = "Successfully completed generating ice diagnostics" + case_dict["ice_avg_dates"] = case_dict["ice_diag_dates"] = get_diag_dates( + "ice", pp_dir + ) + + msg_avg["lnd"] = "COMPLETED SUCCESSFULLY" + msg_diags["lnd"] = "Successfully completed generating land diagnostics" + case_dict["lnd_avg_dates"] = case_dict["lnd_diag_dates"] = get_diag_dates( + "lnd", pp_dir + ) + + msg_avg["ocn"] = "Successfully completed generating ocean climatology averages" + msg_diags["ocn"] = "Successfully completed generating ocean diagnostics" + case_dict["ocn_avg_dates"] = case_dict["ocn_diag_dates"] = get_diag_dates( + "ocn", pp_dir + ) for comp in diag_comps: - case_dict[comp+'_avg_status'] = 'Unknown' - case_dict[comp+'_diag_status'] = 'Unknown' - - if (comp 
!= 'ocn'): - case_dict[comp+'_avg_path'] = os.path.join(case_dict['DOUT_S_ROOT'], comp, 'proc/climo') - case_dict[comp+'_avg_size'] = get_disk_usage(case_dict[comp+'_avg_path']) - case_dict[comp+'_diag_path'] = os.path.join(case_dict['DOUT_S_ROOT'], comp, 'proc/diag') - case_dict[comp+'_diag_size'] = get_disk_usage(case_dict[comp+'_diag_path']) + case_dict[comp + "_avg_status"] = "Unknown" + case_dict[comp + "_diag_status"] = "Unknown" + + if comp != "ocn": + case_dict[comp + "_avg_path"] = os.path.join( + case_dict["DOUT_S_ROOT"], comp, "proc/climo" + ) + case_dict[comp + "_avg_size"] = get_disk_usage( + case_dict[comp + "_avg_path"] + ) + case_dict[comp + "_diag_path"] = os.path.join( + case_dict["DOUT_S_ROOT"], comp, "proc/diag" + ) + case_dict[comp + "_diag_size"] = get_disk_usage( + case_dict[comp + "_diag_path"] + ) else: - case_dict[comp+'_avg_path'] = os.path.join(case_dict['DOUT_S_ROOT'], comp, 'proc/climo*') - case_dict[comp+'_avg_size'] = get_ocn_disk_usage(case_dict[comp+'_avg_path']) - case_dict[comp+'_diag_path'] = os.path.join(case_dict['DOUT_S_ROOT'], comp, 'proc/diag*') - case_dict[comp+'_diag_size'] = get_ocn_disk_usage(case_dict[comp+'_diag_path']) + case_dict[comp + "_avg_path"] = os.path.join( + case_dict["DOUT_S_ROOT"], comp, "proc/climo*" + ) + case_dict[comp + "_avg_size"] = get_ocn_disk_usage( + case_dict[comp + "_avg_path"] + ) + case_dict[comp + "_diag_path"] = os.path.join( + case_dict["DOUT_S_ROOT"], comp, "proc/diag*" + ) + case_dict[comp + "_diag_size"] = get_ocn_disk_usage( + case_dict[comp + "_diag_path"] + ) avg_logs = list() - avg_file_pattern = ("{0}/{1}_averages.log.*".format(pp_log_dir, comp)) + avg_file_pattern = "{0}/{1}_averages.log.*".format(pp_log_dir, comp) avg_logs = glob.glob(avg_file_pattern) if avg_logs: log_file = max(avg_logs, key=os.path.getctime) - if (is_last_process_complete(log_file, msg_avg[comp], - 'Average list complies with standards.')): - case_dict[comp+'_avg_status'] = 'Succeeded' + if is_last_process_complete( + log_file, msg_avg[comp], "Average list complies with standards." 
+ ): + case_dict[comp + "_avg_status"] = "Succeeded" else: - case_dict[comp+'_avg_status'] = 'Started' + case_dict[comp + "_avg_status"] = "Started" diag_logs = list() - diag_file_pattern = ("{0}/{1}_diagnostics.log.*".format(pp_log_dir, comp)) + diag_file_pattern = "{0}/{1}_diagnostics.log.*".format(pp_log_dir, comp) diag_logs = glob.glob(diag_file_pattern) if diag_logs: log_file = max(diag_logs, key=os.path.getctime) - if is_last_process_complete(log_file, msg_diags[comp], 'ncks version'): - case_dict[comp+'_diag_status'] = 'Succeeded' + if is_last_process_complete(log_file, msg_diags[comp], "ncks version"): + case_dict[comp + "_diag_status"] = "Succeeded" else: - case_dict[comp+'_diag_status'] = 'Started' + case_dict[comp + "_diag_status"] = "Started" # get overall timeseries status - case_dict['timeseries_status'] = 'Unknown' - case_dict['timeseries_path'] = get_pp_path(pp_dir, 'timeseries') - case_dict['timeseries_size'] = 0 - case_dict['timeseries_dates'] = '{0}-{1}'.format(case_dict['RUN_STARTDATE'].replace("-", ""), - case_dict['RUN_STARTDATE'].replace("-", "")) - case_dict['timeseries_total_time'] = 0 + case_dict["timeseries_status"] = "Unknown" + case_dict["timeseries_path"] = get_pp_path(pp_dir, "timeseries") + case_dict["timeseries_size"] = 0 + case_dict["timeseries_dates"] = "{0}-{1}".format( + case_dict["RUN_STARTDATE"].replace("-", ""), + case_dict["RUN_STARTDATE"].replace("-", ""), + ) + case_dict["timeseries_total_time"] = 0 tseries_logs = list() - tseries_file_pattern = ("{0}/timeseries.log.*".format(pp_log_dir)) + tseries_file_pattern = "{0}/timeseries.log.*".format(pp_log_dir) tseries_logs = glob.glob(tseries_file_pattern) if tseries_logs: log_file = max(tseries_logs, key=os.path.getctime) - if is_last_process_complete(filepath=log_file, - expect_text='Successfully completed', - fail_text='opening'): - case_dict['timeseries_status'] = 'Succeeded' - with open(log_file, 'r') as fname: + if is_last_process_complete( + filepath=log_file, expect_text="Successfully completed", fail_text="opening" + ): + case_dict["timeseries_status"] = "Succeeded" + with open(log_file, "r") as fname: log_content = fname.readlines() - total_time = [line for line in log_content if 'Total Time:' in line] - case_dict['timeseries_total_time'] = ' '.join(total_time[0].split()) + total_time = [line for line in log_content if "Total Time:" in line] + case_dict["timeseries_total_time"] = " ".join(total_time[0].split()) else: - case_dict['timeseries_status'] = 'Started' - sta_dates = case_dict['sta_last_date'].split("-") - case_dict['timeseries_dates'] = '{0}-{1}'.format(case_dict['RUN_STARTDATE'].replace("-", ""), - ''.join(sta_dates[:-1])) + case_dict["timeseries_status"] = "Started" + sta_dates = case_dict["sta_last_date"].split("-") + case_dict["timeseries_dates"] = "{0}-{1}".format( + case_dict["RUN_STARTDATE"].replace("-", ""), "".join(sta_dates[:-1]) + ) for comp in _pp_tseries_comps: - tseries_path = "{0}/{1}/proc/tseries".format(case_dict['timeseries_path'], comp) - case_dict['timeseries_size'] += get_disk_usage(tseries_path) + tseries_path = "{0}/{1}/proc/tseries".format( + case_dict["timeseries_path"], comp + ) + case_dict["timeseries_size"] += get_disk_usage(tseries_path) # get iconform status = this initializes files in the POSTPROCESS_PATH - case_dict['iconform_status'] = 'Unknown' - case_dict['iconform_path'] = '' - case_dict['iconform_size'] = 0 - case_dict['iconform_dates'] = case_dict['timeseries_dates'] + case_dict["iconform_status"] = "Unknown" + case_dict["iconform_path"] = "" + 
case_dict["iconform_size"] = 0 + case_dict["iconform_dates"] = case_dict["timeseries_dates"] iconform_logs = list() - iconform_file_pattern = ("{0}/iconform.log.*".format(pp_log_dir)) + iconform_file_pattern = "{0}/iconform.log.*".format(pp_log_dir) iconform_logs = glob.glob(iconform_file_pattern) if iconform_logs: log_file = max(iconform_logs, key=os.path.getctime) - if (is_last_process_complete(log_file, 'Successfully created the conform tool', - 'Running createOutputSpecs')): - case_dict['iconform_status'] = 'Succeeded' + if is_last_process_complete( + log_file, + "Successfully created the conform tool", + "Running createOutputSpecs", + ): + case_dict["iconform_status"] = "Succeeded" else: - case_dict['iconform_status'] = 'Started' + case_dict["iconform_status"] = "Started" # get xconform status - case_dict['xconform_path'] = '' - case_dict['xconform_path'] = get_pp_path(pp_dir, 'xconform') - case_dict['xconform_status'] = 'Unknown' - case_dict['xconform_size'] = get_disk_usage(case_dict['xconform_path']) - case_dict['xconform_dates'] = case_dict['timeseries_dates'] - case_dict['xconform_total_time'] = 0 + case_dict["xconform_path"] = "" + case_dict["xconform_path"] = get_pp_path(pp_dir, "xconform") + case_dict["xconform_status"] = "Unknown" + case_dict["xconform_size"] = get_disk_usage(case_dict["xconform_path"]) + case_dict["xconform_dates"] = case_dict["timeseries_dates"] + case_dict["xconform_total_time"] = 0 xconform_logs = list() - xconform_file_pattern = ("{0}/xconform.log.*".format(pp_log_dir)) + xconform_file_pattern = "{0}/xconform.log.*".format(pp_log_dir) xconform_logs = glob.glob(xconform_file_pattern) if xconform_logs: log_file = max(xconform_logs, key=os.path.getctime) - if (is_last_process_complete(log_file, - 'Successfully completed converting all files', - 'cesm_conform_generator INFO')): - case_dict['xconform_status'] = 'Succeeded' - case_dict['xconform_size'] = get_disk_usage(case_dict['xconform_path']) - with open(log_file, 'r') as fname: + if is_last_process_complete( + log_file, + "Successfully completed converting all files", + "cesm_conform_generator INFO", + ): + case_dict["xconform_status"] = "Succeeded" + case_dict["xconform_size"] = get_disk_usage(case_dict["xconform_path"]) + with open(log_file, "r") as fname: log_content = fname.readlines() - total_time = [line for line in log_content if 'Total Time:' in line] + total_time = [line for line in log_content if "Total Time:" in line] if total_time: - case_dict['xconform_total_time'] = ' '.join(total_time[0].split()) + case_dict["xconform_total_time"] = " ".join(total_time[0].split()) else: - case_dict['xconform_status'] = 'Started' + case_dict["xconform_status"] = "Started" return case_dict + # --------------------------------------------------------------------- def get_run_last_date(casename, run_path): -# --------------------------------------------------------------------- - """ get_run_last_date + # --------------------------------------------------------------------- + """get_run_last_date parse the last cpl.r file in the run_path to retrieve that last date. 
Arguments: casename run_path - path to run directory """ - logger.debug('get_run_last_date') + logger.debug("get_run_last_date") - pattern = ('{0}.cpl.r.*.nc'.format(casename)) + pattern = "{0}.cpl.r.*.nc".format(casename) cpl_files = sorted(glob.glob(os.path.join(run_path, pattern))) if cpl_files: _, cpl_file = os.path.split(cpl_files[-1]) - fparts = cpl_file.split('.') + fparts = cpl_file.split(".") return fparts[-2] - return '0000-00-00' + return "0000-00-00" + # --------------------------------------------------------------------- def get_sta_last_date(sta_path): -# --------------------------------------------------------------------- - """ get_sta_last_date + # --------------------------------------------------------------------- + """get_sta_last_date parse the last rest directory in the sta_path to retrieve that last date. Arguments: sta_path - path to run directory """ - logger.debug('get_sta_last_date') + logger.debug("get_sta_last_date") - rest_dirs = sorted(glob.glob(os.path.join(sta_path, 'rest/*'))) + rest_dirs = sorted(glob.glob(os.path.join(sta_path, "rest/*"))) if rest_dirs: _, rest_dir = os.path.split(rest_dirs[-1]) return rest_dir - return '0000-00-00' + return "0000-00-00" + # --------------------------------------------------------------------- def get_case_status(case_dict): -# --------------------------------------------------------------------- - """ get_case_status + # --------------------------------------------------------------------- + """get_case_status Parse the CaseStatus and postprocessing log files looking for status information Arguments: case_dict (dict) - case dictionary to store XML variables """ - logger.debug('get_case_status') + logger.debug("get_case_status") # initialize status variables - case_dict['run_status'] = 'Unknown' - case_dict['run_path'] = case_dict['RUNDIR'] - case_dict['run_size'] = 0 - case_dict['run_last_date'] = case_dict['RUN_STARTDATE'] + case_dict["run_status"] = "Unknown" + case_dict["run_path"] = case_dict["RUNDIR"] + case_dict["run_size"] = 0 + case_dict["run_last_date"] = case_dict["RUN_STARTDATE"] - case_dict['sta_status'] = 'Unknown' - case_dict['sta_path'] = case_dict['DOUT_S_ROOT'] - case_dict['sta_size'] = 0 - case_dict['sta_last_date'] = case_dict['RUN_STARTDATE'] + case_dict["sta_status"] = "Unknown" + case_dict["sta_path"] = case_dict["DOUT_S_ROOT"] + case_dict["sta_size"] = 0 + case_dict["sta_last_date"] = case_dict["RUN_STARTDATE"] - cstatus = case_dict['CASEROOT']+'/CaseStatus' + cstatus = case_dict["CASEROOT"] + "/CaseStatus" if os.path.exists(cstatus): # get the run status - run_status_1 = is_last_process_complete(cstatus, "case.run success", "case.run starting") - run_status_2 = is_last_process_complete(cstatus, "model execution success", "model execution starting") + run_status_1 = is_last_process_complete( + cstatus, "case.run success", "case.run starting" + ) + run_status_2 = is_last_process_complete( + cstatus, "model execution success", "model execution starting" + ) if run_status_1 is True or run_status_2 is True: - case_dict['run_status'] = 'Succeeded' - case_dict['run_size'] = get_disk_usage(case_dict['run_path']) - case_dict['run_last_date'] = get_run_last_date(case_dict['CASE'], case_dict['run_path']) + case_dict["run_status"] = "Succeeded" + case_dict["run_size"] = get_disk_usage(case_dict["run_path"]) + case_dict["run_last_date"] = get_run_last_date( + case_dict["CASE"], case_dict["run_path"] + ) # get the STA status - if case_dict['DOUT_S']: + if case_dict["DOUT_S"]: # get only the history, rest and 
logs dir - ignoring the proc subdirs
- sta_status = is_last_process_complete(cstatus, "st_archive success",
- "st_archive starting")
- case_dict['sta_last_date'] = get_sta_last_date(case_dict['DOUT_S_ROOT'])
+ sta_status = is_last_process_complete(
+ cstatus, "st_archive success", "st_archive starting"
+ )
+ case_dict["sta_last_date"] = get_sta_last_date(case_dict["DOUT_S_ROOT"])
 if sta_status is True:
- case_dict['sta_status'] = 'Succeeded'
+ case_dict["sta_status"] = "Succeeded"
 # exclude the proc directories in the sta size estimates
- for subdir in ['atm/hist', 'cpl/hist', 'esp/hist', 'ice/hist', 'glc/hist',
- 'lnd/hist', 'logs', 'ocn/hist', 'rest', 'rof/hist',
- 'wav/hist', 'iac/hist']:
- path = os.path.join(case_dict['sta_path'], subdir)
+ for subdir in [
+ "atm/hist",
+ "cpl/hist",
+ "esp/hist",
+ "ice/hist",
+ "glc/hist",
+ "lnd/hist",
+ "logs",
+ "ocn/hist",
+ "rest",
+ "rof/hist",
+ "wav/hist",
+ "iac/hist",
+ ]:
+ path = os.path.join(case_dict["sta_path"], subdir)
 if os.path.isdir(path):
- case_dict['sta_size'] += get_disk_usage(path)
+ case_dict["sta_size"] += get_disk_usage(path)

 # check if the postprocess dir exists in the caseroot
- case_dict['postprocess'] = False
- if os.path.exists(case_dict['CASEROOT']+'/postprocess'):
- case_dict['postprocess'] = True
+ case_dict["postprocess"] = False
+ if os.path.exists(case_dict["CASEROOT"] + "/postprocess"):
+ case_dict["postprocess"] = True
 case_dict = get_pp_status(case_dict)

 return case_dict

+
 # ---------------------------------------------------------------------
 def check_expdb_case(case_dict, username, password):
-# ---------------------------------------------------------------------
- """ check_exp_case
+ # ---------------------------------------------------------------------
+ """check_expdb_case
 Cross check the casename with the database for a CMIP6 experiment

 Arguments:
@@ -591,14 +758,20 @@ def check_expdb_case(case_dict, username, password):
 Return case_id value; 0 if does not exist or > 0 for exists.
 """
- logger.debug('check_expdb_case')
- data_dict = {'casename':case_dict['CASE'],
- 'queryType':'checkCaseExists',
- 'expType':case_dict['expType']}
+ logger.debug("check_expdb_case")
+ data_dict = {
+ "casename": case_dict["CASE"],
+ "queryType": "checkCaseExists",
+ "expType": case_dict["expType"],
+ }
 data = json.dumps(data_dict)
- params = urllib.parse.urlencode(dict(username=username, password=password, data=data))
+ params = urllib.parse.urlencode(
+ dict(username=username, password=password, data=data)
+ )
 try:
- response = urllib.request.urlopen(case_dict['query_expdb_url'], params, context=_context)
+ response = urllib.request.urlopen(
+ case_dict["query_expdb_url"], params, context=_context
+ )
 output = json.loads(response.read().decode())
 except urllib.error.HTTPError as http_e:
 logger.info('ERROR archive_metadata HTTP post failed "%s"', http_e.code)
@@ -607,12 +780,13 @@
 logger.info('ERROR archive_metadata URL failed "%s"', url_e.reason)
 sys.exit(1)

- return int(output['case_id'])
+ return int(output["case_id"])
+

 # ---------------------------------------------------------------------
 def query_expdb_cmip6(case_dict, username, password):
-# ---------------------------------------------------------------------
- """ query_exp_case
+ # ---------------------------------------------------------------------
+ """query_expdb_cmip6
 Query the expdb for CMIP6 casename = case_dict['q_casename'] metadata.
 Write out a json file to case_dict['q_outfile']. 
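
For reference, check_expdb_case, query_expdb_cmip6, and post_json all share one expdb exchange: a single urlencoded POST carrying username, password, and a JSON payload, sent through the unverified-SSL context set up at module scope. One portability caveat in this reformatted code: urllib.parse.urlencode returns str, while Python 3's urlopen accepts only bytes as POST data, so a Python 3 caller has to encode the form first. A minimal sketch of the pattern, assuming a reachable expdb endpoint; post_expdb_query is an illustrative name, not a helper in this script:

    import json
    import ssl
    import urllib.parse
    import urllib.request

    def post_expdb_query(url, username, password, data_dict):
        """Illustrative helper: POST a username/password/data form, return the JSON reply."""
        params = urllib.parse.urlencode(
            dict(username=username, password=password, data=json.dumps(data_dict))
        )
        # urlencode() returns str; urlopen() in Python 3 requires bytes for POST data.
        context = ssl._create_unverified_context()  # pylint:disable=protected-access
        response = urllib.request.urlopen(url, params.encode("utf-8"), context=context)
        return json.loads(response.read().decode())

Under that assumption, check_expdb_case would reduce to one such call plus int() on the returned case_id, and the other two posting functions would differ only in payload and URL.
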
@@ -622,15 +796,21 @@ def query_expdb_cmip6(case_dict, username, password): password (string) - SVN developer's password """ - logger.debug('query_expdb_cmip6') + logger.debug("query_expdb_cmip6") exists = False - data_dict = {'casename':case_dict['q_casename'], - 'queryType':'CMIP6GlobalAtts', - 'expType':'CMIP6'} + data_dict = { + "casename": case_dict["q_casename"], + "queryType": "CMIP6GlobalAtts", + "expType": "CMIP6", + } data = json.dumps(data_dict) - params = urllib.parse.urlencode(dict(username=username, password=password, data=data)) + params = urllib.parse.urlencode( + dict(username=username, password=password, data=data) + ) try: - response = urllib.request.urlopen(case_dict['query_expdb_url'], params, context=_context) + response = urllib.request.urlopen( + case_dict["query_expdb_url"], params, context=_context + ) output = json.load(response) except urllib.error.HTTPError as http_e: logger.info('ERROR archive_metadata HTTP post failed "%s"', http_e.code) @@ -638,44 +818,49 @@ def query_expdb_cmip6(case_dict, username, password): logger.info('ERROR archive_metadata URL failed "%s"', url_e.reason) if output: - if not os.path.exists('{0}/archive_files'.format(case_dict['workdir'])): - os.makedirs('{0}/archive_files'.format(case_dict['workdir'])) + if not os.path.exists("{0}/archive_files".format(case_dict["workdir"])): + os.makedirs("{0}/archive_files".format(case_dict["workdir"])) - filename = '{0}/archive_files/{1}'.format(case_dict['workdir'], case_dict['q_outfile']) - with io.open(filename, 'w+', encoding='utf-8') as fname: + filename = "{0}/archive_files/{1}".format( + case_dict["workdir"], case_dict["q_outfile"] + ) + with io.open(filename, "w+", encoding="utf-8") as fname: fname.write(json.dumps(output, ensure_ascii=False)) fname.close() exists = True return exists + # --------------------------------------------------------------------- def create_json(case_dict): -# --------------------------------------------------------------------- - """ create_json + # --------------------------------------------------------------------- + """create_json Create a JSON file in the caseroot/archive_files dir. Arguments: case_dict (dict) - case dictionary to store XML variables """ - logger.debug('create_json') + logger.debug("create_json") - if not os.path.exists('{0}/archive_files'.format(case_dict['workdir'])): - os.makedirs('{0}/archive_files'.format(case_dict['workdir'])) + if not os.path.exists("{0}/archive_files".format(case_dict["workdir"])): + os.makedirs("{0}/archive_files".format(case_dict["workdir"])) - filename = '{0}/archive_files/json.{1}'.format(case_dict['workdir'], - datetime.datetime.now().strftime('%Y%m%d-%H%M%S')) - with io.open(filename, 'wb') as fname: + filename = "{0}/archive_files/json.{1}".format( + case_dict["workdir"], datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + ) + with io.open(filename, "wb") as fname: jstr = str(json.dumps(case_dict, indent=4, sort_keys=True, ensure_ascii=False)) if isinstance(jstr, str): - jstr = jstr.decode('utf-8') + jstr = jstr.decode("utf-8") fname.write(jstr) fname.close() + # --------------------------------------------------------------------- def post_json(case_dict, username, password): -# --------------------------------------------------------------------- - """ post_json + # --------------------------------------------------------------------- + """post_json Post a JSON file in the caseroot/archive_files to the remote expdb URL. 
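
One Python 2 leftover is worth flagging while this hunk is being touched: in create_json, json.dumps already returns str under Python 3, str has no .decode method, and a file opened "wb" rejects str outright, so the isinstance/decode branch only ever worked under Python 2. A minimal Python 3 sketch of the same dump, assuming the same workdir/archive_files layout; the _py3 suffix just marks it as illustrative:

    import datetime
    import json
    import os

    def create_json_py3(case_dict):
        """Illustrative Python 3 version: dump case_dict into <workdir>/archive_files."""
        archive_dir = os.path.join(case_dict["workdir"], "archive_files")
        os.makedirs(archive_dir, exist_ok=True)  # replaces the exists-then-makedirs dance

        stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        filename = os.path.join(archive_dir, "json.{0}".format(stamp))
        # json.dumps returns str in Python 3, so write in text mode with an explicit encoding.
        with open(filename, "w", encoding="utf-8") as fname:
            fname.write(json.dumps(case_dict, indent=4, sort_keys=True, ensure_ascii=False))

exist_ok=True also sidesteps the small race between the os.path.exists check and os.makedirs that the current code carries.
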
@@ -684,201 +869,248 @@ def post_json(case_dict, username, password): username (string) - SVN developers username password (string) - SVN developers password """ - logger.debug('post_json') + logger.debug("post_json") - case_dict['COMPSET'] = urllib.parse.quote(case_dict['COMPSET']) - case_dict['GRID'] = urllib.parse.quote(case_dict['GRID']) + case_dict["COMPSET"] = urllib.parse.quote(case_dict["COMPSET"]) + case_dict["GRID"] = urllib.parse.quote(case_dict["GRID"]) data = json.dumps(case_dict) - params = urllib.parse.urlencode(dict(username=username, password=password, data=data)) + params = urllib.parse.urlencode( + dict(username=username, password=password, data=data) + ) try: - urllib.request.urlopen(case_dict['json_expdb_url'], params, context=_context) + urllib.request.urlopen(case_dict["json_expdb_url"], params, context=_context) except urllib.error.HTTPError as http_e: logger.info('ERROR archive_metadata HTTP post failed "%s"', http_e.code) except urllib.error.URLError as url_e: logger.info('ERROR archive_metadata URL failed "%s"', url_e.reason) + # --------------------------------------------------------------------- def check_svn(): -# --------------------------------------------------------------------- - """ check_svn + # --------------------------------------------------------------------- + """check_svn make sure svn client is installed and accessible """ - logger.debug('check_svn') + logger.debug("check_svn") - cmd = ['svn', '--version'] + cmd = ["svn", "--version"] svn_exists = True - result = '' + result = "" try: result = subprocess.check_output(cmd) except subprocess.CalledProcessError as error: - msg = _svn_error_template.substitute(function='check_svn', cmd=cmd, - error=error.returncode, strerror=error.output) + msg = _svn_error_template.substitute( + function="check_svn", cmd=cmd, error=error.returncode, strerror=error.output + ) svn_exists = False logger.info(msg) raise SVNException(msg) - if 'version' not in result: - msg = 'SVN is not available. Ignoring SVN update' + if "version" not in result: + msg = "SVN is not available. Ignoring SVN update" svn_exists = False raise SVNException(msg) return svn_exists + # --------------------------------------------------------------------- def create_temp_archive(case_dict): -# --------------------------------------------------------------------- - """ create_temp_archive + # --------------------------------------------------------------------- + """create_temp_archive Create a temporary SVN sandbox directory in the current caseroot """ - archive_temp_dir = '{0}/archive_temp_dir'.format(case_dict['workdir']) - logger.debug('create_temp_archive %s', archive_temp_dir) + archive_temp_dir = "{0}/archive_temp_dir".format(case_dict["workdir"]) + logger.debug("create_temp_archive %s", archive_temp_dir) if not os.path.exists(archive_temp_dir): os.makedirs(archive_temp_dir) else: - logger.info('ERROR archive_metadata archive_temp_dir already exists. exiting...') + logger.info( + "ERROR archive_metadata archive_temp_dir already exists. exiting..." 
+ ) sys.exit(1) return archive_temp_dir + # --------------------------------------------------------------------- def check_svn_repo(case_dict, username, password): -# --------------------------------------------------------------------- - """ check_svn_repo + # --------------------------------------------------------------------- + """check_svn_repo check if a SVN repo exists for this case """ - logger.debug('check_svn_repo') + logger.debug("check_svn_repo") repo_exists = False - svn_repo = '{0}/trunk'.format(case_dict['svn_repo_url']) - cmd = ['svn', 'list', svn_repo, '--username', username, '--password', password] - result = '' + svn_repo = "{0}/trunk".format(case_dict["svn_repo_url"]) + cmd = ["svn", "list", svn_repo, "--username", username, "--password", password] + result = "" try: result = subprocess.check_output(cmd) except subprocess.CalledProcessError: - msg = 'SVN repo does not exist for this case. A new one will be created.' + msg = "SVN repo does not exist for this case. A new one will be created." logger.warning(msg) - if re.search('README.archive', result): + if re.search("README.archive", result): repo_exists = True return repo_exists + # --------------------------------------------------------------------- def get_trunk_tag(case_dict, username, password): -# --------------------------------------------------------------------- - """ get_trunk_tag + # --------------------------------------------------------------------- + """get_trunk_tag return the most recent trunk tag as an integer """ - logger.debug('get_trunk_tag') + logger.debug("get_trunk_tag") tag = 0 - svn_repo = '{0}/trunk_tags'.format(case_dict['svn_repo_url']) - cmd = ['svn', 'list', svn_repo, '--username', username, '--password', password] - result = '' + svn_repo = "{0}/trunk_tags".format(case_dict["svn_repo_url"]) + cmd = ["svn", "list", svn_repo, "--username", username, "--password", password] + result = "" try: result = subprocess.check_output(cmd) except subprocess.CalledProcessError as error: - cmd_nopasswd = ['svn', 'list', svn_repo, '--username', username, '--password', '******'] - msg = _call_template.substitute(function='get_trunk_tag', cmd=cmd_nopasswd, - error=error.returncode, strerror=error.output) + cmd_nopasswd = [ + "svn", + "list", + svn_repo, + "--username", + username, + "--password", + "******", + ] + msg = _call_template.substitute( + function="get_trunk_tag", + cmd=cmd_nopasswd, + error=error.returncode, + strerror=error.output, + ) logger.warning(msg) raise SVNException(msg) if result: - last_tag = [i for i in result.split('\n') if i][-1] - last_tag = last_tag[:-1].split('_')[-1] - tag = int(last_tag.lstrip('0')) + last_tag = [i for i in result.split("\n") if i][-1] + last_tag = last_tag[:-1].split("_")[-1] + tag = int(last_tag.lstrip("0")) return tag + # --------------------------------------------------------------------- def checkout_repo(case_dict, username, password): -# --------------------------------------------------------------------- - """ checkout_repo + # --------------------------------------------------------------------- + """checkout_repo checkout the repo into the archive_temp_dir """ - logger.debug('checkout_repo') + logger.debug("checkout_repo") - os.chdir(case_dict['archive_temp_dir']) - svn_repo = '{0}/trunk'.format(case_dict['svn_repo_url']) - cmd = ['svn', 'co', '--username', username, '--password', password, svn_repo, '.'] + os.chdir(case_dict["archive_temp_dir"]) + svn_repo = "{0}/trunk".format(case_dict["svn_repo_url"]) + cmd = ["svn", "co", "--username", 
username, "--password", password, svn_repo, "."] try: subprocess.check_call(cmd) except subprocess.CalledProcessError as error: - cmd_nopasswd = ['svn', 'co', '--username', username, '--password', '******', svn_repo, '.'] - msg = _call_template.substitute(function='checkout_repo', cmd=cmd_nopasswd, - error=error.returncode, strerror=error.output) + cmd_nopasswd = [ + "svn", + "co", + "--username", + username, + "--password", + "******", + svn_repo, + ".", + ] + msg = _call_template.substitute( + function="checkout_repo", + cmd=cmd_nopasswd, + error=error.returncode, + strerror=error.output, + ) logger.warning(msg) raise SVNException(msg) - os.chdir(case_dict['CASEROOT']) + os.chdir(case_dict["CASEROOT"]) + # --------------------------------------------------------------------- def create_readme(case_dict): -# --------------------------------------------------------------------- - """ create_readme + # --------------------------------------------------------------------- + """create_readme Create a generic README.archive file """ - logger.debug('create_readme') - os.chdir(case_dict['archive_temp_dir']) + logger.debug("create_readme") + os.chdir(case_dict["archive_temp_dir"]) - fname = open('README.archive', 'w') - fname.write('Archived metadata is available for this case at URL:\n') - fname.write(case_dict['base_expdb_url']) + fname = open("README.archive", "w") + fname.write("Archived metadata is available for this case at URL:\n") + fname.write(case_dict["base_expdb_url"]) fname.close() + # --------------------------------------------------------------------- def update_repo_add_file(filename, dir1, dir2): -# --------------------------------------------------------------------- - """ update_repo_add_file + # --------------------------------------------------------------------- + """update_repo_add_file Add a file to the SVN repository """ src = os.path.join(dir1, filename) dest = os.path.join(dir2, filename) - logger.debug('left_only: '+src+' -> '+dest) + logger.debug("left_only: " + src + " -> " + dest) if not os.path.exists(dest): shutil.copy2(src, dest) - cmd = ['svn', 'add', '--parents', dest] + cmd = ["svn", "add", "--parents", dest] try: subprocess.check_call(cmd) except subprocess.CalledProcessError as error: - msg = _call_template.substitute(function='update_lcoal_repo', cmd=cmd, - error=error.returncode, strerror=error.output) + msg = _call_template.substitute( + function="update_lcoal_repo", + cmd=cmd, + error=error.returncode, + strerror=error.output, + ) logger.warning(msg) raise SVNException(msg) + # --------------------------------------------------------------------- def update_repo_rm_file(filename, dir1, dir2): -# --------------------------------------------------------------------- - """ update_repo_rm_file + # --------------------------------------------------------------------- + """update_repo_rm_file Remove a file from the SVN repository """ src = os.path.join(dir2, filename) dest = os.path.join(dir1, filename) - logger.debug('right_only: '+src+' -> '+dest) + logger.debug("right_only: " + src + " -> " + dest) if os.path.exists(dest): - cmd = ['svn', 'rm', dest] + cmd = ["svn", "rm", dest] try: subprocess.check_call(cmd) except subprocess.CalledProcessError as error: - msg = _call_template.substitute(function='update_lcoal_repo', cmd=cmd, - error=error.returncode, strerror=error.output) + msg = _call_template.substitute( + function="update_lcoal_repo", + cmd=cmd, + error=error.returncode, + strerror=error.output, + ) logger.warning(msg) raise SVNException(msg) + # 
--------------------------------------------------------------------- def update_repo_copy_file(filename, dir1, dir2): -# --------------------------------------------------------------------- - """ update_repo_copy_file + # --------------------------------------------------------------------- + """update_repo_copy_file Copy a file into the SVN local repo """ @@ -886,40 +1118,57 @@ def update_repo_copy_file(filename, dir1, dir2): dest = os.path.join(dir2, filename) shutil.copy2(src, dest) + # --------------------------------------------------------------------- def compare_dir_trees(dir1, dir2, archive_list): -# --------------------------------------------------------------------- - """ compare_dir_trees + # --------------------------------------------------------------------- + """compare_dir_trees Compare two directories recursively. Files in each directory are assumed to be equal if their names and contents are equal. - """ - xml_files = glob.glob(os.path.join(dir1, '*.xml')) - user_nl_files = glob.glob(os.path.join(dir1, 'user_nl_*')) + """ + xml_files = glob.glob(os.path.join(dir1, "*.xml")) + user_nl_files = glob.glob(os.path.join(dir1, "user_nl_*")) dirs_cmp = filecmp.dircmp(dir1, dir2, _ignore_patterns) - left_only = [fn for fn in dirs_cmp.left_only if not os.path.islink(fn) - and (fn in xml_files or fn in user_nl_files or fn in archive_list)] - right_only = [fn for fn in dirs_cmp.right_only if not os.path.islink(fn) - and (fn in xml_files or fn in user_nl_files or fn in archive_list)] - funny_files = [fn for fn in dirs_cmp.funny_files if not os.path.islink(fn) - and (fn in xml_files or fn in user_nl_files or fn in archive_list)] + left_only = [ + fn + for fn in dirs_cmp.left_only + if not os.path.islink(fn) + and (fn in xml_files or fn in user_nl_files or fn in archive_list) + ] + right_only = [ + fn + for fn in dirs_cmp.right_only + if not os.path.islink(fn) + and (fn in xml_files or fn in user_nl_files or fn in archive_list) + ] + funny_files = [ + fn + for fn in dirs_cmp.funny_files + if not os.path.islink(fn) + and (fn in xml_files or fn in user_nl_files or fn in archive_list) + ] # files and directories need to be added to svn repo from the caseroot if left_only: for filename in left_only: - if os.path.isfile(os.path.join(dir1, filename)) and filename[-1] != '~': + if os.path.isfile(os.path.join(dir1, filename)) and filename[-1] != "~": update_repo_add_file(filename, dir1, dir2) else: new_dir1 = os.path.join(dir1, filename) new_dir2 = os.path.join(dir2, filename) os.makedirs(new_dir2) - cmd = ['svn', 'add', new_dir2] + cmd = ["svn", "add", new_dir2] try: subprocess.check_call(cmd) except subprocess.CalledProcessError as error: - msg = _call_template.substitute(function='update_lcoal_repo', cmd=cmd, - error=error.returncode, strerror=error.output) + msg = _call_template.substitute( + function="update_lcoal_repo", + cmd=cmd, + error=error.returncode, + strerror=error.output, + ) logger.warning(msg) raise SVNException(msg) @@ -930,27 +1179,28 @@ def compare_dir_trees(dir1, dir2, archive_list): # files need to be removed from svn repo that are no longer in the caseroot if right_only: for filename in right_only: - if os.path.isfile(os.path.join(dir1, filename)) and filename[-1] != '~': + if os.path.isfile(os.path.join(dir1, filename)) and filename[-1] != "~": update_repo_rm_file(filename, dir1, dir2) # files are the same but could not be compared so copy the caseroot version if funny_files: for filename in funny_files: - if os.path.isfile(os.path.join(dir1, filename)) and 
filename[-1] != '~': + if os.path.isfile(os.path.join(dir1, filename)) and filename[-1] != "~": update_repo_copy_file(filename, dir1, dir2) # common files have changed in the caseroot and need to be copied to the svn repo (_, mismatch, errors) = filecmp.cmpfiles( - dir1, dir2, dirs_cmp.common_files, shallow=False) + dir1, dir2, dirs_cmp.common_files, shallow=False + ) if mismatch: for filename in mismatch: - if os.path.isfile(os.path.join(dir1, filename)) and filename[-1] != '~': + if os.path.isfile(os.path.join(dir1, filename)) and filename[-1] != "~": update_repo_copy_file(filename, dir1, dir2) # error in file comparison so copy the caseroot file to the svn repo if errors: for filename in errors: - if os.path.isfile(os.path.join(dir1, filename)) and filename[-1] != '~': + if os.path.isfile(os.path.join(dir1, filename)) and filename[-1] != "~": update_repo_copy_file(filename, dir1, dir2) # recurse through the subdirs @@ -964,319 +1214,471 @@ def compare_dir_trees(dir1, dir2, archive_list): else: return + # --------------------------------------------------------------------- def update_local_repo(case_dict, ignore_logs, ignore_timing): -# --------------------------------------------------------------------- - """ update_local_repo + # --------------------------------------------------------------------- + """update_local_repo Compare and update local SVN sandbox """ - logger.debug('update_local_repo') - from_dir = case_dict['CASEROOT'] - to_dir = case_dict['archive_temp_dir'] + logger.debug("update_local_repo") + from_dir = case_dict["CASEROOT"] + to_dir = case_dict["archive_temp_dir"] - compare_dir_trees(from_dir, to_dir, case_dict['archive_list']) + compare_dir_trees(from_dir, to_dir, case_dict["archive_list"]) # check if ignore_logs is specified if ignore_logs: os.chdir(to_dir) - if os.path.isdir('./logs'): + if os.path.isdir("./logs"): try: - shutil.rmtree('./logs') + shutil.rmtree("./logs") except OSError: - logger.warning('in "update_local_repo" - Unable to remove "logs" in archive dir.') + logger.warning( + 'in "update_local_repo" - Unable to remove "logs" in archive dir.' + ) - cmd = ['svn', 'delete', './logs'] + cmd = ["svn", "delete", "./logs"] try: subprocess.check_call(cmd) except subprocess.CalledProcessError as error: - msg = _call_template.substitute(function='update_lcoal_repo', cmd=cmd, - error=error.returncode, strerror=error.output) + msg = _call_template.substitute( + function="update_lcoal_repo", + cmd=cmd, + error=error.returncode, + strerror=error.output, + ) logger.warning(msg) raise SVNException(msg) - if os.path.isdir('./postprocess/logs'): - os.chdir('./postprocess') + if os.path.isdir("./postprocess/logs"): + os.chdir("./postprocess") try: - shutil.rmtree('./logs') + shutil.rmtree("./logs") except OSError: - logger.warning('in "update_local_repo" - '\ - 'Unable to remove "postprocess/logs" in archive dir.') + logger.warning( + 'in "update_local_repo" - ' + 'Unable to remove "postprocess/logs" in archive dir.' 
+ ) - cmd = ['svn', 'delete', './logs'] + cmd = ["svn", "delete", "./logs"] try: subprocess.check_call(cmd) except subprocess.CalledProcessError as error: - msg = _call_template.substitute(function='update_lcoal_repo', cmd=cmd, - error=error.returncode, strerror=error.output) + msg = _call_template.substitute( + function="update_lcoal_repo", + cmd=cmd, + error=error.returncode, + strerror=error.output, + ) logger.warning(msg) raise SVNException(msg) else: # add log files - if os.path.exists('{0}/logs'.format(from_dir)): - if not os.path.exists('{0}/logs'.format(to_dir)): - os.makedirs('{0}/logs'.format(to_dir)) - os.chdir(os.path.join(from_dir, 'logs')) - for filename in glob.glob('*.*'): - update_repo_add_file(filename, os.path.join(from_dir, 'logs'), - os.path.join(to_dir, 'logs')) - - if os.path.exists('{0}/postprocess/logs'.format(from_dir)): - if not os.path.exists('{0}/postprocess/logs'.format(to_dir)): - os.makedirs('{0}/postprocess/logs'.format(to_dir)) - os.chdir(os.path.join(from_dir, 'postprocess/logs')) - for filename in glob.glob('*.*'): - update_repo_add_file(filename, os.path.join(from_dir, 'postprocess', 'logs'), - os.path.join(to_dir, 'postprocess', 'logs')) - + if os.path.exists("{0}/logs".format(from_dir)): + if not os.path.exists("{0}/logs".format(to_dir)): + os.makedirs("{0}/logs".format(to_dir)) + os.chdir(os.path.join(from_dir, "logs")) + for filename in glob.glob("*.*"): + update_repo_add_file( + filename, + os.path.join(from_dir, "logs"), + os.path.join(to_dir, "logs"), + ) + + if os.path.exists("{0}/postprocess/logs".format(from_dir)): + if not os.path.exists("{0}/postprocess/logs".format(to_dir)): + os.makedirs("{0}/postprocess/logs".format(to_dir)) + os.chdir(os.path.join(from_dir, "postprocess/logs")) + for filename in glob.glob("*.*"): + update_repo_add_file( + filename, + os.path.join(from_dir, "postprocess", "logs"), + os.path.join(to_dir, "postprocess", "logs"), + ) # check if ignore_timing is specified if ignore_timing: - os.chdir(case_dict['archive_temp_dir']) - if os.path.isdir('./timing'): + os.chdir(case_dict["archive_temp_dir"]) + if os.path.isdir("./timing"): try: - shutil.rmtree('./timing') + shutil.rmtree("./timing") except OSError: - logger.warning('in "update_local_repo" - Unable to remove "timing" in archive dir.') + logger.warning( + 'in "update_local_repo" - Unable to remove "timing" in archive dir.' 
+ ) - cmd = ['svn', 'delete', './timing'] + cmd = ["svn", "delete", "./timing"] try: subprocess.check_call(cmd) except subprocess.CalledProcessError as error: - msg = _call_template.substitute(function='update_lcoal_repo', cmd=cmd, - error=error.returncode, strerror=error.output) + msg = _call_template.substitute( + function="update_lcoal_repo", + cmd=cmd, + error=error.returncode, + strerror=error.output, + ) logger.warning(msg) raise SVNException(msg) else: # add timing files - if os.path.exists('{0}/timing'.format(from_dir)): - if not os.path.exists('{0}/timing'.format(to_dir)): - os.makedirs('{0}/timing'.format(to_dir)) - os.chdir(os.path.join(from_dir, 'timing')) - for filename in glob.glob('*.*'): - update_repo_add_file(filename, os.path.join(from_dir, 'timing'), - os.path.join(to_dir, 'timing')) + if os.path.exists("{0}/timing".format(from_dir)): + if not os.path.exists("{0}/timing".format(to_dir)): + os.makedirs("{0}/timing".format(to_dir)) + os.chdir(os.path.join(from_dir, "timing")) + for filename in glob.glob("*.*"): + update_repo_add_file( + filename, + os.path.join(from_dir, "timing"), + os.path.join(to_dir, "timing"), + ) # --------------------------------------------------------------------- def populate_local_repo(case_dict, ignore_logs, ignore_timing): -# --------------------------------------------------------------------- - """ populate_local_repo + # --------------------------------------------------------------------- + """populate_local_repo Populate local SVN sandbox """ - logger.debug('populate_local_repo') - os.chdir(case_dict['CASEROOT']) + logger.debug("populate_local_repo") + os.chdir(case_dict["CASEROOT"]) # loop through the archive_list and copy to the temp archive dir - for archive in case_dict['archive_list']: + for archive in case_dict["archive_list"]: if os.path.exists(archive): if os.path.isdir(archive): try: - target = case_dict['archive_temp_dir']+'/'+archive - shutil.copytree(archive, target, symlinks=False, - ignore=shutil.ignore_patterns(*_ignore_patterns)) + target = case_dict["archive_temp_dir"] + "/" + archive + shutil.copytree( + archive, + target, + symlinks=False, + ignore=shutil.ignore_patterns(*_ignore_patterns), + ) except OSError as error: - msg = _copy_template.substitute(function='populate_local_repo', - source=archive, - dest=case_dict['archive_temp_dir'], - error=error.errno, - strerror=error.strerror) + msg = _copy_template.substitute( + function="populate_local_repo", + source=archive, + dest=case_dict["archive_temp_dir"], + error=error.errno, + strerror=error.strerror, + ) logger.warning(msg) else: try: - shutil.copy2(archive, case_dict['archive_temp_dir']) + shutil.copy2(archive, case_dict["archive_temp_dir"]) except OSError as error: - msg = _copy_template.substitute(function='populate_local_repo', - source=archive, - dest=case_dict['archive_temp_dir'], - error=error.errno, - strerror=error.strerror) + msg = _copy_template.substitute( + function="populate_local_repo", + source=archive, + dest=case_dict["archive_temp_dir"], + error=error.errno, + strerror=error.strerror, + ) logger.warning(msg) # add files with .xml as the suffix - xml_files = glob.glob('*.xml') + xml_files = glob.glob("*.xml") for xml_file in xml_files: if os.path.isfile(xml_file): try: - shutil.copy2(xml_file, case_dict['archive_temp_dir']) + shutil.copy2(xml_file, case_dict["archive_temp_dir"]) except OSError as error: - msg = _copy_template.substitute(function='populate_local_repo', - source=xml_file, - dest=case_dict['archive_temp_dir'], - error=error.errno, - 
strerror=error.strerror) + msg = _copy_template.substitute( + function="populate_local_repo", + source=xml_file, + dest=case_dict["archive_temp_dir"], + error=error.errno, + strerror=error.strerror, + ) logger.warning(msg) # add files with .xml as the suffix from the postprocess directory - if os.path.isdir('./postprocess'): - pp_path = '{0}/{1}'.format(case_dict['archive_temp_dir'], 'postprocess') + if os.path.isdir("./postprocess"): + pp_path = "{0}/{1}".format(case_dict["archive_temp_dir"], "postprocess") if not os.path.exists(pp_path): os.mkdir(pp_path) - xml_files = glob.glob('./postprocess/*.xml') + xml_files = glob.glob("./postprocess/*.xml") for xml_file in xml_files: if os.path.isfile(xml_file): try: shutil.copy2(xml_file, pp_path) except OSError as error: - msg = _copy_template.substitute(function='populate_local_repo', - source=xml_file, - dest=case_dict['archive_temp_dir'], - error=error.errno, - strerror=error.strerror) + msg = _copy_template.substitute( + function="populate_local_repo", + source=xml_file, + dest=case_dict["archive_temp_dir"], + error=error.errno, + strerror=error.strerror, + ) logger.warning(msg) # add files with user_nl_ as the prefix - user_files = glob.glob('user_nl_*') + user_files = glob.glob("user_nl_*") for user_file in user_files: if os.path.isfile(user_file): try: - shutil.copy2(user_file, case_dict['archive_temp_dir']) + shutil.copy2(user_file, case_dict["archive_temp_dir"]) except OSError as error: - msg = _copy_template.substitute(function='populate_local_repo', - source=user_file, - dest=case_dict['archive_temp_dir'], - error=error.errno, - strerror=error.strerror) + msg = _copy_template.substitute( + function="populate_local_repo", + source=user_file, + dest=case_dict["archive_temp_dir"], + error=error.errno, + strerror=error.strerror, + ) logger.warning(msg) # add files with Depends as the prefix - conf_files = glob.glob('Depends.*') + conf_files = glob.glob("Depends.*") for conf_file in conf_files: if os.path.isfile(conf_file): try: - shutil.copy2(conf_file, case_dict['archive_temp_dir']) + shutil.copy2(conf_file, case_dict["archive_temp_dir"]) except OSError as error: - msg = _copy_template.substitute(function='populate_local_repo', - source=conf_file, - dest=case_dict['archive_temp_dir'], - error=error.errno, - strerror=error.strerror) + msg = _copy_template.substitute( + function="populate_local_repo", + source=conf_file, + dest=case_dict["archive_temp_dir"], + error=error.errno, + strerror=error.strerror, + ) logger.warning(msg) # check if ignore_logs is specified if ignore_logs: - os.chdir(case_dict['archive_temp_dir']) - if os.path.isdir('./logs'): + os.chdir(case_dict["archive_temp_dir"]) + if os.path.isdir("./logs"): try: - shutil.rmtree('./logs') + shutil.rmtree("./logs") except OSError: - logger.warning('in "populate_local_repo" - Unable to remove "logs" in archive_temp_dir.') - if os.path.isdir('./postprocess/logs'): - os.chdir('./postprocess') + logger.warning( + 'in "populate_local_repo" - Unable to remove "logs" in archive_temp_dir.' + ) + if os.path.isdir("./postprocess/logs"): + os.chdir("./postprocess") try: - shutil.rmtree('./logs') + shutil.rmtree("./logs") except OSError: - logger.warning('in "populate_local_repo" - ' \ - 'Unable to remove "postprocess/logs" in archive_temp_dir.') - os.chdir(case_dict['CASEROOT']) + logger.warning( + 'in "populate_local_repo" - ' + 'Unable to remove "postprocess/logs" in archive_temp_dir.' 
+ ) + os.chdir(case_dict["CASEROOT"]) # check if ignore_timing is specified if ignore_timing: - os.chdir(case_dict['archive_temp_dir']) - if os.path.isdir('./timing'): + os.chdir(case_dict["archive_temp_dir"]) + if os.path.isdir("./timing"): try: - shutil.rmtree('./timing') + shutil.rmtree("./timing") except OSError: - logger.warning('in "populate_local_repo" - Unable to remove "timing" in archive_temp_dir.') - os.chdir(case_dict['CASEROOT']) + logger.warning( + 'in "populate_local_repo" - Unable to remove "timing" in archive_temp_dir.' + ) + os.chdir(case_dict["CASEROOT"]) # --------------------------------------------------------------------- def checkin_trunk(case_dict, svn_cmd, message, username, password): -# --------------------------------------------------------------------- - """ checkin_trunk + # --------------------------------------------------------------------- + """checkin_trunk Check in the local SVN sandbox to the remote trunk """ - logger.debug('checkin_trunk') + logger.debug("checkin_trunk") - os.chdir(case_dict['archive_temp_dir']) - svn_repo = '{0}/trunk'.format(case_dict['svn_repo_url']) + os.chdir(case_dict["archive_temp_dir"]) + svn_repo = "{0}/trunk".format(case_dict["svn_repo_url"]) msg = '"{0}"'.format(message) - cmd = ['svn', svn_cmd, '--username', username, - '--password', password, '.', '--message', msg] - - if svn_cmd in ['import']: + cmd = [ + "svn", + svn_cmd, + "--username", + username, + "--password", + password, + ".", + "--message", + msg, + ] + + if svn_cmd in ["import"]: # create the trunk dir msg = '"create trunk"' - cmd = ['svn', 'mkdir', '--parents', svn_repo, - '--username', username, '--password', password, '--message', msg] + cmd = [ + "svn", + "mkdir", + "--parents", + svn_repo, + "--username", + username, + "--password", + password, + "--message", + msg, + ] try: subprocess.check_call(cmd) except subprocess.CalledProcessError as error: - cmd_nopasswd = ['svn', 'mkdir', '--parents', svn_repo, - '--username', username, '--password', '******', - '--message', msg] - msg = _call_template.substitute(function='checkin_trunk', cmd=cmd_nopasswd, - error=error.returncode, strerror=error.output) + cmd_nopasswd = [ + "svn", + "mkdir", + "--parents", + svn_repo, + "--username", + username, + "--password", + "******", + "--message", + msg, + ] + msg = _call_template.substitute( + function="checkin_trunk", + cmd=cmd_nopasswd, + error=error.returncode, + strerror=error.output, + ) logger.warning(msg) raise SVNException(msg) # create the trunk_tags dir - tags = '{0}/trunk_tags'.format(case_dict['svn_repo_url']) + tags = "{0}/trunk_tags".format(case_dict["svn_repo_url"]) msg = '"create trunk_tags"' - cmd = ['svn', 'mkdir', tags, '--username', username, - '--password', password, '--message', msg] + cmd = [ + "svn", + "mkdir", + tags, + "--username", + username, + "--password", + password, + "--message", + msg, + ] try: subprocess.check_call(cmd) except subprocess.CalledProcessError as error: - cmd_nopasswd = ['svn', 'mkdir', tags, '--username', username, - '--password', '******', '--message', msg] - msg = _call_template.substitute(function='checkin_trunk', cmd=cmd_nopasswd, - error=error.returncode, strerror=error.output) + cmd_nopasswd = [ + "svn", + "mkdir", + tags, + "--username", + username, + "--password", + "******", + "--message", + msg, + ] + msg = _call_template.substitute( + function="checkin_trunk", + cmd=cmd_nopasswd, + error=error.returncode, + strerror=error.output, + ) logger.warning(msg) raise SVNException(msg) msg = '"{0}"'.format(message) - cmd 
= ['svn', svn_cmd, '--username', username, '--password', password, '.', - svn_repo, '--message', msg] + cmd = [ + "svn", + svn_cmd, + "--username", + username, + "--password", + password, + ".", + svn_repo, + "--message", + msg, + ] # check-in the trunk to svn try: subprocess.check_call(cmd) except subprocess.CalledProcessError as error: - cmd_nopasswd = ['svn', svn_cmd, '--username', username, - '--password', '******', '.', '--message', msg] - msg = _call_template.substitute(function='checkin_trunk', cmd=cmd_nopasswd, - error=error.returncode, strerror=error.output) + cmd_nopasswd = [ + "svn", + svn_cmd, + "--username", + username, + "--password", + "******", + ".", + "--message", + msg, + ] + msg = _call_template.substitute( + function="checkin_trunk", + cmd=cmd_nopasswd, + error=error.returncode, + strerror=error.output, + ) logger.warning(msg) raise SVNException(msg) + # --------------------------------------------------------------------- def create_tag(case_dict, new_tag, username, password): -# --------------------------------------------------------------------- - """ create_tag + # --------------------------------------------------------------------- + """create_tag create a new trunk tag in the remote repo """ - logger.debug('create_tag') + logger.debug("create_tag") # create a new trunk tag - os.chdir(case_dict['archive_temp_dir']) - svn_repo = '{0}/trunk'.format(case_dict['svn_repo_url']) - svn_repo_tag = '{0}/trunk_tags/{1}'.format(case_dict['svn_repo_url'], new_tag) + os.chdir(case_dict["archive_temp_dir"]) + svn_repo = "{0}/trunk".format(case_dict["svn_repo_url"]) + svn_repo_tag = "{0}/trunk_tags/{1}".format(case_dict["svn_repo_url"], new_tag) msg = '"create new trunk tag"' - cmd = ['svn', 'copy', '--username', username, '--password', password, - svn_repo, svn_repo_tag, '--message', msg] + cmd = [ + "svn", + "copy", + "--username", + username, + "--password", + password, + svn_repo, + svn_repo_tag, + "--message", + msg, + ] try: subprocess.check_call(cmd) except subprocess.CalledProcessError as error: - cmd_nopasswd = ['svn', 'copy', '--username', username, '--password', '******', - svn_repo, svn_repo_tag, '--message', msg] - msg = _call_template.substitute(function='checkin_trunk', cmd=cmd_nopasswd, - error=error.returncode, strerror=error.output) + cmd_nopasswd = [ + "svn", + "copy", + "--username", + username, + "--password", + "******", + svn_repo, + svn_repo_tag, + "--message", + msg, + ] + msg = _call_template.substitute( + function="create_tag", + cmd=cmd_nopasswd, + error=error.returncode, + strerror=error.output, + ) logger.warning(msg) raise SVNException(msg) + # ------------------------------------------------------------------------- def update_repo(ignore_logs, ignore_timing, case_dict, username, password): -# ------------------------------------------------------------------------- - """ update_repo + # ------------------------------------------------------------------------- + """update_repo Update SVN repo """ - logger.debug('update_repo') + logger.debug("update_repo") try: # check if svn client is installed @@ -1284,32 +1686,45 @@ def update_repo(ignore_logs, ignore_timing, case_dict, username, password): if svn_exists: # check if the case repo exists - case_dict['svn_repo_url'] = '{0}/{1}'.format(_svn_expdb_url, case_dict['CASE']) + case_dict["svn_repo_url"] = "{0}/{1}".format( + _svn_expdb_url, case_dict["CASE"] + ) repo_exists = check_svn_repo(case_dict, username, password) - case_dict['archive_temp_dir'] = create_temp_archive(case_dict) -
case_dict['archive_list'] = _archive_list + case_dict['user_add_files'] + case_dict["archive_temp_dir"] = create_temp_archive(case_dict) + case_dict["archive_list"] = _archive_list + case_dict["user_add_files"] if repo_exists: # update trunk and make a new tag last_tag = get_trunk_tag(case_dict, username, password) - new_tag = '{0}_{1}'.format(case_dict['CASE'], str(last_tag+1).zfill(4)) + new_tag = "{0}_{1}".format( + case_dict["CASE"], str(last_tag + 1).zfill(4) + ) checkout_repo(case_dict, username, password) update_local_repo(case_dict, ignore_logs, ignore_timing) - msg = 'update case metadata for {0} by {1}'.format(case_dict['CASE'], username) - checkin_trunk(case_dict, 'ci', msg, username, password) + msg = "update case metadata for {0} by {1}".format( + case_dict["CASE"], username + ) + checkin_trunk(case_dict, "ci", msg, username, password) create_tag(case_dict, new_tag, username, password) - logger.info('SVN repository trunk updated at URL "%s"', case_dict['svn_repo_url']) + logger.info( + 'SVN repository trunk updated at URL "%s"', + case_dict["svn_repo_url"], + ) logger.info(' and a new trunk tag created "%s"', new_tag) else: # create a new case repo - new_tag = '{0}_0001'.format(case_dict['CASE']) + new_tag = "{0}_0001".format(case_dict["CASE"]) create_readme(case_dict) populate_local_repo(case_dict, ignore_logs, ignore_timing) - msg = ('initial import of case metadata for {0} by {1}' - .format(case_dict['CASE'], username)) - checkin_trunk(case_dict, 'import', msg, username, password) + msg = "initial import of case metadata for {0} by {1}".format( + case_dict["CASE"], username + ) + checkin_trunk(case_dict, "import", msg, username, password) create_tag(case_dict, new_tag, username, password) - logger.info('SVN repository imported to trunk URL "%s"', case_dict['svn_repo_url']) + logger.info( + 'SVN repository imported to trunk URL "%s"', + case_dict["svn_repo_url"], + ) logger.info(' and a new trunk tag created for "%s"', new_tag) except SVNException: @@ -1317,203 +1732,232 @@ def update_repo(ignore_logs, ignore_timing, case_dict, username, password): return case_dict + # --------------------------------------------------------------------- def get_timing_data(case_dict): -# --------------------------------------------------------------------- - """ get_timing_data + # --------------------------------------------------------------------- + """get_timing_data parse the timing data file and add information to the case_dict Arguments: case_dict (dict) - case dictionary to store XML variables """ - logger.debug('get_timing_data') + logger.debug("get_timing_data") # initialize the timing values in the dictionary - case_dict['model_cost'] = 'undefined' - case_dict['model_throughput'] = 'undefined' + case_dict["model_cost"] = "undefined" + case_dict["model_throughput"] = "undefined" - timing_dir = case_dict['CASEROOT']+'/timing' - last_time = '' + timing_dir = case_dict["CASEROOT"] + "/timing" + last_time = "" if os.path.exists(timing_dir): # check if timing files exists - timing_file_pattern = 'cesm_timing.'+case_dict['CASE'] - last_time = max(glob.glob(timing_dir+'/'+timing_file_pattern+'.*'), - key=os.path.getctime) + timing_file_pattern = "cesm_timing." 
+ case_dict["CASE"] + last_time = max( + glob.glob(timing_dir + "/" + timing_file_pattern + ".*"), + key=os.path.getctime, + ) if last_time: - if 'gz' in last_time: + if "gz" in last_time: # gunzip file first - with gzip.open(last_time, 'rb') as fname: + with gzip.open(last_time, "rb") as fname: file_content = fname.readlines() else: - with open(last_time, 'r') as fname: + with open(last_time, "r") as fname: file_content = fname.readlines() # search the file content for matching lines - model_cost = [line for line in file_content if 'Model Cost:' in line] - model_throughput = [line for line in file_content if 'Model Throughput:' in line] + model_cost = [line for line in file_content if "Model Cost:" in line] + model_throughput = [ + line for line in file_content if "Model Throughput:" in line + ] - case_dict['model_cost'] = ' '.join(model_cost[0].split()) - case_dict['model_throughput'] = ' '.join(model_throughput[0].split()) + case_dict["model_cost"] = " ".join(model_cost[0].split()) + case_dict["model_throughput"] = " ".join(model_throughput[0].split()) return case_dict + # --------------------------------------------------------------------- def initialize_main(options): -# --------------------------------------------------------------------- - """ initialize_main + # --------------------------------------------------------------------- + """initialize_main Initialize the case dictionary data structure with command line options """ - logger.debug('intialize_main') + logger.debug("intialize_main") case_dict = dict() - case_dict['CASEROOT'] = os.getcwd() + case_dict["CASEROOT"] = os.getcwd() if options.caseroot: - case_dict['CASEROOT'] = options.caseroot[0] + case_dict["CASEROOT"] = options.caseroot[0] - case_dict['workdir'] = case_dict['CASEROOT'] + case_dict["workdir"] = case_dict["CASEROOT"] if options.workdir: - case_dict['workdir'] = options.workdir[0] + case_dict["workdir"] = options.workdir[0] username = None if options.user: username = options.user - case_dict['svnlogin'] = username + case_dict["svnlogin"] = username password = None if options.password: password = options.password if options.expType: - case_dict['expType'] = options.expType[0] + case_dict["expType"] = options.expType[0] - case_dict['title'] = None + case_dict["title"] = None if options.title: - case_dict['title'] = options.title[0] + case_dict["title"] = options.title[0] - case_dict['dryrun'] = False + case_dict["dryrun"] = False if options.dryrun: - case_dict['dryrun'] = True + case_dict["dryrun"] = True - case_dict['archive_temp_dir'] = '' + case_dict["archive_temp_dir"] = "" - case_dict['user_add_files'] = list() + case_dict["user_add_files"] = list() if options.user_add_files: - case_dict['user_add_files'] = options.user_add_files.split(',') + case_dict["user_add_files"] = options.user_add_files.split(",") - case_dict['q_casename'] = '' - case_dict['q_outfile'] = '' + case_dict["q_casename"] = "" + case_dict["q_outfile"] = "" if options.query_cmip6: - case_dict['q_casename'] = options.query_cmip6[0] - case_dict['q_outfile'] = options.query_cmip6[1] + case_dict["q_casename"] = options.query_cmip6[0] + case_dict["q_outfile"] = options.query_cmip6[1] - case_dict['base_expdb_url'] = 'https://csegweb.cgd.ucar.edu/expdb2.0' + case_dict["base_expdb_url"] = "https://csegweb.cgd.ucar.edu/expdb2.0" if options.test_post: - case_dict['base_expdb_url'] = 'https://csegwebdev.cgd.ucar.edu/expdb2.0' - case_dict['json_expdb_url'] = case_dict['base_expdb_url'] + '/cgi-bin/processJSON.cgi' - case_dict['query_expdb_url'] = 
case_dict['base_expdb_url'] + '/cgi-bin/query.cgi' + case_dict["base_expdb_url"] = "https://csegwebdev.cgd.ucar.edu/expdb2.0" + case_dict["json_expdb_url"] = ( + case_dict["base_expdb_url"] + "/cgi-bin/processJSON.cgi" + ) + case_dict["query_expdb_url"] = case_dict["base_expdb_url"] + "/cgi-bin/query.cgi" return case_dict, username, password + # --------------------------------------------------------------------- def main_func(options): -# --------------------------------------------------------------------- - """ main function + # --------------------------------------------------------------------- + """main function Arguments: options (list) - input options from command line """ - logger.debug('main_func') + logger.debug("main_func") (case_dict, username, password) = initialize_main(options) # check if query_cmip6 argument is specified if options.query_cmip6: - if case_dict['dryrun']: - logger.info('Dryrun - calling query_expdb_cmip6 for case metadata') + if case_dict["dryrun"]: + logger.info("Dryrun - calling query_expdb_cmip6 for case metadata") else: if query_expdb_cmip6(case_dict, username, password): - logger.info('Casename "%s" CMIP6 global attribute '\ 'metadata written to "%s/archive_files/%s" ' \ 'from "%s"', - case_dict['workdir'], case_dict['q_casename'], - case_dict['q_outfile'], case_dict['query_expdb_url']) - logger.info('Successful completion of archive_metadata') + logger.info( + 'Casename "%s" CMIP6 global attribute ' + 'metadata written to "%s/archive_files/%s" ' + 'from "%s"', + case_dict["q_casename"], + case_dict["workdir"], + case_dict["q_outfile"], + case_dict["query_expdb_url"], + ) + logger.info("Successful completion of archive_metadata") sys.exit(0) else: - logger.info('ERROR archive_metadata failed to find "%s" '\ 'in experiments database at "%s".', - case_dict['q_casename'], case_dict['query_expdb_url']) + logger.info( + 'ERROR archive_metadata failed to find "%s" ' + 'in experiments database at "%s".', + case_dict["q_casename"], + case_dict["query_expdb_url"], + ) sys.exit(1) # loop through the _xml_vars gathering values - with Case(case_dict['CASEROOT'], read_only=True) as case: - if case_dict['dryrun']: - logger.info('Dryrun - calling get_case_vars') + with Case(case_dict["CASEROOT"], read_only=True) as case: + if case_dict["dryrun"]: + logger.info("Dryrun - calling get_case_vars") else: case_dict = get_case_vars(case_dict, case) # check reserved casename expdb for CMIP6 experiments - if case_dict['expType'].lower() == 'cmip6': - if case_dict['dryrun']: - logger.info('Dryrun - calling check_expdb_case for CMIP6 experiment reservation') + if case_dict["expType"].lower() == "cmip6": + if case_dict["dryrun"]: + logger.info( + "Dryrun - calling check_expdb_case for CMIP6 experiment reservation" + ) else: - case_dict['case_id'] = check_expdb_case(case_dict, username, password) - if case_dict['case_id'] < 1: - logger.info('Unable to archive CMIP6 metadata. '\ '"%s" casename does not exist in database. '\ 'All CMIP6 experiments casenames must be '\ 'reserved in the experiments database at URL: '\ 'https://csegweb.cgd.ucar.edu/expdb2.0 '\ 'prior to running archive_metadata.', case_dict['CASE']) + case_dict["case_id"] = check_expdb_case(case_dict, username, password) + if case_dict["case_id"] < 1: + logger.info( + "Unable to archive CMIP6 metadata. " + '"%s" casename does not exist in database. 
' + "All CMIP6 experiment casenames must be " + "reserved in the experiments database at URL: " + "https://csegweb.cgd.ucar.edu/expdb2.0 " + "prior to running archive_metadata.", + case_dict["CASE"], + ) sys.exit(1) # get the case status into the case_dict - if case_dict['dryrun']: - logger.info('Dryrun - calling get_case_status') + if case_dict["dryrun"]: + logger.info("Dryrun - calling get_case_status") else: case_dict = get_case_status(case_dict) # create / update the cesm expdb repo with the caseroot files if not options.ignore_repo_update: - if case_dict['dryrun']: - logger.info('Dryrun - calling update_repo') + if case_dict["dryrun"]: + logger.info("Dryrun - calling update_repo") else: - case_dict = update_repo(options.ignore_logs, options.ignore_timing, - case_dict, username, password) + case_dict = update_repo( + options.ignore_logs, + options.ignore_timing, + case_dict, + username, + password, + ) # parse the timing data into the case_dict if not options.ignore_timing: - if case_dict['dryrun']: - logger.info('Dryrun - calling get_timing_data') + if case_dict["dryrun"]: + logger.info("Dryrun - calling get_timing_data") else: case_dict = get_timing_data(case_dict) # Create a JSON file containing the case_dict with the date appended to the filename - if case_dict['dryrun']: - logger.info('Dryrun - calling create_json') + if case_dict["dryrun"]: + logger.info("Dryrun - calling create_json") else: create_json(case_dict) # post the JSON to the remote DB - if case_dict['dryrun']: - logger.info('Dryrun - calling post_json') + if case_dict["dryrun"]: + logger.info("Dryrun - calling post_json") else: post_json(case_dict, username, password) # clean-up the temporary archive files dir - if case_dict['dryrun']: + if case_dict["dryrun"]: logger.info('Dryrun - deleting "./archive_temp_dir"') else: - if not options.ignore_repo_update and os.path.exists(case_dict['archive_temp_dir']): - shutil.rmtree(case_dict['archive_temp_dir']) + if not options.ignore_repo_update and os.path.exists( + case_dict["archive_temp_dir"] + ): + shutil.rmtree(case_dict["archive_temp_dir"]) - logger.info('Successful completion of archive_metadata') + logger.info("Successful completion of archive_metadata") return 0 -#=================================== + +# =================================== if __name__ == "__main__": try: diff --git a/CIME/Tools/bld_diff b/CIME/Tools/bld_diff index 5b57510dd3c..27193d61560 100755 --- a/CIME/Tools/bld_diff +++ b/CIME/Tools/bld_diff @@ -12,7 +12,7 @@ import argparse, sys, os, gzip ############################################################################### def parse_command_line(args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( usage="""\n{0} log1 log2 OR @@ -20,10 +20,12 @@ OR \033[1mEXAMPLES:\033[0m > {0} case1 case2 -""".format(os.path.basename(args[0])), +""".format( + os.path.basename(args[0]) + ), description=description, - formatter_class=argparse.ArgumentDefaultsHelpFormatter -) + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) CIME.utils.setup_standard_logging_options(parser) @@ -31,21 +33,31 @@ OR parser.add_argument("log2", help="Second log.") - parser.add_argument("-I", "--ignore-includes", action="store_true", - help="Ignore differences in include flags") + parser.add_argument( + "-I", + "--ignore-includes", + action="store_true", + help="Ignore differences in include flags", + ) args =
CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.log1, args.log2, args.ignore_includes + ############################################################################### def is_compile_line(line): -############################################################################### - return line.count("-I") > 0 and not line.startswith("gmake ") and not line.startswith("make ") + ############################################################################### + return ( + line.count("-I") > 0 + and not line.startswith("gmake ") + and not line.startswith("make ") + ) + ############################################################################### def get_compile_lines_from_log(logfile_text): -############################################################################### + ############################################################################### result = [] for line in logfile_text.splitlines(): if is_compile_line(line): @@ -53,10 +65,11 @@ def get_compile_lines_from_log(logfile_text): return result + _SRCFILE_ENDINGS = (".F", ".f", ".c", ".F90", ".f90", ".cpp") ############################################################################### def parse_log(logfile_text): -############################################################################### + ############################################################################### compile_lines = get_compile_lines_from_log(logfile_text) result = {} for compile_line in compile_lines: @@ -65,7 +78,7 @@ def parse_log(logfile_text): for item in items: for ending in _SRCFILE_ENDINGS: if item.endswith(ending): - #expect(compiled_file is None, "Found multiple things that look like files in '{}'".format(compile_line)) + # expect(compiled_file is None, "Found multiple things that look like files in '{}'".format(compile_line)) compiled_file = os.path.basename(item) break @@ -73,31 +86,44 @@ def parse_log(logfile_text): break if compiled_file is None: - print("WARNING: Found nothing that looks like a file in '{}'".format(compile_line)) + print( + "WARNING: Found nothing that looks like a file in '{}'".format( + compile_line + ) + ) else: if compiled_file in result: - print("WARNING: Found multiple compilations of {}".format(compiled_file)) + print( + "WARNING: Found multiple compilations of {}".format(compiled_file) + ) result[compiled_file] = items # TODO - Need to capture link lines too return result + ############################################################################### def get_case_from_log(logpath): -############################################################################### + ############################################################################### return os.path.abspath(os.path.join(os.path.dirname(logpath), "..")) + ############################################################################### def read_maybe_gzip(filepath): -############################################################################### - opener = lambda: gzip.open(filepath, "rt") if filepath.endswith(".gz") else open(filepath, "r") + ############################################################################### + opener = ( + lambda: gzip.open(filepath, "rt") + if filepath.endswith(".gz") + else open(filepath, "r") + ) with opener() as fd: return fd.read() + ############################################################################### def log_diff(log1, log2, repls, ignore_includes): -############################################################################### + 
############################################################################### """ Search for build/link commands and compare them """ @@ -118,21 +144,21 @@ def log_diff(log1, log2, repls, ignore_includes): file_set1 = set(compile_dict1.keys()) file_set2 = set(compile_dict2.keys()) - for item in (file_set1 - file_set2): + for item in file_set1 - file_set2: print("{} is missing compilation of {}".format(log2, item)) are_same = False - for item in (file_set2 - file_set1): + for item in file_set2 - file_set1: print("{} has unexpected compilation of {}".format(log2, item)) are_same = False - for item in (file_set1 & file_set2): + for item in file_set1 & file_set2: print("Checking compilation of {}".format(item)) flags1 = compile_dict1[item] flags2 = compile_dict2[item] missing = set(flags1) - set(flags2) - extra = set(flags2) - set(flags1) + extra = set(flags2) - set(flags1) # Let's not worry about order yet even though some flags are order-sensitive for flag in missing: @@ -141,15 +167,21 @@ def log_diff(log1, log2, repls, ignore_includes): are_same = False for flag in extra: - if flag != "-o" and not flag.startswith("CMakeFiles") and not (ignore_includes and flag.startswith("-I")) and item not in flag: + if ( + flag != "-o" + and not flag.startswith("CMakeFiles") + and not (ignore_includes and flag.startswith("-I")) + and item not in flag + ): print(" Extra flag {}".format(flag)) are_same = False return are_same + ############################################################################### def _main_func(description): -############################################################################### + ############################################################################### log1, log2, ignore_includes = parse_command_line(sys.argv, description) xml_normalize_fields = ["TEST_TESTID", "SRCROOT"] @@ -158,20 +190,29 @@ def _main_func(description): try: case1 = get_case_from_log(log1) case2 = get_case_from_log(log2) - val1 = run_cmd_no_fail("./xmlquery --value {}".format(xml_normalize_field), from_dir=case1) - val2 = run_cmd_no_fail("./xmlquery --value {}".format(xml_normalize_field), from_dir=case2) + val1 = run_cmd_no_fail( + "./xmlquery --value {}".format(xml_normalize_field), from_dir=case1 + ) + val2 = run_cmd_no_fail( + "./xmlquery --value {}".format(xml_normalize_field), from_dir=case2 + ) if os.sep in val1: repls[os.path.normpath(val2)] = os.path.normpath(val1) else: repls[val2] = val1 except Exception as e: - logging.warning("Warning, failed to normalize on {}: {}".format(xml_normalize_field, str(e))) + logging.warning( + "Warning, failed to normalize on {}: {}".format( + xml_normalize_field, str(e) + ) + ) repls = {} same = log_diff(log1, log2, repls, ignore_includes) sys.exit(0 if same == 0 else 1) + ############################################################################### -if (__name__ == "__main__"): +if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/bless_test_results b/CIME/Tools/bless_test_results index 121d1956b98..beccb955023 100755 --- a/CIME/Tools/bless_test_results +++ b/CIME/Tools/bless_test_results @@ -21,7 +21,7 @@ _MACHINE = Machines() ############################################################################### def parse_command_line(args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( usage="""\n{0} [-n] [-r ] [-b ] [-c ] [ ...] 
[--verbose] OR @@ -38,80 +38,161 @@ OR > {0} -n foo bar \033[1;32m# From most recent run of jenkins, bless history changes for next \033[0m > {0} -r /home/jenkins/acme/scratch/jenkins -b next --hist-only -""".format(os.path.basename(args[0])), +""".format( + os.path.basename(args[0]) + ), description=description, - formatter_class=argparse.ArgumentDefaultsHelpFormatter + formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) - default_compiler = _MACHINE.get_default_compiler() - scratch_root = _MACHINE.get_value("CIME_OUTPUT_ROOT") - default_testroot = os.path.join(scratch_root) + default_compiler = _MACHINE.get_default_compiler() + scratch_root = _MACHINE.get_value("CIME_OUTPUT_ROOT") + default_testroot = os.path.join(scratch_root) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument("-n", "--namelists-only", action="store_true", - help="Only analyze namelists.") + parser.add_argument( + "-n", "--namelists-only", action="store_true", help="Only analyze namelists." + ) - parser.add_argument("--hist-only", action="store_true", - help="Only analyze history files.") + parser.add_argument( + "--hist-only", action="store_true", help="Only analyze history files." + ) - parser.add_argument("-b", "--baseline-name", - help="Name of baselines to use. Default will use BASELINE_NAME_CMP first if possible, otherwise branch name.") + parser.add_argument( + "-b", + "--baseline-name", + help="Name of baselines to use. Default will use BASELINE_NAME_CMP first if possible, otherwise branch name.", + ) - parser.add_argument("--baseline-root", - help="Root of baselines. Default will use the BASELINE_ROOT from the case.") + parser.add_argument( + "--baseline-root", + help="Root of baselines. Default will use the BASELINE_ROOT from the case.", + ) - parser.add_argument("-c", "--compiler", default=default_compiler, - help="Compiler of run you want to bless") + parser.add_argument( + "-c", + "--compiler", + default=default_compiler, + help="Compiler of run you want to bless", + ) - parser.add_argument("-p", "--no-skip-pass", action="store_true", - help="Normally, if namelist or baseline phase exists and shows PASS, we assume no bless is needed. " - "This option forces the bless to happen regardless.") + parser.add_argument( + "-p", + "--no-skip-pass", + action="store_true", + help="Normally, if namelist or baseline phase exists and shows PASS, we assume no bless is needed. " + "This option forces the bless to happen regardless.", + ) - parser.add_argument("--report-only", action="store_true", - help="Only report what files will be overwritten and why. Caution is a good thing when updating baselines") + parser.add_argument( + "--report-only", + action="store_true", + help="Only report what files will be overwritten and why. 
Caution is a good thing when updating baselines", + ) - parser.add_argument("-r", "--test-root", default=default_testroot, - help="Path to test results that are being blessed") + parser.add_argument( + "-r", + "--test-root", + default=default_testroot, + help="Path to test results that are being blessed", + ) - parser.add_argument("--new-test-root", - help="If bless_test_results needs to create cases (for blessing namelists), use this root area") + parser.add_argument( + "--new-test-root", + help="If bless_test_results needs to create cases (for blessing namelists), use this root area", + ) - parser.add_argument("--new-test-id", - help="If bless_test_results needs to create cases (for blessing namelists), use this test id") + parser.add_argument( + "--new-test-id", + help="If bless_test_results needs to create cases (for blessing namelists), use this test id", + ) - parser.add_argument("-t", "--test-id", - help="Limit processes to case dirs matching this test-id. Can be useful if mutiple runs dumped into the same dir.") + parser.add_argument( + "-t", + "--test-id", + help="Limit processes to case dirs matching this test-id. Can be useful if multiple runs were dumped into the same dir.", + ) - parser.add_argument("-f", "--force", action="store_true", - help="Update every diff without asking. VERY DANGEROUS. Should only be used within testing scripts.") + parser.add_argument( + "-f", + "--force", + action="store_true", + help="Update every diff without asking. VERY DANGEROUS. Should only be used within testing scripts.", + ) - parser.add_argument("bless_tests", nargs="*", - help="When blessing, limit the bless to tests matching these regex") + parser.add_argument( + "bless_tests", + nargs="*", + help="When blessing, limit the bless to tests matching these regex", + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - expect(not (args.report_only and args.force), - "Makes no sense to use -r and -f simultaneously") - expect(not (args.namelists_only and args.hist_only), - "Makes no sense to use --namelists-only and --hist-only simultaneously") + expect( + not (args.report_only and args.force), + "Makes no sense to use --report-only and --force simultaneously", + ) + expect( + not (args.namelists_only and args.hist_only), + "Makes no sense to use --namelists-only and --hist-only simultaneously", + ) + + return ( + args.baseline_name, + args.baseline_root, + args.test_root, + args.compiler, + args.test_id, + args.namelists_only, + args.hist_only, + args.report_only, + args.force, + args.bless_tests, + args.no_skip_pass, + args.new_test_root, + args.new_test_id, + ) - return args.baseline_name, args.baseline_root, args.test_root, args.compiler, args.test_id, args.namelists_only, args.hist_only, args.report_only, args.force, args.bless_tests, args.no_skip_pass, args.new_test_root, args.new_test_id ############################################################################### def _main_func(description): -############################################################################### - baseline_name, baseline_root, test_root, compiler, test_id, namelists_only, hist_only, \ - report_only, force, bless_tests, no_skip_pass, new_test_root, new_test_id = \ - parse_command_line(sys.argv, description) - - success = bless_test_results(baseline_name, baseline_root, test_root, compiler, - test_id=test_id, namelists_only=namelists_only, hist_only=hist_only, - report_only=report_only, force=force, bless_tests=bless_tests, no_skip_pass=no_skip_pass, - new_test_root=new_test_root, 
new_test_id=new_test_id) + ############################################################################### + ( + baseline_name, + baseline_root, + test_root, + compiler, + test_id, + namelists_only, + hist_only, + report_only, + force, + bless_tests, + no_skip_pass, + new_test_root, + new_test_id, + ) = parse_command_line(sys.argv, description) + + success = bless_test_results( + baseline_name, + baseline_root, + test_root, + compiler, + test_id=test_id, + namelists_only=namelists_only, + hist_only=hist_only, + report_only=report_only, + force=force, + bless_tests=bless_tests, + no_skip_pass=no_skip_pass, + new_test_root=new_test_root, + new_test_id=new_test_id, + ) sys.exit(0 if success else 1) + ############################################################################### -if (__name__ == "__main__"): +if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/case.build b/CIME/Tools/case.build index 66e0df2b65d..c8e4d54c467 100755 --- a/CIME/Tools/case.build +++ b/CIME/Tools/case.build @@ -45,34 +45,46 @@ other options are specific to one mode or the other: from standard_script_setup import * import CIME.build as build -from CIME.case import Case -from CIME.utils import find_system_test, get_model -from CIME.test_status import * +from CIME.case import Case +from CIME.utils import find_system_test, get_model +from CIME.test_status import * ############################################################################### def parse_command_line(args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( - description=description, - formatter_class=argparse.RawTextHelpFormatter) + description=description, formatter_class=argparse.RawTextHelpFormatter + ) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory to build.\n" - "Default is current directory.") + parser.add_argument( + "caseroot", + nargs="?", + default=os.getcwd(), + help="Case directory to build.\n" "Default is current directory.", + ) if get_model() == "e3sm": - parser.add_argument("--ninja", action="store_true", - help="Use ninja backed for CMake (instead of gmake). " - "The ninja backend is better at scanning fortran dependencies but " - "seems to be less reliable across different platforms and compilers.") - - parser.add_argument("--separate-builds", action="store_true", - help="Build each component one at a time, separately, with output going to separate logs") - - parser.add_argument("--dry-run", action="store_true", - help="Just print the cmake and ninja commands.") + parser.add_argument( + "--ninja", + action="store_true", + help="Use the ninja backend for CMake (instead of gmake). 
" + "The ninja backend is better at scanning fortran dependencies but " + "seems to be less reliable across different platforms and compilers.", + ) + + parser.add_argument( + "--separate-builds", + action="store_true", + help="Build each component one at a time, separately, with output going to separate logs", + ) + + parser.add_argument( + "--dry-run", + action="store_true", + help="Just print the cmake and ninja commands.", + ) mutex_group = parser.add_mutually_exclusive_group() @@ -81,64 +93,121 @@ def parse_command_line(args, description): # config_file = files.get_value("CONFIG_CPL_FILE") # component = Component(config_file, "CPL") # comps = [x.lower() for x in component.get_valid_model_components()] - comps = ["cpl","atm","lnd","ice","ocn","rof","glc","wav","esp","iac"] - libs = ["csmshare", "mct", "pio", "gptl"] + comps = ["cpl", "atm", "lnd", "ice", "ocn", "rof", "glc", "wav", "esp", "iac"] + libs = ["csmshare", "mct", "pio", "gptl"] allobjs = comps + libs - mutex_group.add_argument("--sharedlib-only", action="store_true", - help="Only build shared libraries.") - - mutex_group.add_argument("-m", "--model-only", action="store_true", - help="Assume shared libraries are already built.") - - mutex_group.add_argument("-b", "--build", nargs="+", choices=allobjs, - help="Libraries to build.\n" - "Will cause namelist generation to be skipped.") - - mutex_group.add_argument("--skip-provenance-check", action="store_true", - help="Do not check and save build provenance") - - mutex_group.add_argument("--clean-all", action="store_true", - help="Clean all objects (including sharedlib objects that may be\n" - "used by other builds).") - - mutex_group.add_argument("--clean", nargs="*", choices=allobjs, - help="Clean objects associated with specific libraries.\n" - "With no arguments, clean all objects other than sharedlib objects.") - - mutex_group.add_argument("--clean-depends", nargs="*", choices=comps+["csmshare"], - help="Clean Depends and Srcfiles only.\n" - "This allows you to rebuild after adding new\n" - "files in the source tree or in SourceMods.") + mutex_group.add_argument( + "--sharedlib-only", action="store_true", help="Only build shared libraries." 
+ ) + + mutex_group.add_argument( + "-m", + "--model-only", + action="store_true", + help="Assume shared libraries are already built.", + ) + + mutex_group.add_argument( + "-b", + "--build", + nargs="+", + choices=allobjs, + help="Libraries to build.\n" "Will cause namelist generation to be skipped.", + ) + + mutex_group.add_argument( + "--skip-provenance-check", + action="store_true", + help="Do not check and save build provenance", + ) + + mutex_group.add_argument( + "--clean-all", + action="store_true", + help="Clean all objects (including sharedlib objects that may be\n" + "used by other builds).", + ) + + mutex_group.add_argument( + "--clean", + nargs="*", + choices=allobjs, + help="Clean objects associated with specific libraries.\n" + "With no arguments, clean all objects other than sharedlib objects.", + ) + + mutex_group.add_argument( + "--clean-depends", + nargs="*", + choices=comps + ["csmshare"], + help="Clean Depends and Srcfiles only.\n" + "This allows you to rebuild after adding new\n" + "files in the source tree or in SourceMods.", + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - clean_depends = args.clean_depends if args.clean_depends is None or len(args.clean_depends) else comps + clean_depends = ( + args.clean_depends + if args.clean_depends is None or len(args.clean_depends) + else comps + ) cleanlist = args.clean if args.clean is None or len(args.clean) else comps buildlist = None if args.build is None or len(args.build) == 0 else args.build if get_model() != "e3sm": args.separate_builds = False - args.ninja = False + args.ninja = False + + return ( + args.caseroot, + args.sharedlib_only, + args.model_only, + cleanlist, + args.clean_all, + buildlist, + clean_depends, + not args.skip_provenance_check, + args.separate_builds, + args.ninja, + args.dry_run, + ) - return args.caseroot, args.sharedlib_only, args.model_only, cleanlist, args.clean_all, buildlist, clean_depends, not args.skip_provenance_check, args.separate_builds, args.ninja, args.dry_run ############################################################################### def _main_func(description): -############################################################################### - caseroot, sharedlib_only, model_only, cleanlist, clean_all, buildlist,clean_depends, save_build_provenance, separate_builds, ninja, dry_run = \ - parse_command_line(sys.argv, description) + ############################################################################### + ( + caseroot, + sharedlib_only, + model_only, + cleanlist, + clean_all, + buildlist, + clean_depends, + save_build_provenance, + separate_builds, + ninja, + dry_run, + ) = parse_command_line(sys.argv, description) success = True with Case(caseroot, read_only=False, record=True) as case: - testname = case.get_value('TESTCASE') + testname = case.get_value("TESTCASE") if cleanlist is not None or clean_all or clean_depends is not None: - build.clean(case, cleanlist=cleanlist, clean_all=clean_all, clean_depends=clean_depends) - elif(testname is not None): - logging.warning("Building test for {} in directory {}".format(testname, - caseroot)) + build.clean( + case, + cleanlist=cleanlist, + clean_all=clean_all, + clean_depends=clean_depends, + ) + elif testname is not None: + logging.warning( + "Building test for {} in directory {}".format(testname, caseroot) + ) try: # The following line can throw exceptions if the testname is # not found or the test constructor throws. 
We need to be @@ -146,22 +215,42 @@ def _main_func(description): # happens. test = find_system_test(testname, case)(case) except BaseException: - phase_to_fail = MODEL_BUILD_PHASE if model_only else SHAREDLIB_BUILD_PHASE + phase_to_fail = ( + MODEL_BUILD_PHASE if model_only else SHAREDLIB_BUILD_PHASE + ) with TestStatus(test_dir=caseroot) as ts: - ts.set_status(phase_to_fail, TEST_FAIL_STATUS, comments="failed to initialize") + ts.set_status( + phase_to_fail, TEST_FAIL_STATUS, comments="failed to initialize" + ) raise - expect(buildlist is None, - "Build lists don't work with tests, use create_newcase (not create_test) to use this feature") - success = test.build(sharedlib_only=sharedlib_only, model_only=model_only, ninja=ninja, dry_run=dry_run, separate_builds=separate_builds) + expect( + buildlist is None, + "Build lists don't work with tests, use create_newcase (not create_test) to use this feature", + ) + success = test.build( + sharedlib_only=sharedlib_only, + model_only=model_only, + ninja=ninja, + dry_run=dry_run, + separate_builds=separate_builds, + ) else: - success = build.case_build(caseroot, case=case, sharedlib_only=sharedlib_only, - model_only=model_only, buildlist=buildlist, - save_build_provenance=save_build_provenance, - separate_builds=separate_builds, ninja=ninja, dry_run=dry_run) + success = build.case_build( + caseroot, + case=case, + sharedlib_only=sharedlib_only, + model_only=model_only, + buildlist=buildlist, + save_build_provenance=save_build_provenance, + separate_builds=separate_builds, + ninja=ninja, + dry_run=dry_run, + ) sys.exit(0 if success else 1) + if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/case.cmpgen_namelists b/CIME/Tools/case.cmpgen_namelists index 7b4449ee6fe..c9930e71795 100755 --- a/CIME/Tools/case.cmpgen_namelists +++ b/CIME/Tools/case.cmpgen_namelists @@ -7,55 +7,89 @@ generate, or both) for this case. from standard_script_setup import * -from CIME.case import Case -from argparse import RawTextHelpFormatter +from CIME.case import Case +from argparse import RawTextHelpFormatter ############################################################################### def parse_command_line(args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( - description=description, - formatter_class=RawTextHelpFormatter) + description=description, formatter_class=RawTextHelpFormatter + ) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory for which namelists are compared/generated. " - "\nDefault is current directory.") - - parser.add_argument("-c", "--compare", action="store_true", - help="Force a namelist comparison against baselines. " - "\nDefault is to follow the case specification.") - - parser.add_argument("-g", "--generate", action="store_true", - help="Force a generation of namelist baselines. " - "\nDefault is to follow the case specification.") - - parser.add_argument("--compare-name", - help="Force comparison to use baselines with this name. " - "\nDefault is to follow the case specification.") - - parser.add_argument("--generate-name", - help="Force generation to use baselines with this name. " - "\nDefault is to follow the case specification.") - - parser.add_argument("--baseline-root", - help="Root of baselines. 
" - "\nDefault is the case's BASELINE_ROOT.") + parser.add_argument( + "caseroot", + nargs="?", + default=os.getcwd(), + help="Case directory for which namelists are compared/generated. " + "\nDefault is current directory.", + ) + + parser.add_argument( + "-c", + "--compare", + action="store_true", + help="Force a namelist comparison against baselines. " + "\nDefault is to follow the case specification.", + ) + + parser.add_argument( + "-g", + "--generate", + action="store_true", + help="Force a generation of namelist baselines. " + "\nDefault is to follow the case specification.", + ) + + parser.add_argument( + "--compare-name", + help="Force comparison to use baselines with this name. " + "\nDefault is to follow the case specification.", + ) + + parser.add_argument( + "--generate-name", + help="Force generation to use baselines with this name. " + "\nDefault is to follow the case specification.", + ) + + parser.add_argument( + "--baseline-root", + help="Root of baselines. " "\nDefault is the case's BASELINE_ROOT.", + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - return args.caseroot, args.compare, args.generate, args.compare_name, args.generate_name, args.baseline_root + return ( + args.caseroot, + args.compare, + args.generate, + args.compare_name, + args.generate_name, + args.baseline_root, + ) + ############################################################################### def _main_func(description): -############################################################################### - caseroot, compare, generate, compare_name, generate_name, baseline_root \ - = parse_command_line(sys.argv, description) + ############################################################################### + ( + caseroot, + compare, + generate, + compare_name, + generate_name, + baseline_root, + ) = parse_command_line(sys.argv, description) with Case(caseroot, read_only=False) as case: - success = case.case_cmpgen_namelists(compare, generate, compare_name, generate_name, baseline_root) + success = case.case_cmpgen_namelists( + compare, generate, compare_name, generate_name, baseline_root + ) sys.exit(0 if success else CIME.utils.TESTS_FAILED_ERR_CODE) + if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/case.qstatus b/CIME/Tools/case.qstatus index 8918428b05d..4902dc511d5 100755 --- a/CIME/Tools/case.qstatus +++ b/CIME/Tools/case.qstatus @@ -9,29 +9,33 @@ Typical usage is simply: from standard_script_setup import * -from CIME.case import Case -from CIME.test_status import * +from CIME.case import Case +from CIME.test_status import * ############################################################################### def parse_command_line(args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( - description=description, - formatter_class=argparse.RawTextHelpFormatter) + description=description, formatter_class=argparse.RawTextHelpFormatter + ) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory to query.\n" - "Default is current directory.") + parser.add_argument( + "caseroot", + nargs="?", + default=os.getcwd(), + help="Case directory to query.\n" "Default is current directory.", + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.caseroot + 
############################################################################### def _main_func(description): -############################################################################### + ############################################################################### caseroot = parse_command_line(sys.argv, description) with Case(caseroot, read_only=False) as case: @@ -39,5 +43,6 @@ def _main_func(description): sys.exit(0) + if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/case.setup b/CIME/Tools/case.setup index 9bcc52fea3f..1ae3ba239cc 100755 --- a/CIME/Tools/case.setup +++ b/CIME/Tools/case.setup @@ -17,51 +17,73 @@ To rerun after making changes to env_mach_pes.xml or env_mach_specific.xml, run: """ from standard_script_setup import * -from CIME.case import Case +from CIME.case import Case ############################################################################### def parse_command_line(args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( - description=description, - formatter_class=argparse.RawTextHelpFormatter) + description=description, formatter_class=argparse.RawTextHelpFormatter + ) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory to setup.\n" - "Default is current directory.") - - parser.add_argument("-c", "--clean", action="store_true", - help="Removes the batch run script for target machine.\n" - "If the testmode argument is present then keep the test\n" - "script if it is present - otherwise remove it.\n" - "The user_nl_xxx and Macros files are never removed by case.setup -\n" - "you must remove them manually.") - - parser.add_argument("-t", "--test-mode", action="store_true", - help="Keeps the test script when the --clean argument is used.") - - parser.add_argument("-r", "--reset", action="store_true", - help="Does a clean followed by setup.\n" - "This flag should be used when rerunning case.setup after it\n" - "has already been run for this case.") - - parser.add_argument("-k", "--keep", action="append", default=[], - help="When cleaning/resetting a case, do not remove/refresh files in this list. " - "Choices are batch script, env_mach_specific.xml, Macros.make, Macros.cmake. " - "Use should use this if you have local modifications to these files that you want to keep.") + parser.add_argument( + "caseroot", + nargs="?", + default=os.getcwd(), + help="Case directory to setup.\n" "Default is current directory.", + ) + + parser.add_argument( + "-c", + "--clean", + action="store_true", + help="Removes the batch run script for target machine.\n" + "If the testmode argument is present then keep the test\n" + "script if it is present - otherwise remove it.\n" + "The user_nl_xxx and Macros files are never removed by case.setup -\n" + "you must remove them manually.", + ) + + parser.add_argument( + "-t", + "--test-mode", + action="store_true", + help="Keeps the test script when the --clean argument is used.", + ) + + parser.add_argument( + "-r", + "--reset", + action="store_true", + help="Does a clean followed by setup.\n" + "This flag should be used when rerunning case.setup after it\n" + "has already been run for this case.", + ) + + parser.add_argument( + "-k", + "--keep", + action="append", + default=[], + help="When cleaning/resetting a case, do not remove/refresh files in this list. 
" + "Choices are batch script, env_mach_specific.xml, Macros.make, Macros.cmake. " + "Use should use this if you have local modifications to these files that you want to keep.", + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.caseroot, args.clean, args.test_mode, args.reset, args.keep + ############################################################################### def _main_func(description): -############################################################################### + ############################################################################### caseroot, clean, test_mode, reset, keep = parse_command_line(sys.argv, description) with Case(caseroot, read_only=False, record=True) as case: case.case_setup(clean=clean, test_mode=test_mode, reset=reset, keep=keep) + if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/case.submit b/CIME/Tools/case.submit index 49979d86237..32bce36dc01 100755 --- a/CIME/Tools/case.submit +++ b/CIME/Tools/case.submit @@ -16,72 +16,102 @@ Other examples: """ from standard_script_setup import * -from CIME.case import Case -from CIME.utils import expect +from CIME.case import Case +from CIME.utils import expect from six.moves import configparser ############################################################################### def parse_command_line(args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( - description=description, - formatter_class=argparse.RawTextHelpFormatter) + description=description, formatter_class=argparse.RawTextHelpFormatter + ) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory to submit.\n" - "Default is current directory.") - - parser.add_argument("--job", "-j", - help="Name of the job to be submitted;\n" - "can be any of the jobs listed in env_batch.xml.\n" - "This will be the first job of any defined workflow. " - "Default is case.run.") - - parser.add_argument("--only-job", - help="Name of the job to be submitted;\n" - "can be any of the jobs listed in env_batch.xml.\n" - "Only this job will be run, workflow and RESUBMIT are ignored. " - "Default is case.run.") - - parser.add_argument("--no-batch", action="store_true", - help="Do not submit jobs to batch system, run locally.") - - parser.add_argument("--prereq", - help="Specify a prerequisite job id, this job will not start until the\n" - "job with this id is completed (batch mode only).") - - parser.add_argument("--prereq-allow-failure", action="store_true", - help="Allows starting the run even if the prerequisite fails.\n" - "This also allows resubmits to run if the original failed and the\n" - "resubmit was submitted to the queue with the orginal as a dependency,\n" - "as in the case of --resubmit-immediate.") - - parser.add_argument("--resubmit", action="store_true", - help="Used with tests only, to continue rather than restart a test.") - - parser.add_argument("--resubmit-immediate", action="store_true", - help="This queues all of the resubmissions immediately after\n" - "the first job is queued. 
These rely on the queue system to\n" - "handle dependencies.") - - parser.add_argument("--skip-preview-namelist", action="store_true", - help="Skip calling preview-namelist during case.run.") + parser.add_argument( + "caseroot", + nargs="?", + default=os.getcwd(), + help="Case directory to submit.\n" "Default is current directory.", + ) + + parser.add_argument( + "--job", + "-j", + help="Name of the job to be submitted;\n" + "can be any of the jobs listed in env_batch.xml.\n" + "This will be the first job of any defined workflow. " + "Default is case.run.", + ) + + parser.add_argument( + "--only-job", + help="Name of the job to be submitted;\n" + "can be any of the jobs listed in env_batch.xml.\n" + "Only this job will be run, workflow and RESUBMIT are ignored. " + "Default is case.run.", + ) + + parser.add_argument( + "--no-batch", + action="store_true", + help="Do not submit jobs to batch system, run locally.", + ) + + parser.add_argument( + "--prereq", + help="Specify a prerequisite job id, this job will not start until the\n" + "job with this id is completed (batch mode only).", + ) + + parser.add_argument( + "--prereq-allow-failure", + action="store_true", + help="Allows starting the run even if the prerequisite fails.\n" + "This also allows resubmits to run if the original failed and the\n" + "resubmit was submitted to the queue with the original as a dependency,\n" + "as in the case of --resubmit-immediate.", + ) + + parser.add_argument( + "--resubmit", + action="store_true", + help="Used with tests only, to continue rather than restart a test.", + ) + + parser.add_argument( + "--resubmit-immediate", + action="store_true", + help="This queues all of the resubmissions immediately after\n" + "the first job is queued. These rely on the queue system to\n" + "handle dependencies.", + ) + + parser.add_argument( + "--skip-preview-namelist", + action="store_true", + help="Skip calling preview-namelist during case.run.", + ) CIME.utils.add_mail_type_args(parser) - parser.add_argument("-a", "--batch-args", - help="Used to pass additional arguments to batch system.") + parser.add_argument( + "-a", "--batch-args", help="Used to pass additional arguments to batch system." + ) - parser.add_argument("--chksum", action="store_true", - help="Verifies input data checksums.") + parser.add_argument( + "--chksum", action="store_true", help="Verifies input data checksums." 
+ ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) CIME.utils.resolve_mail_type_args(args) - expect(args.job is None or args.only_job is None, "Cannot specify both --job and --only-job") + expect( + args.job is None or args.only_job is None, + "Cannot specify both --job and --only-job", + ) job = None workflow = True if args.job: @@ -90,19 +120,44 @@ def parse_command_line(args, description): job = args.only_job workflow = False + return ( + args.caseroot, + job, + args.no_batch, + args.prereq, + args.prereq_allow_failure, + args.resubmit, + args.resubmit_immediate, + args.skip_preview_namelist, + args.mail_user, + args.mail_type, + args.batch_args, + workflow, + args.chksum, + ) - return (args.caseroot, job, args.no_batch, args.prereq, args.prereq_allow_failure, - args.resubmit, args.resubmit_immediate, args.skip_preview_namelist, args.mail_user, - args.mail_type, args.batch_args, workflow, args.chksum) ############################################################################### def _main_func(description, test_args=False): -############################################################################### - caseroot, job, no_batch, prereq, allow_fail, resubmit, resubmit_immediate, skip_pnl, \ - mail_user, mail_type, batch_args, workflow, chksum = parse_command_line(sys.argv, description) + ############################################################################### + ( + caseroot, + job, + no_batch, + prereq, + allow_fail, + resubmit, + resubmit_immediate, + skip_pnl, + mail_user, + mail_type, + batch_args, + workflow, + chksum, + ) = parse_command_line(sys.argv, description) # save these options to a hidden file for use during resubmit - config_file = os.path.join(caseroot,".submit_options") + config_file = os.path.join(caseroot, ".submit_options") if skip_pnl or mail_user or mail_type or batch_args: config = configparser.RawConfigParser() config.add_section("SubmitOptions") @@ -121,10 +176,21 @@ def _main_func(description, test_args=False): if not test_args: with Case(caseroot, read_only=False, record=True) as case: - case.submit(job=job, no_batch=no_batch, prereq=prereq, allow_fail=allow_fail, - resubmit=resubmit, resubmit_immediate=resubmit_immediate, skip_pnl=skip_pnl, - mail_user=mail_user, mail_type=mail_type, - batch_args=batch_args, workflow=workflow, chksum=chksum) + case.submit( + job=job, + no_batch=no_batch, + prereq=prereq, + allow_fail=allow_fail, + resubmit=resubmit, + resubmit_immediate=resubmit_immediate, + skip_pnl=skip_pnl, + mail_user=mail_user, + mail_type=mail_type, + batch_args=batch_args, + workflow=workflow, + chksum=chksum, + ) + if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/case_diff b/CIME/Tools/case_diff index f8176cb3677..e642dd8e7ac 100755 --- a/CIME/Tools/case_diff +++ b/CIME/Tools/case_diff @@ -12,7 +12,7 @@ import argparse, sys, os ############################################################################### def parse_command_line(args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( usage="""\n{0} case1 case2 [skip-files] OR @@ -20,10 +20,12 @@ OR \033[1mEXAMPLES:\033[0m > {0} case1 case2 -""".format(os.path.basename(args[0])), +""".format( + os.path.basename(args[0]) + ), description=description, - formatter_class=argparse.ArgumentDefaultsHelpFormatter -) + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) 
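# A minimal sketch of the ".submit_options" round trip that case.submit
# performs above: selected submit flags are written to a hidden INI file in
# the case directory so a later resubmit can restore them. The standalone
# helpers below are illustrative only, not CIME API; the "SubmitOptions"
# section name and the skip_pnl/mail_user keys mirror the code above.
import os
from six.moves import configparser

def save_submit_options(caseroot, skip_pnl=False, mail_user=None):
    # Persist only the options that were actually supplied, as above.
    config = configparser.RawConfigParser()
    config.add_section("SubmitOptions")
    config.set("SubmitOptions", "skip_pnl", str(skip_pnl))
    if mail_user:
        config.set("SubmitOptions", "mail_user", mail_user)
    with open(os.path.join(caseroot, ".submit_options"), "w") as fd:
        config.write(fd)

def load_submit_options(caseroot):
    # Returns {} if no .submit_options file was written for this case.
    config = configparser.RawConfigParser()
    config.read(os.path.join(caseroot, ".submit_options"))
    if config.has_section("SubmitOptions"):
        return dict(config.items("SubmitOptions"))
    return {}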
CIME.utils.setup_standard_logging_options(parser) @@ -31,25 +33,30 @@ OR parser.add_argument("case2", help="Second case.") - parser.add_argument("skip_list", nargs="*", - help="skip these files. You'll probably want to skip the bld directory if it's inside the case") + parser.add_argument( + "skip_list", + nargs="*", + help="skip these files. You'll probably want to skip the bld directory if it's inside the case", + ) - parser.add_argument("-b", "--show-binary", action="store_true", - help="Show binary diffs") + parser.add_argument( + "-b", "--show-binary", action="store_true", help="Show binary diffs" + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.case1, args.case2, args.show_binary, args.skip_list + ############################################################################### def recursive_diff(dir1, dir2, repls, show_binary=False, skip_list=()): -############################################################################### + ############################################################################### """ Starting at dir1, dir2 respectively, compare their contents """ # The assertions below hurt performance - #assert os.path.isdir(dir1), dir1 + " not a directory" - #assert os.path.isdir(dir2), dir2 + " not a directory" + # assert os.path.isdir(dir1), dir1 + " not a directory" + # assert os.path.isdir(dir2), dir2 + " not a directory" # Get contents of both directories dir1_contents = set(os.listdir(dir1)) @@ -58,38 +65,44 @@ def recursive_diff(dir1, dir2, repls, show_binary=False, skip_list=()): # Use set operations to figure out what they have in common dir1_only = dir1_contents - dir2_contents dir2_only = dir2_contents - dir1_contents - both = dir1_contents & dir2_contents + both = dir1_contents & dir2_contents num_differing_files = 0 # Print the unique items for dirname, set_obj in [(dir1, dir1_only), (dir2, dir2_only)]: for item in sorted(set_obj): - if (item not in skip_list): - print ("===============================================================================") - print (os.path.join(dirname, item), "is unique") + if item not in skip_list: + print( + "===============================================================================" + ) + print(os.path.join(dirname, item), "is unique") num_differing_files += 1 # Handling of the common items is trickier for item in sorted(both): - if (item in skip_list): + if item in skip_list: continue path1 = os.path.join(dir1, item) path2 = os.path.join(dir2, item) path1isdir = os.path.isdir(path1) # If the directory status of the files differs, report diff - if (path1isdir != os.path.isdir(path2)): - print ("===============================================================================") - print (path1 + " DIFFERS (directory status)") + if path1isdir != os.path.isdir(path2): + print( + "===============================================================================" + ) + print(path1 + " DIFFERS (directory status)") num_differing_files += 1 continue # If we've made it this far, the files' status is the same. 
If the # files are directories, recursively check them, otherwise check # that the file contents match - if (path1isdir): - num_differing_files += recursive_diff(path1, path2, repls, show_binary, skip_list) + if path1isdir: + num_differing_files += recursive_diff( + path1, path2, repls, show_binary, skip_list + ) else: # # As a (huge) performance enhancement, if the files have the same # # size, we assume the contents match @@ -97,36 +110,47 @@ def recursive_diff(dir1, dir2, repls, show_binary=False, skip_list=()): # print path1 + " DIFFERS (contents)" stat, out, err = run_cmd("file {}".format(path1)) - if (stat != 0): - logging.warning("Failed to probe file '{}', out: '{}', err: '{}'".format(path1, out, err)) + if stat != 0: + logging.warning( + "Failed to probe file '{}', out: '{}', err: '{}'".format( + path1, out, err + ) + ) continue is_text_file = "text" in out - if (not (not show_binary and not is_text_file)): + if not (not show_binary and not is_text_file): the_text = open(path2, "r").read() for replace_item, replace_with in repls.items(): the_text = the_text.replace(replace_item, replace_with) stat, out, _ = run_cmd("diff -w {} -".format(path1), input_str=the_text) - if (stat != 0): - print ("===============================================================================") - print (path1 + " DIFFERS (contents)") + if stat != 0: + print( + "===============================================================================" + ) + print(path1 + " DIFFERS (contents)") num_differing_files += 1 - print (" "+ out) + print(" " + out) return num_differing_files + ############################################################################### def _main_func(description): -############################################################################### + ############################################################################### case1, case2, show_binary, skip_list = parse_command_line(sys.argv, description) xml_normalize_fields = ["TEST_TESTID", "SRCROOT"] repls = {} for xml_normalize_field in xml_normalize_fields: try: - val1 = run_cmd_no_fail("./xmlquery --value {}".format(xml_normalize_field), from_dir=case1) - val2 = run_cmd_no_fail("./xmlquery --value {}".format(xml_normalize_field), from_dir=case2) + val1 = run_cmd_no_fail( + "./xmlquery --value {}".format(xml_normalize_field), from_dir=case1 + ) + val2 = run_cmd_no_fail( + "./xmlquery --value {}".format(xml_normalize_field), from_dir=case2 + ) if os.sep in val1: repls[os.path.normpath(val2)] = os.path.normpath(val1) else: @@ -139,7 +163,8 @@ def _main_func(description): logging.info(num_differing_files, "files are different") sys.exit(0 if num_differing_files == 0 else 1) + ############################################################################### -if (__name__ == "__main__"): +if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/check_case b/CIME/Tools/check_case index aae32ad6e30..9954b33dc0c 100755 --- a/CIME/Tools/check_case +++ b/CIME/Tools/check_case @@ -20,7 +20,7 @@ want to perform these checks without actually submitting the case. 
from standard_script_setup import * from CIME.utils import expect -from CIME.case import Case +from CIME.case import Case import argparse @@ -28,19 +28,20 @@ logger = logging.getLogger(__name__) ############################################################################### def parse_command_line(args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( - description=description, - formatter_class=argparse.RawTextHelpFormatter) + description=description, formatter_class=argparse.RawTextHelpFormatter + ) CIME.utils.setup_standard_logging_options(parser) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + ############################################################################### def _main_func(description): -############################################################################### + ############################################################################### parse_command_line(sys.argv, description) with Case(read_only=False, record=True) as case: @@ -49,10 +50,10 @@ def _main_func(description): build_complete = case.get_value("BUILD_COMPLETE") if not build_complete: - expect(False, - "Please rebuild the model interactively by calling case.build") + expect(False, "Please rebuild the model interactively by calling case.build") + + logger.info("check_case OK ") - logger.info( "check_case OK ") ############################################################################### diff --git a/CIME/Tools/check_input_data b/CIME/Tools/check_input_data index 3c8e96b73fa..179af50c4fd 100755 --- a/CIME/Tools/check_input_data +++ b/CIME/Tools/check_input_data @@ -25,52 +25,87 @@ import argparse ############################################################################### def parse_command_line(args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( - description=description, - formatter_class=argparse.RawTextHelpFormatter) + description=description, formatter_class=argparse.RawTextHelpFormatter + ) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument("--protocol", default=None, - help="The input data protocol to download data.") - - parser.add_argument("--server", default=None, - help="The input data repository from which to download data.") - - - parser.add_argument("-i", "--input-data-root",default=None, - help="The root directory where input data goes,\n" - "use xmlquery DIN_LOC_ROOT to see default value.") - - - parser.add_argument("--data-list-dir", default="Buildconf", - help="Where to find list of input files") - - parser.add_argument("--download", action="store_true", - help="Attempt to download missing input files") - - parser.add_argument("--chksum", action="store_true", - help="chksum inputfiles against inputdata_chksum.dat (if available)") + parser.add_argument( + "--protocol", default=None, help="The input data protocol to download data." 
+ ) + + parser.add_argument( + "--server", + default=None, + help="The input data repository from which to download data.", + ) + + parser.add_argument( + "-i", + "--input-data-root", + default=None, + help="The root directory where input data goes,\n" + "use xmlquery DIN_LOC_ROOT to see default value.", + ) + + parser.add_argument( + "--data-list-dir", default="Buildconf", help="Where to find list of input files" + ) + + parser.add_argument( + "--download", + action="store_true", + help="Attempt to download missing input files", + ) + + parser.add_argument( + "--chksum", + action="store_true", + help="chksum inputfiles against inputdata_chksum.dat (if available)", + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - return args.protocol, args.server, args.input_data_root, args.data_list_dir, args.download, args.chksum + return ( + args.protocol, + args.server, + args.input_data_root, + args.data_list_dir, + args.download, + args.chksum, + ) + ############################################################################### def _main_func(description): -############################################################################### - protocol, address, input_data_root, data_list_dir, download, chksum = parse_command_line(sys.argv, description) + ############################################################################### + ( + protocol, + address, + input_data_root, + data_list_dir, + download, + chksum, + ) = parse_command_line(sys.argv, description) with Case() as case: - sys.exit(0 if case.check_all_input_data(protocol=protocol, - address=address, - input_data_root=input_data_root, - data_list_dir=data_list_dir, - download=download, - chksum=chksum) else 1) + sys.exit( + 0 + if case.check_all_input_data( + protocol=protocol, + address=address, + input_data_root=input_data_root, + data_list_dir=data_list_dir, + download=download, + chksum=chksum, + ) + else 1 + ) + ############################################################################### -if (__name__ == "__main__"): +if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/check_lockedfiles b/CIME/Tools/check_lockedfiles index b9371dd8aba..ec279754388 100755 --- a/CIME/Tools/check_lockedfiles +++ b/CIME/Tools/check_lockedfiles @@ -6,6 +6,7 @@ This script compares xml files from standard_script_setup import * from CIME.case import Case + def parse_command_line(args, description): parser = argparse.ArgumentParser( usage="""\n{0} [--verbose] @@ -15,27 +16,30 @@ OR \033[1mEXAMPLES:\033[0m \033[1;32m# check_lockedfiles SMS\033[0m > {0} -""".format(os.path.basename(args[0])), - +""".format( + os.path.basename(args[0]) + ), description=description, - - formatter_class=argparse.ArgumentDefaultsHelpFormatter + formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument("--caseroot", default=os.getcwd(), - help="Case directory to build") + parser.add_argument( + "--caseroot", default=os.getcwd(), help="Case directory to build" + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.caseroot + def _main_func(description): caseroot = parse_command_line(sys.argv, description) with Case(case_root=caseroot, read_only=True) as case: case.check_lockedfiles() + if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/cime_bisect b/CIME/Tools/cime_bisect index 0ebdae7e95d..e427679f692 100755 --- a/CIME/Tools/cime_bisect +++ b/CIME/Tools/cime_bisect @@ -18,7 +18,7 @@ logger = 
logging.getLogger(__name__) ############################################################################### def parse_command_line(args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( usage="""\n{0} [--bad=] [--verbose] OR @@ -46,37 +46,52 @@ OR > {0} HEAD~4 'ERS.f45_g37.B1850C5 --no-run' 'SMS.f45_g37.F' -""".format(os.path.basename(args[0])), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter -) +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) CIME.utils.setup_standard_logging_options(parser) parser.add_argument("good", help="Name of most recent known good commit.") - parser.add_argument("-B", "--bad", default="HEAD", - help="Name of bad commit, default is current HEAD.") + parser.add_argument( + "-B", + "--bad", + default="HEAD", + help="Name of bad commit, default is current HEAD.", + ) - parser.add_argument("-a", "--all-commits", action="store_true", - help="Test all commits, not just merges") + parser.add_argument( + "-a", + "--all-commits", + action="store_true", + help="Test all commits, not just merges", + ) - parser.add_argument("-S", "--script", - help="Use your own custom script instead") + parser.add_argument("-S", "--script", help="Use your own custom script instead") - parser.add_argument("testargs", nargs="*", help="String to pass to create_test. Combine with single quotes if it includes multiple args.") + parser.add_argument( + "testargs", + nargs="*", + help="String to pass to create_test. Combine with single quotes if it includes multiple args.", + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - expect(os.path.exists(".git"), "Please run the root of a repo. Either your CIME repo or model depending on which one you want to bisect on") + expect( + os.path.exists(".git"), + "Please run from the root of a repo. 
Either your CIME repo or model depending on which one you want to bisect on", + ) return args.testargs, args.good, args.bad, args.all_commits, args.script + ############################################################################### def cime_bisect(testargs, good, bad, commits_to_skip, custom_script): -############################################################################### + ############################################################################### logger.info("####################################################") logger.info("TESTING WITH ARGS '{}'".format(testargs)) logger.info("####################################################") @@ -97,21 +112,31 @@ def cime_bisect(testargs, good, bad, commits_to_skip, custom_script): # Formulate the create_test command, let create_test make the test-id, it will use # a timestamp that will allow us to avoid collisions - thing_to_run = custom_script if custom_script else "{} {}".format(create_test, testargs) + thing_to_run = ( + custom_script if custom_script else "{} {}".format(create_test, testargs) + ) bisect_cmd = "git submodule update --recursive && {}".format(thing_to_run) if not custom_script: is_batch = False try: from CIME.XML.machines import Machines + machine = Machines() is_batch = machine.has_batch_system() except: pass - if (is_batch and "--no-run" not in testargs and "--no-build" not in testargs and "--no-setup" not in testargs): - expect("--wait" in testargs, - "Your create_test command likely needs --wait to work correctly with bisect") + if ( + is_batch + and "--no-run" not in testargs + and "--no-build" not in testargs + and "--no-setup" not in testargs + ): + expect( + "--wait" in testargs, + "Your create_test command likely needs --wait to work correctly with bisect", + ) try: cmd = "git bisect run sh -c '{}'".format(bisect_cmd) @@ -120,8 +145,10 @@ def cime_bisect(testargs, good, bad, commits_to_skip, custom_script): # Get list of potentially bad commits from output lines = output.splitlines() - regex = re.compile(r'^([a-f0-9]{40}).*$') - bad_commits = set([regex.match(line).groups()[0] for line in lines if regex.match(line)]) + regex = re.compile(r"^([a-f0-9]{40}).*$") + bad_commits = set( + [regex.match(line).groups()[0] for line in lines if regex.match(line)] + ) bad_commits_filtered = bad_commits - commits_to_skip @@ -130,22 +157,30 @@ def cime_bisect(testargs, good, bad, commits_to_skip, custom_script): logger.info("####################################################") logger.info("BAD MERGE FOR ARGS '{}' IS:".format(testargs)) logger.info("####################################################") - logger.warning(run_cmd_no_fail("git show {}".format(bad_commits_filtered.pop()))) + logger.warning( + run_cmd_no_fail("git show {}".format(bad_commits_filtered.pop())) + ) finally: run_cmd_no_fail("git bisect reset && git submodule update --recursive") + ############################################################################### def _main_func(description): -############################################################################### - testargs, good, bad, all_commits, custom_script = \ - parse_command_line(sys.argv, description) + ############################################################################### + testargs, good, bad, all_commits, custom_script = parse_command_line( + sys.argv, description + ) # Important: we only want to test merges if not all_commits: - commits_we_want_to_test = run_cmd_no_fail("git rev-list {}..{} --merges --first-parent".format(good, bad)).splitlines() - all_commits_ = 
run_cmd_no_fail("git rev-list {}..{}".format(good, bad)).splitlines() - commits_to_skip = set(all_commits_) - set(commits_we_want_to_test) + commits_we_want_to_test = run_cmd_no_fail( + "git rev-list {}..{} --merges --first-parent".format(good, bad) + ).splitlines() + all_commits_ = run_cmd_no_fail( + "git rev-list {}..{}".format(good, bad) + ).splitlines() + commits_to_skip = set(all_commits_) - set(commits_we_want_to_test) logger.info("Skipping {} non-merge commits".format(len(commits_to_skip))) for item in commits_to_skip: logger.debug(item) @@ -158,7 +193,8 @@ def _main_func(description): for set_of_test_args in testargs: cime_bisect(set_of_test_args, good, bad, commits_to_skip, custom_script) + ############################################################################### -if (__name__ == "__main__"): +if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/code_checker b/CIME/Tools/code_checker index e0366b0f4c1..85de0fcfd17 100755 --- a/CIME/Tools/code_checker +++ b/CIME/Tools/code_checker @@ -10,16 +10,17 @@ from standard_script_setup import * from CIME.code_checker import check_code, expect import argparse, sys, os -#pylint: disable=import-error + +# pylint: disable=import-error from distutils.spawn import find_executable logger = logging.getLogger(__name__) ############################################################################### def parse_command_line(args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( -usage="""\n{0} [--verbose] + usage="""\n{0} [--verbose] OR {0} --help @@ -30,28 +31,37 @@ OR \033[1;32m# Check code single file case.py \033[0m \033[1;32m# Note, you do NOT have to provide the path to this file, the tool will find it \033[0m > {0} case.py -""".format(os.path.basename(args[0])), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter -) +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument("-j", "--num-procs", type=int, default=10, - help="The number of files to check in parallel") + parser.add_argument( + "-j", + "--num-procs", + type=int, + default=10, + help="The number of files to check in parallel", + ) - parser.add_argument("files", nargs="*", - help="Restrict checking to specific files. Relative name is fine.") + parser.add_argument( + "files", + nargs="*", + help="Restrict checking to specific files. 
Relative name is fine.", + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.num_procs, args.files + ############################################################################### def _main_func(description): -############################################################################### + ############################################################################### pylint = find_executable("pylint") expect(pylint is not None, "pylint not found") @@ -64,7 +74,8 @@ def _main_func(description): sys.exit(0) + ############################################################################### -if (__name__ == "__main__"): +if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/compare_namelists b/CIME/Tools/compare_namelists index 10652d2b493..5aba49edbc0 100755 --- a/CIME/Tools/compare_namelists +++ b/CIME/Tools/compare_namelists @@ -13,21 +13,21 @@ import argparse, sys, os ############################################################################### def parse_command_line(args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( -usage="""\n{0} [-c ] [--verbose] + usage="""\n{0} [-c ] [--verbose] OR {0} --help \033[1mEXAMPLES:\033[0m \033[1;32m# Compare namelist files\033[0m > {0} baseline_dir/test/namelistfile mytestarea/namelistfile -c -""".format(os.path.basename(args[0])), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter -) +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) CIME.utils.setup_standard_logging_options(parser) @@ -35,35 +35,50 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("new_file", help="Path to file to compare against gold") - parser.add_argument("-c", "--case", action="store", dest="case", default=None, - help="The case base id (..). Helps us normalize data.") + parser.add_argument( + "-c", + "--case", + action="store", + dest="case", + default=None, + help="The case base id (..). 
Helps us normalize data.", + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) # Normalize case - if (args.case is not None): + if args.case is not None: args.case = CIME.utils.normalize_case_id(args.case) return args.gold_file, args.new_file, args.case + ############################################################################### def _main_func(description): -############################################################################### - gold_file, compare_file, case = \ - parse_command_line(sys.argv, description) + ############################################################################### + gold_file, compare_file, case = parse_command_line(sys.argv, description) - if (case is None): - logging.warning("No case id data available, will not be able to normalize values as effectively") + if case is None: + logging.warning( + "No case id data available, will not be able to normalize values as effectively" + ) else: logging.info("Using case: '{}'".format(case)) - success, comments = CIME.compare_namelists.compare_namelist_files(gold_file, compare_file, case) - expect(success, - "Namelist diff between files {} and {}\n{}".format(gold_file, compare_file, comments)) + success, comments = CIME.compare_namelists.compare_namelist_files( + gold_file, compare_file, case + ) + expect( + success, + "Namelist diff between files {} and {}\n{}".format( + gold_file, compare_file, comments + ), + ) print("Files {} and {} MATCH".format(gold_file, compare_file)) + ############################################################################### -if (__name__ == "__main__"): +if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/compare_test_results b/CIME/Tools/compare_test_results index 249de07fc2a..3b59ccd007f 100755 --- a/CIME/Tools/compare_test_results +++ b/CIME/Tools/compare_test_results @@ -32,9 +32,9 @@ _MACHINE = Machines() ############################################################################### def parse_command_line(args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( -usage="""\n{0} [-r ] [-b -c ] [-t ] [ ...] [--verbose] + usage="""\n{0} [-r ] [-b -c ] [-t ] [ ...] [--verbose] OR {0} --help @@ -49,57 +49,106 @@ OR > {0} -r /home/jenkins/acme/scratch/jenkins -b next \033[1;32m# For typical CESM workflow, where baselines are named with tags \033[0m > {0} -t TESTID -b BASELINE_TAG -""".format(os.path.basename(args[0])), - -description=description, +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) -formatter_class=argparse.ArgumentDefaultsHelpFormatter -) - - default_compiler = _MACHINE.get_default_compiler() - scratch_root = _MACHINE.get_value("CIME_OUTPUT_ROOT") - default_testroot = os.path.join(scratch_root) + default_compiler = _MACHINE.get_default_compiler() + scratch_root = _MACHINE.get_value("CIME_OUTPUT_ROOT") + default_testroot = os.path.join(scratch_root) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument("-n", "--namelists-only", action="store_true", - help="Only analyze namelists.") - - parser.add_argument("--hist-only", action="store_true", - help="Only analyze history files.") - - parser.add_argument("-b", "--baseline-name", - help="Name of baselines to use. 
Default will use BASELINE_NAME_CMP first if possible, otherwise branch name.") - - parser.add_argument("--baseline-root", - help="Root of baselines. Default will use BASELINE_ROOT from the case.") - - parser.add_argument("-c", "--compiler", default=default_compiler, - help="Compiler of run you want to compare") - - parser.add_argument("-r", "--test-root", default=default_testroot, - help="Path to test results that are being compared") - - parser.add_argument("-t", "--test-id", - help="Limit processes to case dirs matching this test-id. Can be useful if mutiple runs dumped into the same dir.") - - parser.add_argument("compare_tests", nargs="*", - help="When comparing, limit the comparison to tests matching these regex") + parser.add_argument( + "-n", "--namelists-only", action="store_true", help="Only analyze namelists." + ) + + parser.add_argument( + "--hist-only", action="store_true", help="Only analyze history files." + ) + + parser.add_argument( + "-b", + "--baseline-name", + help="Name of baselines to use. Default will use BASELINE_NAME_CMP first if possible, otherwise branch name.", + ) + + parser.add_argument( + "--baseline-root", + help="Root of baselines. Default will use BASELINE_ROOT from the case.", + ) + + parser.add_argument( + "-c", + "--compiler", + default=default_compiler, + help="Compiler of run you want to compare", + ) + + parser.add_argument( + "-r", + "--test-root", + default=default_testroot, + help="Path to test results that are being compared", + ) + + parser.add_argument( + "-t", + "--test-id", + help="Limit processes to case dirs matching this test-id. Can be useful if multiple runs are dumped into the same dir.", + ) + + parser.add_argument( + "compare_tests", + nargs="*", + help="When comparing, limit the comparison to tests matching these regexes", + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - return args.baseline_name, args.baseline_root, args.test_root, args.compiler, args.test_id, args.compare_tests, args.namelists_only, args.hist_only + return ( + args.baseline_name, + args.baseline_root, + args.test_root, + args.compiler, + args.test_id, + args.compare_tests, + args.namelists_only, + args.hist_only, + ) + ############################################################################### def _main_func(description): -############################################################################### - baseline_name, baseline_root, test_root, compiler, test_id, compare_tests, namelists_only, hist_only = \ - parse_command_line(sys.argv, description) - - success = compare_test_results(baseline_name, baseline_root, test_root, compiler, test_id, compare_tests, namelists_only, hist_only) + ############################################################################### + ( + baseline_name, + baseline_root, + test_root, + compiler, + test_id, + compare_tests, + namelists_only, + hist_only, + ) = parse_command_line(sys.argv, description) + + success = compare_test_results( + baseline_name, + baseline_root, + test_root, + compiler, + test_id, + compare_tests, + namelists_only, + hist_only, + ) sys.exit(0 if success else CIME.utils.TESTS_FAILED_ERR_CODE) + ############################################################################### -if (__name__ == "__main__"): +if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/component_compare_baseline b/CIME/Tools/component_compare_baseline index bf43662c58d..2adad3b94ad 100755 --- a/CIME/Tools/component_compare_baseline +++ b/CIME/Tools/component_compare_baseline @@ -6,12 +6,12 @@ 
Compares current component history files against baselines from standard_script_setup import * -from CIME.case import Case +from CIME.case import Case from CIME.hist_utils import compare_baseline ############################################################################### def parse_command_line(args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( usage="""\n{0} [] [--verbose] OR @@ -20,26 +20,29 @@ OR \033[1mEXAMPLES:\033[0m \033[1;32m# Compare baselines \033[0m > {0} -""".format(os.path.basename(args[0])), +""".format( + os.path.basename(args[0]) + ), description=description, - formatter_class=argparse.ArgumentDefaultsHelpFormatter + formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory") + parser.add_argument( + "caseroot", nargs="?", default=os.getcwd(), help="Case directory" + ) - parser.add_argument("-b", "--baseline-dir", - help="Use custom baseline dir") + parser.add_argument("-b", "--baseline-dir", help="Use custom baseline dir") args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.caseroot, args.baseline_dir + ############################################################################### def _main_func(description): -############################################################################### + ############################################################################### caseroot, baseline_dir = parse_command_line(sys.argv, description) with Case(caseroot) as case: success, comments = compare_baseline(case, baseline_dir) @@ -47,5 +50,6 @@ def _main_func(description): sys.exit(0 if success else 1) + if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/component_compare_copy b/CIME/Tools/component_compare_copy index 17aa2560a7e..28005cd6674 100755 --- a/CIME/Tools/component_compare_copy +++ b/CIME/Tools/component_compare_copy @@ -7,12 +7,12 @@ This allows us to save these results if we want to run the case again. 
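# component_compare_baseline above shows the exit-status idiom these small
# tools share: open the case via the Case context manager, run one comparison,
# print the comments, and map the boolean result onto the process return code
# so shells and test harnesses can chain on it. A hedged sketch with a
# stand-in comparison function (run_and_exit is not a CIME helper):
import sys

def run_and_exit(case, compare_fn, *args):
    success, comments = compare_fn(case, *args)
    print(comments)
    sys.exit(0 if success else 1)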
from standard_script_setup import * -from CIME.case import Case +from CIME.case import Case from CIME.hist_utils import copy_histfiles ############################################################################### def parse_command_line(args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( usage="""\n{0} suffix [] [--verbose] OR @@ -21,29 +21,33 @@ OR \033[1mEXAMPLES:\033[0m \033[1;32m# Setup case \033[0m > {0} -""".format(os.path.basename(args[0])), +""".format( + os.path.basename(args[0]) + ), description=description, - formatter_class=argparse.ArgumentDefaultsHelpFormatter + formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument("--suffix", - help="Suffix to append to hist files") + parser.add_argument("--suffix", help="Suffix to append to hist files") - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory") + parser.add_argument( + "caseroot", nargs="?", default=os.getcwd(), help="Case directory" + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.suffix, args.caseroot + ############################################################################### def _main_func(description): -############################################################################### + ############################################################################### suffix, caseroot = parse_command_line(sys.argv, description) with Case(caseroot) as case: copy_histfiles(case, suffix) + if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/component_compare_test b/CIME/Tools/component_compare_test index 431d986198d..ece602e1af4 100755 --- a/CIME/Tools/component_compare_test +++ b/CIME/Tools/component_compare_test @@ -6,12 +6,12 @@ Compares two component history files in the testcase directory from standard_script_setup import * -from CIME.case import Case +from CIME.case import Case from CIME.hist_utils import compare_test ############################################################################### def parse_command_line(args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( usage="""\n{0} suffix1 suffix2 [] [--verbose] OR @@ -20,29 +20,31 @@ OR \033[1mEXAMPLES:\033[0m \033[1;32m# Setup case \033[0m > {0} -""".format(os.path.basename(args[0])), +""".format( + os.path.basename(args[0]) + ), description=description, - formatter_class=argparse.ArgumentDefaultsHelpFormatter + formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument("suffix1", - help="The suffix of the first set of files") + parser.add_argument("suffix1", help="The suffix of the first set of files") - parser.add_argument("suffix2", - help="The suffix of the second set of files") + parser.add_argument("suffix2", help="The suffix of the second set of files") - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory") + parser.add_argument( + "caseroot", nargs="?", default=os.getcwd(), help="Case directory" + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.suffix1, args.suffix2, args.caseroot + 
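# The usage strings in these tools embed raw ANSI escapes: "\033[1m...\033[0m"
# renders bold and "\033[1;32m...\033[0m" renders green, so --help output
# highlights the EXAMPLES block on a color terminal. A tiny sketch of the
# same convention (the program name and suffixes below are placeholders):
usage = (
    "\n{0} suffix1 suffix2 [--verbose]\n"
    "\033[1mEXAMPLES:\033[0m\n"
    "\033[1;32m# Compare 'base' and 'rest' history files\033[0m\n"
    "> {0} base rest\n"
).format("component_compare_test")
print(usage)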
############################################################################### def _main_func(description): -############################################################################### + ############################################################################### suffix1, suffix2, caseroot = parse_command_line(sys.argv, description) with Case(caseroot) as case: success, comments = compare_test(case, suffix1, suffix2) @@ -50,5 +52,6 @@ def _main_func(description): sys.exit(0 if success else 1) + if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/component_generate_baseline b/CIME/Tools/component_generate_baseline index 6721c7326c0..ff8f39170f0 100755 --- a/CIME/Tools/component_generate_baseline +++ b/CIME/Tools/component_generate_baseline @@ -6,12 +6,12 @@ Copies current component history files into baselines from standard_script_setup import * -from CIME.case import Case +from CIME.case import Case from CIME.hist_utils import generate_baseline ############################################################################### def parse_command_line(args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( usage="""\n{0} [] [--verbose] OR @@ -20,39 +20,49 @@ OR \033[1mEXAMPLES:\033[0m \033[1;32m# Generate baselines \033[0m > {0} -""".format(os.path.basename(args[0])), +""".format( + os.path.basename(args[0]) + ), description=description, - formatter_class=argparse.ArgumentDefaultsHelpFormatter + formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory") + parser.add_argument( + "caseroot", nargs="?", default=os.getcwd(), help="Case directory" + ) - parser.add_argument("-b", "--baseline-dir", - help="Use custom baseline dir") + parser.add_argument("-b", "--baseline-dir", help="Use custom baseline dir") - parser.add_argument("-o", "--allow-baseline-overwrite", action="store_true", - help="By default an attempt to overwrite an existing baseline directory " - "will raise an error. Specifying this option allows " - "existing baseline directories to be silently overwritten.") + parser.add_argument( + "-o", + "--allow-baseline-overwrite", + action="store_true", + help="By default an attempt to overwrite an existing baseline directory " + "will raise an error. 
Specifying this option allows " + "existing baseline directories to be silently overwritten.", + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.caseroot, args.baseline_dir, args.allow_baseline_overwrite + ############################################################################### def _main_func(description): -############################################################################### - caseroot, baseline_dir, allow_baseline_overwrite = \ - parse_command_line(sys.argv, description) + ############################################################################### + caseroot, baseline_dir, allow_baseline_overwrite = parse_command_line( + sys.argv, description + ) with Case(caseroot) as case: - success, comments = generate_baseline(case, baseline_dir, - allow_baseline_overwrite) + success, comments = generate_baseline( + case, baseline_dir, allow_baseline_overwrite + ) print(comments) sys.exit(0 if success else 1) + if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/cs.status b/CIME/Tools/cs.status index 92085d0d76f..3db5402b741 100755 --- a/CIME/Tools/cs.status +++ b/CIME/Tools/cs.status @@ -14,56 +14,94 @@ from CIME.utils import expect from CIME.cs_status import cs_status from CIME import test_status -_PERFORMANCE_PHASES = [test_status.THROUGHPUT_PHASE, - test_status.MEMCOMP_PHASE] +_PERFORMANCE_PHASES = [test_status.THROUGHPUT_PHASE, test_status.MEMCOMP_PHASE] ############################################################################### def parse_command_line(args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( - description=description, - formatter_class=argparse.RawTextHelpFormatter) + description=description, formatter_class=argparse.RawTextHelpFormatter + ) parser.add_argument("paths", nargs="*", help="Paths to TestStatus files.") options_group = parser.add_mutually_exclusive_group() - options_group.add_argument("-s", "--summary", action="store_true", - help="Only show summary") - - options_group.add_argument("-f", "--fails-only", action="store_true", - help="Only show non-PASSes (this includes PENDs as well as FAILs)") - - parser.add_argument("-c", "--count-fails", action="append", default=[], - metavar="PHASE", - help="For this phase, do not give line-by-line output; instead, just report\n" - "the total number of tests that have not PASSed this phase\n" - "(this includes PENDs as well as FAILs).\n" - "This is typically used with the --fails-only option,\n" - "but it can also be used without that option.\n" - "(However, it cannot be used with the --summary option.)\n" - "(Can be specified multiple times.)") - - performance_fails_equivalent = ' '.join(["--count-fails {}".format(phase) - for phase in _PERFORMANCE_PHASES]) - parser.add_argument("-p", "--count-performance-fails", action="store_true", - help="For phases that involve performance comparisons with baseline:\n" - "Do not give line-by-line output; instead, just report the total number\n" - "of tests that have not PASSed this phase.\n" - "(This can be useful because these performance comparisons can be\n" - "subject to machine variability.)\n" - "This is equivalent to specifying:\n" - "{}".format(performance_fails_equivalent)) - - parser.add_argument("-x", "--expected-fails-file", - help="Path to XML file listing expected failures for this test suite") - - parser.add_argument("-t", "--test-id", 
action="append", default=[], - help="Include all tests with this test id.\n" - "(Can be specified multiple times.)") - - parser.add_argument("-r", "--test-root", default=os.getcwd(), - help="Test root used when --test-id is given") + options_group.add_argument( + "-s", "--summary", action="store_true", help="Only show summary" + ) + + options_group.add_argument( + "-f", + "--fails-only", + action="store_true", + help="Only show non-PASSes (this includes PENDs as well as FAILs)", + ) + + parser.add_argument( + "-c", + "--count-fails", + action="append", + default=[], + metavar="PHASE", + help="For this phase, do not give line-by-line output; instead, just report\n" + "the total number of tests that have not PASSed this phase\n" + "(this includes PENDs as well as FAILs).\n" + "This is typically used with the --fails-only option,\n" + "but it can also be used without that option.\n" + "(However, it cannot be used with the --summary option.)\n" + "(Can be specified multiple times.)", + ) + + performance_fails_equivalent = " ".join( + ["--count-fails {}".format(phase) for phase in _PERFORMANCE_PHASES] + ) + parser.add_argument( + "-p", + "--count-performance-fails", + action="store_true", + help="For phases that involve performance comparisons with baseline:\n" + "Do not give line-by-line output; instead, just report the total number\n" + "of tests that have not PASSed this phase.\n" + "(This can be useful because these performance comparisons can be\n" + "subject to machine variability.)\n" + "This is equivalent to specifying:\n" + "{}".format(performance_fails_equivalent), + ) + + parser.add_argument( + "--check-throughput", + action="store_true", + help="Fail if throughput check fails (fail if tests slow down)", + ) + + parser.add_argument( + "--check-memory", + action="store_true", + help="Fail if memory check fails (fail if tests footprint grows)", + ) + + parser.add_argument( + "-x", + "--expected-fails-file", + help="Path to XML file listing expected failures for this test suite", + ) + + parser.add_argument( + "-t", + "--test-id", + action="append", + default=[], + help="Include all tests with this test id.\n" + "(Can be specified multiple times.)", + ) + + parser.add_argument( + "-r", + "--test-root", + default=os.getcwd(), + help="Test root used when --test-id is given", + ) args = parser.parse_args(args[1:]) @@ -72,35 +110,72 @@ def parse_command_line(args, description): if args.count_performance_fails: args.count_fails.extend(_PERFORMANCE_PHASES) - return args.paths, args.summary, args.fails_only, args.count_fails, args.expected_fails_file, args.test_id, args.test_root + return ( + args.paths, + args.summary, + args.fails_only, + args.count_fails, + args.check_throughput, + args.check_memory, + args.expected_fails_file, + args.test_id, + args.test_root, + ) + def _validate_args(args): - expect(not (args.summary and args.count_fails), - "--count-fails cannot be specified with --summary") - expect(not (args.summary and args.count_performance_fails), - "--count-performance-fails cannot be specified with --summary") - _validate_phases(args.count_fails, '--count-fails') + expect( + not (args.summary and args.count_fails), + "--count-fails cannot be specified with --summary", + ) + expect( + not (args.summary and args.count_performance_fails), + "--count-performance-fails cannot be specified with --summary", + ) + _validate_phases(args.count_fails, "--count-fails") + def _validate_phases(list_of_phases, arg_name): for phase in list_of_phases: - expect(phase in test_status.ALL_PHASES, - 
"Phase {} specified with {} argument is not a valid TestStatus phase".format( - phase, arg_name)) + expect( + phase in test_status.ALL_PHASES, + "Phase {} specified with {} argument is not a valid TestStatus phase".format( + phase, arg_name + ), + ) + ############################################################################### def _main_func(description): -############################################################################### - test_paths, summary, fails_only, count_fails, expected_fails_file, test_ids, test_root = parse_command_line(sys.argv, description) + ############################################################################### + ( + test_paths, + summary, + fails_only, + count_fails, + check_throughput, + check_memory, + expected_fails_file, + test_ids, + test_root, + ) = parse_command_line(sys.argv, description) for test_id in test_ids: - test_paths.extend(glob.glob(os.path.join(test_root, "*%s/TestStatus" % test_id))) + test_paths.extend( + glob.glob(os.path.join(test_root, "*%s/TestStatus" % test_id)) + ) + + cs_status( + test_paths=test_paths, + summary=summary, + fails_only=fails_only, + count_fails_phase_list=count_fails, + check_throughput=check_throughput, + check_memory=check_memory, + expected_fails_filepath=expected_fails_file, + ) - cs_status(test_paths=test_paths, - summary=summary, - fails_only=fails_only, - count_fails_phase_list=count_fails, - expected_fails_filepath=expected_fails_file) ############################################################################### -if (__name__ == "__main__"): +if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/e3sm_check_env b/CIME/Tools/e3sm_check_env index e7ce0e5923a..b1756c9cfd3 100755 --- a/CIME/Tools/e3sm_check_env +++ b/CIME/Tools/e3sm_check_env @@ -16,117 +16,163 @@ LOG = [] ############################################################################### def parse_command_line(args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( usage="""\n{0} [--verbose] OR {0} --help -""".format(os.path.basename(args[0])), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter -) +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) CIME.utils.setup_standard_logging_options(parser) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) + ############################################################################### def check_sh(): -############################################################################### - stat = run_cmd('sh --version')[0] + ############################################################################### + stat = run_cmd("sh --version")[0] if stat != 0: - LOG.append('* sh appears not to be available in your environment.') - LOG.append(' Please make sure it exists in your PATH.') + LOG.append("* sh appears not to be available in your environment.") + LOG.append(" Please make sure it exists in your PATH.") + ############################################################################### def check_csh(): # Can't believe I'm actually checking for csh. 
-JNJ -############################################################################### - stat = run_cmd('csh --version')[0] + ############################################################################### + stat = run_cmd("csh --version")[0] if stat != 0: # Also tolerates tcsh - LOG.append('* csh appears not to be available in your environment.') - LOG.append(' Please make sure it exists in your PATH.') + LOG.append("* csh appears not to be available in your environment.") + LOG.append(" Please make sure it exists in your PATH.") + ############################################################################### def check_perl_module(module_name): -############################################################################### + ############################################################################### stat = run_cmd('perl -e "require {};"'.format(module_name))[0] if stat != 0: - LOG.append('* E3SM requires the Perl module {}, but it is not available.'.format(module_name)) - LOG.append(' Please make sure that it exists in your @INC.') + LOG.append( + "* E3SM requires the Perl module {}, but it is not available.".format( + module_name + ) + ) + LOG.append(" Please make sure that it exists in your @INC.") + ############################################################################### def check_perl(): -############################################################################### + ############################################################################### # First, make sure we have the right version of Perl. e3sm_perl_major_version = 5 e3sm_perl_minor_version = 16 stat, output, _ = run_cmd("perl -e 'print $^V;'") if stat != 0: - LOG.append('* Perl appears not to be available in your environment.') - LOG.append(' Please make sure it exists in your PATH.') + LOG.append("* Perl appears not to be available in your environment.") + LOG.append(" Please make sure it exists in your PATH.") return - output = output[1:] # get rid of leading 'v' character - major_version, minor_version, _ = [int(item) for item in output.split('.')] - if major_version != e3sm_perl_major_version or minor_version < e3sm_perl_minor_version: - LOG.append('* E3SM requires Perl version {:d}.{:d}+. You appear to be using {:d}.{:d}.'.format(e3sm_perl_major_version, e3sm_perl_minor_version, major_version, minor_version)) - LOG.append(' Please check to see whether an appropriate version exists on this machine,') - LOG.append(' possibly via a loadable module.') + output = output[1:]  # get rid of leading 'v' character + major_version, minor_version, _ = [int(item) for item in output.split(".")] + if ( + major_version != e3sm_perl_major_version + or minor_version < e3sm_perl_minor_version + ): + LOG.append( + "* E3SM requires Perl version {:d}.{:d}+. You appear to be using {:d}.{:d}.".format( + e3sm_perl_major_version, + e3sm_perl_minor_version, + major_version, + minor_version, + ) + ) + LOG.append( + " Please check to see whether an appropriate version exists on this machine," + ) + LOG.append(" possibly via a loadable module.") # Okay, our version is good. What about all those pesky modules? 
- check_perl_module('XML::LibXML') - check_perl_module('XML::SAX') - check_perl_module('XML::SAX::Exception') - check_perl_module('Switch') + check_perl_module("XML::LibXML") + check_perl_module("XML::SAX") + check_perl_module("XML::SAX::Exception") + check_perl_module("Switch") + ############################################################################### def check_git(): -############################################################################### + ############################################################################### e3sm_git_major_version = 2 e3sm_git_minor_version = 0 - stat, output, _ = run_cmd('git --version') + stat, output, _ = run_cmd("git --version") if stat != 0: - LOG.append('* Git appears not to be available in your environment.') - LOG.append(' Please make sure it exists in your PATH.') + LOG.append("* Git appears not to be available in your environment.") + LOG.append(" Please make sure it exists in your PATH.") return version = output.split()[-1] - num_dots = version.count('.') + num_dots = version.count(".") if num_dots == 1: - major_version, minor_version = [int(s) for s in version.split('.')] + major_version, minor_version = [int(s) for s in version.split(".")] elif num_dots == 2: - major_version, minor_version, _ = [int(s) for s in version.split('.')] + major_version, minor_version, _ = [int(s) for s in version.split(".")] else: LOG.append('* Unparseable git version string: "{}"'.format(output)) return - if major_version != e3sm_git_major_version or minor_version < e3sm_git_minor_version: - LOG.append('* E3SM requires Git version {:d}.{:d}+. You appear to be using version {:d}.{:d}.'.format(e3sm_git_major_version, e3sm_git_minor_version, major_version, minor_version)) + if ( + major_version != e3sm_git_major_version + or minor_version < e3sm_git_minor_version + ): + LOG.append( + "* E3SM requires Git version {:d}.{:d}+. You appear to be using version {:d}.{:d}.".format( + e3sm_git_major_version, + e3sm_git_minor_version, + major_version, + minor_version, + ) + ) + ############################################################################### def check_svn(): -############################################################################### + ############################################################################### e3sm_svn_major_version = 1 e3sm_svn_minor_version = 4 e3sm_svn_patch_version = 2 - stat, output, _ = run_cmd('svn --version --quiet') + stat, output, _ = run_cmd("svn --version --quiet") if stat != 0: - LOG.append('* Subversion appears not to be available in your environment.') - LOG.append(' Please make sure it exists in your PATH.') + LOG.append("* Subversion appears not to be available in your environment.") + LOG.append(" Please make sure it exists in your PATH.") return - major_version, minor_version, patch_version = [int(s) for s in output.split('.')] - if major_version < e3sm_svn_major_version or minor_version < e3sm_svn_minor_version or patch_version < e3sm_svn_patch_version: - LOG.append('* E3SM requires Subversion version {:d}.{:d}.{:d}+. You appear to be using version {:d}.{:d}.{:d}.'.format(e3sm_svn_major_version, e3sm_svn_minor_version, e3sm_svn_patch_version, major_version, minor_version, patch_version)) + major_version, minor_version, patch_version = [int(s) for s in output.split(".")] + if ( + major_version < e3sm_svn_major_version + or minor_version < e3sm_svn_minor_version + or patch_version < e3sm_svn_patch_version + ): + LOG.append( + "* E3SM requires Subversion version {:d}.{:d}.{:d}+. 
You appear to be using version {:d}.{:d}.{:d}.".format( + e3sm_svn_major_version, + e3sm_svn_minor_version, + e3sm_svn_patch_version, + major_version, + minor_version, + patch_version, + ) + ) + ############################################################################### def _main_func(description): -############################################################################### + ############################################################################### parse_command_line(sys.argv, description) check_sh() @@ -136,16 +182,19 @@ def _main_func(description): check_svn() if len(LOG) > 0: - print('e3sm_check_env found problems with your E3SM development environment:\n') + print("e3sm_check_env found problems with your E3SM development environment:\n") for line in LOG: print(line) sys.exit(1) else: - print('e3sm_check_env found no problems with your E3SM development environment.') + print( + "e3sm_check_env found no problems with your E3SM development environment." + ) sys.exit(0) + ############################################################################### -if (__name__ == "__main__"): +if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/e3sm_compile_wrap.py b/CIME/Tools/e3sm_compile_wrap.py index 358b8816c27..949a40a7564 100644 --- a/CIME/Tools/e3sm_compile_wrap.py +++ b/CIME/Tools/e3sm_compile_wrap.py @@ -14,16 +14,19 @@ ############################################################################### def run_cmd(args): -############################################################################### + ############################################################################### t1 = time.time() result = subprocess.call(args) t2 = time.time() arglen = len(args) target = None - for idx, arg, in enumerate(args): - if arg == "-o" and idx+1 < arglen: - target = args[idx+1] + for ( + idx, + arg, + ) in enumerate(args): + if arg == "-o" and idx + 1 < arglen: + target = args[idx + 1] break if arg.startswith("lib") and arg.endswith(".a"): @@ -38,20 +41,23 @@ def run_cmd(args): return result + ############################################################################### def parse_command_line(args, _): -############################################################################### + ############################################################################### return args[1:] + ############################################################################### def _main_func(description): -############################################################################### + ############################################################################### cmd_args = parse_command_line(sys.argv, description) result = run_cmd(cmd_args) sys.exit(result) + ############################################################################### if __name__ == "__main__": diff --git a/CIME/Tools/generate_cylc_workflow.py b/CIME/Tools/generate_cylc_workflow.py index f6721efc40a..16bbeec65e8 100755 --- a/CIME/Tools/generate_cylc_workflow.py +++ b/CIME/Tools/generate_cylc_workflow.py @@ -6,58 +6,74 @@ from standard_script_setup import * -from CIME.case import Case -from CIME.utils import expect, transform_vars +from CIME.case import Case +from CIME.utils import expect, transform_vars import argparse, re + logger = logging.getLogger(__name__) ############################################################################### def parse_command_line(args, description): -############################################################################### + 
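A note on the git and svn version checks above: comparing major, minor, and patch fields independently can misfire (for example, svn 2.0.0 would trip the "minor < 4" branch). A sketch of a padded-tuple comparison, offered as an illustration rather than the tools' actual logic:

def version_at_least(version_str, minimum):
    # pad so "2.30" compares cleanly against a three-part minimum
    parts = [int(p) for p in version_str.split(".")]
    parts += [0] * (len(minimum) - len(parts))
    return tuple(parts[: len(minimum)]) >= tuple(minimum)

assert version_at_least("2.30.1", (2, 0))       # git >= 2.0
assert version_at_least("2.0.0", (1, 4, 2))     # svn >= 1.4.2
assert not version_at_least("1.3.9", (1, 4, 2))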
############################################################################### parser = argparse.ArgumentParser( - description=description, - formatter_class=argparse.RawTextHelpFormatter) + description=description, formatter_class=argparse.RawTextHelpFormatter + ) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory for which namelists are generated.\n" - "Default is current directory.") - - parser.add_argument('--cycles', default=1, - help="The number of cycles to run, default is RESUBMIT") - - parser.add_argument("--ensemble", default=1, - help="generate suite.rc for an ensemble of cases, the case name argument must end in an integer.\n" - "for example: ./generate_cylc_workflow.py --ensemble 4 \n" - "will generate a workflow file in the current case, if that case is named case.01," - "the workflow will include case.01, case.02, case.03 and case.04") + parser.add_argument( + "caseroot", + nargs="?", + default=os.getcwd(), + help="Case directory for which namelists are generated.\n" + "Default is current directory.", + ) + + parser.add_argument( + "--cycles", default=1, help="The number of cycles to run, default is RESUBMIT" + ) + + parser.add_argument( + "--ensemble", + default=1, + help="generate suite.rc for an ensemble of cases, the case name argument must end in an integer.\n" + "for example: ./generate_cylc_workflow.py --ensemble 4 \n" + "will generate a workflow file in the current case, if that case is named case.01," + "the workflow will include case.01, case.02, case.03 and case.04", + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.caseroot, args.cycles, int(args.ensemble) + def cylc_get_ensemble_first_and_last(case, ensemble): if ensemble == 1: - return 1,None + return 1, None casename = case.get_value("CASE") m = re.search(r"(.*[^\d])(\d+)$", casename) minval = int(m.group(2)) - maxval = minval+ensemble-1 - return minval,maxval + maxval = minval + ensemble - 1 + return minval, maxval + def cylc_get_case_path_string(case, ensemble): caseroot = case.get_value("CASEROOT") casename = case.get_value("CASE") if ensemble == 1: return "{};".format(caseroot) - basepath = os.path.abspath(caseroot+"/..") + basepath = os.path.abspath(caseroot + "/..") m = re.search(r"(.*[^\d])(\d+)$", casename) expect(m, "casename {} must end in an integer for ensemble method".format(casename)) - return "{basepath}/{basename}$(printf \"%0{intlen}d\"".format(basepath=basepath, basename=m.group(1), intlen=len(m.group(2))) + " ${CYLC_TASK_PARAM_member});" + return ( + '{basepath}/{basename}$(printf "%0{intlen}d"'.format( + basepath=basepath, basename=m.group(1), intlen=len(m.group(2)) + ) + + " ${CYLC_TASK_PARAM_member});" + ) def cylc_batch_job_template(job, jobname, case, ensemble): @@ -68,15 +84,24 @@ def cylc_batch_job_template(job, jobname, case, ensemble): submit_args = env_batch.get_submit_args(case, job) case_path_string = cylc_get_case_path_string(case, ensemble) - return """ + return ( + """ [[{jobname}]] script = cd {case_path_string} ./case.submit --job {job} [[[job]]] batch system = {batch_system_type} batch submit command template = {batchsubmit} {submit_args} '%(job)s' [[[directives]]] -""".format(jobname=jobname, job=job, case_path_string=case_path_string, batch_system_type=batch_system_type, - batchsubmit=batchsubmit, submit_args=submit_args) + "{{ batchdirectives }}\n" +""".format( + jobname=jobname, + job=job, + case_path_string=case_path_string, + 
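The ensemble support above hinges on one naming convention: the case name ends in a zero-padded integer. A small sketch of how the stem, member range, and pad width fall out of the regex (the case names here are illustrative):

import re

def ensemble_range(casename, ensemble):
    m = re.search(r"(.*[^\d])(\d+)$", casename)
    assert m, "casename must end in an integer for the ensemble method"
    first = int(m.group(2))
    return m.group(1), first, first + ensemble - 1, len(m.group(2))

stem, first, last, width = ensemble_range("case.01", 4)
members = ["{}{:0{}d}".format(stem, i, width) for i in range(first, last + 1)]
print(members)  # ['case.01', 'case.02', 'case.03', 'case.04']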
batch_system_type=batch_system_type, + batchsubmit=batchsubmit, + submit_args=submit_args, + ) + + "{{ batchdirectives }}\n" + ) def cylc_script_job_template(job, case, ensemble): @@ -84,73 +109,101 @@ def cylc_script_job_template(job, case, ensemble): return """ [[{job}]] script = cd {case_path_string} ./case.submit --job {job} -""".format(job=job, case_path_string=case_path_string) +""".format( + job=job, case_path_string=case_path_string + ) + ############################################################################### def _main_func(description): -############################################################################### + ############################################################################### caseroot, cycles, ensemble = parse_command_line(sys.argv, description) - expect(os.path.isfile(os.path.join(caseroot, "CaseStatus")), - "case.setup must be run prior to running {}".format(__file__)) + expect( + os.path.isfile(os.path.join(caseroot, "CaseStatus")), + "case.setup must be run prior to running {}".format(__file__), + ) with Case(caseroot, read_only=True) as case: if cycles == 1: - cycles = max(1, case.get_value('RESUBMIT')) - env_batch = case.get_env('batch') - env_workflow = case.get_env('workflow') + cycles = max(1, case.get_value("RESUBMIT")) + env_batch = case.get_env("batch") + env_workflow = case.get_env("workflow") jobs = env_workflow.get_jobs() - casename = case.get_value('CASE') - input_template = os.path.join(case.get_value("MACHDIR"),"cylc_suite.rc.template") + casename = case.get_value("CASE") + input_template = os.path.join( + case.get_value("MACHDIR"), "cylc_suite.rc.template" + ) - overrides = {"cycles":cycles, - 'casename':casename} + overrides = {"cycles": cycles, "casename": casename} input_text = open(input_template).read() - first,last = cylc_get_ensemble_first_and_last(case, ensemble) + first, last = cylc_get_ensemble_first_and_last(case, ensemble) if ensemble == 1: - overrides.update({'members':"{}".format(first)}) - overrides.update({"workflow_description":"case {}".format(case.get_value("CASE"))}) + overrides.update({"members": "{}".format(first)}) + overrides.update( + {"workflow_description": "case {}".format(case.get_value("CASE"))} + ) else: - overrides.update({'members':"{}..{}".format(first,last)}) + overrides.update({"members": "{}..{}".format(first, last)}) firstcase = case.get_value("CASE") intlen = len(str(last)) - lastcase = firstcase[:-intlen]+str(last) - overrides.update({"workflow_description":"ensemble from {} to {}".format(firstcase,lastcase)}) - overrides.update({"case_path_string":cylc_get_case_path_string(case, ensemble)}) + lastcase = firstcase[:-intlen] + str(last) + overrides.update( + { + "workflow_description": "ensemble from {} to {}".format( + firstcase, lastcase + ) + } + ) + overrides.update( + {"case_path_string": cylc_get_case_path_string(case, ensemble)} + ) for job in jobs: jobname = job - if job == 'case.st_archive': + if job == "case.st_archive": continue - if job == 'case.run': - jobname = 'run' + if job == "case.run": + jobname = "run" overrides.update(env_batch.get_job_overrides(job, case)) - overrides.update({'job_id':'run.'+casename}) - input_text = input_text + cylc_batch_job_template(job, jobname, case, ensemble) + overrides.update({"job_id": "run." 
+ casename}) + input_text = input_text + cylc_batch_job_template( + job, jobname, case, ensemble + ) else: - depends_on = env_workflow.get_value('dependency', subgroup=job) - if depends_on.startswith('case.'): + depends_on = env_workflow.get_value("dependency", subgroup=job) + if depends_on.startswith("case."): depends_on = depends_on[5:] - input_text = input_text.replace(' => '+depends_on,' => '+depends_on+' => '+job) - + input_text = input_text.replace( + " => " + depends_on, " => " + depends_on + " => " + job + ) overrides.update(env_batch.get_job_overrides(job, case)) - overrides.update({'job_id':job+'.'+casename}) - if 'total_tasks' in overrides and overrides['total_tasks'] > 1: - input_text = input_text + cylc_batch_job_template(job, jobname, case, ensemble) + overrides.update({"job_id": job + "." + casename}) + if "total_tasks" in overrides and overrides["total_tasks"] > 1: + input_text = input_text + cylc_batch_job_template( + job, jobname, case, ensemble + ) else: - input_text = input_text + cylc_script_job_template(jobname, case, ensemble) - - - overrides.update({'batchdirectives':env_batch.get_batch_directives(case,job, overrides=overrides, - output_format='cylc')}) + input_text = input_text + cylc_script_job_template( + jobname, case, ensemble + ) + + overrides.update( + { + "batchdirectives": env_batch.get_batch_directives( + case, job, overrides=overrides, output_format="cylc" + ) + } + ) # we need to re-transform for each job to get job size correctly - input_text = transform_vars(input_text, case=case, subgroup=job, overrides=overrides) + input_text = transform_vars( + input_text, case=case, subgroup=job, overrides=overrides + ) with open("suite.rc", "w") as f: f.write(case.get_resolved_value(input_text)) - -if (__name__ == "__main__"): +if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/getTiming b/CIME/Tools/getTiming index 62310daabf1..3f16847385a 100755 --- a/CIME/Tools/getTiming +++ b/CIME/Tools/getTiming @@ -8,23 +8,30 @@ import argparse, sys, os from CIME.case import Case from CIME.get_timing import get_timing + def parse_command_line(args, description): parser = argparse.ArgumentParser( usage="\n%s [-lid|--lid] [-h|--help]" % os.path.basename(args[0]), description=description, - formatter_class=argparse.ArgumentDefaultsHelpFormatter) + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument("-lid", "--lid", - help="print using yymmdd-hhmmss format", - default="999999-999999") - parser.add_argument("--caseroot", default=os.getcwd(), - help="Case directory to get timing for") + parser.add_argument( + "-lid", + "--lid", + help="print using yymmdd-hhmmss format", + default="999999-999999", + ) + parser.add_argument( + "--caseroot", default=os.getcwd(), help="Case directory to get timing for" + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.caseroot, args.lid + def __main_func(description): """ Reads timing information from $CASEROOT/timing/$MODEL_timing_stats.$lid and @@ -34,5 +41,6 @@ def __main_func(description): with Case(caseroot, read_only=True) as case: get_timing(case, lid) + if __name__ == "__main__": __main_func(__doc__) diff --git a/CIME/Tools/get_case_env b/CIME/Tools/get_case_env index f19945806f3..eeed5957865 100755 --- a/CIME/Tools/get_case_env +++ b/CIME/Tools/get_case_env @@ -6,7 +6,7 @@ Dump what the CIME environment would be for a case. Only supports E3SM for now. 
""" from standard_script_setup import * -from CIME.XML.machines import Machines +from CIME.XML.machines import Machines from CIME.test_scheduler import TestScheduler from CIME.utils import parse_test_name, expect import get_tests @@ -15,9 +15,9 @@ import argparse, tempfile, shutil ############################################################################### def parse_command_line(raw_args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( -usage="""{0} [-c ] + usage="""{0} [-c ] OR {0} --help @@ -33,40 +33,56 @@ OR \033[1;32m# Same as above but also load it into current shell env \033[0m > eval $(./{0} -c SMS_Mmpi-serial.f09_g16.X) -""".format(os.path.basename(raw_args[0])), -description=description, -formatter_class=argparse.RawTextHelpFormatter -) +""".format( + os.path.basename(raw_args[0]) + ), + description=description, + formatter_class=argparse.RawTextHelpFormatter, + ) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument("-c", "--case", default="SMS.f09_g16.X", - help="The case for which you want the env. Default=%(default)s") + parser.add_argument( + "-c", + "--case", + default="SMS.f09_g16.X", + help="The case for which you want the env. Default=%(default)s", + ) raw_args.append("--silent") args = CIME.utils.parse_args_and_handle_standard_logging_options(raw_args, parser) return args.case + ############################################################################### def _main_func(description): -############################################################################### + ############################################################################### casename = parse_command_line(sys.argv, description) compiler = parse_test_name(casename)[5] machine = get_tests.infer_machine_name_from_tests([casename]) mach_obj = Machines(machine=machine) compiler = mach_obj.get_default_compiler() if compiler is None else compiler - full_test_name = get_tests.get_full_test_names([casename], mach_obj.get_machine_name(), compiler)[0] + full_test_name = get_tests.get_full_test_names( + [casename], mach_obj.get_machine_name(), compiler + )[0] output_root = tempfile.mkdtemp() shell_env = None try: - impl = TestScheduler([full_test_name], no_build=True, machine_name=machine, compiler=compiler, - output_root=output_root) + impl = TestScheduler( + [full_test_name], + no_build=True, + machine_name=machine, + compiler=compiler, + output_root=output_root, + ) success = impl.run_tests() - test_dir = impl._get_test_dir(full_test_name) # pylint: disable=protected-access + test_dir = impl._get_test_dir( + full_test_name + ) # pylint: disable=protected-access shell_exe = os.path.split(os.environ["SHELL"])[-1] suffix = ".sh" if shell_exe in ["bash", "sh"] else ".csh" file_to_read = os.path.join(test_dir, ".env_mach_specific{}".format(suffix)) @@ -90,10 +106,11 @@ def _main_func(description): expect(shell_env is not None, "Bad shell_env state") print(shell_env) else: - print(errs)#, file=sys.stderr) + print(errs) # , file=sys.stderr) sys.exit(1) + ############################################################################### -if (__name__ == "__main__"): +if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/get_standard_makefile_args b/CIME/Tools/get_standard_makefile_args index 02d336a54bd..5357c09cc1a 100755 --- a/CIME/Tools/get_standard_makefile_args +++ b/CIME/Tools/get_standard_makefile_args @@ -7,29 
+7,33 @@ should only be used when the components buildlib is not written in python from standard_script_setup import * -from CIME.build import get_standard_makefile_args -from CIME.case import Case -from CIME.test_status import * +from CIME.build import get_standard_makefile_args +from CIME.case import Case +from CIME.test_status import * ############################################################################### def parse_command_line(args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( - description=description, - formatter_class=argparse.RawTextHelpFormatter) + description=description, formatter_class=argparse.RawTextHelpFormatter + ) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory to build.\n" - "Default is current directory.") + parser.add_argument( + "caseroot", + nargs="?", + default=os.getcwd(), + help="Case directory to build.\n" "Default is current directory.", + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.caseroot + ############################################################################### def _main_func(description): -############################################################################### + ############################################################################### caseroot = parse_command_line(sys.argv, description) success = True @@ -38,5 +42,6 @@ def _main_func(description): sys.exit(0 if success else 1) + if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/jenkins_generic_job b/CIME/Tools/jenkins_generic_job index 4ee3ecfd5bc..1084fa90a39 100755 --- a/CIME/Tools/jenkins_generic_job +++ b/CIME/Tools/jenkins_generic_job @@ -16,9 +16,9 @@ from CIME.jenkins_generic_job import jenkins_generic_job ############################################################################### def parse_command_line(args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( -usage="""\n{0} [-g] [-d] [--verbose] + usage="""\n{0} [-g] [-d] [--verbose] OR {0} --help @@ -29,81 +29,146 @@ OR > {0} -d \033[1;32m# Run the tests, generating a full set of baselines (useful for first run on a machine) \033[0m > {0} -g -""".format(os.path.basename(args[0])), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter -) +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) CIME.utils.setup_standard_logging_options(parser) default_baseline = CIME.utils.get_current_branch(repo=CIME.utils.get_cime_root()) if default_baseline is not None: - default_baseline = default_baseline.replace(".", "_").replace("/", "_") # Dots or slashes will mess things up - - parser.add_argument("-g", "--generate-baselines", action="store_true", - help="Generate baselines") - - parser.add_argument("--baseline-compare", action="store_true", - help="Do baseline comparisons. Off by default.") - - parser.add_argument("--submit-to-cdash", action="store_true", - help="Send results to CDash") - - parser.add_argument("-n", "--no-submit", action="store_true", - help="Force us to not send results to CDash, overrides --submit-to-cdash. 
Useful for CI") - - parser.add_argument("--update-success", action="store_true", - help="Record test success in baselines. Only the nightly process should use this in general.") - - parser.add_argument("--no-update-success", action="store_true", - help="For us to not record test success in baselines, overrides --update-success. Useful for CI.") - - parser.add_argument("--no-batch", action="store_true", - help="Do not use batch system even if on batch machine") - - parser.add_argument("-c", "--cdash-build-name", - help="Build name to use for CDash submission. Default will be __") - - parser.add_argument("-p", "--cdash-project", default=CIME.wait_for_tests.E3SM_MAIN_CDASH, - help="The name of the CDash project where results should be uploaded") - - parser.add_argument("-b", "--baseline-name", default=default_baseline, - help="Baseline name for baselines to use. Also impacts dashboard job name. Useful for testing a branch other than next or master") - - parser.add_argument("-B", "--baseline-root", - help="Baseline area for baselines to use. Default will be config_machine value for machine") - - parser.add_argument("-O", "--override-baseline-name", - help="Force comparison with these baseines without impacting dashboard or test-id.") - - parser.add_argument("-t", "--test-suite", - help="Override default e3sm test suite that will be run") - - parser.add_argument("-r", "--scratch-root", - help="Override default e3sm scratch root. Use this to avoid conflicting with other jenkins jobs") - - parser.add_argument("--cdash-build-group", default=CIME.wait_for_tests.CDASH_DEFAULT_BUILD_GROUP, - help="The build group to be used to display results on the CDash dashboard.") - - parser.add_argument("-j", "--parallel-jobs", type=int, default=None, - help="Number of tasks create_test should perform simultaneously. Default " - "will be min(num_cores, num_tests).") - - parser.add_argument("--walltime", - help="Force a specific walltime for all tests.") - - parser.add_argument("-m", "--machine", - help="The machine for which to build tests, this machine must be defined" - " in the config_machines.xml file for the given model. " - "Default is to match the name of the machine in the test name or " - "the name of the machine this script is run on to the " - "NODENAME_REGEX field in config_machines.xml. This option is highly " - "unsafe and should only be used if you know what you're doing.") - - parser.add_argument("--compiler", - help="Compiler to use to build cime. Default will be the default defined for the machine.") + default_baseline = default_baseline.replace(".", "_").replace( + "/", "_" + ) # Dots or slashes will mess things up + + parser.add_argument( + "-g", "--generate-baselines", action="store_true", help="Generate baselines" + ) + + parser.add_argument( + "--baseline-compare", + action="store_true", + help="Do baseline comparisons. Off by default.", + ) + + parser.add_argument( + "--submit-to-cdash", action="store_true", help="Send results to CDash" + ) + + parser.add_argument( + "-n", + "--no-submit", + action="store_true", + help="Force us to not send results to CDash, overrides --submit-to-cdash. Useful for CI", + ) + + parser.add_argument( + "--update-success", + action="store_true", + help="Record test success in baselines. Only the nightly process should use this in general.", + ) + + parser.add_argument( + "--no-update-success", + action="store_true", + help="For us to not record test success in baselines, overrides --update-success. 
Useful for CI.", + ) + + parser.add_argument( + "--no-batch", + action="store_true", + help="Do not use batch system even if on batch machine", + ) + + parser.add_argument( + "-c", + "--cdash-build-name", + help="Build name to use for CDash submission. Default will be __", + ) + + parser.add_argument( + "-p", + "--cdash-project", + default=CIME.wait_for_tests.E3SM_MAIN_CDASH, + help="The name of the CDash project where results should be uploaded", + ) + + parser.add_argument( + "-b", + "--baseline-name", + default=default_baseline, + help="Baseline name for baselines to use. Also impacts dashboard job name. Useful for testing a branch other than next or master", + ) + + parser.add_argument( + "-B", + "--baseline-root", + help="Baseline area for baselines to use. Default will be config_machine value for machine", + ) + + parser.add_argument( + "-O", + "--override-baseline-name", + help="Force comparison with these baseines without impacting dashboard or test-id.", + ) + + parser.add_argument( + "-t", "--test-suite", help="Override default e3sm test suite that will be run" + ) + + parser.add_argument( + "-r", + "--scratch-root", + help="Override default e3sm scratch root. Use this to avoid conflicting with other jenkins jobs", + ) + + parser.add_argument( + "--cdash-build-group", + default=CIME.wait_for_tests.CDASH_DEFAULT_BUILD_GROUP, + help="The build group to be used to display results on the CDash dashboard.", + ) + + parser.add_argument( + "-j", + "--parallel-jobs", + type=int, + default=None, + help="Number of tasks create_test should perform simultaneously. Default " + "will be min(num_cores, num_tests).", + ) + + parser.add_argument("--walltime", help="Force a specific walltime for all tests.") + + parser.add_argument( + "-m", + "--machine", + help="The machine for which to build tests, this machine must be defined" + " in the config_machines.xml file for the given model. " + "Default is to match the name of the machine in the test name or " + "the name of the machine this script is run on to the " + "NODENAME_REGEX field in config_machines.xml. This option is highly " + "unsafe and should only be used if you know what you're doing.", + ) + + parser.add_argument( + "--compiler", + help="Compiler to use to build cime. 
Default will be the default defined for the machine.", + ) + + parser.add_argument( + "--check-throughput", + action="store_true", + help="Fail if throughput check fails (fail if tests slow down)", + ) + + parser.add_argument( + "--check-memory", + action="store_true", + help="Fail if memory check fails (fail if tests footprint grows)", + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) @@ -113,39 +178,121 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter if args.no_update_success: args.update_success = False - expect(not (args.submit_to_cdash and args.generate_baselines), - "Does not make sense to use --generate-baselines and --submit-to-cdash together") - expect(not (args.cdash_build_name is not None and not args.submit_to_cdash), - "Does not make sense to use --cdash-build-name without --submit-to-cdash") - expect(not (args.cdash_project is not CIME.wait_for_tests.E3SM_MAIN_CDASH and not args.submit_to_cdash), - "Does not make sense to use --cdash-project without --submit-to-cdash") + expect( + not (args.submit_to_cdash and args.generate_baselines), + "Does not make sense to use --generate-baselines and --submit-to-cdash together", + ) + expect( + not (args.cdash_build_name is not None and not args.submit_to_cdash), + "Does not make sense to use --cdash-build-name without --submit-to-cdash", + ) + expect( + not ( + args.cdash_project is not CIME.wait_for_tests.E3SM_MAIN_CDASH + and not args.submit_to_cdash + ), + "Does not make sense to use --cdash-project without --submit-to-cdash", + ) machine = Machines(machine=args.machine) args.machine = machine - args.test_suite = machine.get_value("TESTS") if args.test_suite is None else args.test_suite - args.scratch_root = machine.get_value("CIME_OUTPUT_ROOT") if args.scratch_root is None else args.scratch_root - args.compiler = machine.get_default_compiler() if args.compiler is None else args.compiler - - expect(args.baseline_name is not None, - "Failed to probe baseline_name from git branch, please provide one. It is essential for formulating the test-id even if baseline comparisons are not being done") + args.test_suite = ( + machine.get_value("TESTS") if args.test_suite is None else args.test_suite + ) + args.scratch_root = ( + machine.get_value("CIME_OUTPUT_ROOT") + if args.scratch_root is None + else args.scratch_root + ) + args.compiler = ( + machine.get_default_compiler() if args.compiler is None else args.compiler + ) + + expect( + args.baseline_name is not None, + "Failed to probe baseline_name from git branch, please provide one. 
It is essential for formulating the test-id even if baseline comparisons are not being done", + ) if args.override_baseline_name is None: args.override_baseline_name = args.baseline_name - return args.generate_baselines, args.submit_to_cdash, args.no_batch, args.baseline_name, args.cdash_build_name, \ - args.cdash_project, args.test_suite, args.cdash_build_group, args.baseline_compare, args.scratch_root, args.parallel_jobs, args.walltime, args.machine, args.compiler, args.override_baseline_name, args.baseline_root, args.update_success + return ( + args.generate_baselines, + args.submit_to_cdash, + args.no_batch, + args.baseline_name, + args.cdash_build_name, + args.cdash_project, + args.test_suite, + args.cdash_build_group, + args.baseline_compare, + args.scratch_root, + args.parallel_jobs, + args.walltime, + args.machine, + args.compiler, + args.override_baseline_name, + args.baseline_root, + args.update_success, + args.check_throughput, + args.check_memory, + ) + ############################################################################### def _main_func(description): -############################################################################### - generate_baselines, submit_to_cdash, no_batch, baseline_name, cdash_build_name, cdash_project, test_suite, cdash_build_group, baseline_compare, scratch_root, parallel_jobs, walltime, machine, compiler, real_baseline_name, baseline_root, update_success = \ - parse_command_line(sys.argv, description) + ############################################################################### + ( + generate_baselines, + submit_to_cdash, + no_batch, + baseline_name, + cdash_build_name, + cdash_project, + test_suite, + cdash_build_group, + baseline_compare, + scratch_root, + parallel_jobs, + walltime, + machine, + compiler, + real_baseline_name, + baseline_root, + update_success, + check_throughput, + check_memory, + ) = parse_command_line(sys.argv, description) + + sys.exit( + 0 + if jenkins_generic_job( + generate_baselines, + submit_to_cdash, + no_batch, + baseline_name, + cdash_build_name, + cdash_project, + test_suite, + cdash_build_group, + baseline_compare, + scratch_root, + parallel_jobs, + walltime, + machine, + compiler, + real_baseline_name, + baseline_root, + update_success, + check_throughput, + check_memory, + ) + else CIME.utils.TESTS_FAILED_ERR_CODE + ) - sys.exit(0 if jenkins_generic_job(generate_baselines, submit_to_cdash, no_batch, baseline_name, cdash_build_name, cdash_project, test_suite, cdash_build_group, baseline_compare, scratch_root, parallel_jobs, walltime, machine, compiler, real_baseline_name, baseline_root, update_success) - else CIME.utils.TESTS_FAILED_ERR_CODE) ############################################################################### -if (__name__ == "__main__"): +if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/list_e3sm_tests b/CIME/Tools/list_e3sm_tests index 4ca5ed0ac9c..c577584a537 100755 --- a/CIME/Tools/list_e3sm_tests +++ b/CIME/Tools/list_e3sm_tests @@ -13,9 +13,9 @@ import sys, argparse, os ############################################################################### def parse_command_line(args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( -usage="""\n{0} [ ...] [--verbose] + usage="""\n{0} [ ...] 
[--verbose] OR {0} --help @@ -26,43 +26,54 @@ OR > {0} compsets e3sm_developer \033[1;32m# List all grids tested by e3sm_developer \033[0m > {0} grid e3sm_developer -""".format(os.path.basename(args[0])), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument("suites", nargs="+", - help="The test suites to list. Test suites: {}".format(", ".join(get_tests.get_test_suites()))) + parser.add_argument( + "suites", + nargs="+", + help="The test suites to list. Test suites: {}".format( + ", ".join(get_tests.get_test_suites()) + ), + ) - parser.add_argument("-t", "--thing-to-list", choices=("compsets", "grids", "testcases", "tests"), default="tests", - help="The thing you want to list") + parser.add_argument( + "-t", + "--thing-to-list", + choices=("compsets", "grids", "testcases", "tests"), + default="tests", + help="The thing you want to list", + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - if (not args.suites): + if not args.suites: args.suites = get_tests.get_test_suites() return args.thing_to_list, args.suites + ############################################################################### def list_tests(thing_to_list, suites): -############################################################################### + ############################################################################### things = set() for suite in suites: tests = get_tests.get_test_suite(suite) for test in tests: testcase, _, grid, compset = CIME.utils.parse_test_name(test)[:4] - if (thing_to_list == "compsets"): + if thing_to_list == "compsets": things.add(compset) - elif (thing_to_list == "grids"): + elif thing_to_list == "grids": things.add(grid) - elif (thing_to_list == "testcases"): + elif thing_to_list == "testcases": things.add(testcase) - elif (thing_to_list == "tests"): + elif thing_to_list == "tests": things.add(test) else: expect(False, "Unrecognized thing to list '{}'".format(thing_to_list)) @@ -71,12 +82,14 @@ def list_tests(thing_to_list, suites): for item in sorted(things): print(" {}".format(item)) + ############################################################################### def _main_func(description): -############################################################################### + ############################################################################### thing_to_list, suites = parse_command_line(sys.argv, description) list_tests(thing_to_list, suites) + if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/mvsource b/CIME/Tools/mvsource index 7d2179d8513..3f1879a298e 100755 --- a/CIME/Tools/mvsource +++ b/CIME/Tools/mvsource @@ -3,6 +3,7 @@ # It is intended to fix links and update your case source if the caseroot or src is moved. 
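To make the bucketing in list_tests concrete, here is a toy stand-in for CIME.utils.parse_test_name; the real function also parses modifiers, machine, and compiler fields, so this is only a sketch:

def parse_test_name(test):
    # toy stand-in: real test names carry more fields than this
    testcase, grid, compset = test.split(".")[:3]
    return testcase, None, grid, compset

things = set()
for test in ["ERS.f19_g16.B1850", "SMS.f09_g16.X"]:
    _, _, grid, _ = parse_test_name(test)
    things.add(grid)  # thing_to_list == "grids"
print(sorted(things))  # ['f09_g16', 'f19_g16']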
# import os, sys + caseroot = sys.argv[1] cimeroot = sys.argv[2] @@ -15,11 +16,15 @@ try: from CIME.case import Case from CIME.utils import symlink_force except: - print("ERROR: {} does not appear to be a valid CIMEROOT directory\n".format(cimeroot)) + print( + "ERROR: {} does not appear to be a valid CIMEROOT directory\n".format(cimeroot) + ) # simple way to verify we are in a caseroot -if not os.path.exists(os.path.join(caseroot,"env_case.xml")): - print("ERROR: {} does not appear to be a valid CASEROOT directory\n".format(caseroot)) +if not os.path.exists(os.path.join(caseroot, "env_case.xml")): + print( + "ERROR: {} does not appear to be a valid CASEROOT directory\n".format(caseroot) + ) for dirpath, dirnames, filenames in os.walk(caseroot): os.chdir(dirpath) @@ -27,9 +32,9 @@ for dirpath, dirnames, filenames in os.walk(caseroot): if os.path.islink(_file): oldpath = os.path.realpath(os.readlink(_file)) link_name = _file - cr = os.sep+'cime'+os.sep + cr = os.sep + "cime" + os.sep if cr in oldpath: - index = oldpath.find(cr)+len(cr)-1 + index = oldpath.find(cr) + len(cr) - 1 newpath = cimeroot + oldpath[index:] if os.path.exists(newpath): print("Updating link for {}".format(_file)) @@ -38,10 +43,10 @@ os.chdir(caseroot) with Case(caseroot, read_only=False) as case: print("Updating case xml") - case.set_value('CIMEROOT', cimeroot) - case.set_value('SRCROOT', os.path.dirname(cimeroot)) - case.set_value('CASEROOT', caseroot) - model = case.get_value('MODEL') - case.set_value("MACHDIR", os.path.join(cimeroot,"config",model,"machines")) + case.set_value("CIMEROOT", cimeroot) + case.set_value("SRCROOT", os.path.dirname(cimeroot)) + case.set_value("CASEROOT", caseroot) + model = case.get_value("MODEL") + case.set_value("MACHDIR", os.path.join(cimeroot, "config", model, "machines")) -os.system('cp env_case.xml LockedFiles/') +os.system("cp env_case.xml LockedFiles/") diff --git a/CIME/Tools/normalize_cases b/CIME/Tools/normalize_cases index df4fb8b2164..6fa0e8111dc 100755 --- a/CIME/Tools/normalize_cases +++ b/CIME/Tools/normalize_cases @@ -15,7 +15,7 @@ import argparse, sys, os, glob ############################################################################### def parse_command_line(args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( usage="""\n{0} case1 case2 OR @@ -23,12 +23,12 @@ OR \033[1mEXAMPLES:\033[0m > {0} case1 case2 -""".format(os.path.basename(args[0])), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter -) +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) CIME.utils.setup_standard_logging_options(parser) @@ -40,21 +40,26 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter return args.case1, args.case2 + ############################################################################### def normalize_cases(case1, case2): -############################################################################### + ############################################################################### # gunzip all logs for case_dir in [case1, case2]: for log_dir in ["bld", "run"]: gzips = glob.glob(os.path.join(case_dir, log_dir, "*.gz")) - if (gzips): + if gzips: run_cmd_no_fail("gunzip -f {}".format(" ".join(gzips))) # Change case1 to be as if it had same test-id as case2 test_id1 = run_cmd_no_fail("./xmlquery --value 
TEST_TESTID", from_dir=case1) test_id2 = run_cmd_no_fail("./xmlquery --value TEST_TESTID", from_dir=case2) - run_cmd_no_fail("for item in $(find -type f); do sed -i 's/{}/{}/g' $item; done".format(test_id1, test_id2), - from_dir=case1) + run_cmd_no_fail( + "for item in $(find -type f); do sed -i 's/{}/{}/g' $item; done".format( + test_id1, test_id2 + ), + from_dir=case1, + ) # Change case1 to look as if it is was built/run at exact same time as case2 for log_dir in ["bld", "run"]: @@ -70,31 +75,46 @@ def normalize_cases(case1, case2): case2_lids = list(sorted(case2_lids)) for case1_lid, case2_lid in zip(case1_lids, case2_lids): - run_cmd_no_fail("for item in $(find -type f); do sed -i 's/{}/{}/g' $item; done".format(case1_lid, case2_lid), - from_dir=case1) + run_cmd_no_fail( + "for item in $(find -type f); do sed -i 's/{}/{}/g' $item; done".format( + case1_lid, case2_lid + ), + from_dir=case1, + ) for case1_lid, case2_lid in zip(case1_lids, case2_lids): - files_needing_rename = run_cmd_no_fail('find -depth -name "*.{}"'.format(case1_lid), from_dir=case1).splitlines() + files_needing_rename = run_cmd_no_fail( + 'find -depth -name "*.{}"'.format(case1_lid), from_dir=case1 + ).splitlines() for file_needing_rename in files_needing_rename: expect(file_needing_rename.endswith(case1_lid), "broken") new_name = file_needing_rename.rstrip(case1_lid) + case2_lid - os.rename(os.path.join(case1, file_needing_rename), os.path.join(case1, new_name)) + os.rename( + os.path.join(case1, file_needing_rename), + os.path.join(case1, new_name), + ) # Normalize CIMEROOT case1_root = run_cmd_no_fail("./xmlquery --value CIMEROOT", from_dir=case1) case2_root = run_cmd_no_fail("./xmlquery --value CIMEROOT", from_dir=case2) - if (case1_root != case2_root): - run_cmd_no_fail("for item in $(find -type f); do sed -i 's:{}:{}:g' $item; done".format(case1_root, case2_root), - from_dir=case1) + if case1_root != case2_root: + run_cmd_no_fail( + "for item in $(find -type f); do sed -i 's:{}:{}:g' $item; done".format( + case1_root, case2_root + ), + from_dir=case1, + ) + ############################################################################### def _main_func(description): -############################################################################### + ############################################################################### case1, case2 = parse_command_line(sys.argv, description) normalize_cases(case1, case2) + ############################################################################### -if (__name__ == "__main__"): +if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/pelayout b/CIME/Tools/pelayout index 6ed06bae676..48a13a4cc91 100755 --- a/CIME/Tools/pelayout +++ b/CIME/Tools/pelayout @@ -49,60 +49,80 @@ logger = logging.getLogger(__name__) ############################################################################### def parse_command_line(args, description): -############################################################################### + ############################################################################### # Start with usage description - parser = argparse.ArgumentParser(description=description , - formatter_class=argparse.RawDescriptionHelpFormatter) + parser = argparse.ArgumentParser( + description=description, formatter_class=argparse.RawDescriptionHelpFormatter + ) CIME.utils.setup_standard_logging_options(parser) # Set command line options - parser.add_argument("--set-ntasks", default=None, - help="Total number of tasks to set for the case") - - 
parser.add_argument("--set-nthrds", "--set-nthreads", default=None, - help="Number of threads to set for all components") - - parser.add_argument("--format", - default="%4C: %6T/%6H; %6R %6P", - help="Format the PE layout items for each component (see below)") - - parser.add_argument("--header", - default="Comp NTASKS NTHRDS ROOTPE PSTRIDE", - help="Custom header for PE layout display") - - parser.add_argument("--no-header", default=False , action="store_true" , - help="Do not print any PE layout header") - - parser.add_argument("--caseroot", default=os.getcwd(), - help="Case directory to reference") + parser.add_argument( + "--set-ntasks", default=None, help="Total number of tasks to set for the case" + ) + + parser.add_argument( + "--set-nthrds", + "--set-nthreads", + default=None, + help="Number of threads to set for all components", + ) + + parser.add_argument( + "--format", + default="%4C: %6T/%6H; %6R %6P", + help="Format the PE layout items for each component (see below)", + ) + + parser.add_argument( + "--header", + default="Comp NTASKS NTHRDS ROOTPE PSTRIDE", + help="Custom header for PE layout display", + ) + + parser.add_argument( + "--no-header", + default=False, + action="store_true", + help="Do not print any PE layout header", + ) + + parser.add_argument( + "--caseroot", default=os.getcwd(), help="Case directory to reference" + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - if (args.no_header): + if args.no_header: args.header = None # End if return args.format, args.set_ntasks, args.set_nthrds, args.header, args.caseroot + + # End def parse_command_line ############################################################################### def get_value_as_string(case, var, attribute=None, resolved=False, subgroup=None): -############################################################################### + ############################################################################### thistype = case.get_type_info(var) - value = case.get_value(var, attribute=attribute, resolved=resolved, subgroup=subgroup) + value = case.get_value( + var, attribute=attribute, resolved=resolved, subgroup=subgroup + ) if value is not None and thistype: value = convert_to_string(value, thistype, var) return value + ############################################################################### def format_pelayout(comp, ntasks, nthreads, rootpe, pstride, arg_format): -############################################################################### + ############################################################################### """ Format the PE layout information for each component, using a default format, or using the arg_format input, if it exists. 
""" - subs = { 'C': comp, 'T': ntasks, 'H': nthreads, 'R': rootpe, 'P' : pstride} + subs = {"C": comp, "T": ntasks, "H": nthreads, "R": rootpe, "P": pstride} layout_str = re.sub(r"%([0-9]*)C", r"{C:\1}", arg_format) layout_str = re.sub(r"%([-+0-9]*)T", r"{T:\1}", layout_str) layout_str = re.sub(r"%([-+0-9]*)H", r"{H:\1}", layout_str) @@ -110,23 +130,34 @@ def format_pelayout(comp, ntasks, nthreads, rootpe, pstride, arg_format): layout_str = re.sub(r"%([-+0-9]*)P", r"{P:\1}", layout_str) layout_str = layout_str.format(**subs) return layout_str + + # End def format_pelayout ############################################################################### def print_pelayout(case, ntasks, nthreads, rootpes, pstrid, arg_format, header): -############################################################################### + ############################################################################### """ Print the PE layout information for each component, using the format, if it exists. """ comp_classes = case.get_values("COMP_CLASSES") - if (header is not None): + if header is not None: print(header) # End if maxthrds = -1 for comp in comp_classes: - print(format_pelayout(comp, ntasks[comp], nthreads[comp], rootpes[comp], pstrid[comp], arg_format)) + print( + format_pelayout( + comp, + ntasks[comp], + nthreads[comp], + rootpes[comp], + pstrid[comp], + arg_format, + ) + ) if nthreads[comp] > maxthrds: maxthrds = nthreads[comp] # End for @@ -137,15 +168,16 @@ def print_pelayout(case, ntasks, nthreads, rootpes, pstrid, arg_format, header): print("ESMF_AWARE_THREADING is {}".format(eat)) tasks = case.get_value("MAX_MPITASKS_PER_NODE") if not eat: - tasks = tasks/maxthrds + tasks = tasks / maxthrds print("ROOTPE is with respect to {} tasks per node".format(tasks)) + # End def print_pelayout ############################################################################### def gather_pelayout(case): -############################################################################### + ############################################################################### """ Gather the PE layout information for each component """ @@ -156,27 +188,31 @@ def gather_pelayout(case): comp_classes = case.get_values("COMP_CLASSES") for comp in comp_classes: - ntasks[comp] = int(case.get_value("NTASKS_"+comp)) - nthreads[comp] = int(case.get_value("NTHRDS_"+comp)) - rootpes[comp] = int(case.get_value("ROOTPE_"+comp)) - pstride[comp] = int(case.get_value("PSTRID_"+comp)) + ntasks[comp] = int(case.get_value("NTASKS_" + comp)) + nthreads[comp] = int(case.get_value("NTHRDS_" + comp)) + rootpes[comp] = int(case.get_value("ROOTPE_" + comp)) + pstride[comp] = int(case.get_value("PSTRID_" + comp)) # End for return ntasks, nthreads, rootpes, pstride + + # End def gather_pelayout ############################################################################### def set_nthreads(case, nthreads): -############################################################################### + ############################################################################### comp_classes = case.get_values("COMP_CLASSES") for comp in comp_classes: case.set_value("NTHRDS", nthreads, comp) # End for + + # End def set_nthreads ############################################################################### def modify_ntasks(case, new_tot_tasks): -############################################################################### + ############################################################################### comp_classes = case.get_values("COMP_CLASSES") new_tasks = {} 
new_roots = {} @@ -187,12 +223,12 @@ def modify_ntasks(case, new_tot_tasks): # How many tasks are currently being used? for comp in comp_classes: - if ((curr_tasks[comp] + curr_roots[comp]) > curr_tot_tasks): + if (curr_tasks[comp] + curr_roots[comp]) > curr_tot_tasks: curr_tot_tasks = curr_tasks[comp] + curr_roots[comp] # End if # End for - if (new_tot_tasks != curr_tot_tasks): + if new_tot_tasks != curr_tot_tasks: # Compute new task counts and root pes for comp in comp_classes: new_tasks[comp] = curr_tasks[comp] * new_tot_tasks / curr_tot_tasks @@ -214,32 +250,37 @@ def modify_ntasks(case, new_tot_tasks): # We got this far? Go ahead and change PE layout for comp in comp_classes: - case.set_value("NTASKS_"+comp, new_tasks[comp], comp) - case.set_value("ROOTPE_"+comp, new_roots[comp], comp) + case.set_value("NTASKS_" + comp, new_tasks[comp], comp) + case.set_value("ROOTPE_" + comp, new_roots[comp], comp) # End for # End if (#tasks changed) + + # End def modify_ntasks ############################################################################### def _main_func(description): -############################################################################### + ############################################################################### # Initialize command line parser and get command line options - arg_format, set_ntasks, set_nthrds, header, caseroot = parse_command_line(sys.argv, description) + arg_format, set_ntasks, set_nthrds, header, caseroot = parse_command_line( + sys.argv, description + ) # Initialize case ; read in all xml files from caseroot with Case(caseroot, record=True) as case: - if (set_nthrds is not None): + if set_nthrds is not None: set_nthreads(case, set_nthrds) # End if - if (set_ntasks is not None): + if set_ntasks is not None: modify_ntasks(case, int(set_ntasks)) # End if ntasks, nthreads, rootpes, pstrid = gather_pelayout(case) print_pelayout(case, ntasks, nthreads, rootpes, pstrid, arg_format, header) # End with + # End def _main_func -if (__name__ == "__main__"): +if __name__ == "__main__": _main_func(__doc__) # End if diff --git a/CIME/Tools/preview_namelists b/CIME/Tools/preview_namelists index 96ad8b02da3..69b89eb5663 100755 --- a/CIME/Tools/preview_namelists +++ b/CIME/Tools/preview_namelists @@ -17,41 +17,51 @@ Typical usage is simply: from standard_script_setup import * -from CIME.case import Case -from CIME.utils import expect +from CIME.case import Case +from CIME.utils import expect import argparse ############################################################################### def parse_command_line(args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( - description=description, - formatter_class=argparse.RawTextHelpFormatter) + description=description, formatter_class=argparse.RawTextHelpFormatter + ) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory for which namelists are generated.\n" - "Default is current directory.") + parser.add_argument( + "caseroot", + nargs="?", + default=os.getcwd(), + help="Case directory for which namelists are generated.\n" + "Default is current directory.", + ) - parser.add_argument('--component', - help="Specify component's namelist to build.\n" - "If not specified, generates namelists for all components.") + parser.add_argument( + "--component", + help="Specify component's 
namelist to build.\n" + "If not specified, generates namelists for all components.", + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args + ############################################################################### def _main_func(description): -############################################################################### + ############################################################################### args = parse_command_line(sys.argv, description) - expect(os.path.isfile(os.path.join(args.caseroot, "CaseStatus")), - "case.setup must be run prior to running preview_namelists") + expect( + os.path.isfile(os.path.join(args.caseroot, "CaseStatus")), + "case.setup must be run prior to running preview_namelists", + ) with Case(args.caseroot, read_only=False, record=True) as case: case.create_namelists(component=args.component) -if (__name__ == "__main__"): + +if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/preview_run b/CIME/Tools/preview_run index d8b9f41d46c..06946b6abb9 100755 --- a/CIME/Tools/preview_run +++ b/CIME/Tools/preview_run @@ -24,33 +24,42 @@ logger = logging.getLogger(__name__) ############################################################################### def parse_command_line(args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( - description=description, - formatter_class=argparse.RawTextHelpFormatter) + description=description, formatter_class=argparse.RawTextHelpFormatter + ) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory to query.\n" - "Default is current directory.") - - parser.add_argument("-j", "--job", default=None, - help="The job you want to print.\n" - "Default is case.run (or case.test if this is a test).") + parser.add_argument( + "caseroot", + nargs="?", + default=os.getcwd(), + help="Case directory to query.\n" "Default is current directory.", + ) + + parser.add_argument( + "-j", + "--job", + default=None, + help="The job you want to print.\n" + "Default is case.run (or case.test if this is a test).", + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.caseroot, args.job + ############################################################################### def _main_func(description): -############################################################################### + ############################################################################### caseroot, job = parse_command_line(sys.argv, description) logging.disable(logging.INFO) with Case(caseroot, read_only=False) as case: case.preview_run(print, job) + if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/save_provenance b/CIME/Tools/save_provenance index 14d0f37efaf..dd7046e8cda 100755 --- a/CIME/Tools/save_provenance +++ b/CIME/Tools/save_provenance @@ -6,14 +6,14 @@ This tool provide command-line access to provenance-saving functionality from standard_script_setup import * -from CIME.case import Case +from CIME.case import Case from CIME.provenance import * -from CIME.utils import get_lids +from CIME.utils import get_lids from CIME.get_timing import get_timing ############################################################################### def parse_command_line(args, description): 
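preview_run quiets the case machinery before printing by disabling low-severity logging. A minimal sketch of the effect:

import logging

logging.basicConfig(level=logging.INFO)
logging.disable(logging.INFO)  # mute INFO and below, keep WARNING and up
logging.getLogger("CIME").info("hidden case-object chatter")
print("only the rendered run commands reach stdout")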
-############################################################################### + ############################################################################### parser = argparse.ArgumentParser( usage="""\n{0} [] [--verbose] OR @@ -22,36 +22,47 @@ OR \033[1mEXAMPLES:\033[0m \033[1;32m# Save run (timing) provenance for current case \033[0m > {0} postrun -""".format(os.path.basename(args[0])), +""".format( + os.path.basename(args[0]) + ), description=description, - formatter_class=argparse.ArgumentDefaultsHelpFormatter + formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument("mode", choices=("build", "prerun", "postrun"), - help="Phase for which to save provenance. " - "prerun is mostly for infrastructure testing; " - "it does not make sense to store this information manually otherwise") + parser.add_argument( + "mode", + choices=("build", "prerun", "postrun"), + help="Phase for which to save provenance. " + "prerun is mostly for infrastructure testing; " + "it does not make sense to store this information manually otherwise", + ) - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory") + parser.add_argument( + "caseroot", nargs="?", default=os.getcwd(), help="Case directory" + ) - parser.add_argument("-l", "--lid", - help="Force system to save provenance with this LID") + parser.add_argument( + "-l", "--lid", help="Force system to save provenance with this LID" + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.mode, args.caseroot, args.lid + ############################################################################### def _main_func(description): -############################################################################### + ############################################################################### mode, caseroot, lid = parse_command_line(sys.argv, description) with Case(caseroot, read_only=False) as case: if mode == "build": - expect(False, "Saving build provenance manually is not currently supported " - "but it should already always be happening automatically") + expect( + False, + "Saving build provenance manually is not currently supported " + "but it should already always be happening automatically", + ) save_build_provenance(case, lid=lid) elif mode == "prerun": expect(lid is not None, "You must provide LID for prerun mode") @@ -64,13 +75,16 @@ def _main_func(description): lids = get_lids(case) for lid in lids: # call get_timing if needed - expected_timing_file = os.path.join(caseroot, "timing", "{}_timing.{}.{}.gz" .format(model, caseid, lid)) - if (not os.path.exists(expected_timing_file)): + expected_timing_file = os.path.join( + caseroot, "timing", "{}_timing.{}.{}.gz".format(model, caseid, lid) + ) + if not os.path.exists(expected_timing_file): get_timing(case, lid) save_prerun_provenance(case, lid=lid) save_postrun_provenance(case, lid=lid) else: expect(False, "Unhandled mode '{}'".format(mode)) + if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/simple_compare b/CIME/Tools/simple_compare index 00eb05ec9ea..59bc328341c 100755 --- a/CIME/Tools/simple_compare +++ b/CIME/Tools/simple_compare @@ -13,21 +13,21 @@ import argparse, sys, os ############################################################################### def parse_command_line(args, description): -############################################################################### + 
############################################################################### parser = argparse.ArgumentParser( -usage="""\n{0} [-c ] [--verbose] + usage="""\n{0} [-c ] [--verbose] OR {0} --help \033[1mEXAMPLES:\033[0m \033[1;32m# Compare files\033[0m > {0} baseline_dir/test/file mytestarea/file -c -""".format(os.path.basename(args[0])), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter -) +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) CIME.utils.setup_standard_logging_options(parser) @@ -35,38 +35,53 @@ formatter_class=argparse.ArgumentDefaultsHelpFormatter parser.add_argument("new_file", help="Path to file to compare against gold") - parser.add_argument("-c", "--case", action="store", dest="case", default=None, - help="The case base id (..). Helps us normalize data.") + parser.add_argument( + "-c", + "--case", + action="store", + dest="case", + default=None, + help="The case base id (..). Helps us normalize data.", + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) # Normalize case - if (args.case is not None): + if args.case is not None: args.case = CIME.utils.normalize_case_id(args.case) return args.gold_file, args.new_file, args.case + ############################################################################### def _main_func(description): -############################################################################### - gold_file, compare_file, case = \ - parse_command_line(sys.argv, description) + ############################################################################### + gold_file, compare_file, case = parse_command_line(sys.argv, description) - if (case is None): - logging.warning("No case id data available, will not be able to normalize values as effectively") + if case is None: + logging.warning( + "No case id data available, will not be able to normalize values as effectively" + ) else: logging.info("Using case: '{}'".format(case)) - if gold_file.endswith('runconfig'): - success, comments = CIME.simple_compare.compare_runconfigfiles(gold_file, compare_file, case) + if gold_file.endswith("runconfig"): + success, comments = CIME.simple_compare.compare_runconfigfiles( + gold_file, compare_file, case + ) else: - success, comments = CIME.simple_compare.compare_files(gold_file, compare_file, case) - expect(success, - "Diff between files {} and {}:\n{}".format(gold_file, compare_file, comments)) + success, comments = CIME.simple_compare.compare_files( + gold_file, compare_file, case + ) + expect( + success, + "Diff between files {} and {}:\n{}".format(gold_file, compare_file, comments), + ) print("Files {} and {} MATCH".format(gold_file, compare_file)) + ############################################################################### -if (__name__ == "__main__"): +if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/standard_script_setup.py b/CIME/Tools/standard_script_setup.py index 9383ce274e4..774f7bc81f4 100644 --- a/CIME/Tools/standard_script_setup.py +++ b/CIME/Tools/standard_script_setup.py @@ -14,6 +14,7 @@ os.environ["CIMEROOT"] = cimeroot import CIME.utils + CIME.utils.check_minimum_python_version(3, 6) CIME.utils.stop_buffering_output() import logging, argparse diff --git a/CIME/Tools/testreporter.py b/CIME/Tools/testreporter.py index 4847ca90dd1..f42d94b4e7a 100755 --- a/CIME/Tools/testreporter.py +++ b/CIME/Tools/testreporter.py @@ -6,215 +6,231 @@ from standard_script_setup import * -from 
CIME.XML.env_build import EnvBuild -from CIME.XML.env_case import EnvCase -from CIME.XML.env_test import EnvTest -from CIME.XML.test_reporter import TestReporter -from CIME.utils import expect +from CIME.XML.env_build import EnvBuild +from CIME.XML.env_case import EnvCase +from CIME.XML.env_test import EnvTest +from CIME.XML.test_reporter import TestReporter +from CIME.utils import expect from CIME.XML.generic_xml import GenericXML import glob ############################################################################### def parse_command_line(args): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser() CIME.utils.setup_standard_logging_options(parser) # Parse command line options - #parser = argparse.ArgumentParser(description='Arguements for testreporter') - parser.add_argument("--tagname", - help="Name of the tag being tested.") - parser.add_argument("--testid", - help="Test id, ie c2_0_a6g_ing,c2_0_b6g_gnu.") - parser.add_argument("--testroot", - help="Root directory for tests to populate the database.") - parser.add_argument("--testtype", - help="Type of test, prealpha or prebeta.") - parser.add_argument("--dryrun",action="store_true", - help="Do a dry run, database will not be populated.") - parser.add_argument("--dumpxml",action="store_true", - help="Dump XML test results to sceen.") + # parser = argparse.ArgumentParser(description='Arguments for testreporter') + parser.add_argument("--tagname", help="Name of the tag being tested.") + parser.add_argument("--testid", help="Test id, i.e. c2_0_a6g_ing,c2_0_b6g_gnu.") + parser.add_argument( + "--testroot", help="Root directory for tests to populate the database." + ) + parser.add_argument("--testtype", help="Type of test, prealpha or prebeta.") + parser.add_argument( + "--dryrun", + action="store_true", + help="Do a dry run, database will not be populated.", + ) + parser.add_argument( + "--dumpxml", action="store_true", help="Dump XML test results to screen." + ) args = parser.parse_args() CIME.utils.parse_args_and_handle_standard_logging_options(args) - return args.testroot, args.testid, args.tagname, args.testtype, args.dryrun, args.dumpxml + return ( + args.testroot, + args.testid, + args.tagname, + args.testtype, + args.dryrun, + args.dumpxml, + ) + ############################################################################### def get_testreporter_xml(testroot, testid, tagname, testtype): -############################################################################### + ############################################################################### os.chdir(testroot) # # Retrieve compiler name and mpi library # - xml_file=glob.glob("*"+testid+"/env_build.xml") - expect(len(xml_file) > 0, "Tests not found. It's possible your testid, {} is wrong.".format(testid)) - envxml=(EnvBuild(".",infile=xml_file[0])) - compiler=envxml.get_value("COMPILER") - mpilib=envxml.get_value("MPILIB") + xml_file = glob.glob("*" + testid + "/env_build.xml") + expect( + len(xml_file) > 0, + "Tests not found.
It's possible your testid, {} is wrong.".format(testid), + ) + envxml = EnvBuild(".", infile=xml_file[0]) + compiler = envxml.get_value("COMPILER") + mpilib = envxml.get_value("MPILIB") # # Retrieve machine name # - xml_file=glob.glob("*"+testid+"/env_case.xml") - envxml=(EnvCase(".",infile=xml_file[0])) - machine=envxml.get_value("MACH") + xml_file = glob.glob("*" + testid + "/env_case.xml") + envxml = EnvCase(".", infile=xml_file[0]) + machine = envxml.get_value("MACH") # # Retrieve baseline tag to compare to # - xml_file=glob.glob("*"+testid+"/env_test.xml") - envxml=(EnvTest(".",infile=xml_file[0])) + xml_file = glob.glob("*" + testid + "/env_test.xml") + envxml = EnvTest(".", infile=xml_file[0]) baseline = envxml.get_value("BASELINE_NAME_CMP") # # Create XML header # - testxml=TestReporter() - testxml.setup_header(tagname,machine,compiler,mpilib,testroot,testtype,baseline) + testxml = TestReporter() + testxml.setup_header( + tagname, machine, compiler, mpilib, testroot, testtype, baseline + ) # # Create lists on tests based on the testid in the testroot directory. # - test_names=glob.glob("*"+testid) + test_names = glob.glob("*" + testid) # # Loop over all tests and parse the test results # - test_status={} + test_status = {} for test_name in test_names: - if not os.path.isfile(test_name+"/TestStatus"): + if not os.path.isfile(test_name + "/TestStatus"): continue - test_status['COMMENT']="" - test_status['BASELINE']='----' - test_status['MEMCOMP']='----' - test_status['MEMLEAK']='----' - test_status['NLCOMP']='----' - test_status['STATUS']='----' - test_status['TPUTCOMP']='----' + test_status["COMMENT"] = "" + test_status["BASELINE"] = "----" + test_status["MEMCOMP"] = "----" + test_status["MEMLEAK"] = "----" + test_status["NLCOMP"] = "----" + test_status["STATUS"] = "----" + test_status["TPUTCOMP"] = "----" # # Check to see if TestStatus is present, if not then continue # I might want to set the status to fail # try: - lines = [line.rstrip('\n') for line in open(test_name+"/TestStatus")] + lines = [line.rstrip("\n") for line in open(test_name + "/TestStatus")] except (IOError, OSError): - test_status['STATUS']="FAIL" - test_status['COMMENT']="TestStatus missing. " + test_status["STATUS"] = "FAIL" + test_status["COMMENT"] = "TestStatus missing. " continue # # Loop over each line of TestStatus, and check for different types of failures. # for line in lines: if "NLCOMP" in line: - test_status['NLCOMP']=line[0:4] + test_status["NLCOMP"] = line[0:4] if "MEMLEAK" in line: - test_status['MEMLEAK']=line[0:4] + test_status["MEMLEAK"] = line[0:4] if "MEMCOMP" in line: - test_status['MEMCOMP']=line[0:4] + test_status["MEMCOMP"] = line[0:4] if "BASELINE" in line: - test_status['BASELINE']=line[0:4] + test_status["BASELINE"] = line[0:4] if "TPUTCOMP" in line: - test_status['TPUTCOMP']=line[0:4] + test_status["TPUTCOMP"] = line[0:4] if "FAIL PFS" in line: - test_status['STATUS']="FAIL" + test_status["STATUS"] = "FAIL" if "INIT" in line: - test_status['INIT']=line[0:4] - if line[0:4] in ("FAIL","PEND"): - test_status['STATUS']="SFAIL" - test_status['COMMENT']+="INIT fail! " + test_status["INIT"] = line[0:4] + if line[0:4] in ("FAIL", "PEND"): + test_status["STATUS"] = "SFAIL" + test_status["COMMENT"] += "INIT fail! " break if "CREATE_NEWCASE" in line: - test_status['CREATE_NEWCASE']=line[0:4] - if line[0:4] in ("FAIL","PEND"): - test_status['STATUS']="SFAIL" - test_status['COMMENT']+="CREATE_NEWCASE fail! 
" + test_status["CREATE_NEWCASE"] = line[0:4] + if line[0:4] in ("FAIL", "PEND"): + test_status["STATUS"] = "SFAIL" + test_status["COMMENT"] += "CREATE_NEWCASE fail! " break if "XML" in line: - test_status['XML']=line[0:4] - if line[0:4] in ("FAIL","PEND"): - test_status['STATUS']="SFAIL" - test_status['COMMENT']+="XML fail! " + test_status["XML"] = line[0:4] + if line[0:4] in ("FAIL", "PEND"): + test_status["STATUS"] = "SFAIL" + test_status["COMMENT"] += "XML fail! " break if "SETUP" in line: - test_status['SETUP']=line[0:4] - if line[0:4] in ("FAIL","PEND"): - test_status['STATUS']="SFAIL" - test_status['COMMENT']+="SETUP fail! " + test_status["SETUP"] = line[0:4] + if line[0:4] in ("FAIL", "PEND"): + test_status["STATUS"] = "SFAIL" + test_status["COMMENT"] += "SETUP fail! " break if "SHAREDLIB_BUILD" in line: - test_status['SHAREDLIB_BUILD']=line[0:4] - if line[0:4] in ("FAIL","PEND"): - test_status['STATUS']="CFAIL" - test_status['COMMENT']+="SHAREDLIB_BUILD fail! " + test_status["SHAREDLIB_BUILD"] = line[0:4] + if line[0:4] in ("FAIL", "PEND"): + test_status["STATUS"] = "CFAIL" + test_status["COMMENT"] += "SHAREDLIB_BUILD fail! " break if "MODEL_BUILD" in line: - test_status['MODEL_BUILD']=line[0:4] - if line[0:4] in ("FAIL","PEND"): - test_status['STATUS']="CFAIL" - test_status['COMMENT']+="MODEL_BUILD fail! " + test_status["MODEL_BUILD"] = line[0:4] + if line[0:4] in ("FAIL", "PEND"): + test_status["STATUS"] = "CFAIL" + test_status["COMMENT"] += "MODEL_BUILD fail! " break if "SUBMIT" in line: - test_status['STATUS']=line[0:4] - if line[0:4] in ("FAIL","PEND"): - test_status['COMMENT']+="SUBMIT fail! " + test_status["STATUS"] = line[0:4] + if line[0:4] in ("FAIL", "PEND"): + test_status["COMMENT"] += "SUBMIT fail! " break if "RUN" in line: - test_status['STATUS']=line[0:4] - if line[0:4] in ("FAIL","PEND"): - test_status['COMMENT']+="RUN fail! " + test_status["STATUS"] = line[0:4] + if line[0:4] in ("FAIL", "PEND"): + test_status["COMMENT"] += "RUN fail! " break if "COMPARE_base_rest" in line: - test_status['STATUS']=line[0:4] - if line[0:4] in ("FAIL","PEND"): - test_status['COMMENT']+="Restart fail! " + test_status["STATUS"] = line[0:4] + if line[0:4] in ("FAIL", "PEND"): + test_status["COMMENT"] += "Restart fail! " break if "COMPARE_base_hybrid" in line: - test_status['STATUS']=line[0:4] - if line[0:4] in ("FAIL","PEND"): - test_status['COMMENT']+="Hybrid fail! " + test_status["STATUS"] = line[0:4] + if line[0:4] in ("FAIL", "PEND"): + test_status["COMMENT"] += "Hybrid fail! " break if "COMPARE_base_multiinst" in line: - test_status['STATUS']=line[0:4] - if line[0:4] in ("FAIL","PEND"): - test_status['COMMENT']+="Multi instance fail! " + test_status["STATUS"] = line[0:4] + if line[0:4] in ("FAIL", "PEND"): + test_status["COMMENT"] += "Multi instance fail! " break if "COMPARE_base_test" in line: - test_status['STATUS']=line[0:4] - if line[0:4] in ("FAIL","PEND"): - test_status['COMMENT']+="Base test fail! " + test_status["STATUS"] = line[0:4] + if line[0:4] in ("FAIL", "PEND"): + test_status["COMMENT"] += "Base test fail! " break if "COMPARE_base_single_thread" in line: - test_status['STATUS']=line[0:4] - if line[0:4] in ("FAIL","PEND"): - test_status['COMMENT']+="Thread test fail! " + test_status["STATUS"] = line[0:4] + if line[0:4] in ("FAIL", "PEND"): + test_status["COMMENT"] += "Thread test fail! " break # # Do not include time comments. 
Just a preference to have cleaner comments in the test database # try: - if 'time=' not in line and 'GENERATE' not in line: - if 'BASELINE' not in line: - test_status['COMMENT']+=line.split(' ',3)[3]+' ' + if "time=" not in line and "GENERATE" not in line: + if "BASELINE" not in line: + test_status["COMMENT"] += line.split(" ", 3)[3] + " " else: - test_status['COMMENT']+=line.split(' ',4)[4]+' ' - except Exception: # Probably want to be more specific here + test_status["COMMENT"] += line.split(" ", 4)[4] + " " + except Exception: # Probably want to be more specific here pass # # Fill in the xml with the test results # - testxml.add_result(test_name,test_status) + testxml.add_result(test_name, test_status) return testxml + ############################################################################## def _main_func(): -############################################################################### + ############################################################################### testroot, testid, tagname, testtype, dryrun, dumpxml = parse_command_line(sys.argv) @@ -224,8 +240,7 @@ def _main_func(): # Dump xml to a file. # if dumpxml: - GenericXML.write(testxml,outfile="TestRecord.xml") - + GenericXML.write(testxml, outfile="TestRecord.xml") # # Prompt for username and password, then post the XML string to the test database website @@ -233,6 +248,7 @@ def _main_func(): if not dryrun: testxml.push2testdb() + ############################################################################### if __name__ == "__main__": diff --git a/CIME/Tools/wait_for_tests b/CIME/Tools/wait_for_tests index 2831c92b53e..ffe29f2727d 100755 --- a/CIME/Tools/wait_for_tests +++ b/CIME/Tools/wait_for_tests @@ -16,9 +16,9 @@ import argparse, sys, os ############################################################################### def parse_command_line(args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( -usage="""\n{0} [ ...] [--verbose] + usage="""\n{0} [ ...] [--verbose] OR {0} --help @@ -29,80 +29,152 @@ OR > {0} path/to/testdir \033[1;32m# Wait for all tests in a test area\033[0m > {0} path/to/testarea/*/TestStatus -""".format(os.path.basename(args[0])), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter -) +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument("paths", default=".", nargs="*", help="Paths to test directories or status file. 
Pwd default.") - - parser.add_argument("-n", "--no-wait", action="store_true", - help="Do not wait for tests to finish") - - parser.add_argument("--no-run", action="store_true", - help="Do not expect run phase to be completed") - - parser.add_argument("-t", "--check-throughput", action="store_true", - help="Fail if throughput check fails (fail if tests slow down)") - - parser.add_argument("-m", "--check-memory", action="store_true", - help="Fail if memory check fails (fail if tests footprint grows)") - - parser.add_argument("-i", "--ignore-namelist-diffs", action="store_true", - help="Do not fail a test if the only problem is diffing namelists") - - parser.add_argument("--ignore-memleak", action="store_true", - help="Do not fail a test if the only problem is a memleak") - - parser.add_argument("--force-log-upload", action="store_true", - help="Always upload logs to cdash, even if test passed") - - parser.add_argument("-b", "--cdash-build-name", - help="Build name, implies you want results send to Cdash") - - parser.add_argument("-p", "--cdash-project", default=CIME.wait_for_tests.E3SM_MAIN_CDASH, - help="The name of the CDash project where results should be uploaded") - - parser.add_argument("-g", "--cdash-build-group", default=CIME.wait_for_tests.CDASH_DEFAULT_BUILD_GROUP, - help="The build group to be used to display results on the CDash dashboard.") - - parser.add_argument("--timeout", type=int, - help="Timeout wait in seconds.") - - parser.add_argument("--update-success", action="store_true", - help="Record test success in baselines. Only the nightly process should use this in general.") + parser.add_argument( + "paths", + default=".", + nargs="*", + help="Paths to test directories or status file. Pwd default.", + ) + + parser.add_argument( + "-n", "--no-wait", action="store_true", help="Do not wait for tests to finish" + ) + + parser.add_argument( + "--no-run", action="store_true", help="Do not expect run phase to be completed" + ) + + parser.add_argument( + "-t", + "--check-throughput", + action="store_true", + help="Fail if throughput check fails (fail if tests slow down)", + ) + + parser.add_argument( + "-m", + "--check-memory", + action="store_true", + help="Fail if memory check fails (fail if tests footprint grows)", + ) + + parser.add_argument( + "-i", + "--ignore-namelist-diffs", + action="store_true", + help="Do not fail a test if the only problem is diffing namelists", + ) + + parser.add_argument( + "--ignore-memleak", + action="store_true", + help="Do not fail a test if the only problem is a memleak", + ) + + parser.add_argument( + "--force-log-upload", + action="store_true", + help="Always upload logs to CDash, even if test passed", + ) + + parser.add_argument( + "-b", + "--cdash-build-name", + help="Build name; implies you want results sent to CDash", + ) + + parser.add_argument( + "-p", + "--cdash-project", + default=CIME.wait_for_tests.E3SM_MAIN_CDASH, + help="The name of the CDash project where results should be uploaded", + ) + + parser.add_argument( + "-g", + "--cdash-build-group", + default=CIME.wait_for_tests.CDASH_DEFAULT_BUILD_GROUP, + help="The build group to be used to display results on the CDash dashboard.", + ) + + parser.add_argument("--timeout", type=int, help="Timeout wait in seconds.") + + parser.add_argument( + "--update-success", + action="store_true", + help="Record test success in baselines.
Only the nightly process should use this in general.", + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - return args.paths, args.no_wait, args.check_throughput, args.check_memory, args.ignore_namelist_diffs, args.ignore_memleak, args.cdash_build_name, args.cdash_project, args.cdash_build_group, args.timeout, args.force_log_upload, args.no_run, args.update_success + return ( + args.paths, + args.no_wait, + args.check_throughput, + args.check_memory, + args.ignore_namelist_diffs, + args.ignore_memleak, + args.cdash_build_name, + args.cdash_project, + args.cdash_build_group, + args.timeout, + args.force_log_upload, + args.no_run, + args.update_success, + ) + ############################################################################### def _main_func(description): -############################################################################### - test_paths, no_wait, check_throughput, check_memory, ignore_namelist_diffs, ignore_memleak, cdash_build_name, cdash_project, cdash_build_group, timeout, force_log_upload, no_run, update_success = \ - parse_command_line(sys.argv, description) - - sys.exit(0 if CIME.wait_for_tests.wait_for_tests(test_paths, - no_wait=no_wait, - check_throughput=check_throughput, - check_memory=check_memory, - ignore_namelists=ignore_namelist_diffs, - ignore_memleak=ignore_memleak, - cdash_build_name=cdash_build_name, - cdash_project=cdash_project, - cdash_build_group=cdash_build_group, - timeout=timeout, - force_log_upload=force_log_upload, - no_run=no_run, - update_success=update_success, - expect_test_complete=not no_wait) - else CIME.utils.TESTS_FAILED_ERR_CODE) + ############################################################################### + ( + test_paths, + no_wait, + check_throughput, + check_memory, + ignore_namelist_diffs, + ignore_memleak, + cdash_build_name, + cdash_project, + cdash_build_group, + timeout, + force_log_upload, + no_run, + update_success, + ) = parse_command_line(sys.argv, description) + + sys.exit( + 0 + if CIME.wait_for_tests.wait_for_tests( + test_paths, + no_wait=no_wait, + check_throughput=check_throughput, + check_memory=check_memory, + ignore_namelists=ignore_namelist_diffs, + ignore_memleak=ignore_memleak, + cdash_build_name=cdash_build_name, + cdash_project=cdash_project, + cdash_build_group=cdash_build_group, + timeout=timeout, + force_log_upload=force_log_upload, + no_run=no_run, + update_success=update_success, + expect_test_complete=not no_wait, + ) + else CIME.utils.TESTS_FAILED_ERR_CODE + ) + ############################################################################### -if (__name__ == "__main__"): +if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/xmlchange b/CIME/Tools/xmlchange index feca8d9954d..044495238c9 100755 --- a/CIME/Tools/xmlchange +++ b/CIME/Tools/xmlchange @@ -48,7 +48,12 @@ Examples: from standard_script_setup import * -from CIME.utils import expect, convert_to_type, append_case_status, get_batch_script_for_job +from CIME.utils import ( + expect, + convert_to_type, + append_case_status, + get_batch_script_for_job, +) from CIME.case import Case import re @@ -58,110 +63,177 @@ logger = logging.getLogger("xmlchange") ############################################################################### def parse_command_line(args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( - description=description, 
- formatter_class=argparse.RawTextHelpFormatter) + description=description, formatter_class=argparse.RawTextHelpFormatter + ) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument("listofsettings", nargs="?", default='', - help="Comma-separated list of settings in the form: var1=value,var2=value,...") + parser.add_argument( + "listofsettings", + nargs="?", + default="", + help="Comma-separated list of settings in the form: var1=value,var2=value,...", + ) - parser.add_argument("--caseroot", default=os.getcwd(), - help="Case directory to change.\n" - "Default is current directory.") + parser.add_argument( + "--caseroot", + default=os.getcwd(), + help="Case directory to change.\n" "Default is current directory.", + ) # Need to support older single dash version of arguments for compatibility with components - parser.add_argument("--append","-append", action="store_true", - help="Append to the existing value rather than overwriting it.") - - parser.add_argument("--subgroup","-subgroup", - help="Apply to this subgroup only.") - - parser.add_argument("--id", "-id", - help="The variable to set.\n" - "(Used in the alternative --id var --val value form, rather than\n" - "the recommended var=value form.)") - - parser.add_argument("--val","-val", - help="The value to set.\n" - "(Used in the alternative --id var --val value form, rather than\n" - "the recommended var=value form.)") - - parser.add_argument("--file", "-file", - help="XML file to edit.\n" - "Generally not needed, but can be specified to ensure that only the\n" - "expected file is being changed. (If a variable is not found in this file,\n" - "an error will be generated.)") - - parser.add_argument("--delimiter","-delimiter", type=str, default="," , - help="Delimiter string in listofvalues.\n" - "Default is ','.") - - parser.add_argument("--dryrun","-dryrun", action="store_true", - help="Parse settings and print key-value pairs, but don't actually change anything.") - - parser.add_argument("--noecho", "-noecho", action="store_true", - help="Do not update CaseStatus with this change.\n" - "This option is mainly meant to be used by cime scripts: the 'paper trail' in\n" - "CaseStatus is meant to show changes made by the user, so we generally don't\n" - "want this to be contaminated by changes made automatically by cime scripts.") - - parser.add_argument("-f","--force", action="store_true", - help="Ignore typing checks and store value.") - - parser.add_argument("-loglevel", - help="Ignored, only for backwards compatibility.") + parser.add_argument( + "--append", + "-append", + action="store_true", + help="Append to the existing value rather than overwriting it.", + ) + + parser.add_argument("--subgroup", "-subgroup", help="Apply to this subgroup only.") + + parser.add_argument( + "--id", + "-id", + help="The variable to set.\n" + "(Used in the alternative --id var --val value form, rather than\n" + "the recommended var=value form.)", + ) + + parser.add_argument( + "--val", + "-val", + help="The value to set.\n" + "(Used in the alternative --id var --val value form, rather than\n" + "the recommended var=value form.)", + ) + + parser.add_argument( + "--file", + "-file", + help="XML file to edit.\n" + "Generally not needed, but can be specified to ensure that only the\n" + "expected file is being changed. 
(If a variable is not found in this file,\n" + "an error will be generated.)", + ) + + parser.add_argument( + "--delimiter", + "-delimiter", + type=str, + default=",", + help="Delimiter string in listofvalues.\n" "Default is ','.", + ) + + parser.add_argument( + "--dryrun", + "-dryrun", + action="store_true", + help="Parse settings and print key-value pairs, but don't actually change anything.", + ) + + parser.add_argument( + "--noecho", + "-noecho", + action="store_true", + help="Do not update CaseStatus with this change.\n" + "This option is mainly meant to be used by cime scripts: the 'paper trail' in\n" + "CaseStatus is meant to show changes made by the user, so we generally don't\n" + "want this to be contaminated by changes made automatically by cime scripts.", + ) + + parser.add_argument( + "-f", + "--force", + action="store_true", + help="Ignore typing checks and store value.", + ) + + parser.add_argument("-loglevel", help="Ignored, only for backwards compatibility.") args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) listofsettings = [] - if( len(args.listofsettings )): + if len(args.listofsettings): expect(args.id is None, "Cannot specify both listofsettings and --id") expect(args.val is None, "Cannot specify both listofsettings and --val") delimiter = re.escape(args.delimiter) - listofsettings = re.split(r'(? %s " % (argstr) append_case_status("xmlchange", "success", msg=msg, caseroot=caseroot) + def _main_func(description): # pylint: disable=unused-variable - caseroot, listofsettings, xmlfile, xmlid, xmlval, subgroup, append, noecho, force , dry = parse_command_line(sys.argv, description) - - xmlchange(caseroot, listofsettings, xmlfile, xmlid, xmlval, subgroup, append, noecho, force, dry) - -if (__name__ == "__main__"): + ( + caseroot, + listofsettings, + xmlfile, + xmlid, + xmlval, + subgroup, + append, + noecho, + force, + dry, + ) = parse_command_line(sys.argv, description) + + xmlchange( + caseroot, + listofsettings, + xmlfile, + xmlid, + xmlval, + subgroup, + append, + noecho, + force, + dry, + ) + + +if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/xmlconvertors/config_pes_converter.py b/CIME/Tools/xmlconvertors/config_pes_converter.py index 194e01dff19..74986646984 100755 --- a/CIME/Tools/xmlconvertors/config_pes_converter.py +++ b/CIME/Tools/xmlconvertors/config_pes_converter.py @@ -15,23 +15,31 @@ from distutils.spawn import find_executable import xml.etree.ElementTree as ET import grid_xml_converter + LOGGER = logging.getLogger(__name__) ############################################################################### def parse_command_line(args): -############################################################################### - parser = argparse.ArgumentParser(description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter) + ############################################################################### + parser = argparse.ArgumentParser( + description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter + ) CIME.utils.setup_standard_logging_options(parser) # Set command line options - parser.add_argument("-cime2file", "--cime2file", - help="location of config_grid.xml file in CIME2 format", - required=True) - parser.add_argument("-cime5file", "--cime5file", - help="location of config_grids.xml file in CIME5 format", - required=True) + parser.add_argument( + "-cime2file", + "--cime2file", + help="location of config_grid.xml file in CIME2 format", + required=True, + ) + parser.add_argument( + 
"-cime5file", + "--cime5file", + help="location of config_grids.xml file in CIME5 format", + required=True, + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) @@ -41,8 +49,9 @@ def parse_command_line(args): return args.cime2file, args.cime5file + class PesNode(grid_xml_converter.DataNode): - def __init__(self,root): + def __init__(self, root): self.ignore = False super(PesNode, self).__init__(root) @@ -55,43 +64,44 @@ def setattrib(self, node, tag, key=None): if key in self.data: node.set(tag, self.data[key]) else: - node.set(tag, 'any') + node.set(tag, "any") def keyvalue(self): - return "{}:{}:{}:{}".format(self.data['gridname'], self.data['machname'], - self.data['pesize'], self.data['compset']) - + return "{}:{}:{}:{}".format( + self.data["gridname"], + self.data["machname"], + self.data["pesize"], + self.data["compset"], + ) def to_cime5(self): - gridnode = ET.Element('grid') - self.setattrib(gridnode, 'name', 'gridname') - machnode = ET.SubElement(gridnode, 'mach') - self.setattrib(machnode, 'name', 'machname') - pesnode = ET.SubElement(machnode, 'pes') - self.setattrib(pesnode, 'compset') - self.setattrib(pesnode, 'pesize') - commentnode = ET.SubElement(pesnode, 'comment') + gridnode = ET.Element("grid") + self.setattrib(gridnode, "name", "gridname") + machnode = ET.SubElement(gridnode, "mach") + self.setattrib(machnode, "name", "machname") + pesnode = ET.SubElement(machnode, "pes") + self.setattrib(pesnode, "compset") + self.setattrib(pesnode, "pesize") + commentnode = ET.SubElement(pesnode, "comment") commentnode.text = "none" - for d in ['ntasks', 'nthrds', 'rootpe']: + for d in ["ntasks", "nthrds", "rootpe"]: newnode = ET.SubElement(pesnode, d) - for comp in ['atm', 'lnd', 'rof', 'ice', 'ocn', 'glc', 'wav', 'cpl', 'iac']: - tag = d + '_' + comp + for comp in ["atm", "lnd", "rof", "ice", "ocn", "glc", "wav", "cpl", "iac"]: + tag = d + "_" + comp if tag in self.data[d]: ET.SubElement(newnode, tag).text = str(self.data[d][tag]) return gridnode - - def __eq__(self, other): - for k in ['gridname', 'machname', 'pesize', 'compset']: + for k in ["gridname", "machname", "pesize", "compset"]: if k not in self.data and k not in other.data: continue if k not in self.data or k not in other.data: return False if self.data[k] != other.data[k]: return False - for d in ['ntasks', 'nthrds', 'rootpe']: + for d in ["ntasks", "nthrds", "rootpe"]: for k in self.data[d]: if k not in self.data[d] and k not in other.data[d]: continue @@ -101,65 +111,70 @@ def __eq__(self, other): return False return True + class Cime5PesNode(PesNode): def set_data(self, xmlnode): - for d in ['ntasks', 'nthrds', 'rootpe']: + for d in ["ntasks", "nthrds", "rootpe"]: self.data[d] = {} self.xmlnode = xmlnode - self.data['gridname'] = xmlnode.get('name') - machnode = xmlnode.find('mach') - self.data['machname'] = machnode.get('name') - pesnode = machnode.find('pes') - self.data['pesize'] = pesnode.get('pesize') - self.data['compset'] = pesnode.get('compset') - commentnode = pesnode.find('comment') + self.data["gridname"] = xmlnode.get("name") + machnode = xmlnode.find("mach") + self.data["machname"] = machnode.get("name") + pesnode = machnode.find("pes") + self.data["pesize"] = pesnode.get("pesize") + self.data["compset"] = pesnode.get("compset") + commentnode = pesnode.find("comment") if commentnode is not None: - self.data['comment'] = commentnode.text - for tag in ['ntasks', 'nthrds', 'rootpe']: + self.data["comment"] = commentnode.text + for tag in ["ntasks", "nthrds", "rootpe"]: node = 
pesnode.find(tag) for child in node.getchildren(): self.data[tag][child.tag] = child.text.strip() + class Cime2PesNode(PesNode): ISDEFAULT = "-999999" - DEFAULTS = {'ntasks':'16', 'nthrds':'1', 'rootpe':'0'} + DEFAULTS = {"ntasks": "16", "nthrds": "1", "rootpe": "0"} + def set_data(self, xmlnode): # Set Defaults - for d in ['ntasks', 'nthrds', 'rootpe']: + for d in ["ntasks", "nthrds", "rootpe"]: self.data[d] = {} - for comp in ['atm', 'lnd', 'ice', 'ocn', 'glc', 'rof', 'wav', 'cpl', 'iac']: - self.data['ntasks']['ntasks_' + comp] = self.ISDEFAULT - self.data['nthrds']['nthrds_' + comp] = self.ISDEFAULT - self.data['rootpe']['rootpe_' + comp] = self.ISDEFAULT + for comp in ["atm", "lnd", "ice", "ocn", "glc", "rof", "wav", "cpl", "iac"]: + self.data["ntasks"]["ntasks_" + comp] = self.ISDEFAULT + self.data["nthrds"]["nthrds_" + comp] = self.ISDEFAULT + self.data["rootpe"]["rootpe_" + comp] = self.ISDEFAULT # Read in node self.xmlnode = xmlnode - for checktag in ['OS', 'TEST']: + for checktag in ["OS", "TEST"]: check = xmlnode.get(checktag) if check is not None: self.ignore = True return - self.data['machname'] = xmlnode.get('MACH', default='any') - self.data['gridname'] = xmlnode.get('GRID', default='any') - self.data['pesize'] = xmlnode.get('PECOUNT', default='any') - self.data['compset'] = xmlnode.get('CCSM_LCOMPSET', default='any') - for d in ['ntasks', 'nthrds', 'rootpe']: - for comp in ['atm', 'lnd', 'ice', 'ocn', 'glc', 'rof', 'wav', 'cpl', 'iac']: - tag = d + '_' + comp + self.data["machname"] = xmlnode.get("MACH", default="any") + self.data["gridname"] = xmlnode.get("GRID", default="any") + self.data["pesize"] = xmlnode.get("PECOUNT", default="any") + self.data["compset"] = xmlnode.get("CCSM_LCOMPSET", default="any") + for d in ["ntasks", "nthrds", "rootpe"]: + for comp in ["atm", "lnd", "ice", "ocn", "glc", "rof", "wav", "cpl", "iac"]: + tag = d + "_" + comp node = xmlnode.find(tag.upper()) if node is not None: val = node.text.strip() - if val[0] == '$': + if val[0] == "$": resolvetag = val[1:] if resolvetag == "MAX_TASKS_PER_NODE": - val = '-1' + val = "-1" elif resolvetag == "MAX_GPUS_PER_NODE": - val = '-1' + val = "-1" else: refnode = xmlnode.find(resolvetag) if refnode is None: # use default value - val = self.data[resolvetag.lower()[0:6]][resolvetag.lower()] + val = self.data[resolvetag.lower()[0:6]][ + resolvetag.lower() + ] else: val = xmlnode.find(resolvetag).text.strip() @@ -167,29 +182,30 @@ def set_data(self, xmlnode): # Set to defaults. 
CIME2 had unresolved defaults that referred # back to the ATM value, so setting just the ATM value would in effect # set all values - for d in ['ntasks', 'nthrds', 'rootpe']: - atmtag = d + '_atm' + for d in ["ntasks", "nthrds", "rootpe"]: + atmtag = d + "_atm" if self.data[d][atmtag] == self.ISDEFAULT: self.data[d][atmtag] = self.DEFAULTS[d] - for comp in ['lnd', 'rof', 'ice', 'ocn', 'glc', 'wav', 'cpl', 'iac']: - tag = d + '_' + comp + for comp in ["lnd", "rof", "ice", "ocn", "glc", "wav", "cpl", "iac"]: + tag = d + "_" + comp if self.data[d][tag] == self.ISDEFAULT: self.data[d][tag] = self.data[d][atmtag] - - class PesTree(grid_xml_converter.DataTree): def __init__(self, xmlfilename): # original xml file has bad comments import re, StringIO + if os.access(xmlfilename, os.R_OK): - with open(xmlfilename, 'r') as xmlfile: + with open(xmlfilename, "r") as xmlfile: t1 = xmlfile.read() - t2 = re.sub(r'(?<=)', - lambda x: x.group(0).replace('-', ' '), t2) + t2 = re.sub( + r"(?<=)", lambda x: x.group(0).replace("-", " "), t2 + ) tempxml = StringIO.StringIO(t3) super(PesTree, self).__init__(tempxml) tempxml.close() @@ -200,11 +216,11 @@ def __init__(self, xmlfilename): def populate(self): if self.root is None: return - xmlnodes = self.root.findall('grid') + xmlnodes = self.root.findall("grid") nodeclass = Cime5PesNode if len(xmlnodes) == 0: - xmlnodes = self.root.findall('pes') + xmlnodes = self.root.findall("pes") nodeclass = Cime2PesNode for xmlnode in xmlnodes: datanode = nodeclass(self.root) @@ -212,21 +228,22 @@ def populate(self): if not datanode.ignore: self.nodes.append(datanode) - - def writexml(self, addlist, newfilename): - root = ET.Element('config_pes') + root = ET.Element("config_pes") for a, b in addlist: if b is not None: - root.append(ET.Element('REPLACE')) + root.append(ET.Element("REPLACE")) root.append(b.to_cime5()) - root.append(ET.Element('WITH')) + root.append(ET.Element("WITH")) if a is not None: root.append(a.to_cime5()) xmllint = find_executable("xmllint") if xmllint is not None: - run_cmd("{} --format --output {} -".format(xmllint, newfilename), - input_str=ET.tostring(root)) + run_cmd( + "{} --format --output {} -".format(xmllint, newfilename), + input_str=ET.tostring(root), + ) + def diff_tree(atree, btree): afound = [] @@ -242,7 +259,6 @@ def diff_tree(atree, btree): else: bkeys.append(bnode.keyvalue()) - for anode in atree.nodes: for bnode in btree.nodes: if bnode in bfound: @@ -262,8 +278,6 @@ def diff_tree(atree, btree): addlist.append([anode, None]) - - LOGGER.info("Number of ok nodes: {:d}".format(len(oklist))) LOGGER.info("Number of wrong nodes: {:d}".format(len(fixlist))) LOGGER.info("Number of missing nodes: {:d}".format(len(addlist))) @@ -283,10 +297,8 @@ def pes_compare(): LOGGER.info("Comparing config_pes files...") oklist, fixlist, addlist = diff_tree(cime2pestree, cime5pestree) - cime5pestree.postprocess(fixlist, addlist, "tempgrid.xml", cime5file, - "badgrid.xml") + cime5pestree.postprocess(fixlist, addlist, "tempgrid.xml", cime5file, "badgrid.xml") + if __name__ == "__main__": pes_compare() - - diff --git a/CIME/Tools/xmlconvertors/convert-grid-v1-to-v2 b/CIME/Tools/xmlconvertors/convert-grid-v1-to-v2 index 6de4546ca1a..53b22d0e4a6 100755 --- a/CIME/Tools/xmlconvertors/convert-grid-v1-to-v2 +++ b/CIME/Tools/xmlconvertors/convert-grid-v1-to-v2 @@ -5,6 +5,7 @@ Convert a grid file from v1 to v2. 
""" import argparse, sys, os + sys.path.append(os.path.join(os.path.dirname(__file__), "..")) from standard_script_setup import * @@ -16,29 +17,31 @@ from collections import OrderedDict ############################################################################### def parse_command_line(args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( usage="""\n{0} OR {0} --help -""".format(os.path.basename(args[0])), +""".format( + os.path.basename(args[0]) + ), description=description, - formatter_class=argparse.ArgumentDefaultsHelpFormatter + formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument("v1file", - help="v1 file path") + parser.add_argument("v1file", help="v1 file path") args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.v1file + ############################################################################### def convert_gridmaps(v1file_obj, v2file_obj): -############################################################################### - gridmap_data = [] # (attribs, {name->file}) + ############################################################################### + gridmap_data = [] # (attribs, {name->file}) v1gridmaps = v1file_obj.get_child(name="gridmaps") v1gridmap = v1file_obj.get_children(name="gridmap", root=v1gridmaps) @@ -46,35 +49,39 @@ def convert_gridmaps(v1file_obj, v2file_obj): attribs = v1file_obj.attrib(gridmap_block) children = [] for child in v1file_obj.get_children(root=gridmap_block): - children.append( (v1file_obj.name(child), v1file_obj.text(child)) ) + children.append((v1file_obj.name(child), v1file_obj.text(child))) - gridmap_data.append( (attribs, children) ) + gridmap_data.append((attribs, children)) v2gridmaps = v2file_obj.make_child("gridmaps") for attribs, children in gridmap_data: gridmap = v2file_obj.make_child("gridmap", attributes=attribs, root=v2gridmaps) for name, text in children: - v2file_obj.make_child("map", attributes={"name": name}, root=gridmap, text=text) + v2file_obj.make_child( + "map", attributes={"name": name}, root=gridmap, text=text + ) + ############################################################################### def convert_domains(v1file_obj, v2file_obj): -############################################################################### - domain_data = [] # (name, nx, ny, {filemask->mask->file}, {pathmask->mask->path}, desc) + ############################################################################### + domain_data = ( + [] + ) # (name, nx, ny, {filemask->mask->file}, {pathmask->mask->path}, desc) v1domains = v1file_obj.get_child(name="domains") v1domain = v1file_obj.get_children(name="domain", root=v1domains) for domain_block in v1domain: attrib = v1file_obj.attrib(domain_block) - expect(attrib.keys() == ["name"], - "Unexpected attribs: {}".format(attrib)) + expect(attrib.keys() == ["name"], "Unexpected attribs: {}".format(attrib)) name = attrib["name"] desc = v1file_obj.get_element_text("desc", root=domain_block) - sup = v1file_obj.get_element_text("support", root=domain_block) - nx = v1file_obj.get_element_text("nx", root=domain_block) - ny = v1file_obj.get_element_text("ny", root=domain_block) + sup = v1file_obj.get_element_text("support", root=domain_block) + nx = v1file_obj.get_element_text("nx", root=domain_block) + ny = v1file_obj.get_element_text("ny", 
root=domain_block) if sup and not desc: desc = sup @@ -89,19 +96,26 @@ def convert_domains(v1file_obj, v2file_obj): mask_key, mask_value = attrib.items()[0] component, _ = mask_key.split("_") - masks.setdefault(component, OrderedDict())[mask_value] = v1file_obj.text(child) + masks.setdefault(component, OrderedDict())[ + mask_value + ] = v1file_obj.text(child) for child in v1file_obj.get_children(root=domain_block): - expect(v1file_obj.name(child) in ["nx", "ny", "file", "path", "desc", "support"], - "Unhandled child of grid '{}'".format(v1file_obj.name(child))) + expect( + v1file_obj.name(child) + in ["nx", "ny", "file", "path", "desc", "support"], + "Unhandled child of grid '{}'".format(v1file_obj.name(child)), + ) - domain_data.append( (name, nx, ny, file_masks, path_masks, desc) ) + domain_data.append((name, nx, ny, file_masks, path_masks, desc)) v2domains = v2file_obj.make_child("domains") for name, nx, ny, file_masks, path_masks, desc in domain_data: - attribs = {"name":name} if name else {} - domain_block = v2file_obj.make_child("domain", attributes=attribs, root=v2domains) + attribs = {"name": name} if name else {} + domain_block = v2file_obj.make_child( + "domain", attributes=attribs, root=v2domains + ) v2file_obj.make_child("nx", root=domain_block, text=nx) v2file_obj.make_child("ny", root=domain_block, text=ny) @@ -119,7 +133,9 @@ def convert_domains(v1file_obj, v2file_obj): fullfile = os.path.join(path, filename) mask_value = mask_value if mask_value not in ["reg", name] else "" - file_to_attrib.setdefault(fullfile, OrderedDict()).setdefault(mask_value, []).append(component) + file_to_attrib.setdefault(fullfile, OrderedDict()).setdefault( + mask_value, [] + ).append(component) for filename, masks in file_to_attrib.iteritems(): attrib = {} @@ -130,15 +146,18 @@ def convert_domains(v1file_obj, v2file_obj): if mask: attrib["mask"] = mask - v2file_obj.make_child("file", attributes=attrib, root=domain_block, text=filename) + v2file_obj.make_child( + "file", attributes=attrib, root=domain_block, text=filename + ) if desc: v2file_obj.make_child("desc", root=domain_block, text=desc) + ############################################################################### def convert_grids(v1file_obj, v2file_obj): -############################################################################### - grid_data = [] # (compset, lname, sname, alias, support) + ############################################################################### + grid_data = [] # (compset, lname, sname, alias, support) v1grids = v1file_obj.get_child(name="grids") v1grid = v1file_obj.get_children(name="grid", root=v1grids) @@ -146,17 +165,20 @@ def convert_grids(v1file_obj, v2file_obj): attrib = v1file_obj.attrib(grid_block) compset = attrib["compset"] if "compset" in attrib else None - expect(attrib.keys() in [ ["compset"], [] ], - "Unexpected attribs: {}".format(attrib)) + expect( + attrib.keys() in [["compset"], []], "Unexpected attribs: {}".format(attrib) + ) - lname = v1file_obj.get_element_text("lname", root=grid_block) - sname = v1file_obj.get_element_text("sname", root=grid_block) - alias = v1file_obj.get_element_text("alias", root=grid_block) + lname = v1file_obj.get_element_text("lname", root=grid_block) + sname = v1file_obj.get_element_text("sname", root=grid_block) + alias = v1file_obj.get_element_text("alias", root=grid_block) support = v1file_obj.get_element_text("support", root=grid_block) for child in v1file_obj.get_children(root=grid_block): - expect(v1file_obj.name(child) in ["lname", "sname", "alias", 
"support"], - "Unhandled child of grid '{}'".format(v1file_obj.name(child))) + expect( + v1file_obj.name(child) in ["lname", "sname", "alias", "support"], + "Unhandled child of grid '{}'".format(v1file_obj.name(child)), + ) grid_data.append((compset, lname, sname, alias, support)) @@ -166,8 +188,8 @@ def convert_grids(v1file_obj, v2file_obj): for compset, lname, sname, alias, support in grid_data: v2_alias = alias if alias else sname - attribs = {"alias":v2_alias} if v2_alias else {} - attribs.update({"compset":compset} if compset else {}) + attribs = {"alias": v2_alias} if v2_alias else {} + attribs.update({"compset": compset} if compset else {}) v2grid = v2file_obj.make_child("model_grid", attributes=attribs, root=v2grids) pieces_raw = lname.split("_") @@ -176,9 +198,17 @@ def convert_grids(v1file_obj, v2file_obj): if "%" in raw_piece: pieces.append(raw_piece) else: - pieces[-1] += ("_" + raw_piece) - - ctype_map = {"a":"atm", "l":"lnd", "oi":"ocnice", "r":"rof", "m":"mask", "g":"glc", "w":"wav"} + pieces[-1] += "_" + raw_piece + + ctype_map = { + "a": "atm", + "l": "lnd", + "oi": "ocnice", + "r": "rof", + "m": "mask", + "g": "glc", + "w": "wav", + } mask = None for piece in pieces: ctype, data = piece.split("%") @@ -187,16 +217,24 @@ def convert_grids(v1file_obj, v2file_obj): expect(mask is None, "Multiple masks") mask = data else: - v2file_obj.make_child("grid", attributes={"name":cname}, text=data, root=v2grid) + v2file_obj.make_child( + "grid", attributes={"name": cname}, text=data, root=v2grid + ) if mask is not None: v2file_obj.make_child("mask", text=mask, root=v2grid) + ############################################################################### def convert_to_v2(v1file): -############################################################################### + ############################################################################### v1file_obj = GenericXML(infile=v1file, read_only=True) - v2file_obj = GenericXML(infile="out.xml", read_only=False, root_name_override="grid_data", root_attrib_override={"version":"2.0"}) + v2file_obj = GenericXML( + infile="out.xml", + read_only=False, + root_name_override="grid_data", + root_attrib_override={"version": "2.0"}, + ) convert_grids(v1file_obj, v2file_obj) @@ -206,12 +244,14 @@ def convert_to_v2(v1file): v2file_obj.write(outfile=sys.stdout) + ############################################################################### def _main_func(description): -############################################################################### + ############################################################################### v1file = parse_command_line(sys.argv, description) convert_to_v2(v1file) -if (__name__ == "__main__"): + +if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/xmlconvertors/grid_xml_converter.py b/CIME/Tools/xmlconvertors/grid_xml_converter.py index 654fc4bbb4a..59f4d17f6ba 100755 --- a/CIME/Tools/xmlconvertors/grid_xml_converter.py +++ b/CIME/Tools/xmlconvertors/grid_xml_converter.py @@ -26,32 +26,39 @@ ############################################################################### def parse_command_line(args): -############################################################################### - parser = argparse.ArgumentParser(description=__doc__, - formatter_class=argparse.RawDescriptionHelpFormatter) + ############################################################################### + parser = argparse.ArgumentParser( + description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter + ) 
CIME.utils.setup_standard_logging_options(parser) # Set command line options - parser.add_argument("-cime2file", "--cime2file", - help="location of config_grid.xml file in CIME2 format", - required=True) - parser.add_argument("-cime5file", "--cime5file", - help="location of config_grids.xml file in CIME5 format", - required=True) + parser.add_argument( + "-cime2file", + "--cime2file", + help="location of config_grid.xml file in CIME2 format", + required=True, + ) + parser.add_argument( + "-cime5file", + "--cime5file", + help="location of config_grids.xml file in CIME5 format", + required=True, + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.cime2file, args.cime5file - class DataNode(object): """ non-denominational dictionary of node data: """ + def __init__(self, xmlroot): - self.xmlroot = xmlroot # in case additional information needed + self.xmlroot = xmlroot # in case additional information needed self.data = {} self.name = None self.xmlnode = None @@ -59,27 +66,26 @@ def __init__(self, xmlroot): def keyvalue(self): return self.data[self.key] + class GridNode(DataNode): - key = 'lname' + key = "lname" + def __str__(self): return ET.tostring(self.xmlnode) - def to_cime5(self): - node = ET.Element('grid') - if 'compset' in self.data and self.data['compset'] is not None: - node.set('compset', self.data['compset']) + node = ET.Element("grid") + if "compset" in self.data and self.data["compset"] is not None: + node.set("compset", self.data["compset"]) - for k in ['sname', 'lname', 'alias', 'support']: + for k in ["sname", "lname", "alias", "support"]: if k in self.data and self.data[k] is not None: ET.SubElement(node, k).text = self.data[k] return node - - def __eq__(self, other): - for k in ['lname', 'sname', 'compset', 'alias']: + for k in ["lname", "sname", "compset", "alias"]: if k not in self.data and k not in other.data: continue if k not in self.data or k not in other.data: @@ -88,97 +94,114 @@ def __eq__(self, other): return False return True + class Cime2GridNode(GridNode): def set_data(self, xmlnode): self.xmlnode = xmlnode if xmlnode.text is not None: - self.data['lname'] = xmlnode.text - for k in ['lname', 'sname', 'alias', 'compset']: + self.data["lname"] = xmlnode.text + for k in ["lname", "sname", "alias", "compset"]: tmpval = xmlnode.get(k) if tmpval is not None: self.data[k] = tmpval.strip() - tmpval = xmlnode.get('support_level') + tmpval = xmlnode.get("support_level") if tmpval is not None: - self.data['support'] = tmpval.strip() + self.data["support"] = tmpval.strip() + class Cime5GridNode(GridNode): def set_data(self, xmlnode): self.xmlnode = xmlnode - for k in ['sname', 'lname', 'support', 'alias']: + for k in ["sname", "lname", "support", "alias"]: if xmlnode.find(k) is not None: self.data[k] = xmlnode.find(k).text.strip() - if xmlnode.get('compset') is not None: - self.data['compset'] = xmlnode.get('compset').strip() + if xmlnode.get("compset") is not None: + self.data["compset"] = xmlnode.get("compset").strip() + class GridmapNode(DataNode): def set_data(self, xmlnode): self.keys = [] - self.data['maps'] = {} + self.data["maps"] = {} self.xmlnode = xmlnode - for k in ['atm_grid', 'lnd_grid', 'ocn_grid', 'rof_grid', 'glc_grid', - 'wav_grid', 'ice_grid', 'iac_grid' ]: + for k in [ + "atm_grid", + "lnd_grid", + "ocn_grid", + "rof_grid", + "glc_grid", + "wav_grid", + "ice_grid", + "iac_grid", + ]: att = xmlnode.get(k) if att is not None: self.data[k] = att.strip() self.keys.append(k) self.sort() for child in
xmlnode.getchildren():
-            self.data['maps'][child.tag] = child.text.strip()
+            self.data["maps"][child.tag] = child.text.strip()
+
     def sort(self):
         newlist = sorted(self.keys, key=operator.itemgetter(0))
         self.keys = newlist
+
     def to_cime5(self):
-        node = ET.Element('gridmap')
-        for k in ['atm_grid', 'lnd_grid', 'ocn_grid', 'rof_grid', 'glc_grid']:
+        node = ET.Element("gridmap")
+        for k in ["atm_grid", "lnd_grid", "ocn_grid", "rof_grid", "glc_grid"]:
             if k in self.data:
                 node.set(k, self.data[k])
-        for key, value in self.data['maps'].items():
+        for key, value in self.data["maps"].items():
             ET.SubElement(node, key).text = value
         return node
+
     def __str__(self):
         return str(self.keyvalue()) + str(self.data)
+
     def __eq__(self, other):
         if self.keyvalue() != other.keyvalue():
             return False
-        if len(self.data['maps']) != len(other.data['maps']):
+        if len(self.data["maps"]) != len(other.data["maps"]):
             return False
-        for key, value in self.data['maps'].items():
-            if key not in other.data['maps'] or value != other.data['maps'][key]:
+        for key, value in self.data["maps"].items():
+            if key not in other.data["maps"] or value != other.data["maps"][key]:
                 return False
         return True

     def keyvalue(self):
-        return "{}:{}:{}:{}".format(self.keys[0], self.data[self.keys[0]],
-                                    self.keys[1], self.data[self.keys[1]])
+        return "{}:{}:{}:{}".format(
+            self.keys[0], self.data[self.keys[0]], self.keys[1], self.data[self.keys[1]]
+        )
+
+
 class DomainNode(DataNode):
     """
     non-denominational dictionary of domain node information:
     """
-    key = 'name'
+
+    key = "name"

     def to_cime5(self):
-        node = ET.Element('domain')
-        node.set('name', self.data['name'])
-        for tag in ['nx', 'ny', 'desc', 'support']:
+        node = ET.Element("domain")
+        node.set("name", self.data["name"])
+        for tag in ["nx", "ny", "desc", "support"]:
             if tag in self.data:
                 ET.SubElement(node, tag).text = self.data[tag]
-        for fop in ['file', 'path']:
+        for fop in ["file", "path"]:
             if fop in self.data:
                 for comp, mask, filename in self.data[fop]:
-                    attribs = {'{}{}_mask'.format(comp,mask)}
+                    attribs = {"{}{}_mask".format(comp, mask)}
                     ET.SubElement(node, fop, attribs).text = filename
         return node

-
-    def sort(self):
-        for fop in ['file', 'path']:
+    def sort(self):
+        for fop in ["file", "path"]:
             newlist = sorted(self.data[fop], key=operator.itemgetter(0))
             self.data[fop] = newlist

     def __eq__(self, other):
         # Check for different name, nx, or ny values
-        for k in ['name', 'nx', 'ny']:
+        for k in ["name", "nx", "ny"]:
             if k not in self.data and k not in other.data:
                 continue
             if k not in self.data or k not in other.data:
@@ -186,7 +209,7 @@ def __eq__(self, other):
             if self.data[k] != other.data[k]:
                 return False
         # Compare (sorted) file, path lists for equality
-        for fop in ['file', 'path']:
+        for fop in ["file", "path"]:
             if fop not in self.data and fop not in other.data:
                 continue
             if fop not in self.data or fop not in other.data:
@@ -204,51 +227,56 @@ def __eq__(self, other):
     def __str__(self):
         return str(self.data)

+
 class Cime2DomainNode(DomainNode):
     """
     Read in a domain node from Cime2 xml format
     """
+
     def set_data(self, xmlnode):
         self.xmlnode = xmlnode
-        self.data['name'] = xmlnode.get('name').strip()
-        self.data['file'] = []
-        self.data['path'] = []
-        for tag in ['nx', 'ny', 'desc']:
+        self.data["name"] = xmlnode.get("name").strip()
+        self.data["file"] = []
+        self.data["path"] = []
+        for tag in ["nx", "ny", "desc"]:
             child = xmlnode.find(tag)
             if child is not None:
                 self.data[tag] = child.text
         # Find any griddom nodes that match this name
-        griddoms = 
self.xmlroot.findall('.griddom[@grid="{}"]'.format(self.data['name'])) + griddoms = self.xmlroot.findall( + '.griddom[@grid="{}"]'.format(self.data["name"]) + ) for gd in griddoms: - mask = gd.get('mask') - for comp in ['ATM', 'LND', 'OCN', 'ICE']: - for fop in ['FILE', 'PATH']: - tag = '{}_DOMAIN_{}'.format(comp, fop) + mask = gd.get("mask") + for comp in ["ATM", "LND", "OCN", "ICE"]: + for fop in ["FILE", "PATH"]: + tag = "{}_DOMAIN_{}".format(comp, fop) n = gd.find(tag) if n is not None: - self.data[fop.lower()].append([comp.lower(), mask, - n.text]) + self.data[fop.lower()].append([comp.lower(), mask, n.text]) # sort the file and path entries self.sort() + class Cime5DomainNode(DomainNode): """ Read in a domain node from Cime5 xml format """ + def set_data(self, xmlnode): self.xmlnode = xmlnode - self.data['name'] = xmlnode.get('name') - self.data['file'] = [] - self.data['path'] = [] - for tag in ['nx', 'ny', 'desc', 'support']: + self.data["name"] = xmlnode.get("name") + self.data["file"] = [] + self.data["path"] = [] + for tag in ["nx", "ny", "desc", "support"]: child = xmlnode.find(tag) if child is not None: self.data[tag] = child.text - for comp in ['lnd', 'atm', 'ocn', 'ice']: - masktag = '{}_mask'.format(comp) - for fop in ['file', 'path']: - fopnodes = xmlnode.findall('{}[@{}]'.format(fop, masktag)) + for comp in ["lnd", "atm", "ocn", "ice"]: + masktag = "{}_mask".format(comp) + for fop in ["file", "path"]: + fopnodes = xmlnode.findall("{}[@{}]".format(fop, masktag)) for n in fopnodes: mask = n.get(masktag) filename = n.text.strip() @@ -257,11 +285,12 @@ def set_data(self, xmlnode): # sort the file and path entries self.sort() + class DataTree(object): def __init__(self, xmlfilename): self.xmlfilename = xmlfilename - if hasattr(xmlfilename, 'read') or os.access(xmlfilename, os.R_OK): + if hasattr(xmlfilename, "read") or os.access(xmlfilename, os.R_OK): self.doc = ET.parse(xmlfilename) else: self.doc = ET.ElementTree() @@ -278,13 +307,12 @@ def next(self): raise StopIteration if self.index < len(self.nodes): self.index += 1 - return self.nodes[self.index-1] + return self.nodes[self.index - 1] def __iter__(self): return self - def postprocess(self, fixlist, addlist, newxmlfile, currentxmlfile, - badxmlfile): + def postprocess(self, fixlist, addlist, newxmlfile, currentxmlfile, badxmlfile): if len(addlist) > 0: logger.info("\n\nWriting suggested nodes to {}".format(newxmlfile)) logger.info("Copy 'grid' nodes into corresponding location in") @@ -296,14 +324,15 @@ def postprocess(self, fixlist, addlist, newxmlfile, currentxmlfile, logger.info("config/acme/config_grids.xml. 
These nodes") logger.info("have been written to {}".format(badxmlfile)) + class GridTree(DataTree): def populate(self): if self.root is None: return - xmlnodes = self.root.findall('GRID') + xmlnodes = self.root.findall("GRID") nodeclass = Cime2GridNode if len(xmlnodes) == 0: - xmlnodes = self.root.findall('./grids/grid') + xmlnodes = self.root.findall("./grids/grid") nodeclass = Cime5GridNode for xmlnode in xmlnodes: @@ -312,20 +341,22 @@ def populate(self): self.nodes.append(datanode) def writexml(self, addlist, newfilename): - root = ET.Element('grid_data') - grids = ET.SubElement(root, 'grids') + root = ET.Element("grid_data") + grids = ET.SubElement(root, "grids") for a, b in addlist: if b is not None: - grids.append(ET.Element('REPLACE')) + grids.append(ET.Element("REPLACE")) grids.append(b.to_cime5()) - grids.append(ET.Element('WITH')) + grids.append(ET.Element("WITH")) if a is not None: grids.append(a.to_cime5()) xmllint = find_executable("xmllint") if xmllint is not None: - run_cmd_no_fail("{} --format --output {} -".format(xmllint, newfilename), - input_str=ET.tostring(root)) + run_cmd_no_fail( + "{} --format --output {} -".format(xmllint, newfilename), + input_str=ET.tostring(root), + ) class DomainTree(DataTree): @@ -333,10 +364,10 @@ def populate(self): if self.root is None: return - xmlnodes = self.root.findall('gridhorz') + xmlnodes = self.root.findall("gridhorz") nodeclass = Cime2DomainNode if len(xmlnodes) == 0: - xmlnodes = self.root.findall('./domains/domain') + xmlnodes = self.root.findall("./domains/domain") nodeclass = Cime5DomainNode for node in xmlnodes: @@ -345,46 +376,52 @@ def populate(self): self.nodes.append(datanode) def writexml(self, addlist, newfilename): - root = ET.Element('grid_data') - domains = ET.SubElement(root, 'domains') + root = ET.Element("grid_data") + domains = ET.SubElement(root, "domains") for a, b in addlist: if b is not None: - domains.append(ET.Element('REPLACE')) + domains.append(ET.Element("REPLACE")) domains.append(b.to_cime5()) - domains.append(ET.Element('WITH')) + domains.append(ET.Element("WITH")) if a is not None: domains.append(a.to_cime5()) xmllint = find_executable("xmllint") if xmllint is not None: - run_cmd_no_fail("{} --format --output {} -".format(xmllint, newfilename), - input_str=ET.tostring(root)) + run_cmd_no_fail( + "{} --format --output {} -".format(xmllint, newfilename), + input_str=ET.tostring(root), + ) + class GridmapTree(DataTree): def populate(self): if self.root is None: return - xmlnodes = self.root.findall('gridmap') + xmlnodes = self.root.findall("gridmap") if len(xmlnodes) == 0: - xmlnodes = self.root.findall('./gridmaps/gridmap') + xmlnodes = self.root.findall("./gridmaps/gridmap") for xmlnode in xmlnodes: datanode = GridmapNode(self.root) datanode.set_data(xmlnode) self.nodes.append(datanode) def writexml(self, addlist, newfilename): - root = ET.Element('gridmaps') - gridmaps = ET.SubElement(root, 'gridmap') + root = ET.Element("gridmaps") + gridmaps = ET.SubElement(root, "gridmap") for a, b in addlist: if b is not None: - gridmaps.append(ET.Element('REPLACE')) + gridmaps.append(ET.Element("REPLACE")) gridmaps.append(b.to_cime5()) - gridmaps.append(ET.Element('WITH')) + gridmaps.append(ET.Element("WITH")) if a is not None: gridmaps.append(a.to_cime5()) xmllint = find_executable("xmllint") if xmllint is not None: - run_cmd_no_fail("{} --format --output {} -".format(xmllint, newfilename), - input_str=ET.tostring(root)) + run_cmd_no_fail( + "{} --format --output {} -".format(xmllint, newfilename), + 
input_str=ET.tostring(root), + ) + def diff_tree(atree, btree): afound = [] @@ -400,7 +437,6 @@ def diff_tree(atree, btree): else: bkeys.append(bnode.keyvalue()) - for anode in atree.nodes: for bnode in btree.nodes: if bnode in bfound: @@ -420,8 +456,6 @@ def diff_tree(atree, btree): addlist.append([anode, None]) - - logger.info("Number of ok nodes: {:d}".format(len(oklist))) logger.info("Number of wrong nodes: {:d}".format(len(fixlist))) logger.info("Number of missing nodes: {:d}".format(len(addlist))) @@ -431,8 +465,6 @@ def diff_tree(atree, btree): return [oklist, fixlist, addlist] - - def grid_compare(): cime2file, cime5file = parse_command_line(sys.argv) @@ -445,16 +477,20 @@ def grid_compare(): logger.info("Comparing grid nodes...") oklist, fixlist, addlist = diff_tree(cime2gridtree, cime5gridtree) - cime5gridtree.postprocess(fixlist, addlist, "tempgrid.xml", cime5file, - "badgrid.xml") + cime5gridtree.postprocess( + fixlist, addlist, "tempgrid.xml", cime5file, "badgrid.xml" + ) oklist, fixlist, addlist = diff_tree(cime2domaintree, cime5domaintree) - cime5domaintree.postprocess(fixlist, addlist, "tempdomain.xml", - cime5file, "baddomain.xml") + cime5domaintree.postprocess( + fixlist, addlist, "tempdomain.xml", cime5file, "baddomain.xml" + ) oklist, fixlist, addlist = diff_tree(cime2gridmaptree, cime5gridmaptree) - cime5gridmaptree.postprocess(fixlist, addlist, "tempgridmap.xml", - cime5file, "badgridmap.xml") + cime5gridmaptree.postprocess( + fixlist, addlist, "tempgridmap.xml", cime5file, "badgridmap.xml" + ) + if __name__ == "__main__": grid_compare() diff --git a/CIME/Tools/xmlquery b/CIME/Tools/xmlquery index ea29a7b590a..679e5cef4da 100755 --- a/CIME/Tools/xmlquery +++ b/CIME/Tools/xmlquery @@ -108,96 +108,189 @@ logger = logging.getLogger("xmlquery") unsupported_files = ["env_mach_specific.xml", "env_archive.xml"] ############################################################################### def parse_command_line(args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( - description=description, - formatter_class=argparse.RawTextHelpFormatter) + description=description, formatter_class=argparse.RawTextHelpFormatter + ) CIME.utils.setup_standard_logging_options(parser) # Set command line options - parser.add_argument("variables", nargs="*" , - help="Variable name(s) to query from env_*.xml file(s)\n" - "( 'variable_name' from value ).\n" - "Multiple variables can be given, separated by commas or spaces.\n") - - parser.add_argument("--caseroot" , "-caseroot", default=os.getcwd(), - help="Case directory to reference.\n" - "Default is current directory.") - - parser.add_argument("--listall", "-listall" , default=False , action="store_true" , - help="List all variables and their values.") - - parser.add_argument("--file" , "-file", - help="The file you want to query. 
If not given, queries all files.\n" - "Typically used with the --listall option.") - - parser.add_argument("--subgroup","-subgroup", - help="Apply to this subgroup only.") - - parser.add_argument("-p", "--partial-match", action="store_true", - help="Allow partial matches of variable names, treats args as regex.") - - parser.add_argument("--no-resolve", "-no-resolve", action="store_true", - help="Do not resolve variable values.") + parser.add_argument( + "variables", + nargs="*", + help="Variable name(s) to query from env_*.xml file(s)\n" + "( 'variable_name' from value ).\n" + "Multiple variables can be given, separated by commas or spaces.\n", + ) + + parser.add_argument( + "--caseroot", + "-caseroot", + default=os.getcwd(), + help="Case directory to reference.\n" "Default is current directory.", + ) + + parser.add_argument( + "--listall", + "-listall", + default=False, + action="store_true", + help="List all variables and their values.", + ) + + parser.add_argument( + "--file", + "-file", + help="The file you want to query. If not given, queries all files.\n" + "Typically used with the --listall option.", + ) + + parser.add_argument("--subgroup", "-subgroup", help="Apply to this subgroup only.") + + parser.add_argument( + "-p", + "--partial-match", + action="store_true", + help="Allow partial matches of variable names, treats args as regex.", + ) + + parser.add_argument( + "--no-resolve", + "-no-resolve", + action="store_true", + help="Do not resolve variable values.", + ) group = parser.add_mutually_exclusive_group() - group.add_argument("--full", default=False, action="store_true", - help="Print a full listing for each variable, including value, type,\n" - "valid values, description and file.") - - group.add_argument("--fileonly", "-fileonly", default=False, action="store_true", - help="Only print the filename that each variable is defined in.") - - group.add_argument("--value", "-value", default=False, action="store_true", - help="Only print one value without newline character.\n" - "If more than one has been found print first value in list.") - - group.add_argument("--raw", default=False, action="store_true", - help="Print the complete raw record associated with each variable.") - - group.add_argument("--description", default=False, action="store_true", - help="Print the description associated with each variable.") - - group.add_argument("--get-group", default=False, action="store_true", - help="Print the group associated with each variable.") - - group.add_argument("--type", default=False, action="store_true", - help="Print the data type associated with each variable.") - - group.add_argument("--valid-values", default=False, action="store_true", - help="Print the valid values associated with each variable, if defined.") + group.add_argument( + "--full", + default=False, + action="store_true", + help="Print a full listing for each variable, including value, type,\n" + "valid values, description and file.", + ) + + group.add_argument( + "--fileonly", + "-fileonly", + default=False, + action="store_true", + help="Only print the filename that each variable is defined in.", + ) + + group.add_argument( + "--value", + "-value", + default=False, + action="store_true", + help="Only print one value without newline character.\n" + "If more than one has been found print first value in list.", + ) + + group.add_argument( + "--raw", + default=False, + action="store_true", + help="Print the complete raw record associated with each variable.", + ) + + group.add_argument( + "--description", + 
default=False, + action="store_true", + help="Print the description associated with each variable.", + ) + + group.add_argument( + "--get-group", + default=False, + action="store_true", + help="Print the group associated with each variable.", + ) + + group.add_argument( + "--type", + default=False, + action="store_true", + help="Print the data type associated with each variable.", + ) + + group.add_argument( + "--valid-values", + default=False, + action="store_true", + help="Print the valid values associated with each variable, if defined.", + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - if (len(sys.argv) == 1) : + if len(sys.argv) == 1: parser.print_help() exit() if len(args.variables) == 1: - variables = args.variables[0].split(',') + variables = args.variables[0].split(",") else: variables = args.variables - return variables, args.subgroup, args.caseroot, args.listall, args.fileonly, \ - args.value, args.no_resolve, args.raw, args.description, args.get_group, args.full, \ - args.type, args.valid_values, args.partial_match, args.file + return ( + variables, + args.subgroup, + args.caseroot, + args.listall, + args.fileonly, + args.value, + args.no_resolve, + args.raw, + args.description, + args.get_group, + args.full, + args.type, + args.valid_values, + args.partial_match, + args.file, + ) + def get_value_as_string(case, var, attribute=None, resolved=False, subgroup=None): - if var in ["THREAD_COUNT", "TOTAL_TASKS", "TASKS_PER_NODE", "NUM_NODES", "SPARE_NODES", "TASKS_PER_NUMA", "CORES_PER_TASK", "NGPUS_PER_NODE"]: + if var in [ + "THREAD_COUNT", + "TOTAL_TASKS", + "TASKS_PER_NODE", + "NUM_NODES", + "SPARE_NODES", + "TASKS_PER_NUMA", + "CORES_PER_TASK", + "NGPUS_PER_NODE", + ]: value = str(getattr(case, var.lower())) else: thistype = case.get_type_info(var) - value = case.get_value(var, attribute=attribute, resolved=resolved, subgroup=subgroup) + value = case.get_value( + var, attribute=attribute, resolved=resolved, subgroup=subgroup + ) if value is not None and thistype: value = convert_to_string(value, thistype, var) return value -def xmlquery_sub(case, variables, subgroup=None, fileonly=False, - resolved=True, raw=False, description=False, get_group=False, - full=False, dtype=False, valid_values=False, xmlfile=None): + +def xmlquery_sub( + case, + variables, + subgroup=None, + fileonly=False, + resolved=True, + raw=False, + description=False, + get_group=False, + full=False, + dtype=False, + valid_values=False, + xmlfile=None, +): """ Return list of attributes and their values, print formatted @@ -214,21 +307,22 @@ def xmlquery_sub(case, variables, subgroup=None, fileonly=False, else: groups = case.get_record_fields(var, "group") if not groups: - groups = ['none'] + groups = ["none"] if xmlfile: - expect(xmlfile not in unsupported_files, - "XML file {} is unsupported by this tool." 
- .format(xmlfile)) + expect( + xmlfile not in unsupported_files, + "XML file {} is unsupported by this tool.".format(xmlfile), + ) if not groups: - value = case.get_value(var, resolved=resolved) - results['none'] = {} - results['none'][var] = {} - results['none'][var]['value'] = value + value = case.get_value(var, resolved=resolved) + results["none"] = {} + results["none"][var] = {} + results["none"][var]["value"] = value elif not groups: - results['none'] = {} - results['none'][var] = {} + results["none"] = {} + results["none"][var] = {} for group in groups: if not group in results: @@ -238,7 +332,7 @@ def xmlquery_sub(case, variables, subgroup=None, fileonly=False, expect(group, "No group found for var {}".format(var)) if get_group: - results[group][var]['get_group'] = group + results[group][var]["get_group"] = group value = get_value_as_string(case, var, resolved=resolved, subgroup=group) if value is None: @@ -247,45 +341,82 @@ def xmlquery_sub(case, variables, subgroup=None, fileonly=False, value = [] for comp in comp_classes: try: - nextval = get_value_as_string(case,var, attribute={"compclass" : comp}, resolved=resolved, subgroup=group) - except Exception: # probably want to be more specific - nextval = get_value_as_string(case,var, attribute={"compclass" : comp}, resolved=False, subgroup=group) + nextval = get_value_as_string( + case, + var, + attribute={"compclass": comp}, + resolved=resolved, + subgroup=group, + ) + except Exception: # probably want to be more specific + nextval = get_value_as_string( + case, + var, + attribute={"compclass": comp}, + resolved=False, + subgroup=group, + ) if nextval is not None: value.append(comp + ":" + "{}".format(nextval)) else: - value = get_value_as_string(case, var, resolved=resolved, subgroup=group) + value = get_value_as_string( + case, var, resolved=resolved, subgroup=group + ) if value is None: if xmlfile: - expect(False, " No results found for variable {} in file {}".format(var, xmlfile)) + expect( + False, + " No results found for variable {} in file {}".format( + var, xmlfile + ), + ) else: expect(False, " No results found for variable {}".format(var)) - results[group][var]['value'] = value + results[group][var]["value"] = value if raw: - results[group][var]['raw'] = case.get_record_fields(var, "raw") + results[group][var]["raw"] = case.get_record_fields(var, "raw") if description or full: - results[group][var]['desc'] = case.get_record_fields(var, "desc") + results[group][var]["desc"] = case.get_record_fields(var, "desc") if fileonly or full: - results[group][var]['file'] = case.get_record_fields(var, "file") + results[group][var]["file"] = case.get_record_fields(var, "file") if dtype or full: - results[group][var]['type'] = case.get_type_info(var) + results[group][var]["type"] = case.get_type_info(var) if valid_values or full: - results[group][var]['valid_values'] = case.get_record_fields(var, "valid_values") #*** this is the problem *** + results[group][var]["valid_values"] = case.get_record_fields( + var, "valid_values" + ) # *** this is the problem *** return results + def _main_func(description): # Initialize command line parser and get command line options - variables, subgroup, caseroot, listall, fileonly, \ - value, no_resolve, raw, description, get_group, full, dtype, \ - valid_values, partial_match, xmlfile = parse_command_line(sys.argv, description) - - expect(xmlfile not in unsupported_files, - "XML file {} is unsupported by this tool." 
- .format(xmlfile)) + ( + variables, + subgroup, + caseroot, + listall, + fileonly, + value, + no_resolve, + raw, + description, + get_group, + full, + dtype, + valid_values, + partial_match, + xmlfile, + ) = parse_command_line(sys.argv, description) + + expect( + xmlfile not in unsupported_files, + "XML file {} is unsupported by this tool.".format(xmlfile), + ) # Initialize case ; read in all xml files from caseroot with Case(caseroot) as case: @@ -301,7 +432,9 @@ def _main_func(description): for all_variable in all_variables: if regex.search(all_variable): if subgroup is not None: - vargroups = case.get_record_fields(all_variable, "group") + vargroups = case.get_record_fields( + all_variable, "group" + ) if subgroup not in vargroups: continue @@ -322,52 +455,74 @@ def _main_func(description): else: variables = all_variables expect(variables, "No variables found") - results = xmlquery_sub(case, variables, subgroup, fileonly, resolved=not no_resolve, - raw=raw, description=description, get_group=get_group, full=full, - dtype=dtype, valid_values=valid_values, xmlfile=xmlfile) + results = xmlquery_sub( + case, + variables, + subgroup, + fileonly, + resolved=not no_resolve, + raw=raw, + description=description, + get_group=get_group, + full=full, + dtype=dtype, + valid_values=valid_values, + xmlfile=xmlfile, + ) if full or description: - wrapper=textwrap.TextWrapper() + wrapper = textwrap.TextWrapper() wrapper.subsequent_indent = "\t\t\t" wrapper.fix_sentence_endings = True cnt = 0 for group in sorted(iter(results)): - if (len(variables) > 1 or len(results) > 1 or full) and not get_group and not value: + if ( + (len(variables) > 1 or len(results) > 1 or full) + and not get_group + and not value + ): print("\nResults in group {}".format(group)) for var in variables: if var in results[group]: if raw: - print(results[group][var]['raw']) + print(results[group][var]["raw"]) elif get_group: - print("\t{}: {}".format(var, results[group][var]['get_group'])) + print("\t{}: {}".format(var, results[group][var]["get_group"])) elif value: if cnt > 0: sys.stdout.write(",") - sys.stdout.write("{}".format(results[group][var]['value'])) + sys.stdout.write("{}".format(results[group][var]["value"])) cnt += 1 elif description: - if results[group][var]['desc'][0] is not None: - desc_text = ' '.join(results[group][var]['desc'][0].split()) + if results[group][var]["desc"][0] is not None: + desc_text = " ".join(results[group][var]["desc"][0].split()) print("\t{}: {}".format(var, wrapper.fill(desc_text))) elif fileonly: - print("\t{}: {}".format(var, results[group][var]['file'])) + print("\t{}: {}".format(var, results[group][var]["file"])) elif dtype: - print("\t{}: {}".format(var, results[group][var]['type'])) + print("\t{}: {}".format(var, results[group][var]["type"])) elif valid_values: - if 'valid_values' in results[group][var]: - print("\t{}: {}".format(var, results[group][var]["valid_values"])) + if "valid_values" in results[group][var]: + print( + "\t{}: {}".format(var, results[group][var]["valid_values"]) + ) elif full: - if results[group][var]['desc'][0] is not None: - desc_text = ' '.join(results[group][var]['desc'][0].split()) - print("\t{}: value={}".format(var, results[group][var]['value'])) - print("\t\ttype: {}".format(results[group][var]['type'][0])) - if 'valid_values' in results[group][var]: - print("\t\tvalid_values: {}".format(results[group][var]["valid_values"])) + if results[group][var]["desc"][0] is not None: + desc_text = " ".join(results[group][var]["desc"][0].split()) + print("\t{}: 
value={}".format(var, results[group][var]["value"])) + print("\t\ttype: {}".format(results[group][var]["type"][0])) + if "valid_values" in results[group][var]: + print( + "\t\tvalid_values: {}".format( + results[group][var]["valid_values"] + ) + ) print("\t\tdescription: {}".format(wrapper.fill(desc_text))) - print("\t\tfile: {}".format(results[group][var]['file'][0])) + print("\t\tfile: {}".format(results[group][var]["file"][0])) else: - print("\t{}: {}".format(var, results[group][var]['value'])) + print("\t{}: {}".format(var, results[group][var]["value"])) + -if (__name__ == "__main__"): +if __name__ == "__main__": _main_func(__doc__) diff --git a/CIME/Tools/xmltestentry b/CIME/Tools/xmltestentry index d4dcf414818..025f13cf491 100755 --- a/CIME/Tools/xmltestentry +++ b/CIME/Tools/xmltestentry @@ -89,4 +89,3 @@ if ($check_num > 0) { print FILE " \n"; close (FILE); } - diff --git a/CIME/XML/archive.py b/CIME/XML/archive.py index 4f98078f39d..eb7b64c7634 100644 --- a/CIME/XML/archive.py +++ b/CIME/XML/archive.py @@ -10,8 +10,8 @@ logger = logging.getLogger(__name__) -class Archive(ArchiveBase): +class Archive(ArchiveBase): def __init__(self, infile=None, files=None): """ initialize an object @@ -25,27 +25,39 @@ def setup(self, env_archive, components, files=None): if files is None: files = Files() - components_node = env_archive.make_child("components", attributes={"version":"2.0"}) + components_node = env_archive.make_child( + "components", attributes={"version": "2.0"} + ) arch_components = deepcopy(components) model = get_model() - if 'drv' not in arch_components and model != 'ufs': - arch_components.append('drv') - if 'dart' not in arch_components and model == 'cesm': - arch_components.append('dart') + if "drv" not in arch_components and model != "ufs": + arch_components.append("drv") + if "dart" not in arch_components and model == "cesm": + arch_components.append("dart") for comp in arch_components: - infile = files.get_value("ARCHIVE_SPEC_FILE", {"component":comp}) + infile = files.get_value("ARCHIVE_SPEC_FILE", {"component": comp}) if infile is not None and os.path.isfile(infile): arch = Archive(infile=infile, files=files) - specs = arch.get_optional_child(name="comp_archive_spec", attributes={"compname":comp}) + specs = arch.get_optional_child( + name="comp_archive_spec", attributes={"compname": comp} + ) else: if infile is None: - logger.debug("No archive file defined for component {}".format(comp)) + logger.debug( + "No archive file defined for component {}".format(comp) + ) else: - logger.debug("Archive file {} for component {} not found".format(infile,comp)) + logger.debug( + "Archive file {} for component {} not found".format( + infile, comp + ) + ) - specs = self.get_optional_child(name="comp_archive_spec", attributes={"compname":comp}) + specs = self.get_optional_child( + name="comp_archive_spec", attributes={"compname": comp} + ) if specs is None: logger.debug("No archive specs found for component {}".format(comp)) @@ -57,13 +69,17 @@ def get_all_config_archive_files(self, files): """ Returns the list of ARCHIVE_SPEC_FILES that exist on disk as defined in config_files.xml """ - archive_spec_node = files.get_child("entry", {"id" : "ARCHIVE_SPEC_FILE"}) - component_nodes = files.get_children("value", root=files.get_child("values", root=archive_spec_node)) + archive_spec_node = files.get_child("entry", {"id": "ARCHIVE_SPEC_FILE"}) + component_nodes = files.get_children( + "value", root=files.get_child("values", root=archive_spec_node) + ) config_archive_files = [] for comp in 
component_nodes: - attr = self.get(comp,"component") + attr = self.get(comp, "component") if attr: - compval = files.get_value("ARCHIVE_SPEC_FILE", attribute={"component":attr}) + compval = files.get_value( + "ARCHIVE_SPEC_FILE", attribute={"component": attr} + ) else: compval = self.get_resolved_value(self.text(comp)) diff --git a/CIME/XML/archive_base.py b/CIME/XML/archive_base.py index fc3ab4d8156..538c4713d87 100644 --- a/CIME/XML/archive_base.py +++ b/CIME/XML/archive_base.py @@ -6,14 +6,15 @@ logger = logging.getLogger(__name__) -class ArchiveBase(GenericXML): +class ArchiveBase(GenericXML): def get_entry(self, compname): """ Returns an xml node corresponding to compname in comp_archive_spec """ - return self.scan_optional_child('comp_archive_spec', - attributes={"compname":compname}) + return self.scan_optional_child( + "comp_archive_spec", attributes={"compname": compname} + ) def _get_file_node_text(self, attnames, archive_entry): """ @@ -37,7 +38,7 @@ def get_rest_file_extensions(self, archive_entry): returns a list of text entries or an empty list if no entries are found """ - return self._get_file_node_text(['rest_file_extension'],archive_entry) + return self._get_file_node_text(["rest_file_extension"], archive_entry) def get_hist_file_extensions(self, archive_entry): """ @@ -46,7 +47,7 @@ def get_hist_file_extensions(self, archive_entry): returns a list of text entries or an empty list if no entries are found """ - return self._get_file_node_text(['hist_file_extension'],archive_entry) + return self._get_file_node_text(["hist_file_extension"], archive_entry) def get_hist_file_ext_regexes(self, archive_entry): """ @@ -55,7 +56,7 @@ def get_hist_file_ext_regexes(self, archive_entry): returns a list of text entries or an empty list if no entries are found """ - return self._get_file_node_text(['hist_file_ext_regex'],archive_entry) + return self._get_file_node_text(["hist_file_ext_regex"], archive_entry) def get_entry_value(self, name, archive_entry): """ @@ -67,12 +68,18 @@ def get_entry_value(self, name, archive_entry): return self.text(node) return None - def get_latest_hist_files(self, casename, model, from_dir, suffix="", ref_case=None): + def get_latest_hist_files( + self, casename, model, from_dir, suffix="", ref_case=None + ): """ get the most recent history files in directory from_dir with suffix if provided """ - test_hists = self.get_all_hist_files(casename, model, from_dir, suffix=suffix, ref_case=ref_case) - ext_regexes = self.get_hist_file_ext_regexes(self.get_entry(self._get_compname(model))) + test_hists = self.get_all_hist_files( + casename, model, from_dir, suffix=suffix, ref_case=ref_case + ) + ext_regexes = self.get_hist_file_ext_regexes( + self.get_entry(self._get_compname(model)) + ) latest_files = {} histlist = [] for hist in test_hists: @@ -106,24 +113,42 @@ def get_all_hist_files(self, casename, model, from_dir, suffix="", ref_case=None # Strip any trailing $ if suffix is present and add it back after the suffix for ext in extensions: - if ext.endswith('$') and has_suffix: + if ext.endswith("$") and has_suffix: ext = ext[:-1] - string = model+r'\d?_?(\d{4})?\.'+ext + string = model + r"\d?_?(\d{4})?\." + ext if has_suffix: - string += '.'+suffix+'$' + string += "." 
+ suffix + "$" - - logger.debug ("Regex is {}".format(string)) + logger.debug("Regex is {}".format(string)) pfile = re.compile(string) - hist_files.extend([f for f in os.listdir(from_dir) if pfile.search(f) and ( (f.startswith(casename) or f.startswith(model)) and not f.endswith("cprnc.out") )]) + hist_files.extend( + [ + f + for f in os.listdir(from_dir) + if pfile.search(f) + and ( + (f.startswith(casename) or f.startswith(model)) + and not f.endswith("cprnc.out") + ) + ] + ) if ref_case: - expect(ref_case not in casename,"ERROR: ref_case name {} conflicts with casename {}".format(ref_case,casename)) - hist_files = [h for h in hist_files if not (ref_case in os.path.basename(h))] + expect( + ref_case not in casename, + "ERROR: ref_case name {} conflicts with casename {}".format( + ref_case, casename + ), + ) + hist_files = [ + h for h in hist_files if not (ref_case in os.path.basename(h)) + ] hist_files = list(set(hist_files)) hist_files.sort() - logger.debug("get_all_hist_files returns {} for model {}".format(hist_files, model)) + logger.debug( + "get_all_hist_files returns {} for model {}".format(hist_files, model) + ) return hist_files @@ -137,6 +162,7 @@ def _get_compname(model): return "drv" return model + def _get_extension(model, filepath, ext_regexes): r""" For a hist file for the given model, return what we call the "extension" @@ -193,18 +219,18 @@ def _get_extension(model, filepath, ext_regexes): if model == "mom": # Need to check 'sfc.day' specially: the embedded '.' messes up the # general-purpose regex - ext_regexes.append(r'sfc\.day') + ext_regexes.append(r"sfc\.day") # Now add the general-purpose extension regex - ext_regexes.append(r'\w+') + ext_regexes.append(r"\w+") for ext_regex in ext_regexes: - full_regex_str = model+r'\d?_?(\d{4})?\.('+ext_regex+r')[-\w\.]*' + full_regex_str = model + r"\d?_?(\d{4})?\.(" + ext_regex + r")[-\w\.]*" full_regex = re.compile(full_regex_str) m = full_regex.search(basename) if m is not None: if m.group(1) is not None: - result = m.group(1)+'.'+m.group(2) + result = m.group(1) + "." + m.group(2) else: result = m.group(2) return result diff --git a/CIME/XML/batch.py b/CIME/XML/batch.py index 2b0664e5e6f..383f967fb8f 100644 --- a/CIME/XML/batch.py +++ b/CIME/XML/batch.py @@ -11,9 +11,16 @@ logger = logging.getLogger(__name__) -class Batch(GenericXML): - def __init__(self, batch_system=None, machine=None, infile=None, files=None, extra_machines_dir=None): +class Batch(GenericXML): + def __init__( + self, + batch_system=None, + machine=None, + infile=None, + files=None, + extra_machines_dir=None, + ): """ initialize an object @@ -32,9 +39,9 @@ def __init__(self, batch_system=None, machine=None, infile=None, files=None, ext GenericXML.__init__(self, infile, schema=schema) self.batch_system_node = None - self.machine_node = None - self.batch_system = batch_system - self.machine = machine + self.machine_node = None + self.batch_system = batch_system + self.machine = machine # Append the contents of $HOME/.cime/config_batch.xml if it exists. # @@ -42,7 +49,7 @@ def __init__(self, batch_system=None, machine=None, infile=None, files=None, ext # extra_machines_dir, if present. # # This could cause problems if node matches are repeated when only one is expected. 
- infile = os.path.join(os.environ.get("HOME"),".cime","config_batch.xml") + infile = os.path.join(os.environ.get("HOME"), ".cime", "config_batch.xml") if os.path.exists(infile): GenericXML.read(self, infile) if extra_machines_dir: @@ -63,16 +70,25 @@ def get_optional_batch_node(self, nodename, attributes=None): """ Return data on a node for a batch system """ - expect(self.batch_system_node is not None, "Batch system not set, use parent get_node?") + expect( + self.batch_system_node is not None, + "Batch system not set, use parent get_node?", + ) if self.machine_node is not None: - result = self.get_optional_child(nodename, attributes, root=self.machine_node) + result = self.get_optional_child( + nodename, attributes, root=self.machine_node + ) if result is None: - return self.get_optional_child(nodename, attributes, root=self.batch_system_node) + return self.get_optional_child( + nodename, attributes, root=self.batch_system_node + ) else: return result else: - return self.get_optional_child(nodename, attributes, root=self.batch_system_node) + return self.get_optional_child( + nodename, attributes, root=self.batch_system_node + ) def set_batch_system(self, batch_system, machine=None): """ @@ -80,7 +96,7 @@ def set_batch_system(self, batch_system, machine=None): """ machine = machine if machine is not None else self.machine if self.batch_system != batch_system or self.batch_system_node is None: - nodes = self.get_children("batch_system",{"type" : batch_system}) + nodes = self.get_children("batch_system", {"type": batch_system}) for node in nodes: mach = self.get(node, "MACH") if mach is None: @@ -89,16 +105,22 @@ def set_batch_system(self, batch_system, machine=None): self.machine = machine self.machine_node = node - expect(self.batch_system_node is not None, "No batch system '{}' found".format(batch_system)) + expect( + self.batch_system_node is not None, + "No batch system '{}' found".format(batch_system), + ) return batch_system - #pylint: disable=arguments-differ + # pylint: disable=arguments-differ def get_value(self, name, attribute=None, resolved=True, subgroup=None): """ Get Value of fields in the config_batch.xml file """ - expect(self.batch_system_node is not None, "Batch object has no batch system defined") + expect( + self.batch_system_node is not None, + "Batch object has no batch system defined", + ) expect(subgroup is None, "This class does not support subgroups") value = None diff --git a/CIME/XML/compilerblock.py b/CIME/XML/compilerblock.py index 679b360c6d9..1900a03a130 100644 --- a/CIME/XML/compilerblock.py +++ b/CIME/XML/compilerblock.py @@ -69,6 +69,7 @@ logger = logging.getLogger(__name__) + class CompilerBlock(object): """Data used to translate a single element. @@ -92,7 +93,7 @@ def __init__(self, writer, compiler_elem, machobj, db): """ self._writer = writer self._compiler_elem = compiler_elem - self._db = db + self._db = db self._machobj = machobj # If there's no COMPILER attribute, self._compiler is None. 
self._compiler = db.get(compiler_elem, "COMPILER") @@ -120,22 +121,27 @@ def _handle_references(self, elem, set_up, tear_down, depends): output = "" logger.debug("Initial output={}".format(output)) - reference_re = re.compile(r'\${?(\w+)}?') - env_ref_re = re.compile(r'\$ENV\{(\w+)\}') + reference_re = re.compile(r"\${?(\w+)}?") + env_ref_re = re.compile(r"\$ENV\{(\w+)\}") shell_prefix = "$SHELL{" for m in reference_re.finditer(output): var_name = m.groups()[0] - if var_name not in ("SHELL","ENV"): + if var_name not in ("SHELL", "ENV"): output = output.replace(m.group(), writer.variable_string(var_name)) depends.add(var_name) logger.debug("preenv pass output={}".format(output)) for m in env_ref_re.finditer(output): - logger.debug("look for {} in env {}".format(output,writer.environment_variable_string(m.groups()[0]))) - output = output.replace(m.group(), - writer.environment_variable_string(m.groups()[0])) + logger.debug( + "look for {} in env {}".format( + output, writer.environment_variable_string(m.groups()[0]) + ) + ) + output = output.replace( + m.group(), writer.environment_variable_string(m.groups()[0]) + ) logger.debug("and output {}".format(output)) logger.debug("postenv pass output={}".format(output)) @@ -154,14 +160,17 @@ def _handle_references(self, elem, set_up, tear_down, depends): command = output[sidx + len(shell_prefix) : idx] logger.debug("execute {} in shell, command {}".format(output, command)) - new_set_up, inline, new_tear_down = \ - writer.shell_command_strings(command) - output = output.replace(output[sidx:idx+1], inline, 1) + new_set_up, inline, new_tear_down = writer.shell_command_strings(command) + output = output.replace(output[sidx : idx + 1], inline, 1) if new_set_up is not None: set_up.append(new_set_up) if new_tear_down is not None: tear_down.append(new_tear_down) - logger.debug("set_up {} inline {} tear_down {}".format(new_set_up,inline,new_tear_down)) + logger.debug( + "set_up {} inline {} tear_down {}".format( + new_set_up, inline, new_tear_down + ) + ) logger.debug("First pass output={}".format(output)) @@ -185,12 +194,10 @@ def _elem_to_setting(self, elem): set_up = [] tear_down = [] depends = set() - value_text = self._handle_references(elem, set_up, - tear_down, depends) + value_text = self._handle_references(elem, set_up, tear_down, depends) # Create the setting object. append = self._db.name(elem) == "append" - setting = ValueSetting(value_text, append, - conditions, set_up, tear_down) + setting = ValueSetting(value_text, append, conditions, set_up, tear_down) return (setting, depends) @@ -205,10 +212,11 @@ def _add_elem_to_lists(self, name, elem, value_lists): """ setting, depends = self._elem_to_setting(elem) if name not in value_lists: - value_lists[name] = PossibleValues(name, setting, - self._specificity, depends) + value_lists[name] = PossibleValues( + name, setting, self._specificity, depends + ) else: - value_lists[name].add_setting(setting, self._specificity,depends) + value_lists[name].add_setting(setting, self._specificity, depends) def add_settings_to_lists(self, flag_vars, value_lists): """Add all data in the element to lists of settings. 
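The two compiled patterns above (reference_re and env_ref_re) drive the whole reference translation; a minimal standalone illustration (not part of the patch, with an invented flag string) shows why the code skips the pseudo-variable names SHELL and ENV.

    # Minimal illustration of the reference regexes in _handle_references.
    import re

    reference_re = re.compile(r"\${?(\w+)}?")   # $VAR or ${VAR}
    env_ref_re = re.compile(r"\$ENV\{(\w+)\}")  # $ENV{VAR}

    text = "-I$ENV{NETCDF_PATH}/include ${FFLAGS} $SHELL{nf-config --flibs}"

    print(env_ref_re.findall(text))    # ['NETCDF_PATH']
    print(reference_re.findall(text))  # ['ENV', 'FFLAGS', 'SHELL'] -- 'SHELL' and
                                       # 'ENV' are filtered out; the rest become
                                       # build-system variable references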
@@ -233,8 +241,9 @@ def matches_machine(self): """ self._specificity = 0 if self._db.has(self._compiler_elem, "MACH"): - if self._machobj.get_machine_name() == \ - self._db.get(self._compiler_elem, "MACH"): + if self._machobj.get_machine_name() == self._db.get( + self._compiler_elem, "MACH" + ): self._specificity += 2 else: return False diff --git a/CIME/XML/compilers.py b/CIME/XML/compilers.py index 8ba2c53bd67..6c0616da75c 100644 --- a/CIME/XML/compilers.py +++ b/CIME/XML/compilers.py @@ -13,10 +13,18 @@ logger = logging.getLogger(__name__) -class Compilers(GenericXML): - def __init__(self, machobj, infile=None, compiler=None, mpilib=None, files=None, version=None, - extra_machines_dir=None): +class Compilers(GenericXML): + def __init__( + self, + machobj, + infile=None, + compiler=None, + mpilib=None, + files=None, + version=None, + extra_machines_dir=None, + ): """ initialize an object @@ -40,27 +48,27 @@ def __init__(self, machobj, infile=None, compiler=None, mpilib=None, files=None, self._version = self.get_version() self._machobj = machobj - self.machine = machobj.get_machine_name() + self.machine = machobj.get_machine_name() self.os = machobj.get_value("OS") if compiler is None: compiler = machobj.get_default_compiler() - self.compiler = compiler + self.compiler = compiler if mpilib is None: if compiler is None: mpilib = machobj.get_default_MPIlib() else: - mpilib = machobj.get_default_MPIlib(attributes={'compiler':compiler}) + mpilib = machobj.get_default_MPIlib(attributes={"compiler": compiler}) self.mpilib = mpilib - self.compiler_nodes = None # Listed from last to first + self.compiler_nodes = None # Listed from last to first # Append the contents of $HOME/.cime/config_compilers.xml if it exists. # # Also append the contents of a config_compilers.xml file in the directory given by # extra_machines_dir, if present. # # This could cause problems if node matches are repeated when only one is expected. - infile = os.path.join(os.environ.get("HOME"),".cime","config_compilers.xml") + infile = os.path.join(os.environ.get("HOME"), ".cime", "config_compilers.xml") if os.path.exists(infile): GenericXML.read(self, infile, schema=schema) if extra_machines_dir: @@ -73,9 +81,19 @@ def __init__(self, machobj, infile=None, compiler=None, mpilib=None, files=None, if self._version > 1.0: schema_db = GenericXML(infile=schema) - compiler_vars = schema_db.get_child("{http://www.w3.org/2001/XMLSchema}group", attributes={"name":"compilerVars"}) - choice = schema_db.get_child(name="{http://www.w3.org/2001/XMLSchema}choice", root=compiler_vars) - self.flag_vars = set(schema_db.get(elem, "name") for elem in schema_db.get_children(root=choice, attributes={"type":"flagsVar"})) + compiler_vars = schema_db.get_child( + "{http://www.w3.org/2001/XMLSchema}group", + attributes={"name": "compilerVars"}, + ) + choice = schema_db.get_child( + name="{http://www.w3.org/2001/XMLSchema}choice", root=compiler_vars + ) + self.flag_vars = set( + schema_db.get(elem, "name") + for elem in schema_db.get_children( + root=choice, attributes={"type": "flagsVar"} + ) + ) def get_compiler(self): """ @@ -87,17 +105,30 @@ def get_optional_compiler_node(self, nodename, attributes=None): """ Return data on a node for a compiler """ - expect(self.compiler_nodes is not None, "Compiler not set, use parent get_node?") + expect( + self.compiler_nodes is not None, "Compiler not set, use parent get_node?" 
+ ) for compiler_node in self.compiler_nodes: - result = self.get_optional_child(name=nodename, attributes=attributes, root=compiler_node) + result = self.get_optional_child( + name=nodename, attributes=attributes, root=compiler_node + ) if result is not None: return result return None def _is_compatible(self, compiler_node, compiler, machine, os_, mpilib): - for xmlid, value in [ ("COMPILER", compiler), ("MACH", machine), ("OS", os_), ("MPILIB", mpilib) ]: - if value is not None and self.has(compiler_node, xmlid) and value != self.get(compiler_node, xmlid): + for xmlid, value in [ + ("COMPILER", compiler), + ("MACH", machine), + ("OS", os_), + ("MPILIB", mpilib), + ]: + if ( + value is not None + and self.has(compiler_node, xmlid) + and value != self.get(compiler_node, xmlid) + ): return False return True @@ -112,10 +143,16 @@ def set_compiler(self, compiler, machine=None, os_=None, mpilib=None): 'gnu' """ machine = machine if machine else self.machine - os_ = os_ if os_ else self.os - mpilib = mpilib if mpilib else self.mpilib + os_ = os_ if os_ else self.os + mpilib = mpilib if mpilib else self.mpilib - if self.compiler != compiler or self.machine != machine or self.os != os_ or self.mpilib != mpilib or self.compiler_nodes is None: + if ( + self.compiler != compiler + or self.machine != machine + or self.os != os_ + or self.mpilib != mpilib + or self.compiler_nodes is None + ): self.compiler_nodes = [] nodes = self.get_children(name="compiler") for node in nodes: @@ -125,16 +162,18 @@ def set_compiler(self, compiler, machine=None, os_=None, mpilib=None): self.compiler_nodes.reverse() self.compiler = compiler - self.machine = machine - self.os = os_ - self.mpilib = mpilib + self.machine = machine + self.os = os_ + self.mpilib = mpilib - #pylint: disable=arguments-differ + # pylint: disable=arguments-differ def get_value(self, name, attribute=None, resolved=True, subgroup=None): """ Get Value of fields in the config_compilers.xml file """ - expect(self.compiler_nodes is not None, "Compiler object has no compiler defined") + expect( + self.compiler_nodes is not None, "Compiler object has no compiler defined" + ) expect(subgroup is None, "This class does not support subgroups") value = None @@ -150,9 +189,16 @@ def get_value(self, name, attribute=None, resolved=True, subgroup=None): return value - def write_macros_file(self, macros_file="Macros.make", output_format="make", xml=None): + def write_macros_file( + self, macros_file="Macros.make", output_format="make", xml=None + ): if self._version <= 1.0: - expect(False, "config_compilers.xml version '{}' is no longer supported".format(self._version)) + expect( + False, + "config_compilers.xml version '{}' is no longer supported".format( + self._version + ), + ) else: if output_format == "make": format_ = "Makefile" @@ -183,9 +229,10 @@ def _write_macros_file(self, build_system, output, xml=None): elif build_system == "CMake": writer = CMakeMacroWriter(output) else: - expect(False, - "Unrecognized build system provided to write_macros: " + - build_system) + expect( + False, + "Unrecognized build system provided to write_macros: " + build_system, + ) # Start processing the file. value_lists = dict() @@ -208,29 +255,31 @@ def _write_macros_file(self, build_system, output, xml=None): while value_lists: # Variables that are ready to be written. 
             ready_variables = [
-                var_name for var_name in value_lists
+                var_name
+                for var_name in value_lists
                 if value_lists[var_name].dependencies() <= vars_written
             ]
-            expect(len(ready_variables) > 0,
-                   "The file {} has bad $VAR references. "
-                   "Check for circular references or variables that "
-                   "are used in a $VAR but not actually defined.".format(self.filename))
+            expect(
+                len(ready_variables) > 0,
+                "The file {} has bad $VAR references. "
+                "Check for circular references or variables that "
+                "are used in a $VAR but not actually defined.".format(self.filename),
+            )
             big_normal_trees = {}
             big_append_tree = None
             for var_name in ready_variables:
                 # Note that we're writing this variable.
                 vars_written.add(var_name)
                 # Make the conditional trees and write them out.
-                normal_trees, append_tree = \
-                    value_lists[var_name].to_cond_trees()
+                normal_trees, append_tree = value_lists[var_name].to_cond_trees()
                 for spec in normal_trees:
                     if spec in big_normal_trees:
-                        big_normal_trees[spec] = merge_optional_trees(normal_trees[spec],
-                                                                      big_normal_trees[spec])
+                        big_normal_trees[spec] = merge_optional_trees(
+                            normal_trees[spec], big_normal_trees[spec]
+                        )
                     else:
                         big_normal_trees[spec] = normal_trees[spec]
-                big_append_tree = merge_optional_trees(append_tree,
-                                                       big_append_tree)
+                big_append_tree = merge_optional_trees(append_tree, big_append_tree)
                 # Remove this variable from the list of variables to handle
                 # next iteration.
                 del value_lists[var_name]
diff --git a/CIME/XML/component.py b/CIME/XML/component.py
index dcf6cb5fd96..b3834a17c91 100644
--- a/CIME/XML/component.py
+++ b/CIME/XML/component.py
@@ -9,26 +9,29 @@
 logger = logging.getLogger(__name__)

-class Component(EntryID):
+class Component(EntryID):
     def __init__(self, infile, comp_class):
         """
         initialize a Component object from the component xml file in infile
         associate the component class with comp_class if provided. 
""" self._comp_class = comp_class - if infile == 'testingonly': + if infile == "testingonly": self.filename = infile return files = Files() schema = None EntryID.__init__(self, infile) - schema = files.get_schema("CONFIG_{}_FILE".format(comp_class), attributes={"version":"{}".format(self.get_version())}) + schema = files.get_schema( + "CONFIG_{}_FILE".format(comp_class), + attributes={"version": "{}".format(self.get_version())}, + ) if schema is not None: self.validate_xml_file(infile, schema) - #pylint: disable=arguments-differ + # pylint: disable=arguments-differ def get_value(self, name, attribute=None, resolved=False, subgroup=None): expect(subgroup is None, "This class does not support subgroups") return EntryID.get_value(self, name, attribute, resolved) @@ -39,9 +42,9 @@ def get_valid_model_components(self): from the entries in the model CONFIG_CPL_FILE """ components = [] - comps_node = self.get_child("entry", {"id":"COMP_CLASSES"}) + comps_node = self.get_child("entry", {"id": "COMP_CLASSES"}) comps = self.get_default_value(comps_node) - components = comps.split(',') + components = comps.split(",") return components def _get_value_match(self, node, attributes=None, exact_match=False): @@ -55,7 +58,7 @@ def _get_value_match(self, node, attributes=None, exact_match=False): match_count = 0 match_values = [] expect(not exact_match, " exact_match not implemented in this method") - expect(node is not None," Empty node in _get_value_match") + expect(node is not None, " Empty node in _get_value_match") values = self.get_optional_child("values", root=node) if values is None: return @@ -75,12 +78,16 @@ def _get_value_match(self, node, attributes=None, exact_match=False): for valnode in self.get_children("value", root=values): # loop through all the keys in valnode (value nodes) attributes - for key,value in self.attrib(valnode).items(): + for key, value in self.attrib(valnode).items(): # determine if key is in attributes dictionary match_count = 0 if attributes is not None and key in attributes: if re.search(value, attributes[key]): - logger.debug("Value {} and key {} match with value {}".format(value, key, attributes[key])) + logger.debug( + "Value {} and key {} match with value {}".format( + value, key, attributes[key] + ) + ) match_count += 1 else: match_count = 0 @@ -113,14 +120,17 @@ def _get_value_match(self, node, attributes=None, exact_match=False): match_max = match_count match_value = self.text(valnode) else: - expect(False, "match attribute can only have a value of 'last' or 'first'") + expect( + False, + "match attribute can only have a value of 'last' or 'first'", + ) if len(match_values) > 0: match_value = " ".join(match_values) return match_value - #pylint: disable=arguments-differ + # pylint: disable=arguments-differ def get_description(self, compsetname): if self.get_version() == 3.0: return self._get_description_v3(compsetname, self._comp_class) @@ -129,7 +139,7 @@ def get_description(self, compsetname): def get_forcing_description(self, compsetname): if self.get_version() == 3.0: - return self._get_description_v3(compsetname, 'forcing') + return self._get_description_v3(compsetname, "forcing") else: return "" @@ -150,60 +160,75 @@ def _get_description_v3(self, compsetname, comp_class): component descriptions are matched to the compsetname using a set method """ - expect(comp_class is not None,"comp_class argument required for version3 files") + expect( + comp_class is not None, "comp_class argument required for version3 files" + ) comp_class = comp_class.lower() rootnode = 
self.get_child("description") desc = "" desc_nodes = self.get_children("desc", root=rootnode) - modifier_mode = self.get(rootnode, 'modifier_mode') + modifier_mode = self.get(rootnode, "modifier_mode") if modifier_mode is None: - modifier_mode = '*' - expect(modifier_mode in ('*','1','?','+'), - "Invalid modifier_mode {} in file {}".format(modifier_mode, self.filename)) + modifier_mode = "*" + expect( + modifier_mode in ("*", "1", "?", "+"), + "Invalid modifier_mode {} in file {}".format(modifier_mode, self.filename), + ) optiondesc = {} if comp_class == "forcing": for node in desc_nodes: - forcing = self.get(node, 'forcing') - if forcing is not None and compsetname.startswith(forcing+'_'): - expect(len(desc)==0, - "Too many matches on forcing field {} in file {}".\ - format(forcing, self.filename)) + forcing = self.get(node, "forcing") + if forcing is not None and compsetname.startswith(forcing + "_"): + expect( + len(desc) == 0, + "Too many matches on forcing field {} in file {}".format( + forcing, self.filename + ), + ) desc = self.text(node) if desc is None: - desc = compsetname.split('_')[0] + desc = compsetname.split("_")[0] return desc - # first pass just make a hash of the option descriptions for node in desc_nodes: - option = self.get(node, 'option') + option = self.get(node, "option") if option is not None: optiondesc[option] = self.text(node) - #second pass find a comp_class match + # second pass find a comp_class match desc = "" for node in desc_nodes: compdesc = self.get(node, comp_class) if compdesc is not None: - opt_parts = [ x.rstrip("]") for x in compdesc.split("[%") ] + opt_parts = [x.rstrip("]") for x in compdesc.split("[%")] parts = opt_parts.pop(0).split("%") reqset = set(parts) - fullset = set(parts+opt_parts) - match, complist = self._get_description_match(compsetname, reqset, fullset, modifier_mode) + fullset = set(parts + opt_parts) + + match, complist = self._get_description_match( + compsetname, reqset, fullset, modifier_mode + ) if match: desc = self.text(node) for opt in complist: if opt in optiondesc: desc += optiondesc[opt] - # cpl and esp components may not have a description - if comp_class not in ['cpl','esp']: - expect(len(desc) > 0, - "No description found for comp_class {} matching compsetname {} in file {}, expected match in {} % {}"\ - .format(comp_class,compsetname, self.filename, list(reqset), list(opt_parts))) + if comp_class not in ["cpl", "esp"]: + expect( + len(desc) > 0, + "No description found for comp_class {} matching compsetname {} in file {}, expected match in {} % {}".format( + comp_class, + compsetname, + self.filename, + list(reqset), + list(opt_parts), + ), + ) return desc def _get_description_match(self, compsetname, reqset, fullset, modifier_mode): @@ -234,25 +259,46 @@ def _get_description_match(self, compsetname, reqset, fullset, modifier_mode): (False, None) >>> obj._get_description_match("1850_CAM50%WCCM_Barn",set(["CAM50", "WCCM"]), set(["CAM50","WCCM","RCO2"]), "+") (True, ['CAM50', 'WCCM']) + >>> obj._get_description_match("scn:1850_atm:CAM50%WCCM_Barn",set(["CAM50", "WCCM"]), set(["CAM50","WCCM","RCO2"]), "+") + (True, ['CAM50', 'WCCM']) """ match = False - comparts = compsetname.split('_') + comparts = compsetname.split("_") matchcomplist = None - for comp in comparts: - complist = comp.split('%') + if ":" in comp: + comp = comp.split(":")[1] + complist = comp.split("%") cset = set(complist) + if cset == reqset or (cset > reqset and cset <= fullset): - if modifier_mode == '1': - expect(len(complist) == 2, - "Expected 
exactly one modifer found {} in {}".format(len(complist)-1,complist)) - elif modifier_mode == '+': - expect(len(complist) >= 2, - "Expected one or more modifers found {} in {}".format(len(complist)-1, list(reqset))) - elif modifier_mode == '?': - expect(len(complist) <= 2, - "Expected 0 or one modifers found {} in {}".format(len(complist)-1, complist)) - expect(not match,"Found multiple matches in file {} for {}".format(self.filename,comp)) + if modifier_mode == "1": + expect( + len(complist) == 2, + "Expected exactly one modifier found {} in {}".format( + len(complist) - 1, complist + ), + ) + elif modifier_mode == "+": + expect( + len(complist) >= 2, + "Expected one or more modifiers found {} in {}".format( + len(complist) - 1, list(reqset) + ), + ) + elif modifier_mode == "?": + expect( + len(complist) <= 2, + "Expected 0 or one modifiers found {} in {}".format( + len(complist) - 1, complist + ), + ) + expect( + not match, + "Found multiple matches in file {} for {}".format( + self.filename, comp + ), + ) match = True matchcomplist = complist # found a match @@ -281,7 +327,7 @@ def print_values(self): for entry in entries: name = self.get(entry, "id") text = self.text(self.get_child("desc", root=entry)) - logger.info(" {:20s} : {}".format(name, text.encode('utf-8'))) + logger.info(" {:20s} : {}".format(name, text.encode("utf-8"))) def return_values(self): """ @@ -294,7 +340,7 @@ def return_values(self): if helpnode: helptext = self.text(helpnode) else: - helptext = '' + helptext = "" entries = self.get_children("entry") for entry in entries: item = dict() @@ -305,14 +351,16 @@ def return_values(self): group = self.text(self.get_child("group", root=entry)) filename = self.text(self.get_child("file", root=entry)) text = self.text(self.get_child("desc", root=entry)) - item = {"name":name, - "datatype":datatype, - "valid_values":valid_values, - "value":default_value, - "group":group, - "filename":filename, - "desc":text.encode('utf-8')} + item = { + "name": name, + "datatype": datatype, + "valid_values": valid_values, + "value": default_value, + "group": group, + "filename": filename, + "desc": text.encode("utf-8"), + } items.append(item) - entry_dict = {"items" : items} + entry_dict = {"items": items} return helptext, entry_dict diff --git a/CIME/XML/compsets.py b/CIME/XML/compsets.py index 737e3cc1202..ee46b4ccc95 100644 --- a/CIME/XML/compsets.py +++ b/CIME/XML/compsets.py @@ -9,8 +9,8 @@ logger = logging.getLogger(__name__) -class Compsets(GenericXML): +class Compsets(GenericXML): def __init__(self, infile=None, files=None): if files is None: files = Files() @@ -29,21 +29,23 @@ def get_compset_match(self, name): science_support = [] for node in nodes: - alias = self.get_element_text("alias",root=node) - lname = self.get_element_text("lname",root=node) + alias = self.get_element_text("alias", root=node) + lname = self.get_element_text("lname", root=node) if alias == name or lname == name: science_support_nodes = self.get_children("science_support", root=node) for snode in science_support_nodes: science_support.append(self.get(snode, "grid")) - logger.debug("Found node match with alias: {} and lname: {}".format(alias, lname)) + logger.debug( + "Found node match with alias: {} and lname: {}".format(alias, lname) + ) return (lname, alias, science_support) return (None, None, [False]) def get_compset_var_settings(self, compset, grid): - ''' + """ Variables can be set in config_compsets.xml in entry id settings with compset and grid attributes find and return id value pairs here - ''' + """
entries = self.get_optional_child("entries") result = [] if entries is not None: @@ -51,13 +53,15 @@ def get_compset_var_settings(self, compset, grid): # Get an empty entryid obj to use entryidobj = EntryID() for node in nodes: - value = entryidobj.get_default_value(node, {"grid":grid, "compset":compset}) + value = entryidobj.get_default_value( + node, {"grid": grid, "compset": compset} + ) if value is not None: result.append((self.get(node, "id"), value)) return result - #pylint: disable=arguments-differ + # pylint: disable=arguments-differ def get_value(self, name, attribute=None, resolved=False, subgroup=None): expect(subgroup is None, "This class does not support subgroups") if name == "help": @@ -69,7 +73,11 @@ def get_value(self, name, attribute=None, resolved=False, subgroup=None): nodes = self.get_children("compset") for node in nodes: for child in node: - logger.debug ("Here child is {} with value {}".format(self.name(child),self.text(child))) + logger.debug( + "Here child is {} with value {}".format( + self.name(child), self.text(child) + ) + ) if self.name(child) == "alias": alias = self.text(child) if self.name(child) == "lname": @@ -87,12 +95,16 @@ def print_values(self, arg_help=True): logger.info(" Compset Alias: Compset Long Name ") logger.info(" --------------------------------------") for compset in compsets: - logger.info(" {:20} : {}".format(self.text(self.get_child("alias",root=compset)), - self.text(self.get_child("lname", root=compset)))) + logger.info( + " {:20} : {}".format( + self.text(self.get_child("alias", root=compset)), + self.text(self.get_child("lname", root=compset)), + ) + ) def get_compset_longnames(self): compset_nodes = self.get_children("compset") longnames = [] for comp in compset_nodes: longnames.append(self.text(self.get_child("lname", root=comp))) - return(longnames) + return longnames diff --git a/CIME/XML/entry_id.py b/CIME/XML/entry_id.py index 0cdce134f26..9a9acfcfc0d 100644 --- a/CIME/XML/entry_id.py +++ b/CIME/XML/entry_id.py @@ -11,11 +11,11 @@ logger = logging.getLogger(__name__) -class EntryID(GenericXML): +class EntryID(GenericXML): def __init__(self, infile=None, schema=None, read_only=True): GenericXML.__init__(self, infile, schema, read_only=read_only) - self.groups={} + self.groups = {} def get_default_value(self, node, attributes=None): """ @@ -35,16 +35,24 @@ def get_default_value(self, node, attributes=None): return value def set_default_value(self, vid, val): - node = self.get_optional_child("entry", {"id":vid}) + node = self.get_optional_child("entry", {"id": vid}) if node is not None: val = self.set_element_text("default_value", val, root=node) if val is None: - logger.warning("Called set_default_value on a node without default_value field") + logger.warning( + "Called set_default_value on a node without default_value field" + ) return val - def get_value_match(self, vid, attributes=None, exact_match=False, entry_node=None, - replacement_for_none=None): + def get_value_match( + self, + vid, + attributes=None, + exact_match=False, + entry_node=None, + replacement_for_none=None, + ): """Handle this case: @@ -62,27 +70,36 @@ def get_value_match(self, vid, attributes=None, exact_match=False, entry_node=No """ if entry_node is not None: - value = self._get_value_match(entry_node, attributes, exact_match, - replacement_for_none=replacement_for_none) + value = self._get_value_match( + entry_node, + attributes, + exact_match, + replacement_for_none=replacement_for_none, + ) else: - node = self.get_optional_child("entry", {"id":vid}) + 
node = self.get_optional_child("entry", {"id": vid}) value = None if node is not None: - value = self._get_value_match(node, attributes, exact_match, - replacement_for_none=replacement_for_none) + value = self._get_value_match( + node, + attributes, + exact_match, + replacement_for_none=replacement_for_none, + ) logger.debug("(get_value_match) vid {} value {}".format(vid, value)) return value - def _get_value_match(self, node, attributes=None, exact_match=False, - replacement_for_none=None): - ''' + def _get_value_match( + self, node, attributes=None, exact_match=False, replacement_for_none=None + ): + """ Note that the component class has a specific version of this function If replacement_for_none is provided, then: if the found text value would give a None value, instead replace it with the value given by the replacement_for_none argument. (However, still return None if no match is found.) This may or may not be needed, but is in place to maintain some old logic. - ''' + """ # if there is a element - check to see if there is a match attribute # if there is NOT a match attribute, then set the default to "first" # this is different than the component class _get_value_match where the default is "last" @@ -106,13 +123,15 @@ def _get_value_match(self, node, attributes=None, exact_match=False, # If some attribute is specified that we don't know about, # or the values don't match, it's not a match we want. if exact_match: - if attribute not in attributes or \ - attributes[attribute] != self.get(vnode, attribute): + if attribute not in attributes or attributes[ + attribute + ] != self.get(vnode, attribute): score = -1 break else: - if attribute not in attributes or not \ - re.search(self.get(vnode, attribute),attributes[attribute]): + if attribute not in attributes or not re.search( + self.get(vnode, attribute), attributes[attribute] + ): score = -1 break @@ -126,7 +145,7 @@ def _get_value_match(self, node, attributes=None, exact_match=False, # Get maximum score using either a "last" or "first" match in case of a tie max_score = -1 mnode = None - for score,node in matches: + for score, node in matches: if match_type == "last": # take the *last* best match if score >= max_score: @@ -138,8 +157,11 @@ def _get_value_match(self, node, attributes=None, exact_match=False, max_score = score mnode = node else: - expect(False, - "match attribute can only have a value of 'last' or 'first', value is %s" %match_type) + expect( + False, + "match attribute can only have a value of 'last' or 'first', value is %s" + % match_type, + ) text = self.text(mnode) if text is None: @@ -149,7 +171,7 @@ def _get_value_match(self, node, attributes=None, exact_match=False, return text def get_node_element_info(self, vid, element_name): - node = self.get_optional_child("entry", {"id":vid}) + node = self.get_optional_child("entry", {"id": vid}) if node is None: return None else: @@ -168,7 +190,7 @@ def _get_type_info(self, node): def get_type_info(self, vid): vid, _, _ = self.check_if_comp_var(vid) - node = self.scan_optional_child("entry", {"id":vid}) + node = self.scan_optional_child("entry", {"id": vid}) return self._get_type_info(node) # pylint: disable=unused-argument @@ -180,7 +202,7 @@ def _get_default(self, node): return self._get_node_element_info(node, "default_value") # Get description , expect child with tag "description" for parent node - def get_description (self, node): + def get_description(self, node): return self._get_node_element_info(node, "desc") # Get group , expect node with tag "group" @@ -191,14 
+213,14 @@ def get_groups(self, node): nodes = [] vid = self.get(node, "id") for group in groups: - nodes = self.get_children("entry", attributes={"id":vid}, root=group) + nodes = self.get_children("entry", attributes={"id": vid}, root=group) if nodes: result.append(self.get(group, "id")) return result def get_valid_values(self, vid): - node = self.scan_optional_child("entry", {"id":vid}) + node = self.scan_optional_child("entry", {"id": vid}) if node is None: return None return self._get_valid_values(node) @@ -207,31 +229,43 @@ def _get_valid_values(self, node): valid_values = self.get_element_text("valid_values", root=node) valid_values_list = [] if valid_values: - valid_values_list = [item.lstrip() for item in valid_values.split(',')] + valid_values_list = [item.lstrip() for item in valid_values.split(",")] return valid_values_list def set_valid_values(self, vid, new_valid_values): - node = self.scan_optional_child("entry", {"id":vid}) + node = self.scan_optional_child("entry", {"id": vid}) if node is None: return None return self._set_valid_values(node, new_valid_values) def get_nodes_by_id(self, vid): - return self.scan_children("entry", {"id":vid}) + return self.scan_children("entry", {"id": vid}) def _set_valid_values(self, node, new_valid_values): old_vv = self._get_valid_values(node) if old_vv is None: self.make_child("valid_values", text=new_valid_values) - logger.debug("Adding valid_values {} for {}".format(new_valid_values, self.get(node, "id"))) + logger.debug( + "Adding valid_values {} for {}".format( + new_valid_values, self.get(node, "id") + ) + ) else: vv_text = self.set_element_text("valid_values", new_valid_values, root=node) - logger.debug("Replacing valid_values {} with {} for {}".format(old_vv, vv_text, self.get(node, "id"))) + logger.debug( + "Replacing valid_values {} with {} for {}".format( + old_vv, vv_text, self.get(node, "id") + ) + ) current_value = self.get(node, "value") valid_values_list = self._get_valid_values(node) if current_value is not None and current_value not in valid_values_list: - logger.warning("WARNING: Current setting for {} not in new valid values. Updating setting to \"{}\"".format(self.get(node, "id"), valid_values_list[0])) + logger.warning( + 'WARNING: Current setting for {} not in new valid values. 
Updating setting to "{}"'.format( + self.get(node, "id"), valid_values_list[0] + ) + ) self._set_value(node, valid_values_list[0]) return new_valid_values @@ -246,17 +280,25 @@ def _set_value(self, node, value, vid=None, subgroup=None, ignore_type=False): self.set(node, "value", str_value) return value - def get_valid_value_string(self, node, value,vid=None, ignore_type=False): + def get_valid_value_string(self, node, value, vid=None, ignore_type=False): valid_values = self._get_valid_values(node) if ignore_type: - expect(isinstance(value, CIME.six.string_types), "Value must be type string if ignore_type is true") + expect( + isinstance(value, CIME.six.string_types), + "Value must be type string if ignore_type is true", + ) str_value = value return str_value type_str = self._get_type_info(node) str_value = convert_to_string(value, type_str, vid) - if valid_values and not str_value.startswith('$'): - expect(str_value in valid_values, "Did not find {} in valid values for {}: {}".format(value, vid, valid_values)) + if valid_values and not str_value.startswith("$"): + expect( + str_value in valid_values, + "Did not find {} in valid values for {}: {}".format( + value, vid, valid_values + ), + ) return str_value def set_value(self, vid, value, subgroup=None, ignore_type=False): @@ -266,8 +308,12 @@ def set_value(self, vid, value, subgroup=None, ignore_type=False): subgroup is ignored in the general routine and applied in specific methods """ val = None - root = self.root if subgroup is None else self.get_optional_child("group", {"id":subgroup}) - node = self.get_optional_child("entry", {"id":vid}, root=root) + root = ( + self.root + if subgroup is None + else self.get_optional_child("group", {"id": subgroup}) + ) + node = self.get_optional_child("entry", {"id": vid}, root=root) if node is not None: val = self._set_value(node, value, vid, subgroup, ignore_type) return val @@ -279,11 +325,13 @@ def get_values(self, vid, attribute=None, resolved=True, subgroup=None): commas """ results = [] - node = self.scan_optional_child("entry", {"id":vid}) + node = self.scan_optional_child("entry", {"id": vid}) if node is None: return results - str_result = self._get_value(node, attribute=attribute, resolved=resolved, subgroup=subgroup) - str_results = str_result.split(',') + str_result = self._get_value( + node, attribute=attribute, resolved=resolved, subgroup=subgroup + ) + str_results = str_result.split(",") for result in str_results: # Return value as right type if we were able to fully resolve # otherwise, we have to leave as string. @@ -291,22 +339,28 @@ def get_values(self, vid, attribute=None, resolved=True, subgroup=None): results.append(result) else: type_str = self._get_type_info(node) - results.append( convert_to_type(result, type_str, vid)) + results.append(convert_to_type(result, type_str, vid)) return results - #pylint: disable=arguments-differ + # pylint: disable=arguments-differ def get_value(self, vid, attribute=None, resolved=True, subgroup=None): """ Get a value for entry with id attribute vid. 
or from the values field if the attribute argument is provided and matches """ - root = self.root if subgroup is None else self.get_optional_child("group", {"id":subgroup}) - node = self.scan_optional_child("entry", {"id":vid}, root=root) + root = ( + self.root + if subgroup is None + else self.get_optional_child("group", {"id": subgroup}) + ) + node = self.scan_optional_child("entry", {"id": vid}, root=root) if node is None: return - val = self._get_value(node, attribute=attribute, resolved=resolved, subgroup=subgroup) + val = self._get_value( + node, attribute=attribute, resolved=resolved, subgroup=subgroup + ) # Return value as right type if we were able to fully resolve # otherwise, we have to leave as string. if val is None: @@ -327,7 +381,11 @@ def _get_value(self, node, attribute=None, resolved=True, subgroup=None): logger.debug("No node") return val - logger.debug("Found node {} with attributes {}".format(self.name(node) , self.attrib(node))) + logger.debug( + "Found node {} with attributes {}".format( + self.name(node), self.attrib(node) + ) + ) if attribute: vals = self.get_optional_child("values", root=node) node = vals if vals is not None else node @@ -344,7 +402,7 @@ def _get_value(self, node, attribute=None, resolved=True, subgroup=None): def get_child_content(self, vid, childname): val = None - node = self.get_optional_child("entry", {"id" : vid}) + node = self.get_optional_child("entry", {"id": vid}) if node is not None: val = self.get_element_text(childname, root=node) return val @@ -354,7 +412,10 @@ def get_elements_from_child_content(self, childname, childcontent): elements = [] for node in nodes: content = self.get_element_text(childname, root=node) - expect(content is not None,"No childname {} for id {}".format(childname, self.get(node, "id"))) + expect( + content is not None, + "No childname {} for id {}".format(childname, self.get(node, "id")), + ) if content == childcontent: elements.append(node) @@ -370,14 +431,14 @@ def add_elements_by_group(self, srcobj, attributes=None, infile=None): infile = os.path.basename(self.filename) # First get the list of entries in srcobj with matching file children - nodelist = srcobj.get_elements_from_child_content('file', infile) + nodelist = srcobj.get_elements_from_child_content("file", infile) # For matches found: Remove {<group>, <file>, <values>} # children from each entry and set the default value for the # new entries in self - putting the entries as children of # group elements in file $file for src_node in nodelist: - node = self.copy(src_node) + node = self.copy(src_node) gname = srcobj.get_element_text("group", root=src_node) if gname is None: gname = "group_not_set" @@ -386,7 +447,7 @@ # then create the group node and add it to infile file if gname not in self.groups.keys(): # initialize an empty list - newgroup = self.make_child(name="group", attributes={"id":gname}) + newgroup = self.make_child(name="group", attributes={"id": gname}) self.groups[gname] = newgroup # Remove {<group>, <file>, <values>} from the entry element @@ -402,7 +463,7 @@ if value is not None and len(value): self._set_value(node, value) - logger.debug ("Adding to group " + gname) + logger.debug("Adding to group " + gname) return nodelist @@ -415,13 +476,15 @@ def cleanupnode(self, node): def compare_xml(self, other, root=None, otherroot=None): xmldiffs = {} if root is not None: - expect(otherroot
is not None, " inconsistant request") f1nodes = self.scan_children("entry", root=root) for node in f1nodes: vid = self.get(node, "id") logger.debug("Compare vid {}".format(vid)) - f2match = other.scan_optional_child("entry", attributes={"id":vid},root=otherroot) - expect(f2match is not None,"Could not find {} in Locked file".format(vid)) + f2match = other.scan_optional_child( + "entry", attributes={"id": vid}, root=otherroot + ) + expect(f2match is not None, "Could not find {} in Locked file".format(vid)) if node != f2match: f1val = self.get_value(vid, resolved=False) if f1val is not None: @@ -431,21 +494,44 @@ def compare_xml(self, other, root=None, otherroot=None): elif hasattr(self, "_components"): # pylint: disable=no-member for comp in self._components: - f1val = self.get_value("{}_{}".format(vid,comp), resolved=False) + f1val = self.get_value( + "{}_{}".format(vid, comp), resolved=False + ) if f1val is not None: - f2val = other.get_value("{}_{}".format(vid,comp), resolved=False) + f2val = other.get_value( + "{}_{}".format(vid, comp), resolved=False + ) if f1val != f2val: xmldiffs[vid] = [f1val, f2val] else: if node != f2match: f1value_nodes = self.get_children("value", root=node) for valnode in f1value_nodes: - f2valnodes = other.get_children("value", root=f2match, attributes=self.attrib(valnode)) + f2valnodes = other.get_children( + "value", + root=f2match, + attributes=self.attrib(valnode), + ) for f2valnode in f2valnodes: - if self.attrib(valnode) is None and self.attrib(f2valnode) is None or \ - self.attrib(f2valnode) == self.attrib(valnode): - if other.get_resolved_value(self.text(f2valnode)) != self.get_resolved_value(self.text(valnode)): - xmldiffs["{}:{}".format(vid, self.attrib(valnode))] = [self.text(valnode), self.text(f2valnode)] + if ( + self.attrib(valnode) is None + and self.attrib(f2valnode) is None + or self.attrib(f2valnode) + == self.attrib(valnode) + ): + if other.get_resolved_value( + self.text(f2valnode) + ) != self.get_resolved_value( + self.text(valnode) + ): + xmldiffs[ + "{}:{}".format( + vid, self.attrib(valnode) + ) + ] = [ + self.text(valnode), + self.text(f2valnode), + ] return xmldiffs def overwrite_existing_entries(self): @@ -454,7 +540,10 @@ def overwrite_existing_entries(self): vid = self.get(node, "id") samenodes = self.get_nodes_by_id(vid) if len(samenodes) > 1: - expect(len(samenodes) == 2, "Too many matchs for id {} in file {}".format(vid, self.filename)) + expect( + len(samenodes) == 2, + "Too many matchs for id {} in file {}".format(vid, self.filename), + ) logger.debug("Overwriting node {}".format(vid)) read_only = self.read_only if read_only: diff --git a/CIME/XML/env_archive.py b/CIME/XML/env_archive.py index bdeac33408e..3642cda40a8 100644 --- a/CIME/XML/env_archive.py +++ b/CIME/XML/env_archive.py @@ -8,8 +8,7 @@ logger = logging.getLogger(__name__) # pylint: disable=super-init-not-called -class EnvArchive(ArchiveBase,EnvBase): - +class EnvArchive(ArchiveBase, EnvBase): def __init__(self, case_root=None, infile="env_archive.xml", read_only=False): """ initialize an object interface to file env_archive.xml in the case directory @@ -17,22 +16,21 @@ def __init__(self, case_root=None, infile="env_archive.xml", read_only=False): schema = os.path.join(utils.get_schema_path(), "env_archive.xsd") EnvBase.__init__(self, case_root, infile, schema=schema, read_only=read_only) - def get_entries(self): - return self.get_children('comp_archive_spec') + return self.get_children("comp_archive_spec") def get_entry_info(self, archive_entry): - compname = 
self.get(archive_entry, 'compname') - compclass = self.get(archive_entry, 'compclass') - return compname,compclass + compname = self.get(archive_entry, "compname") + compclass = self.get(archive_entry, "compclass") + return compname, compclass def get_rpointer_contents(self, archive_entry): rpointer_items = [] - rpointer_nodes = self.get_children('rpointer', root=archive_entry) + rpointer_nodes = self.get_children("rpointer", root=archive_entry) for rpointer_node in rpointer_nodes: - file_node = self.get_child('rpointer_file', root=rpointer_node) - content_node = self.get_child('rpointer_content', root=rpointer_node) - rpointer_items.append([self.text(file_node),self.text(content_node)]) + file_node = self.get_child("rpointer_file", root=rpointer_node) + content_node = self.get_child("rpointer_content", root=rpointer_node) + rpointer_items.append([self.text(file_node), self.text(content_node)]) return rpointer_items def get_type_info(self, vid): diff --git a/CIME/XML/env_base.py b/CIME/XML/env_base.py index 801a315a87b..b30d33da988 100644 --- a/CIME/XML/env_base.py +++ b/CIME/XML/env_base.py @@ -5,10 +5,11 @@ from CIME.XML.entry_id import EntryID from CIME.XML.headers import Headers from CIME.utils import convert_to_type + logger = logging.getLogger(__name__) -class EnvBase(EntryID): +class EnvBase(EntryID): def __init__(self, case_root, infile, schema=None, read_only=False): if case_root is None: case_root = os.getcwd() @@ -31,19 +32,25 @@ def __init__(self, case_root, infile, schema=None, read_only=False): self._setup_cache() def _setup_cache(self): - self._id_map = {} # map id directly to nodes - self._group_map = {} # map group name to entry id dict + self._id_map = {} # map id directly to nodes + self._group_map = {} # map group name to entry id dict group_elems = self.get_children("group") for group_elem in group_elems: group_name = self.get(group_elem, "id") - expect(group_name not in self._group_map, "Repeat group '{}'".format(group_name)) + expect( + group_name not in self._group_map, + "Repeat group '{}'".format(group_name), + ) group_map = {} self._group_map[group_name] = group_map entry_elems = self.get_children("entry", root=group_elem) for entry_elem in entry_elems: entry_id = self.get(entry_elem, "id") - expect(entry_id not in group_map, "Repeat entry '{}' in group '{}'".format(entry_id, group_name)) + expect( + entry_id not in group_map, + "Repeat entry '{}' in group '{}'".format(entry_id, group_name), + ) group_map[entry_id] = entry_elem if entry_id in self._id_map: self._id_map[entry_id].append(entry_elem) @@ -58,7 +65,12 @@ def change_file(self, newfile, copy=False): self._setup_cache() def get_children(self, name=None, attributes=None, root=None): - if self.locked and name == "entry" and attributes is not None and attributes.keys() == ["id"]: + if ( + self.locked + and name == "entry" + and attributes is not None + and attributes.keys() == ["id"] + ): entry_id = attributes["id"] if root is None or self.name(root) == "file": if entry_id in self._id_map: @@ -66,37 +78,58 @@ def get_children(self, name=None, attributes=None, root=None): else: return [] else: - expect(self.name(root) == "group", "Unexpected elem '{}' for {}, attrs {}".format(self.name(root), self.filename, self.attrib(root))) + expect( + self.name(root) == "group", + "Unexpected elem '{}' for {}, attrs {}".format( + self.name(root), self.filename, self.attrib(root) + ), + ) group_id = self.get(root, "id") - if group_id in self._group_map and entry_id in self._group_map[group_id]: + if ( + group_id in 
self._group_map + and entry_id in self._group_map[group_id] + ): return [self._group_map[group_id][entry_id]] else: return [] else: # Non-compliant look up - return EntryID.get_children(self, name=name, attributes=attributes, root=root) + return EntryID.get_children( + self, name=name, attributes=attributes, root=root + ) def scan_children(self, nodename, attributes=None, root=None): - if self.locked and nodename == "entry" and attributes is not None and attributes.keys() == ["id"]: - return EnvBase.get_children(self, name=nodename, attributes=attributes, root=root) + if ( + self.locked + and nodename == "entry" + and attributes is not None + and attributes.keys() == ["id"] + ): + return EnvBase.get_children( + self, name=nodename, attributes=attributes, root=root + ) else: - return EntryID.scan_children(self, nodename, attributes=attributes, root=root) + return EntryID.scan_children( + self, nodename, attributes=attributes, root=root + ) def set_components(self, components): - if hasattr(self, '_components'): + if hasattr(self, "_components"): # pylint: disable=attribute-defined-outside-init self._components = components def check_if_comp_var(self, vid, attribute=None, node=None): comp = None if node is None: - nodes = self.scan_children("entry", {"id" : vid}) + nodes = self.scan_children("entry", {"id": vid}) if len(nodes): node = nodes[0] if node: - valnodes = self.scan_children("value", attributes={"compclass":None}, root=node) + valnodes = self.scan_children( + "value", attributes={"compclass": None}, root=node + ) if len(valnodes) == 0: logger.debug("vid {} is not a compvar".format(vid)) return vid, None, False @@ -109,12 +142,12 @@ def check_if_comp_var(self, vid, attribute=None, node=None): if hasattr(self, "_components") and self._components: new_vid = None for comp in self._components: - if vid.endswith('_'+comp): - new_vid = vid.replace('_'+comp, '', 1) - elif vid.startswith(comp+'_'): - new_vid = vid.replace(comp+'_', '', 1) - elif '_' + comp + '_' in vid: - new_vid = vid.replace(comp+'_','', 1) + if vid.endswith("_" + comp): + new_vid = vid.replace("_" + comp, "", 1) + elif vid.startswith(comp + "_"): + new_vid = vid.replace(comp + "_", "", 1) + elif "_" + comp + "_" in vid: + new_vid = vid.replace(comp + "_", "", 1) if new_vid is not None: break if new_vid is not None: @@ -140,10 +173,10 @@ def get_value(self, vid, attribute=None, resolved=True, subgroup=None): logger.debug("Not enough info to get value for {}".format(vid)) return value if attribute is None: - attribute = {"compclass" : comp} + attribute = {"compclass": comp} else: attribute["compclass"] = comp - node = self.scan_optional_child("entry", {"id":vid}) + node = self.scan_optional_child("entry", {"id": vid}) if node is not None: type_str = self._get_type_info(node) values = self.get_optional_child("values", root=node) @@ -153,10 +186,12 @@ def get_value(self, vid, attribute=None, resolved=True, subgroup=None): if val.startswith("$"): value = val else: - value = convert_to_type(val,type_str, vid) + value = convert_to_type(val, type_str, vid) return value - return EntryID.get_value(self, vid, attribute=attribute, resolved=resolved, subgroup=subgroup) + return EntryID.get_value( + self, vid, attribute=attribute, resolved=resolved, subgroup=subgroup + ) def set_value(self, vid, value, subgroup=None, ignore_type=False): """ @@ -166,26 +201,36 @@ def set_value(self, vid, value, subgroup=None, ignore_type=False): """ vid, comp, iscompvar = self.check_if_comp_var(vid, None) val = None - root = self.root if subgroup is 
None else self.get_optional_child("group", {"id":subgroup}) - node = self.scan_optional_child("entry", {"id":vid}, root=root) + root = ( + self.root + if subgroup is None + else self.get_optional_child("group", {"id": subgroup}) + ) + node = self.scan_optional_child("entry", {"id": vid}, root=root) if node is not None: if iscompvar and comp is None: # pylint: disable=no-member for comp in self._components: - val = self._set_value(node, value, vid, subgroup, ignore_type, compclass=comp) + val = self._set_value( + node, value, vid, subgroup, ignore_type, compclass=comp + ) else: - val = self._set_value(node, value, vid, subgroup, ignore_type, compclass=comp) + val = self._set_value( + node, value, vid, subgroup, ignore_type, compclass=comp + ) return val # pylint: disable=arguments-differ - def _set_value(self, node, value, vid=None, subgroup=None, ignore_type=False, compclass=None): + def _set_value( + self, node, value, vid=None, subgroup=None, ignore_type=False, compclass=None + ): if vid is None: vid = self.get(node, "id") vid, _, iscompvar = self.check_if_comp_var(vid, node=node) if iscompvar: expect(compclass is not None, "compclass must be specified if is comp var") - attribute = {"compclass":compclass} + attribute = {"compclass": compclass} str_value = self.get_valid_value_string(node, value, vid, ignore_type) values = self.get_optional_child("values", root=node) node = values if values is not None else node @@ -212,16 +257,22 @@ def cleanupnode(self, node): vnode = self.get_optional_child("values", root=node) if vnode is not None: - componentatt = self.get_children("value", attributes={"component":"ATM"}, root=vnode) + componentatt = self.get_children( + "value", attributes={"component": "ATM"}, root=vnode + ) # backward compatibility (compclasses and component were mixed # now we separated into component and compclass) if len(componentatt) > 0: - for ccnode in self.get_children("value", attributes={"component":None}, root=vnode): + for ccnode in self.get_children( + "value", attributes={"component": None}, root=vnode + ): val = self.get(ccnode, "component") self.pop(ccnode, "component") self.set(ccnode, "compclass", val) - compclassatt = self.get_children("value", attributes={"compclass":None}, root=vnode) + compclassatt = self.get_children( + "value", attributes={"compclass": None}, root=vnode + ) if len(compclassatt) == 0: self.remove_child(vnode, root=node) diff --git a/CIME/XML/env_batch.py b/CIME/XML/env_batch.py index 12175803476..41abb4da9ee 100644 --- a/CIME/XML/env_batch.py +++ b/CIME/XML/env_batch.py @@ -5,8 +5,16 @@ from CIME.XML.standard_module_setup import * from CIME.XML.env_base import EnvBase from CIME import utils -from CIME.utils import transform_vars, get_cime_root, convert_to_seconds, convert_to_babylonian_time, \ - get_cime_config, get_batch_script_for_job, get_logging_options, format_time +from CIME.utils import ( + transform_vars, + get_cime_root, + convert_to_seconds, + convert_to_babylonian_time, + get_cime_config, + get_batch_script_for_job, + get_logging_options, + format_time, +) from CIME.locked_files import lock_file, unlock_file from collections import OrderedDict import stat, re, math @@ -15,8 +23,8 @@ # pragma pylint: disable=attribute-defined-outside-init -class EnvBatch(EnvBase): +class EnvBatch(EnvBase): def __init__(self, case_root=None, infile="env_batch.xml",
read_only=False): """ initialize an object interface to file env_batch.xml in the case directory @@ -25,7 +33,9 @@ # This arbitrary setting should always be overwritten self._default_walltime = "00:20:00" schema = os.path.join(utils.get_schema_path(), "env_batch.xsd") - super(EnvBatch,self).__init__(case_root, infile, schema=schema, read_only=read_only) + super(EnvBatch, self).__init__( + case_root, infile, schema=schema, read_only=read_only + ) # pylint: disable=arguments-differ def set_value(self, item, value, subgroup=None, ignore_type=False): @@ -35,23 +45,27 @@ val = None if item == "JOB_QUEUE": - expect(value in self._get_all_queue_names() or ignore_type, - "Unknown Job Queue specified use --force to set") + expect( + value in self._get_all_queue_names() or ignore_type, + "Unknown Job Queue specified use --force to set", + ) # allow the user to set item for all jobs if subgroup is not provided if subgroup is None: gnodes = self.get_children("group") for gnode in gnodes: - node = self.get_optional_child("entry", {"id":item}, root=gnode) + node = self.get_optional_child("entry", {"id": item}, root=gnode) if node is not None: self._set_value(node, value, vid=item, ignore_type=ignore_type) val = value else: - group = self.get_optional_child("group", {"id":subgroup}) + group = self.get_optional_child("group", {"id": subgroup}) if group is not None: - node = self.get_optional_child("entry", {"id":item}, root=group) + node = self.get_optional_child("entry", {"id": item}, root=group) if node is not None: - val = self._set_value(node, value, vid=item, ignore_type=ignore_type) + val = self._set_value( + node, value, vid=item, ignore_type=ignore_type + ) return val @@ -64,7 +78,7 @@ def get_value(self, item, attribute=None, resolved=True, subgroup=None): value = None node = self.get_optional_child(item, attribute) if item in ("BATCH_SYSTEM", "PROJECT_REQUIRED"): - return super(EnvBatch, self).get_value(item,attribute,resolved) + return super(EnvBatch, self).get_value(item, attribute, resolved) if not node: # this will take the last instance of item listed in all batch_system elements @@ -83,15 +97,19 @@ def get_type_info(self, vid): gnodes = self.get_children("group") for gnode in gnodes: - nodes = self.get_children("entry",{"id":vid}, root=gnode) + nodes = self.get_children("entry", {"id": vid}, root=gnode) type_info = None for node in nodes: new_type_info = self._get_type_info(node) if type_info is None: type_info = new_type_info else: - expect( type_info == new_type_info, - "Inconsistent type_info for entry id={} {} {}".format(vid, new_type_info, type_info)) + expect( + type_info == new_type_info, + "Inconsistent type_info for entry id={} {} {}".format( + vid, new_type_info, type_info + ), + ) return type_info def get_jobs(self): @@ -107,8 +125,11 @@ def create_job_groups(self, batch_jobs, is_test): # Subtle: in order to support dynamic batch jobs, we need to remove the # job_submission group and replace with job-based groups - orig_group = self.get_child("group", {"id":"job_submission"}, - err_msg="Looks like job groups have already been created") + orig_group = self.get_child( + "group", + {"id": "job_submission"}, + err_msg="Looks like job groups have already been created", + ) orig_group_children = super(EnvBatch, self).get_children(root=orig_group) childnodes = [] @@ -119,16 +140,18 @@ for name, jdict in batch_jobs: if name == "case.run" and is_test: - pass # skip + pass # skip elif name ==
"case.test" and not is_test: - pass # skip + pass # skip elif name == "case.run.sh": - pass # skip + pass # skip else: - new_job_group = self.make_child("group", {"id":name}) + new_job_group = self.make_child("group", {"id": name}) for field in jdict.keys(): val = jdict[field] - node = self.make_child("entry", {"id":field,"value":val}, root=new_job_group) + node = self.make_child( + "entry", {"id": field, "value": val}, root=new_job_group + ) self.make_child("type", root=node, text="char") for child in childnodes: @@ -152,12 +175,14 @@ def set_batch_system(self, batchobj, batch_system_type=None): self.set_batch_system_type(batch_system_type) if batchobj.batch_system_node is not None and batchobj.machine_node is not None: - for node in batchobj.get_children("",root=batchobj.machine_node): + for node in batchobj.get_children("", root=batchobj.machine_node): name = self.name(node) - if name != 'directives': - oldnode = batchobj.get_optional_child(name, root=batchobj.batch_system_node) + if name != "directives": + oldnode = batchobj.get_optional_child( + name, root=batchobj.batch_system_node + ) if oldnode is not None: - logger.debug( "Replacing {}".format(self.name(oldnode))) + logger.debug("Replacing {}".format(self.name(oldnode))) batchobj.remove_child(oldnode, root=batchobj.batch_system_node) if batchobj.batch_system_node is not None: @@ -171,20 +196,26 @@ def set_batch_system(self, batchobj, batch_system_type=None): lock_file(os.path.basename(batchobj.filename), caseroot=self._caseroot) def get_job_overrides(self, job, case): - env_workflow = case.get_env('workflow') - total_tasks, num_nodes, tasks_per_node, thread_count, ngpus_per_node = env_workflow.get_job_specs(case, job) + env_workflow = case.get_env("workflow") + ( + total_tasks, + num_nodes, + tasks_per_node, + thread_count, + ngpus_per_node, + ) = env_workflow.get_job_specs(case, job) overrides = {} if total_tasks: overrides["total_tasks"] = total_tasks - overrides["num_nodes"] = num_nodes - overrides["tasks_per_node"] = tasks_per_node + overrides["num_nodes"] = num_nodes + overrides["tasks_per_node"] = tasks_per_node if thread_count: overrides["thread_count"] = thread_count else: - total_tasks = case.get_value("TOTALPES")*int(case.thread_count) + total_tasks = case.get_value("TOTALPES") * int(case.thread_count) thread_count = case.thread_count - if int(total_tasks)*int(thread_count) < case.get_value("MAX_TASKS_PER_NODE"): + if int(total_tasks) * int(thread_count) < case.get_value("MAX_TASKS_PER_NODE"): overrides["max_tasks_per_node"] = int(total_tasks) overrides["ngpus_per_node"] = ngpus_per_node @@ -192,29 +223,42 @@ def get_job_overrides(self, job, case): return overrides def make_batch_script(self, input_template, job, case, outfile=None): - expect(os.path.exists(input_template), "input file '{}' does not exist".format(input_template)) + expect( + os.path.exists(input_template), + "input file '{}' does not exist".format(input_template), + ) overrides = self.get_job_overrides(job, case) ext = os.path.splitext(job)[-1] if len(ext) == 0: ext = job - if ext.startswith('.'): + if ext.startswith("."): ext = ext[1:] - # A job name or job array name can be at most 230 characters. It must consist only of alphabetic, numeric, plus + # A job name or job array name can be at most 230 characters. It must consist only of alphabetic, numeric, plus # sign ("+"), dash or minus or hyphen ("-"), underscore ("_"), and dot or period (".") characters # most of these are checked in utils:check_name, but % is not one of them. 
- overrides["job_id"] = ext + '.' + case.get_value("CASE").replace('%','') - - overrides["batchdirectives"] = self.get_batch_directives(case, job, overrides=overrides) - output_text = transform_vars(open(input_template,"r").read(), case=case, subgroup=job, overrides=overrides) + overrides["job_id"] = ext + "." + case.get_value("CASE").replace("%", "") + + overrides["batchdirectives"] = self.get_batch_directives( + case, job, overrides=overrides + ) + output_text = transform_vars( + open(input_template, "r").read(), + case=case, + subgroup=job, + overrides=overrides, + ) output_name = get_batch_script_for_job(job) if outfile is None else outfile logger.info("Creating file {}".format(output_name)) with open(output_name, "w") as fd: fd.write(output_text) # make sure batch script is exectuble - os.chmod(output_name, os.stat(output_name).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) + os.chmod( + output_name, + os.stat(output_name).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH, + ) def set_job_defaults(self, batch_jobs, case): if self._batchtype is None: @@ -222,53 +266,97 @@ def set_job_defaults(self, batch_jobs, case): if self._batchtype == "none": return - env_workflow = case.get_env('workflow') + env_workflow = case.get_env("workflow") known_jobs = env_workflow.get_jobs() for job, jsect in batch_jobs: if job not in known_jobs: continue - walltime = case.get_value("USER_REQUESTED_WALLTIME", subgroup=job) if case.get_value("USER_REQUESTED_WALLTIME", subgroup=job) else None - force_queue = case.get_value("USER_REQUESTED_QUEUE", subgroup=job) if case.get_value("USER_REQUESTED_QUEUE", subgroup=job) else None - walltime_format = case.get_value("walltime_format", subgroup=job) if case.get_value("walltime_format", subgroup=job) else None - logger.info("job is {} USER_REQUESTED_WALLTIME {} USER_REQUESTED_QUEUE {} WALLTIME_FORMAT {}".format(job, walltime, force_queue, walltime_format)) - task_count = int(jsect["task_count"]) if "task_count" in jsect else case.total_tasks + walltime = ( + case.get_value("USER_REQUESTED_WALLTIME", subgroup=job) + if case.get_value("USER_REQUESTED_WALLTIME", subgroup=job) + else None + ) + force_queue = ( + case.get_value("USER_REQUESTED_QUEUE", subgroup=job) + if case.get_value("USER_REQUESTED_QUEUE", subgroup=job) + else None + ) + walltime_format = ( + case.get_value("walltime_format", subgroup=job) + if case.get_value("walltime_format", subgroup=job) + else None + ) + logger.info( + "job is {} USER_REQUESTED_WALLTIME {} USER_REQUESTED_QUEUE {} WALLTIME_FORMAT {}".format( + job, walltime, force_queue, walltime_format + ) + ) + task_count = ( + int(jsect["task_count"]) if "task_count" in jsect else case.total_tasks + ) if "walltime" in jsect and walltime is None: walltime = jsect["walltime"] - logger.debug("Using walltime {!r} from batch job " - "spec".format(walltime)) + logger.debug( + "Using walltime {!r} from batch job " "spec".format(walltime) + ) if "task_count" in jsect: # job is using custom task_count, need to compute a node_count based on this - node_count = int(math.ceil(float(task_count)/float(case.tasks_per_node))) + node_count = int( + math.ceil(float(task_count) / float(case.tasks_per_node)) + ) else: node_count = case.num_nodes - queue = self.select_best_queue(node_count, task_count, name=force_queue, walltime=walltime, job=job) + queue = self.select_best_queue( + node_count, task_count, name=force_queue, walltime=walltime, job=job + ) if queue is None and walltime is not None: # Try to see if walltime was the holdup - queue = 
self.select_best_queue(node_count, task_count, name=force_queue, walltime=None, job=job) + queue = self.select_best_queue( + node_count, task_count, name=force_queue, walltime=None, job=job + ) if queue is not None: # It was, override the walltime if a test, otherwise just warn the user new_walltime = self.get_queue_specs(queue)[5] expect(new_walltime is not None, "Should never make it here") - logger.warning("WARNING: Requested walltime '{}' could not be matched by any {} queue".format(walltime, force_queue)) + logger.warning( + "WARNING: Requested walltime '{}' could not be matched by any {} queue".format( + walltime, force_queue + ) + ) if case.get_value("TEST"): - logger.warning(" Using walltime '{}' instead".format(new_walltime)) + logger.warning( + " Using walltime '{}' instead".format(new_walltime) + ) walltime = new_walltime else: - logger.warning(" Continuing with suspect walltime, batch submission may fail") + logger.warning( + " Continuing with suspect walltime, batch submission may fail" + ) if queue is None: - logger.warning("WARNING: No queue on this system met the requirements for this job. Falling back to defaults") + logger.warning( + "WARNING: No queue on this system met the requirements for this job. Falling back to defaults" + ) queue = self.get_default_queue() walltime = self.get_queue_specs(queue)[5] - _, _, _, walltimedef, walltimemin, walltimemax, _, _, _ = \ - self.get_queue_specs(queue) + ( + _, + _, + _, + walltimedef, + walltimemin, + walltimemax, + _, + _, + _, + ) = self.get_queue_specs(queue) if walltime is None: # Use default walltime if available for queue @@ -285,16 +373,19 @@ def set_job_defaults(self, batch_jobs, case): # Queue is unknown, use specs from default queue walltime = self.get(self.get_default_queue(), "walltimemax") - logger.debug("Using walltimemax {!r} from default " - "queue {!r}".format( - walltime, self.text(queue))) + logger.debug( + "Using walltimemax {!r} from default " + "queue {!r}".format(walltime, self.text(queue)) + ) # Still no walltime, use the hardcoded default if walltime is None: walltime = self._default_walltime - logger.debug("Last resort using default walltime " - "{!r}".format(walltime)) + logger.debug( + "Last resort using default walltime " + "{!r}".format(walltime) + ) # only enforce when not running a test if not case.get_value("TEST"): @@ -305,24 +396,28 @@ def set_job_defaults(self, batch_jobs, case): walltimemin_seconds = convert_to_seconds(walltimemin) if walltime_seconds < walltimemin_seconds: - logger.warning("WARNING: Job {!r} walltime " - "{!r} is less than queue " - "{!r} minimum walltime " - "{!r}, job might fail".format( - job, walltime, self.text(queue), - walltimemin)) + logger.warning( + "WARNING: Job {!r} walltime " + "{!r} is less than queue " + "{!r} minimum walltime " + "{!r}, job might fail".format( + job, walltime, self.text(queue), walltimemin + ) + ) # walltime must not be more than walltimemax if walltimemax is not None: walltimemax_seconds = convert_to_seconds(walltimemax) if walltime_seconds > walltimemax_seconds: - logger.warning("WARNING: Job {!r} walltime " - "{!r} is more than queue " - "{!r} maximum walltime " - "{!r}, job might fail". 
format( - job, walltime, self.text(queue), - walltimemax)) + logger.warning( + "WARNING: Job {!r} walltime " + "{!r} is more than queue " + "{!r} maximum walltime " + "{!r}, job might fail".format( + job, walltime, self.text(queue), walltimemax + ) + ) walltime_format = self.get_value("walltime_format") if walltime_format: @@ -330,10 +425,13 @@ def set_job_defaults(self, batch_jobs, case): full_bab_time = convert_to_babylonian_time(seconds) walltime = format_time(walltime_format, "%H:%M:%S", full_bab_time) - env_workflow.set_value("JOB_QUEUE", self.text(queue), subgroup=job, - ignore_type=False) + env_workflow.set_value( + "JOB_QUEUE", self.text(queue), subgroup=job, ignore_type=False + ) env_workflow.set_value("JOB_WALLCLOCK_TIME", walltime, subgroup=job) - logger.debug("Job {} queue {} walltime {}".format(job, self.text(queue), walltime)) + logger.debug( + "Job {} queue {} walltime {}".format(job, self.text(queue), walltime) + ) def _match_attribs(self, attribs, case, queue): # check for matches with case-vars @@ -348,7 +446,11 @@ def _match_attribs(self, attribs, case, queue): else: val = case.get_value(attrib.upper()) - expect(val is not None, "Cannot match attrib '%s', case has no value for it" % attrib.upper()) + expect( + val is not None, + "Cannot match attrib '%s', case has no value for it" + % attrib.upper(), + ) if not self._match(val, attribs[attrib]): return False @@ -356,19 +458,22 @@ def _match_attribs(self, attribs, case, queue): def _match(self, my_value, xml_value): if xml_value.startswith("!"): - result = re.match(xml_value[1:],str(my_value)) is None + result = re.match(xml_value[1:], str(my_value)) is None elif isinstance(my_value, bool): - if my_value: result = xml_value == "TRUE" - else: result = xml_value == "FALSE" + if my_value: + result = xml_value == "TRUE" + else: + result = xml_value == "FALSE" else: - result = re.match(xml_value+'$',str(my_value)) is not None + result = re.match(xml_value + "$", str(my_value)) is not None - logger.debug("(env_mach_specific) _match {} {} {}".format(my_value, xml_value, result)) + logger.debug( + "(env_mach_specific) _match {} {} {}".format(my_value, xml_value, result) + ) return result - def get_batch_directives(self, case, job, overrides=None, output_format='default'): - """ - """ + def get_batch_directives(self, case, job, overrides=None, output_format="default"): + """ """ result = [] directive_prefix = None @@ -384,13 +489,16 @@ def get_batch_directives(self, case, job, overrides=None, output_format='default for root in roots: if root is not None: if directive_prefix is None: - if output_format == 'default': - directive_prefix = self.get_element_text("batch_directive", root=root) - elif output_format == 'cylc': + if output_format == "default": + directive_prefix = self.get_element_text( + "batch_directive", root=root + ) + elif output_format == "cylc": directive_prefix = " " if unknown_queue: - unknown_queue_directives = self.get_element_text("unknown_queue_directives", - root=root) + unknown_queue_directives = self.get_element_text( + "unknown_queue_directives", root=root + ) if unknown_queue_directives is None: queue = default_queue else: @@ -401,34 +509,54 @@ def get_batch_directives(self, case, job, overrides=None, output_format='default nodes = self.get_children("directive", root=dnode) if self._match_attribs(self.attrib(dnode), case, queue): for node in nodes: - directive = self.get_resolved_value("" if self.text(node) is None else self.text(node)) - if output_format == 'cylc': - if self._batchtype == 'pbs': + 
directive = self.get_resolved_value( + "" if self.text(node) is None else self.text(node) + ) + if output_format == "cylc": + if self._batchtype == "pbs": # cylc includes the -N itself, no need to add if directive.startswith("-N"): - directive='' + directive = "" continue - m = re.match(r'\s*(-[\w])', directive) + m = re.match(r"\s*(-[\w])", directive) if m: - directive = re.sub(r'(-[\w]) ','{} = '.format(m.group(1)), directive) + directive = re.sub( + r"(-[\w]) ", + "{} = ".format(m.group(1)), + directive, + ) default = self.get(node, "default") if default is None: - directive = transform_vars(directive, case=case, subgroup=job, default=default, overrides=overrides) + directive = transform_vars( + directive, + case=case, + subgroup=job, + default=default, + overrides=overrides, + ) else: directive = transform_vars(directive, default=default) custom_prefix = self.get(node, "prefix") - prefix = directive_prefix if custom_prefix is None else custom_prefix - - result.append("{}{}".format("" if not prefix else (prefix + " "), directive)) + prefix = ( + directive_prefix + if custom_prefix is None + else custom_prefix + ) + + result.append( + "{}{}".format( + "" if not prefix else (prefix + " "), directive + ) + ) return "\n".join(result) def get_submit_args(self, case, job): - ''' + """ return a list of tuples (flag, name) - ''' + """ submitargs = " " bs_nodes = self.get_children("batch_system") submit_arg_nodes = [] @@ -436,24 +564,24 @@ for node in bs_nodes: sanode = self.get_optional_child("submit_args", root=node) if sanode is not None: - submit_arg_nodes += self.get_children("arg",root=sanode) + submit_arg_nodes += self.get_children("arg", root=sanode) for arg in submit_arg_nodes: flag = self.get(arg, "flag") name = self.get(arg, "name") if self._batchtype == "cobalt" and job == "case.st_archive": if flag == "-n": - name = 'task_count' + name = "task_count" if flag == "--mode": continue if name is None: - submitargs+=" {}".format(flag) + submitargs += " {}".format(flag) else: if name.startswith("$"): name = name[1:] - if '$' in name: + if "$" in name: # We have a complex expression and must rely on get_resolved_value. # Hopefully, none of the values require subgroup val = case.get_resolved_value(name) @@ -480,36 +608,57 @@ if flag == "-n" and rval <= 0: rval = 1 - if flag == "-q" and rval == "batch" and case.get_value("MACH") == "blues": + if ( + flag == "-q" + and rval == "batch" + and case.get_value("MACH") == "blues" + ): # Special case.
Do not provide '-q batch' for blues continue - if flag.rfind("=", len(flag)-1, len(flag)) >= 0 or\ - flag.rfind(":", len(flag)-1, len(flag)) >= 0: - submitargs+=" {}{}".format(flag,str(rval).strip()) + if ( + flag.rfind("=", len(flag) - 1, len(flag)) >= 0 + or flag.rfind(":", len(flag) - 1, len(flag)) >= 0 + ): + submitargs += " {}{}".format(flag, str(rval).strip()) else: - submitargs+=" {} {}".format(flag,str(rval).strip()) + submitargs += " {} {}".format(flag, str(rval).strip()) return submitargs - def submit_jobs(self, case, no_batch=False, job=None, user_prereq=None, skip_pnl=False, - allow_fail=False, resubmit_immediate=False, mail_user=None, mail_type=None, - batch_args=None, dry_run=False, workflow=True): + def submit_jobs( + self, + case, + no_batch=False, + job=None, + user_prereq=None, + skip_pnl=False, + allow_fail=False, + resubmit_immediate=False, + mail_user=None, + mail_type=None, + batch_args=None, + dry_run=False, + workflow=True, + ): """ - no_batch indicates that the jobs should be run directly rather that submitted to a queueing system - job is the first job in the workflow sequence to start - user_prereq is a batch system prerequisite as requested by the user - skip_pnl indicates that the preview_namelist should not be run by this job - allow_fail indicates that the prereq job need only complete not nessasarily successfully to start the next job - resubmit_immediate indicates that all jobs indicated by the RESUBMIT option should be submitted at the same time instead of - waiting to resubmit at the end of the first sequence - workflow is a logical indicating whether only "job" is submitted or the workflow sequence starting with "job" is submitted + no_batch indicates that the jobs should be run directly rather than submitted to a queueing system + job is the first job in the workflow sequence to start + user_prereq is a batch system prerequisite as requested by the user + skip_pnl indicates that the preview_namelist should not be run by this job + allow_fail indicates that the prereq job need only complete not necessarily successfully to start the next job + resubmit_immediate indicates that all jobs indicated by the RESUBMIT option should be submitted at the same time instead of + waiting to resubmit at the end of the first sequence + workflow is a logical indicating whether only "job" is submitted or the workflow sequence starting with "job" is submitted """ - env_workflow = case.get_env('workflow') + env_workflow = case.get_env("workflow") external_workflow = case.get_value("EXTERNAL_WORKFLOW") alljobs = env_workflow.get_jobs() - alljobs = [j for j in alljobs - if os.path.isfile(os.path.join(self._caseroot,get_batch_script_for_job(j)))] + alljobs = [ + j + for j in alljobs + if os.path.isfile(os.path.join(self._caseroot, get_batch_script_for_job(j))) + ] startindex = 0 jobs = [] @@ -518,20 +667,32 @@ expect(job in alljobs, "Do not know about batch job {}".format(job)) startindex = alljobs.index(job) for index, job in enumerate(alljobs): - logger.debug( "Index {:d} job {} startindex {:d}".format(index, job, startindex)) + logger.debug( + "Index {:d} job {} startindex {:d}".format(index, job, startindex) + ) if index < startindex: continue try: - prereq = env_workflow.get_value('prereq', subgroup=job, resolved=False) - if external_workflow or prereq is None or job == firstjob or (dry_run and prereq == "$BUILD_COMPLETE"): + prereq = env_workflow.get_value("prereq", subgroup=job,
resolved=False) + if ( + external_workflow + or prereq is None + or job == firstjob + or (dry_run and prereq == "$BUILD_COMPLETE") + ): prereq = True else: prereq = case.get_resolved_value(prereq) prereq = eval(prereq) except Exception: - expect(False,"Unable to evaluate prereq expression '{}' for job '{}'".format(self.get_value('prereq',subgroup=job), job)) + expect( + False, + "Unable to evaluate prereq expression '{}' for job '{}'".format( + self.get_value("prereq", subgroup=job), job + ), + ) if prereq: - jobs.append((job, env_workflow.get_value('dependency', subgroup=job))) + jobs.append((job, env_workflow.get_value("dependency", subgroup=job))) if self._batchtype == "cobalt": break @@ -565,20 +726,23 @@ def submit_jobs(self, case, no_batch=False, job=None, user_prereq=None, skip_pnl dep_jobs.append(prev_job) logger.debug("job {} depends on {}".format(job, dep_jobs)) - result = self._submit_single_job(case, job, - skip_pnl=skip_pnl, - resubmit_immediate=resubmit_immediate, - dep_jobs=dep_jobs, - allow_fail=allow_fail, - no_batch=no_batch, - mail_user=mail_user, - mail_type=mail_type, - batch_args=batch_args, - dry_run=dry_run, - workflow=workflow) + result = self._submit_single_job( + case, + job, + skip_pnl=skip_pnl, + resubmit_immediate=resubmit_immediate, + dep_jobs=dep_jobs, + allow_fail=allow_fail, + no_batch=no_batch, + mail_user=mail_user, + mail_type=mail_type, + batch_args=batch_args, + dry_run=dry_run, + workflow=workflow, + ) batch_job_id = str(alljobs.index(job)) if dry_run else result depid[job] = batch_job_id - jobcmds.append( (job, result) ) + jobcmds.append((job, result)) if self._batchtype == "cobalt" or external_workflow or not workflow: break @@ -653,15 +817,29 @@ def _build_run_args_str(self, job, no_batch, **run_args): batch_system = self.get_value("BATCH_SYSTEM", subgroup=None) logger.debug("batch_system: {}: ".format(batch_system)) if batch_system == "lsf": - return "{} \"all, ARGS_FOR_SCRIPT={}\"".format(batch_env_flag, run_args_str) + return '{} "all, ARGS_FOR_SCRIPT={}"'.format( + batch_env_flag, run_args_str + ) else: return "{} ARGS_FOR_SCRIPT='{}'".format(batch_env_flag, run_args_str) else: return "" - def _submit_single_job(self, case, job, dep_jobs=None, allow_fail=False, - no_batch=False, skip_pnl=False, mail_user=None, mail_type=None, - batch_args=None, dry_run=False, resubmit_immediate=False, workflow=True): + def _submit_single_job( + self, + case, + job, + dep_jobs=None, + allow_fail=False, + no_batch=False, + skip_pnl=False, + mail_user=None, + mail_type=None, + batch_args=None, + dry_run=False, + resubmit_immediate=False, + workflow=True, + ): if not dry_run: logger.warning("Submit job {}".format(job)) @@ -669,19 +847,36 @@ def _submit_single_job(self, case, job, dep_jobs=None, allow_fail=False, if batch_system is None or batch_system == "none" or no_batch: logger.info("Starting job script {}".format(job)) function_name = job.replace(".", "_") - job_name = "."+job + job_name = "." 
+ job if not dry_run: - args = self._build_run_args(job, True, skip_pnl=skip_pnl, set_continue_run=resubmit_immediate, - submit_resubmits=workflow and not resubmit_immediate) + args = self._build_run_args( + job, + True, + skip_pnl=skip_pnl, + set_continue_run=resubmit_immediate, + submit_resubmits=workflow and not resubmit_immediate, + ) try: if hasattr(case, function_name): - getattr(case, function_name)(**{k: v for k, (v, _) in args.items()}) + getattr(case, function_name)( + **{k: v for k, (v, _) in args.items()} + ) else: - expect(os.path.isfile(job_name),"Could not find file {}".format(job_name)) - run_cmd_no_fail(os.path.join(self._caseroot,job_name), combine_output=True, verbose=True, from_dir=self._caseroot) + expect( + os.path.isfile(job_name), + "Could not find file {}".format(job_name), + ) + run_cmd_no_fail( + os.path.join(self._caseroot, job_name), + combine_output=True, + verbose=True, + from_dir=self._caseroot, + ) except Exception as e: # We don't want exception from the run phases getting into submit phase - logger.warning("Exception from {}: {}".format(function_name, str(e))) + logger.warning( + "Exception from {}: {}".format(function_name, str(e)) + ) return @@ -695,21 +890,31 @@ def _submit_single_job(self, case, job, dep_jobs=None, allow_fail=False, if allow_fail: dep_string = self.get_value("depend_allow_string", subgroup=None) if dep_string is None: - logger.warning("'depend_allow_string' is not defined for this batch system, " + - "falling back to the 'depend_string'") + logger.warning( + "'depend_allow_string' is not defined for this batch system, " + + "falling back to the 'depend_string'" + ) dep_string = self.get_value("depend_string", subgroup=None) else: dep_string = self.get_value("depend_string", subgroup=None) - expect(dep_string is not None, "'depend_string' is not defined for this batch system") + expect( + dep_string is not None, + "'depend_string' is not defined for this batch system", + ) separator_string = self.get_value("depend_separator", subgroup=None) - expect(separator_string is not None,"depend_separator string not defined") + expect(separator_string is not None, "depend_separator string not defined") - expect("jobid" in dep_string, "depend_string is missing jobid for prerequisite jobs") + expect( + "jobid" in dep_string, + "depend_string is missing jobid for prerequisite jobs", + ) dep_ids_str = str(dep_jobs[0]) for dep_id in dep_jobs[1:]: dep_ids_str += separator_string + str(dep_id) - dep_string = dep_string.replace("jobid",dep_ids_str.strip()) # pylint: disable=maybe-no-member + dep_string = dep_string.replace( + "jobid", dep_ids_str.strip() + ) # pylint: disable=maybe-no-member submitargs += " " + dep_string if batch_args is not None: @@ -721,12 +926,14 @@ def _submit_single_job(self, case, job, dep_jobs=None, allow_fail=False, mail_user = cime_config.get("main", "MAIL_USER") if mail_user is not None: - mail_user_flag = self.get_value('batch_mail_flag', subgroup=None) + mail_user_flag = self.get_value("batch_mail_flag", subgroup=None) if mail_user_flag is not None: submitargs += " " + mail_user_flag + " " + mail_user if mail_type is None: - if job == "case.test" and cime_config.has_option("create_test", "MAIL_TYPE"): + if job == "case.test" and cime_config.has_option( + "create_test", "MAIL_TYPE" + ): mail_type = cime_config.get("create_test", "MAIL_TYPE") elif cime_config.has_option("main", "MAIL_TYPE"): mail_type = cime_config.get("main", "MAIL_TYPE") @@ -734,7 +941,7 @@ def _submit_single_job(self, case, job, dep_jobs=None, 
allow_fail=False, mail_type = self.get_value("batch_mail_default") if mail_type: - mail_type = mail_type.split(",") # pylint: disable=no-member + mail_type = mail_type.split(",") # pylint: disable=no-member if mail_type: mail_type_flag = self.get_value("batch_mail_type_flag", subgroup=None) @@ -746,27 +953,57 @@ def _submit_single_job(self, case, job, dep_jobs=None, allow_fail=False, if mail_type_flag == "-m": # hacky, PBS-type systems pass multiple mail-types differently - submitargs += " {} {}".format(mail_type_flag, "".join(mail_type_args)) + submitargs += " {} {}".format( + mail_type_flag, "".join(mail_type_args) + ) else: - submitargs += " {} {}".format(mail_type_flag, " {} ".format(mail_type_flag).join(mail_type_args)) + submitargs += " {} {}".format( + mail_type_flag, + " {} ".format(mail_type_flag).join(mail_type_args), + ) batchsubmit = self.get_value("batch_submit", subgroup=None) - expect(batchsubmit is not None, - "Unable to determine the correct command for batch submission.") + expect( + batchsubmit is not None, + "Unable to determine the correct command for batch submission.", + ) batchredirect = self.get_value("batch_redirect", subgroup=None) batch_env_flag = self.get_value("batch_env", subgroup=None) - run_args = self._build_run_args_str(job, False, skip_pnl=skip_pnl, set_continue_run=resubmit_immediate, - submit_resubmits=workflow and not resubmit_immediate) - if batch_system == 'lsf' and not batch_env_flag: - sequence = (run_args, batchsubmit, submitargs, batchredirect, get_batch_script_for_job(job)) + run_args = self._build_run_args_str( + job, + False, + skip_pnl=skip_pnl, + set_continue_run=resubmit_immediate, + submit_resubmits=workflow and not resubmit_immediate, + ) + if batch_system == "lsf" and not batch_env_flag: + sequence = ( + run_args, + batchsubmit, + submitargs, + batchredirect, + get_batch_script_for_job(job), + ) elif batch_env_flag: - sequence = (batchsubmit, submitargs, run_args, batchredirect, get_batch_script_for_job(job)) + sequence = ( + batchsubmit, + submitargs, + run_args, + batchredirect, + get_batch_script_for_job(job), + ) else: - sequence = (batchsubmit, submitargs, batchredirect, get_batch_script_for_job(job), run_args) + sequence = ( + batchsubmit, + submitargs, + batchredirect, + get_batch_script_for_job(job), + run_args, + ) submitcmd = " ".join(s.strip() for s in sequence if s is not None) if submitcmd.startswith("ssh"): # add ` before cd $CASEROOT and at end of command - submitcmd = submitcmd.replace("cd $CASEROOT","\'cd $CASEROOT") + "\'" + submitcmd = submitcmd.replace("cd $CASEROOT", "'cd $CASEROOT") + "'" if dry_run: return submitcmd @@ -779,8 +1016,10 @@ def _submit_single_job(self, case, job, dep_jobs=None, allow_fail=False, return jobid def get_batch_mail_type(self, mail_type): - raw = self.get_value("batch_mail_type", subgroup=None) - mail_types = [item.strip() for item in raw.split(",")] # pylint: disable=no-member + raw = self.get_value("batch_mail_type", subgroup=None) + mail_types = [ + item.strip() for item in raw.split(",") + ] # pylint: disable=no-member idx = ["never", "all", "begin", "end", "fail"].index(mail_type) return mail_types[idx] if idx < len(mail_types) else None @@ -798,27 +1037,38 @@ def set_batch_system_type(self, batchtype): def get_job_id(self, output): jobid_pattern = self.get_value("jobid_pattern", subgroup=None) - expect(jobid_pattern is not None, "Could not find jobid_pattern in env_batch.xml") + expect( + jobid_pattern is not None, "Could not find jobid_pattern in env_batch.xml" + ) search_match = 
re.search(jobid_pattern, output) - expect(search_match is not None, - "Couldn't match jobid_pattern '{}' within submit output:\n '{}'".format(jobid_pattern, output)) + expect( + search_match is not None, + "Couldn't match jobid_pattern '{}' within submit output:\n '{}'".format( + jobid_pattern, output + ), + ) jobid = search_match.group(1) return jobid def queue_meets_spec(self, queue, num_nodes, num_tasks, walltime=None, job=None): specs = self.get_queue_specs(queue) - nodemin, nodemax, jobname, _, _, walltimemax, jobmin, \ - jobmax, strict = specs + nodemin, nodemax, jobname, _, _, walltimemax, jobmin, jobmax, strict = specs # A job name match automatically meets spec if job is not None and jobname is not None: return jobname == job - if nodemin is not None and num_nodes < nodemin or \ - nodemax is not None and num_nodes > nodemax or \ - jobmin is not None and num_tasks < jobmin or \ - jobmax is not None and num_tasks > jobmax: + if ( + nodemin is not None + and num_nodes < nodemin + or nodemax is not None + and num_nodes > nodemax + or jobmin is not None + and num_tasks < jobmin + or jobmax is not None + and num_tasks > jobmax + ): return False if walltime is not None and walltimemax is not None and strict: @@ -839,16 +1089,22 @@ def _get_all_queue_names(self): return queue_names - def select_best_queue(self, num_nodes, num_tasks, name=None, walltime=None, job=None): - logger.debug("Selecting best queue with criteria nodes={!r}, " - "tasks={!r}, name={!r}, walltime={!r}, job={!r}".format( - num_nodes, num_tasks, name, walltime, job - )) + def select_best_queue( + self, num_nodes, num_tasks, name=None, walltime=None, job=None + ): + logger.debug( + "Selecting best queue with criteria nodes={!r}, " + "tasks={!r}, name={!r}, walltime={!r}, job={!r}".format( + num_nodes, num_tasks, name, walltime, job + ) + ) # Make sure to check default queue first. 
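
# ---- editor's aside -------------------------------------------------------
# A minimal standalone sketch of the jobid_pattern mechanism used by
# get_job_id() above: the batch config supplies a regex whose first capture
# group is the job id, and it is applied to the submit command's output.
# The sample pattern and output below are illustrative (Slurm-style), not
# taken from any particular config_batch.xml.
import re

def extract_job_id(jobid_pattern, submit_output):
    # group(1) must capture the id; failing to match indicates a bad pattern
    search_match = re.search(jobid_pattern, submit_output)
    if search_match is None:
        raise RuntimeError(
            "Couldn't match '{}' within '{}'".format(jobid_pattern, submit_output)
        )
    return search_match.group(1)

# e.g. extract_job_id(r"Submitted batch job (\d+)", "Submitted batch job 12345")
# returns "12345"
# ---------------------------------------------------------------------------
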
qnodes = self.get_all_queues(name=name) for qnode in qnodes: - if self.queue_meets_spec(qnode, num_nodes, num_tasks, walltime=walltime, job=job): + if self.queue_meets_spec( + qnode, num_nodes, num_tasks, walltime=walltime, job=job + ): logger.debug("Selected queue {!r}".format(self.text(qnode))) return qnode @@ -871,8 +1127,14 @@ def get_queue_specs(self, qnode): jobmax = self.get(qnode, "jobmax") jobmax = None if jobmax is None else int(jobmax) - expect( nodemin is None or jobmin is None, "Cannot specify both nodemin and jobmin for a queue") - expect( nodemax is None or jobmax is None, "Cannot specify both nodemax and jobmax for a queue") + expect( + nodemin is None or jobmin is None, + "Cannot specify both nodemin and jobmin for a queue", + ) + expect( + nodemax is None or jobmax is None, + "Cannot specify both nodemax and jobmax for a queue", + ) jobname = self.get(qnode, "jobname") walltimedef = self.get(qnode, "walltimedef") @@ -880,8 +1142,17 @@ def get_queue_specs(self, qnode): walltimemax = self.get(qnode, "walltimemax") strict = self.get(qnode, "strict") == "true" - return nodemin, nodemax, jobname, walltimedef, walltimemin, \ - walltimemax, jobmin, jobmax, strict + return ( + nodemin, + nodemax, + jobname, + walltimedef, + walltimemin, + walltimemax, + jobmin, + jobmax, + strict, + ) def get_default_queue(self): bs_nodes = self.get_children("batch_system") @@ -889,7 +1160,9 @@ def get_default_queue(self): for bsnode in bs_nodes: qnodes = self.get_children("queues", root=bsnode) for qnode in qnodes: - node = self.get_optional_child("queue", attributes={"default" : "true"}, root=qnode) + node = self.get_optional_child( + "queue", attributes={"default": "true"}, root=qnode + ) if node is None: node = self.get_optional_child("queue", root=qnode) @@ -920,9 +1193,13 @@ def get_all_queues(self, name=None): def get_children(self, name=None, attributes=None, root=None): if name == "PROJECT_REQUIRED": - nodes = super(EnvBatch, self).get_children("entry", attributes={"id":name}, root=root) + nodes = super(EnvBatch, self).get_children( + "entry", attributes={"id": name}, root=root + ) else: - nodes = super(EnvBatch, self).get_children(name, attributes=attributes, root=root) + nodes = super(EnvBatch, self).get_children( + name, attributes=attributes, root=root + ) return nodes @@ -939,7 +1216,9 @@ def get_status(self, jobid): status, out, err = run_cmd(cmd) if status != 0: - logger.warning("Batch query command '{}' failed with error '{}'".format(cmd, err)) + logger.warning( + "Batch query command '{}' failed with error '{}'".format(cmd, err) + ) else: return out.strip() @@ -949,11 +1228,15 @@ def cancel_job(self, jobid): logger.warning("Batch cancellation not supported on this platform") return False else: - cmd = self.text(batch_cancel) + " " + str(jobid) + cmd = self.text(batch_cancel) + " " + str(jobid) status, out, err = run_cmd(cmd) if status != 0: - logger.warning("Batch cancel command '{}' failed with error '{}'".format(cmd, out + "\n" + err)) + logger.warning( + "Batch cancel command '{}' failed with error '{}'".format( + cmd, out + "\n" + err + ) + ) else: return True @@ -961,9 +1244,8 @@ def compare_xml(self, other): xmldiffs = {} f1batchnodes = self.get_children("batch_system") for bnode in f1batchnodes: - f2bnodes = other.get_children("batch_system", - attributes = self.attrib(bnode)) - f2bnode=None + f2bnodes = other.get_children("batch_system", attributes=self.attrib(bnode)) + f2bnode = None if len(f2bnodes): f2bnode = f2bnodes[0] f1batchnodes = self.get_children(root=bnode) @@ 
-973,13 +1255,13 @@ def compare_xml(self, other): text2 = "" attribs = self.attrib(node) f2matches = other.scan_children(name, attributes=attribs, root=f2bnode) - foundmatch=False + foundmatch = False for chkmatch in f2matches: name2 = other.name(chkmatch) attribs2 = other.attrib(chkmatch) text2 = other.text(chkmatch) - if(name == name2 and attribs==attribs2 and text1==text2): - foundmatch=True + if name == name2 and attribs == attribs2 and text1 == text2: + foundmatch = True break if not foundmatch: xmldiffs[name] = [text1, text2] @@ -987,25 +1269,36 @@ def compare_xml(self, other): f1groups = self.get_children("group") for node in f1groups: group = self.get(node, "id") - f2group = other.get_child("group", attributes={"id":group}) - xmldiffs.update(super(EnvBatch, self).compare_xml(other, - root=node, otherroot=f2group)) + f2group = other.get_child("group", attributes={"id": group}) + xmldiffs.update( + super(EnvBatch, self).compare_xml(other, root=node, otherroot=f2group) + ) return xmldiffs def make_all_batch_files(self, case): - machdir = case.get_value("MACHDIR") + machdir = case.get_value("MACHDIR") env_workflow = case.get_env("workflow") logger.info("Creating batch scripts") jobs = env_workflow.get_jobs() for job in jobs: - template = case.get_resolved_value(env_workflow.get_value('template', subgroup=job)) + template = case.get_resolved_value( + env_workflow.get_value("template", subgroup=job) + ) if os.path.isabs(template): input_batch_script = template else: - input_batch_script = os.path.join(machdir,template) + input_batch_script = os.path.join(machdir, template) if os.path.isfile(input_batch_script): - logger.info("Writing {} script from input template {}".format(job, input_batch_script)) + logger.info( + "Writing {} script from input template {}".format( + job, input_batch_script + ) + ) self.make_batch_script(input_batch_script, job, case) else: - logger.warning("Input template file {} for job {} does not exist or cannot be read.".format(input_batch_script, job)) + logger.warning( + "Input template file {} for job {} does not exist or cannot be read.".format( + input_batch_script, job + ) + ) diff --git a/CIME/XML/env_build.py b/CIME/XML/env_build.py index 4bc3b01b1ca..7bd805b1c0f 100644 --- a/CIME/XML/env_build.py +++ b/CIME/XML/env_build.py @@ -8,9 +8,12 @@ logger = logging.getLogger(__name__) + class EnvBuild(EnvBase): # pylint: disable=unused-argument - def __init__(self, case_root=None, infile="env_build.xml",components=None, read_only=False): + def __init__( + self, case_root=None, infile="env_build.xml", components=None, read_only=False + ): """ initialize an object interface to file env_build.xml in the case directory """ diff --git a/CIME/XML/env_case.py b/CIME/XML/env_case.py index cacb7457f5c..1b4c85d6f88 100644 --- a/CIME/XML/env_case.py +++ b/CIME/XML/env_case.py @@ -8,9 +8,12 @@ logger = logging.getLogger(__name__) + class EnvCase(EnvBase): # pylint: disable=unused-argument - def __init__(self, case_root=None, infile="env_case.xml", components=None, read_only=False): + def __init__( + self, case_root=None, infile="env_case.xml", components=None, read_only=False + ): """ initialize an object interface to file env_case.xml in the case directory """ diff --git a/CIME/XML/env_mach_pes.py b/CIME/XML/env_mach_pes.py index 1e61f9cfb14..9d1d1698870 100644 --- a/CIME/XML/env_mach_pes.py +++ b/CIME/XML/env_mach_pes.py @@ -8,9 +8,16 @@ logger = logging.getLogger(__name__) -class EnvMachPes(EnvBase): - def __init__(self, case_root=None, infile="env_mach_pes.xml", 
-                 components=None, read_only=False, comp_interface="mct"):
+class EnvMachPes(EnvBase):
+    def __init__(
+        self,
+        case_root=None,
+        infile="env_mach_pes.xml",
+        components=None,
+        read_only=False,
+        comp_interface="mct",
+    ):
         """
         initialize an object interface to file env_mach_pes.xml in the case directory
         """
@@ -28,7 +35,14 @@ def add_comment(self, comment):
             self.remove_child(node)
         self.add_child(node, position=1)

-    def get_value(self, vid, attribute=None, resolved=True, subgroup=None, max_mpitasks_per_node=None): # pylint: disable=arguments-differ
+    def get_value(
+        self,
+        vid,
+        attribute=None,
+        resolved=True,
+        subgroup=None,
+        max_mpitasks_per_node=None,
+    ):  # pylint: disable=arguments-differ
         # Special variable NINST_MAX is used to determine the number of
         # drivers in multi-driver mode.
         if vid == "NINST_MAX":
@@ -45,7 +59,7 @@ def get_value(self, vid, attribute=None, resolved=True, subgroup=None, max_mpita
         if max_mpitasks_per_node is None:
             max_mpitasks_per_node = self.get_value("MAX_MPITASKS_PER_NODE")
         if value is not None and value < 0:
-            value = -1*value*max_mpitasks_per_node
+            value = -1 * value * max_mpitasks_per_node
         # in the nuopc driver there is only one NINST value
         # so that NINST_{comp} = NINST
         if "NINST_" in vid and value is None:
@@ -64,21 +78,28 @@ def set_value(self, vid, value, subgroup=None, ignore_type=False):
                 if comp == "CPL":
                     continue
                 ninst = self.get_value("NINST_{}".format(comp))
-                expect(ninst == ninst_max,
-                       "All components must have the same NINST value in multi_driver mode. NINST_{}={} should be {}".format(comp,ninst,ninst_max))
+                expect(
+                    ninst == ninst_max,
+                    "All components must have the same NINST value in multi_driver mode. NINST_{}={} should be {}".format(
+                        comp, ninst, ninst_max
+                    ),
+                )
         if "NTASKS" in vid or "NTHRDS" in vid:
             expect(value != 0, "Cannot set NTASKS or NTHRDS to 0")
-
-        return EnvBase.set_value(self, vid, value, subgroup=subgroup, ignore_type=ignore_type)
-
+        return EnvBase.set_value(
+            self, vid, value, subgroup=subgroup, ignore_type=ignore_type
+        )

     def get_max_thread_count(self, comp_classes):
-        ''' Find the maximum number of openmp threads for any component in the case '''
+        """Find the maximum number of openmp threads for any component in the case"""
         max_threads = 1
         for comp in comp_classes:
-            threads = self.get_value("NTHRDS",attribute={"compclass":comp})
-            expect(threads is not None, "Error no thread count found for component class {}".format(comp))
+            threads = self.get_value("NTHRDS", attribute={"compclass": comp})
+            expect(
+                threads is not None,
+                "Error no thread count found for component class {}".format(comp),
+            )
             if threads > max_threads:
                 max_threads = threads
         return max_threads
@@ -89,36 +110,42 @@ def get_total_tasks(self, comp_classes):
         if maxinst:
             comp_interface = "nuopc"
         else:
-            comp_interface = 'unknown'
+            comp_interface = "unknown"
             maxinst = 1
         for comp in comp_classes:
-            ntasks = self.get_value("NTASKS", attribute={"compclass":comp})
-            rootpe = self.get_value("ROOTPE", attribute={"compclass":comp})
-            pstrid = self.get_value("PSTRID", attribute={"compclass":comp})
+            ntasks = self.get_value("NTASKS", attribute={"compclass": comp})
+            rootpe = self.get_value("ROOTPE", attribute={"compclass": comp})
+            pstrid = self.get_value("PSTRID", attribute={"compclass": comp})
             esmf_aware_threading = self.get_value("ESMF_AWARE_THREADING")
             # mct is unaware of threads and they should not be counted here
             # if esmf is thread aware they are included
             if comp_interface == "nuopc" and esmf_aware_threading:
-                nthrds = self.get_value("NTHRDS", attribute={"compclass":comp})
+                nthrds = self.get_value("NTHRDS", attribute={"compclass": comp})
             else:
                 nthrds = 1
-            if comp != "CPL" and comp_interface!="nuopc":
-                ninst = self.get_value("NINST", attribute={"compclass":comp})
+            if comp != "CPL" and comp_interface != "nuopc":
+                ninst = self.get_value("NINST", attribute={"compclass": comp})
                 maxinst = max(maxinst, ninst)
-            tt = rootpe + nthrds*((ntasks - 1) * pstrid + 1)
+            tt = rootpe + nthrds * ((ntasks - 1) * pstrid + 1)
             total_tasks = max(tt, total_tasks)
         if self.get_value("MULTI_DRIVER"):
             total_tasks *= maxinst
         return total_tasks

     def get_tasks_per_node(self, total_tasks, max_thread_count):
-        expect(total_tasks > 0,"totaltasks > 0 expected, totaltasks = {}".format(total_tasks))
-        if self._comp_interface == 'nuopc' and self.get_value("ESMF_AWARE_THREADING"):
+        expect(
+            total_tasks > 0,
+            "totaltasks > 0 expected, totaltasks = {}".format(total_tasks),
+        )
+        if self._comp_interface == "nuopc" and self.get_value("ESMF_AWARE_THREADING"):
            tasks_per_node = self.get_value("MAX_MPITASKS_PER_NODE")
         else:
-            tasks_per_node = min(self.get_value("MAX_TASKS_PER_NODE")// max_thread_count,
-                                 self.get_value("MAX_MPITASKS_PER_NODE"), total_tasks)
+            tasks_per_node = min(
+                self.get_value("MAX_TASKS_PER_NODE") // max_thread_count,
+                self.get_value("MAX_MPITASKS_PER_NODE"),
+                total_tasks,
+            )
         return tasks_per_node if tasks_per_node > 0 else 1

     def get_total_nodes(self, total_tasks, max_thread_count):
@@ -126,7 +153,7 @@ def get_total_nodes(self, total_tasks, max_thread_count):
         Return (num_active_nodes, num_spare_nodes)
         """
         # threads have already been included in nuopc interface
-        if self._comp_interface == 'nuopc' and self.get_value("ESMF_AWARE_THREADING"):
+        if self._comp_interface == "nuopc" and self.get_value("ESMF_AWARE_THREADING"):
             max_thread_count = 1
         tasks_per_node = self.get_tasks_per_node(total_tasks, max_thread_count)
         num_nodes = int(math.ceil(float(total_tasks) / tasks_per_node))
@@ -140,9 +167,9 @@ def get_spare_nodes(self, num_nodes):
         if self.get_value("ALLOCATE_SPARE_NODES"):
             ten_pct = int(math.ceil(float(num_nodes) * 0.1))
             if ten_pct < 1:
-                return 1 # Always provide at least one spare node
+                return 1  # Always provide at least one spare node
             elif ten_pct > 10:
-                return 10 # Never provide more than 10 spare nodes
+                return 10  # Never provide more than 10 spare nodes
             else:
                 return ten_pct
         else:
diff --git a/CIME/XML/env_mach_specific.py b/CIME/XML/env_mach_specific.py
index 91dfecbdc40..7139973b460 100644
--- a/CIME/XML/env_mach_specific.py
+++ b/CIME/XML/env_mach_specific.py
@@ -15,9 +15,16 @@
 # get_type) otherwise need to implement own functions and make GenericXML parent class
 class EnvMachSpecific(EnvBase):
     # pylint: disable=unused-argument
-    def __init__(self, caseroot=None, infile="env_mach_specific.xml",
-                 components=None, unit_testing=False, read_only=False,
-                 standalone_configure=False, comp_interface=None):
+    def __init__(
+        self,
+        caseroot=None,
+        infile="env_mach_specific.xml",
+        components=None,
+        unit_testing=False,
+        read_only=False,
+        standalone_configure=False,
+        comp_interface=None,
+    ):
         """
         initialize an object interface to file env_mach_specific.xml in the case directory
@@ -27,27 +34,34 @@ def __init__(self, caseroot=None, infile="env_mach_specific.xml",
         """
         schema = os.path.join(utils.get_schema_path(), "env_mach_specific.xsd")
         EnvBase.__init__(self, caseroot, infile, schema=schema, read_only=read_only)
-        self._allowed_mpi_attributes = ("compiler", "mpilib", "threaded", "unit_testing", "queue", "comp_interface")
+
self._allowed_mpi_attributes = ( + "compiler", + "mpilib", + "threaded", + "unit_testing", + "queue", + "comp_interface", + ) self._comp_interface = comp_interface self._unit_testing = unit_testing self._standalone_configure = standalone_configure def populate(self, machobj, attributes=None): """Add entries to the file using information from a Machines object. - mpilib must match attributes if set + mpilib must match attributes if set """ items = ("module_system", "environment_variables", "resource_limits", "mpirun") default_run_suffix = machobj.get_child("default_run_suffix", root=machobj.root) - group_node = self.make_child("group", {"id":"compliant_values"}) - settings = {"run_exe":None,"run_misc_suffix":None} + group_node = self.make_child("group", {"id": "compliant_values"}) + settings = {"run_exe": None, "run_misc_suffix": None} for item in items: nodes = machobj.get_first_child_nodes(item) if item == "environment_variables": if len(nodes) == 0: example_text = """This section is for the user to specify any additional machine-specific env var, or to overwite existing ones.\n \n ARGUMENT\n \n """ - self.make_child_comment(text = example_text) + self.make_child_comment(text=example_text) if item == "mpirun": for node in nodes: @@ -73,18 +87,32 @@ def populate(self, machobj, attributes=None): for node in nodes: self.add_child(node) - for item in ("run_exe","run_misc_suffix"): + for item in ("run_exe", "run_misc_suffix"): if settings[item]: value = settings[item] else: - value = self.text(machobj.get_child("default_"+item, root=default_run_suffix)) + value = self.text( + machobj.get_child("default_" + item, root=default_run_suffix) + ) - entity_node = self.make_child("entry", {"id":item,"value":value}, root=group_node) + entity_node = self.make_child( + "entry", {"id": item, "value": value}, root=group_node + ) self.make_child("type", root=entity_node, text="char") - self.make_child("desc", root=entity_node, text=("executable name" if item == "run_exe" else "redirect for job output")) + self.make_child( + "desc", + root=entity_node, + text=( + "executable name" + if item == "run_exe" + else "redirect for job output" + ), + ) def _get_modules_for_case(self, case, job=None): - module_nodes = self.get_children("modules", root=self.get_child("module_system")) + module_nodes = self.get_children( + "modules", root=self.get_child("module_system") + ) modules_to_load = None if module_nodes is not None: modules_to_load = self._compute_module_actions(module_nodes, case, job=job) @@ -107,11 +135,13 @@ def load_env(self, case, force_method=None, job=None, verbose=False): # Do the modules so we can refer to env vars set by the modules # in the environment_variables block modules_to_load = self._get_modules_for_case(case) - if (modules_to_load is not None): - self._load_modules(modules_to_load, force_method=force_method, verbose=verbose) + if modules_to_load is not None: + self._load_modules( + modules_to_load, force_method=force_method, verbose=verbose + ) envs_to_set = self._get_envs_for_case(case, job=job) - if (envs_to_set is not None): + if envs_to_set is not None: self._load_envs(envs_to_set, verbose=verbose) self._get_resources_for_case(case) @@ -125,19 +155,23 @@ def _get_resources_for_case(self, case): for name, val in nodes: attr = getattr(resource, name) limits = resource.getrlimit(attr) - logger.info("Setting resource.{} to {} from {}".format(name, val, limits)) + logger.info( + "Setting resource.{} to {} from {}".format(name, val, limits) + ) limits = (int(val), limits[1]) 
resource.setrlimit(attr, limits) def _load_modules(self, modules_to_load, force_method=None, verbose=False): - module_system = self.get_module_system_type() if force_method is None else force_method - if (module_system == "module"): + module_system = ( + self.get_module_system_type() if force_method is None else force_method + ) + if module_system == "module": self._load_module_modules(modules_to_load, verbose=verbose) - elif (module_system == "soft"): + elif module_system == "soft": self._load_modules_generic(modules_to_load, verbose=verbose) - elif (module_system == "generic"): + elif module_system == "generic": self._load_modules_generic(modules_to_load, verbose=verbose) - elif (module_system == "none"): + elif module_system == "none": self._load_none_modules(modules_to_load) else: expect(False, "Unhandled module system '{}'".format(module_system)) @@ -154,14 +188,16 @@ def list_modules(self): else: source_cmd = "" - if (module_system in ["module"]): - return run_cmd_no_fail("{}module list".format(source_cmd), combine_output=True) - elif (module_system == "soft"): + if module_system in ["module"]: + return run_cmd_no_fail( + "{}module list".format(source_cmd), combine_output=True + ) + elif module_system == "soft": # Does soft really not provide this capability? return "" - elif (module_system == "generic"): + elif module_system == "generic": return run_cmd_no_fail("{}use -lv".format(source_cmd)) - elif (module_system == "none"): + elif module_system == "none": return "" else: expect(False, "Unhandled module system '{}'".format(module_system)) @@ -177,15 +213,17 @@ def save_all_env_info(self, filename): def get_overrides_nodes(self, case): overrides = {} - overrides["num_nodes"] = case.num_nodes + overrides["num_nodes"] = case.num_nodes fnm = "env_mach_specific.xml" - output_text = transform_vars(open(fnm,"r").read(), case=case, subgroup=None, overrides=overrides) + output_text = transform_vars( + open(fnm, "r").read(), case=case, subgroup=None, overrides=overrides + ) logger.info("Updating file {}".format(fnm)) with open(fnm, "w") as fd: fd.write(output_text) return overrides - def make_env_mach_specific_file(self, shell, case, output_dir=''): + def make_env_mach_specific_file(self, shell, case, output_dir=""): """Writes .env_mach_specific.sh or .env_mach_specific.csh Args: @@ -196,10 +234,14 @@ def make_env_mach_specific_file(self, shell, case, output_dir=''): module_system = self.get_module_system_type() sh_init_cmd = self.get_module_system_init_path(shell) sh_mod_cmd = self.get_module_system_cmd_path(shell) - lines = ["# This file is for user convenience only and is not used by the model"] + lines = [ + "# This file is for user convenience only and is not used by the model" + ] lines.append("# Changes to this file will be ignored and overwritten") - lines.append("# Changes to the environment should be made in env_mach_specific.xml") + lines.append( + "# Changes to the environment should be made in env_mach_specific.xml" + ) lines.append("# Run ./case.setup --reset to regenerate this file") if sh_init_cmd: lines.append("source {}".format(sh_init_cmd)) @@ -221,7 +263,11 @@ def make_env_mach_specific_file(self, shell, case, output_dir=''): lines.extend(self._get_module_commands(modules_to_load, shell)) else: for action, argument in modules_to_load: - lines.append("{} {} {}".format(sh_mod_cmd, action, "" if argument is None else argument)) + lines.append( + "{} {} {}".format( + sh_mod_cmd, action, "" if argument is None else argument + ) + ) if envs_to_set is not None: for env_name, 
env_value in envs_to_set: @@ -254,8 +300,8 @@ def _load_envs(self, envs_to_set, verbose=False): logger_func("Unsetting Environment {}".format(env_name)) elif env_value is not None: if env_name == "source": - shell, cmd = env_value.split(" ",1) - self._source_shell_file("source "+cmd, shell, verbose=verbose) + shell, cmd = env_value.split(" ", 1) + self._source_shell_file("source " + cmd, shell, verbose=verbose) else: if verbose: print("Setting Environment {}={}".format(env_name, env_value)) @@ -272,42 +318,59 @@ def _compute_resource_actions(self, resource_nodes, case, job=None): return self._compute_actions(resource_nodes, "resource", case, job=job) def _compute_actions(self, nodes, child_tag, case, job=None): - result = [] # list of tuples ("name", "argument") + result = [] # list of tuples ("name", "argument") compiler, mpilib = case.get_value("COMPILER"), case.get_value("MPILIB") for node in nodes: - if (self._match_attribs(self.attrib(node), case, job=job)): + if self._match_attribs(self.attrib(node), case, job=job): for child in self.get_children(root=node): - expect(self.name(child) == child_tag, "Expected {} element".format(child_tag)) - if (self._match_attribs(self.attrib(child), case, job=job)): + expect( + self.name(child) == child_tag, + "Expected {} element".format(child_tag), + ) + if self._match_attribs(self.attrib(child), case, job=job): val = self.text(child) if val is not None: # We allow a couple special substitutions for these fields - for repl_this, repl_with in [("$COMPILER", compiler), ("$MPILIB", mpilib)]: + for repl_this, repl_with in [ + ("$COMPILER", compiler), + ("$MPILIB", mpilib), + ]: val = val.replace(repl_this, repl_with) val = self.get_resolved_value(val) - expect("$" not in val, "Not safe to leave unresolved items in env var value: '{}'".format(val)) + expect( + "$" not in val, + "Not safe to leave unresolved items in env var value: '{}'".format( + val + ), + ) # intentional unindent, result is appended even if val is None name = self.get(child, "name") if name: - result.append( (name, val) ) + result.append((name, val)) else: - result.append( ("source", self.get(child, "source") + " " + val) ) + result.append( + ("source", self.get(child, "source") + " " + val) + ) return result def _match_attribs(self, attribs, case, job=None): # check for matches with case-vars for attrib in attribs: - if attrib == "unit_testing": # special case + if attrib == "unit_testing": # special case if not self._match(self._unit_testing, attribs["unit_testing"].upper()): return False elif attrib == "queue": if job is not None: val = case.get_value("JOB_QUEUE", subgroup=job) - expect(val is not None, "Cannot match attrib '%s', case has no value for it" % attrib.upper()) + expect( + val is not None, + "Cannot match attrib '%s', case has no value for it" + % attrib.upper(), + ) if not self._match(val, attribs[attrib]): return False elif attrib == "name": @@ -316,7 +379,11 @@ def _match_attribs(self, attribs, case, job=None): pass else: val = case.get_value(attrib.upper()) - expect(val is not None, "Cannot match attrib '%s', case has no value for it" % attrib.upper()) + expect( + val is not None, + "Cannot match attrib '%s', case has no value for it" + % attrib.upper(), + ) if not self._match(val, attribs[attrib]): return False @@ -324,14 +391,18 @@ def _match_attribs(self, attribs, case, job=None): def _match(self, my_value, xml_value): if xml_value.startswith("!"): - result = re.match(xml_value[1:] + "$",str(my_value)) is None + result = re.match(xml_value[1:] + "$", 
str(my_value)) is None elif isinstance(my_value, bool): - if my_value: result = xml_value == "TRUE" - else: result = xml_value == "FALSE" + if my_value: + result = xml_value == "TRUE" + else: + result = xml_value == "FALSE" else: - result = re.match(xml_value + "$",str(my_value)) is not None + result = re.match(xml_value + "$", str(my_value)) is not None - logger.debug("(env_mach_specific) _match {} {} {}".format(my_value, xml_value, result)) + logger.debug( + "(env_mach_specific) _match {} {} {}".format(my_value, xml_value, result) + ) return result def _get_module_commands(self, modules_to_load, shell): @@ -339,7 +410,7 @@ def _get_module_commands(self, modules_to_load, shell): mod_cmd = self.get_module_system_cmd_path(shell) cmds = [] last_action = None - last_cmd = None + last_cmd = None # Normally, we will try to combine or batch module commands together... # @@ -368,7 +439,9 @@ def _get_module_commands(self, modules_to_load, shell): if last_cmd is not None: cmds.append(last_cmd) - last_cmd = "{} {} {}".format(mod_cmd, action, "" if argument is None else argument) + last_cmd = "{} {} {}".format( + mod_cmd, action, "" if argument is None else argument + ) last_action = action if last_cmd: @@ -381,8 +454,10 @@ def _load_module_modules(self, modules_to_load, verbose=False): for cmd in self._get_module_commands(modules_to_load, "python"): logger_func("module command is {}".format(cmd)) stat, py_module_code, errout = run_cmd(cmd) - expect(stat==0 and (len(errout) == 0 or self.allow_error()), - "module command {} failed with message:\n{}".format(cmd, errout)) + expect( + stat == 0 and (len(errout) == 0 or self.allow_error()), + "module command {} failed with message:\n{}".format(cmd, errout), + ) exec(py_module_code) def _load_modules_generic(self, modules_to_load, verbose=False): @@ -401,8 +476,10 @@ def _load_modules_generic(self, modules_to_load, verbose=False): if "SOFTENV_LOAD" in os.environ: cmd += " && source $SOFTENV_LOAD" - for action,argument in modules_to_load: - cmd += " && {} {} {}".format(sh_mod_cmd, action, "" if argument is None else argument) + for action, argument in modules_to_load: + cmd += " && {} {} {}".format( + sh_mod_cmd, action, "" if argument is None else argument + ) self._source_shell_file(cmd, verbose=verbose) @@ -418,7 +495,7 @@ def _source_shell_file(self, cmd, shell="sh", verbose=False): # Parse the output to set the os.environ dictionary ################################################### newenv = OrderedDict() - for line in output.split('\0'): + for line in output.split("\0"): if "=" in line: key, val = line.split("=", 1) newenv[key] = val @@ -442,21 +519,25 @@ def _load_none_modules(self, modules_to_load): """ No Action required """ - expect(not modules_to_load, - "Module system was specified as 'none' yet there are modules that need to be loaded?") + expect( + not modules_to_load, + "Module system was specified as 'none' yet there are modules that need to be loaded?", + ) def _mach_specific_header(self, shell): - ''' + """ write a shell module file for this case. - ''' - header = ''' + """ + header = """ #!/usr/bin/env {} #=============================================================================== # Automatically generated module settings for $self->{{machine}} # DO NOT EDIT THIS FILE DIRECTLY! Please edit env_mach_specific.xml # in your CASEROOT. This file is overwritten every time modules are loaded! 
#=============================================================================== -'''.format(shell) +""".format( + shell + ) header += "source {}".format(self.get_module_system_init_path(shell)) return header @@ -479,11 +560,15 @@ def allow_error(self): return value.upper() == "TRUE" if value is not None else False def get_module_system_init_path(self, lang): - init_nodes = self.get_optional_child("init_path", attributes={"lang":lang}, root=self.get_child("module_system")) + init_nodes = self.get_optional_child( + "init_path", attributes={"lang": lang}, root=self.get_child("module_system") + ) return self.text(init_nodes) if init_nodes is not None else None def get_module_system_cmd_path(self, lang): - cmd_nodes = self.get_optional_child("cmd_path", attributes={"lang":lang}, root=self.get_child("module_system")) + cmd_nodes = self.get_optional_child( + "cmd_path", attributes={"lang": lang}, root=self.get_child("module_system") + ) return self.text(cmd_nodes) if cmd_nodes is not None else None def get_mpirun(self, case, attribs, job, exe_only=False, overrides=None): @@ -503,7 +588,10 @@ def get_mpirun(self, case, attribs, job, exe_only=False, overrides=None): is_default = False for key, value in attribs.items(): - expect(key in self._allowed_mpi_attributes, "Unexpected key {} in mpirun attributes".format(key)) + expect( + key in self._allowed_mpi_attributes, + "Unexpected key {} in mpirun attributes".format(key), + ) if key in xml_attribs: if xml_attribs[key].lower() == "false": xml_attrib = False @@ -514,7 +602,11 @@ def get_mpirun(self, case, attribs, job, exe_only=False, overrides=None): if xml_attrib == value: matches += 1 - elif key == "mpilib" and value != "mpi-serial" and xml_attrib == "default": + elif ( + key == "mpilib" + and value != "mpi-serial" + and xml_attrib == "default" + ): is_default = True else: all_match = False @@ -531,11 +623,17 @@ def get_mpirun(self, case, attribs, job, exe_only=False, overrides=None): best_num_matched = matches # if there are no special arguments required for mpi-serial it need not have an entry in config_machines.xml - if "mpilib" in attribs and attribs["mpilib"] == "mpi-serial" and best_match is None: - return "",[],None,None - - expect(best_match is not None or default_match is not None, - "Could not find a matching MPI for attributes: {}".format(attribs)) + if ( + "mpilib" in attribs + and attribs["mpilib"] == "mpi-serial" + and best_match is None + ): + return "", [], None, None + + expect( + best_match is not None or default_match is not None, + "Could not find a matching MPI for attributes: {}".format(attribs), + ) the_match = best_match if best_match is not None else default_match @@ -545,23 +643,28 @@ def get_mpirun(self, case, attribs, job, exe_only=False, overrides=None): if arg_node: arg_nodes = self.get_children("arg", root=arg_node) for arg_node in arg_nodes: - arg_value = transform_vars(self.text(arg_node), - case=case, - subgroup=job,overrides=overrides, - default=self.get(arg_node, "default")) + arg_value = transform_vars( + self.text(arg_node), + case=case, + subgroup=job, + overrides=overrides, + default=self.get(arg_node, "default"), + ) args.append(arg_value) exec_node = self.get_child("executable", root=the_match) - expect(exec_node is not None,"No executable found") + expect(exec_node is not None, "No executable found") executable = self.text(exec_node) run_exe = None run_misc_suffix = None - run_exe_node = self.get_optional_child('run_exe', root=the_match) + run_exe_node = self.get_optional_child("run_exe", root=the_match) 
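
# ---- editor's aside -------------------------------------------------------
# Simplified model of the mpirun-entry selection implemented in get_mpirun()
# above: each candidate entry's XML attributes are compared against the
# case's attributes, the entry with the most exact matches wins, and an
# entry whose mpilib is "default" is kept as a fallback. The candidate data
# below is invented for illustration; this is not the CIME API.
def select_mpirun(candidates, attribs):
    best_match, best_num_matched, default_match = None, -1, None
    for xml_attribs, executable in candidates:
        matches, all_match, is_default = 0, True, False
        for key, value in attribs.items():
            if key not in xml_attribs:
                continue
            if xml_attribs[key] == value:
                matches += 1
            elif key == "mpilib" and xml_attribs[key] == "default":
                is_default = True
            else:
                all_match = False
                break
        if all_match:
            if is_default:
                default_match = executable
            elif matches > best_num_matched:
                best_match, best_num_matched = executable, matches
    return best_match if best_match is not None else default_match

# e.g. with candidates [({"mpilib": "default"}, "mpirun"),
#                       ({"mpilib": "openmpi"}, "mpirun --bind-to core")]
# and attribs {"mpilib": "openmpi"}, the second entry is selected; with
# attribs {"mpilib": "mpich"}, the "default" entry is the fallback.
# ---------------------------------------------------------------------------
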
if run_exe_node: run_exe = self.text(run_exe_node) - run_misc_suffix_node = self.get_optional_child('run_misc_suffix', root=the_match) + run_misc_suffix_node = self.get_optional_child( + "run_misc_suffix", root=the_match + ) if run_misc_suffix_node: run_misc_suffix = self.text(run_misc_suffix_node) diff --git a/CIME/XML/env_run.py b/CIME/XML/env_run.py index 731f557d5c4..ea5f14af4ca 100644 --- a/CIME/XML/env_run.py +++ b/CIME/XML/env_run.py @@ -10,9 +10,11 @@ logger = logging.getLogger(__name__) -class EnvRun(EnvBase): - def __init__(self, case_root=None, infile="env_run.xml", components=None, read_only=False): +class EnvRun(EnvBase): + def __init__( + self, case_root=None, infile="env_run.xml", components=None, read_only=False + ): """ initialize an object interface to file env_run.xml in the case directory """ @@ -50,11 +52,10 @@ def set_value(self, vid, value, subgroup=None, ignore_type=False): logger.warning("Only CPL settings are used for PIO in async mode") subgroup = "CPL" - if vid == "PIO_ASYNC_INTERFACE": if type(value) == type(True): self._pio_async_interface = value else: - self._pio_async_interface = convert_to_type(value, "logical" , vid) + self._pio_async_interface = convert_to_type(value, "logical", vid) return EnvBase.set_value(self, vid, value, subgroup, ignore_type) diff --git a/CIME/XML/env_test.py b/CIME/XML/env_test.py index d37568e39e3..9dbe3615eff 100644 --- a/CIME/XML/env_test.py +++ b/CIME/XML/env_test.py @@ -8,15 +8,18 @@ logger = logging.getLogger(__name__) + class EnvTest(EnvBase): # pylint: disable=unused-argument - def __init__(self, case_root=None, infile="env_test.xml", components=None, read_only=False): + def __init__( + self, case_root=None, infile="env_test.xml", components=None, read_only=False + ): """ initialize an object interface to file env_test.xml in the case directory """ EnvBase.__init__(self, case_root, infile, read_only=read_only) - def add_test(self,testnode): + def add_test(self, testnode): self.add_child(testnode) self.write() @@ -30,14 +33,20 @@ def set_initial_values(self, case): tnode = self.get_child("test") for child in self.get_children(root=tnode): if self.text(child) is not None: - logger.debug("Setting {} to {} for test".format(self.name(child), self.text(child))) + logger.debug( + "Setting {} to {} for test".format( + self.name(child), self.text(child) + ) + ) if "$" in self.text(child): - case.set_value(self.name(child),self.text(child),ignore_type=True) + case.set_value(self.name(child), self.text(child), ignore_type=True) else: item_type = case.get_type_info(self.name(child)) if item_type: - value = convert_to_type(self.text(child),item_type,self.name(child)) - case.set_value(self.name(child),value) + value = convert_to_type( + self.text(child), item_type, self.name(child) + ) + case.set_value(self.name(child), value) case.flush() return @@ -47,7 +56,7 @@ def set_test_parameter(self, name, value): otherwise create a node and initialize it to value """ case = self.get_value("TESTCASE") - tnode = self.get_child("test",{"NAME":case}) + tnode = self.get_child("test", {"NAME": case}) idnode = self.get_optional_child(name, root=tnode) if idnode is None: @@ -57,14 +66,14 @@ def set_test_parameter(self, name, value): def get_test_parameter(self, name): case = self.get_value("TESTCASE") - tnode = self.get_child("test",{"NAME":case}) + tnode = self.get_child("test", {"NAME": case}) value = None idnode = self.get_optional_child(name, root=tnode) if idnode is not None: value = self.text(idnode) return value - def 
get_step_phase_cnt(self,step): + def get_step_phase_cnt(self, step): bldnodes = self.get_children(step) cnt = 0 for node in bldnodes: @@ -72,25 +81,29 @@ def get_step_phase_cnt(self,step): return cnt def get_settings_for_phase(self, name, cnt): - node = self.get_optional_child(name,attributes={"phase":cnt}) + node = self.get_optional_child(name, attributes={"phase": cnt}) settings = [] if node is not None: for child in node: - logger.debug ("Here child is {} with value {}".format(self.name(child), self.text(child))) + logger.debug( + "Here child is {} with value {}".format( + self.name(child), self.text(child) + ) + ) settings.append((self.name(child), self.text(child))) return settings def run_phase_get_clone_name(self, phase): - node = self.get_child("RUN",attributes={"phase":str(phase)}) + node = self.get_child("RUN", attributes={"phase": str(phase)}) if self.has(node, "clone"): return self.get(node, "clone") return None def cleanupnode(self, node): - ''' + """ keep the values component set - ''' + """ fnode = self.get_child(name="file", root=node) self.remove_child(fnode, root=node) gnode = self.get_child(name="group", root=node) diff --git a/CIME/XML/env_workflow.py b/CIME/XML/env_workflow.py index 308b3cf3643..3c976693639 100644 --- a/CIME/XML/env_workflow.py +++ b/CIME/XML/env_workflow.py @@ -11,8 +11,8 @@ # pragma pylint: disable=attribute-defined-outside-init -class EnvWorkflow(EnvBase): +class EnvWorkflow(EnvBase): def __init__(self, case_root=None, infile="env_workflow.xml", read_only=False): """ initialize an object interface to file env_workflow.xml in the case directory @@ -21,13 +21,18 @@ def __init__(self, case_root=None, infile="env_workflow.xml", read_only=False): # schema = os.path.join(get_cime_root(), "CIME", "config", "xml_schemas", "env_workflow.xsd") # TODO: define schema for this file schema = None - super(EnvWorkflow,self).__init__(case_root, infile, schema=schema, read_only=read_only) + super(EnvWorkflow, self).__init__( + case_root, infile, schema=schema, read_only=read_only + ) def create_job_groups(self, batch_jobs, is_test): # Subtle: in order to support dynamic batch jobs, we need to remove the # job_submission group and replace with job-based groups - orig_group = self.get_optional_child("group", {"id":"job_submission"}, - err_msg="Looks like job groups have already been created") + orig_group = self.get_optional_child( + "group", + {"id": "job_submission"}, + err_msg="Looks like job groups have already been created", + ) expect(orig_group, "No workflow groups found") orig_group_children = super(EnvWorkflow, self).get_children(root=orig_group) @@ -39,18 +44,20 @@ def create_job_groups(self, batch_jobs, is_test): for name, jdict in batch_jobs: if name == "case.run" and is_test: - pass # skip + pass # skip elif name == "case.test" and not is_test: - pass # skip + pass # skip elif name == "case.run.sh": - pass # skip + pass # skip else: - new_job_group = self.make_child("group", {"id":name}) + new_job_group = self.make_child("group", {"id": name}) for field in jdict.keys(): if field == "runtime_parameters": continue val = jdict[field] - node = self.make_child("entry", {"id":field,"value":val}, root=new_job_group) + node = self.make_child( + "entry", {"id": field, "value": val}, root=new_job_group + ) self.make_child("type", root=node, text="char") for child in childnodes: @@ -67,21 +74,29 @@ def get_type_info(self, vid): gnodes = self.get_children("group") type_info = None for gnode in gnodes: - nodes = self.get_children("entry",{"id":vid}, root=gnode) + nodes 
= self.get_children("entry", {"id": vid}, root=gnode) type_info = None for node in nodes: new_type_info = self._get_type_info(node) if type_info is None: type_info = new_type_info else: - expect( type_info == new_type_info, - "Inconsistent type_info for entry id={} {} {}".format(vid, new_type_info, type_info)) + expect( + type_info == new_type_info, + "Inconsistent type_info for entry id={} {} {}".format( + vid, new_type_info, type_info + ), + ) return type_info def get_job_specs(self, case, job): task_count = case.get_resolved_value(self.get_value("task_count", subgroup=job)) - tasks_per_node = case.get_resolved_value(self.get_value("tasks_per_node", subgroup=job)) - thread_count = case.get_resolved_value(self.get_value("thread_count", subgroup=job)) + tasks_per_node = case.get_resolved_value( + self.get_value("tasks_per_node", subgroup=job) + ) + thread_count = case.get_resolved_value( + self.get_value("thread_count", subgroup=job) + ) max_gpus_per_node = case.get_value("MAX_GPUS_PER_NODE") ngpus_per_node = case.get_value("NGPUS_PER_NODE") num_nodes = None @@ -90,8 +105,8 @@ def get_job_specs(self, case, job): ngpus_per_node = 0 if task_count is not None and tasks_per_node is not None: task_count = int(task_count) - num_nodes = int(math.ceil(float(task_count)/float(tasks_per_node))) - tasks_per_node = task_count//num_nodes + num_nodes = int(math.ceil(float(task_count) / float(tasks_per_node))) + tasks_per_node = task_count // num_nodes if not thread_count: thread_count = 1 if ngpus_per_node > max_gpus_per_node: @@ -108,9 +123,11 @@ def get_value(self, item, attribute=None, resolved=True, subgroup="PRIMARY"): if subgroup == "PRIMARY": subgroup = "case.test" if "case.test" in self.get_jobs() else "case.run" - #pylint: disable=assignment-from-none + # pylint: disable=assignment-from-none if value is None: - value = super(EnvWorkflow, self).get_value(item, attribute=attribute, resolved=resolved, subgroup=subgroup) + value = super(EnvWorkflow, self).get_value( + item, attribute=attribute, resolved=resolved, subgroup=subgroup + ) return value @@ -125,24 +142,35 @@ def set_value(self, item, value, subgroup=None, ignore_type=False): if subgroup is None: gnodes = self.get_children("group") for gnode in gnodes: - node = self.get_optional_child("entry", {"id":item}, root=gnode) + node = self.get_optional_child("entry", {"id": item}, root=gnode) if node is not None: self._set_value(node, value, vid=item, ignore_type=ignore_type) val = value else: - group = self.get_optional_child("group", {"id":subgroup}) + group = self.get_optional_child("group", {"id": subgroup}) if group is not None: - node = self.get_optional_child("entry", {"id":item}, root=group) + node = self.get_optional_child("entry", {"id": item}, root=group) if node is not None: - val = self._set_value(node, value, vid=item, ignore_type=ignore_type) + val = self._set_value( + node, value, vid=item, ignore_type=ignore_type + ) return val def get_children(self, name=None, attributes=None, root=None): - if name in ("JOB_WALLCLOCK_TIME", "PROJECT", "CHARGE_ACCOUNT", - "JOB_QUEUE", "BATCH_COMMAND_FLAGS"): - nodes = super(EnvWorkflow, self).get_children("entry", attributes={"id":name}, root=root) + if name in ( + "JOB_WALLCLOCK_TIME", + "PROJECT", + "CHARGE_ACCOUNT", + "JOB_QUEUE", + "BATCH_COMMAND_FLAGS", + ): + nodes = super(EnvWorkflow, self).get_children( + "entry", attributes={"id": name}, root=root + ) else: - nodes = super(EnvWorkflow, self).get_children(name, attributes=attributes, root=root) + nodes = super(EnvWorkflow, 
self).get_children( + name, attributes=attributes, root=root + ) return nodes diff --git a/CIME/XML/expected_fails_file.py b/CIME/XML/expected_fails_file.py index dd41da8fb4d..3b253638b14 100644 --- a/CIME/XML/expected_fails_file.py +++ b/CIME/XML/expected_fails_file.py @@ -50,10 +50,12 @@ logger = logging.getLogger(__name__) -class ExpectedFailsFile(GenericXML): +class ExpectedFailsFile(GenericXML): def __init__(self, infile): - schema = os.path.join(utils.get_schema_path(), "expected_fails_file.xsd") + schema = os.path.join( + utils.get_schema_path(), "expected_fails_file.xsd" + ) GenericXML.__init__(self, infile, schema=schema) def get_expected_fails(self): diff --git a/CIME/XML/files.py b/CIME/XML/files.py index 3d645bfb5f7..d865fa5eb46 100644 --- a/CIME/XML/files.py +++ b/CIME/XML/files.py @@ -10,9 +10,9 @@ logger = logging.getLogger(__name__) -class Files(EntryID): - def __init__(self, comp_interface="mct"): +class Files(EntryID): + def __init__(self, comp_interface=None): """ initialize an object @@ -20,6 +20,8 @@ def __init__(self, comp_interface="mct"): >>> files.get_value('CASEFILE_HEADERS',resolved=False) '$CIMEROOT/config/config_headers.xml' """ + if comp_interface is None: + comp_interface = "mct" cimeroot = get_cime_root() cimeroot_parent = os.path.dirname(cimeroot) config_path = get_config_path() @@ -50,49 +52,71 @@ def get_value(self, vid, attribute=None, resolved=True, subgroup=None): elif attribute: self._cpl_comp = attribute else: - self._cpl_comp['component'] = 'cpl' + self._cpl_comp["component"] = "cpl" if "COMP_ROOT_DIR" in vid: if vid in self.COMP_ROOT_DIR: if attribute is not None: - if vid+attribute["component"] in self.COMP_ROOT_DIR: - return self.COMP_ROOT_DIR[vid+attribute["component"]] + if vid + attribute["component"] in self.COMP_ROOT_DIR: + return self.COMP_ROOT_DIR[vid + attribute["component"]] else: return self.COMP_ROOT_DIR[vid] - newatt = {"comp_interface":self._comp_interface} + newatt = {"comp_interface": self._comp_interface} if attribute: newatt.update(attribute) - value = super(Files, self).get_value(vid, attribute=newatt, resolved=False, subgroup=subgroup) + value = super(Files, self).get_value( + vid, attribute=newatt, resolved=False, subgroup=subgroup + ) if value is None and attribute is not None: - value = super(Files, self).get_value(vid, attribute=attribute, resolved=False, subgroup=subgroup) + value = super(Files, self).get_value( + vid, attribute=attribute, resolved=False, subgroup=subgroup + ) if value is None: - value = super(Files, self).get_value(vid, attribute=None, resolved=False, subgroup=subgroup) - - if "COMP_ROOT_DIR" not in vid and value is not None and "COMP_ROOT_DIR" in value: + value = super(Files, self).get_value( + vid, attribute=None, resolved=False, subgroup=subgroup + ) + + if ( + "COMP_ROOT_DIR" not in vid + and value is not None + and "COMP_ROOT_DIR" in value + ): m = re.search("(COMP_ROOT_DIR_[^/]+)/", value) comp_root_dir_var_name = m.group(1) - newatt = {"comp_interface":self._comp_interface} + newatt = {"comp_interface": self._comp_interface} if attribute: newatt.update(attribute) - crd_node = self.scan_optional_child(comp_root_dir_var_name, attributes=newatt) + crd_node = self.scan_optional_child( + comp_root_dir_var_name, attributes=newatt + ) if crd_node: - comp_root_dir = self.get_value(comp_root_dir_var_name, attribute=newatt, resolved=False, subgroup=subgroup) + comp_root_dir = self.get_value( + comp_root_dir_var_name, + attribute=newatt, + resolved=False, + subgroup=subgroup, + ) else: - comp_root_dir = 
self.get_value(comp_root_dir_var_name, attribute=attribute, resolved=False, subgroup=subgroup) - self.set_value(comp_root_dir_var_name, comp_root_dir,subgroup=attribute) + comp_root_dir = self.get_value( + comp_root_dir_var_name, + attribute=attribute, + resolved=False, + subgroup=subgroup, + ) + self.set_value(comp_root_dir_var_name, comp_root_dir, subgroup=attribute) if resolved: - value = value.replace("$"+comp_root_dir_var_name, comp_root_dir) + value = value.replace("$" + comp_root_dir_var_name, comp_root_dir) if resolved and value is not None: value = value.replace("$COMP_INTERFACE", self._comp_interface) value = self.get_resolved_value(value) return value - def set_value(self, vid, value,subgroup=None,ignore_type=False): + def set_value(self, vid, value, subgroup=None, ignore_type=False): if "COMP_ROOT_DIR" in vid: if subgroup is not None: - self.COMP_ROOT_DIR[vid+subgroup["component"]] = value + self.COMP_ROOT_DIR[vid + subgroup["component"]] = value else: self.COMP_ROOT_DIR[vid] = value @@ -100,9 +124,8 @@ def set_value(self, vid, value,subgroup=None,ignore_type=False): expect(False, "Attempt to set a nonmutable variable {}".format(vid)) return value - def get_schema(self, nodename, attributes=None): - node = self.get_optional_child("entry", {"id":nodename}) + node = self.get_optional_child("entry", {"id": nodename}) schemanode = self.get_optional_child("schema", root=node, attributes=attributes) if schemanode is not None: logger.debug("Found schema for {}".format(nodename)) @@ -110,9 +133,11 @@ def get_schema(self, nodename, attributes=None): return None def get_components(self, nodename): - node = self.get_optional_child("entry", {"id":nodename}) + node = self.get_optional_child("entry", {"id": nodename}) if node is not None: - valnodes = self.get_children("value", root=self.get_child("values", root=node)) + valnodes = self.get_children( + "value", root=self.get_child("values", root=node) + ) values = [] for valnode in valnodes: value = self.get(valnode, "component") diff --git a/CIME/XML/generic_xml.py b/CIME/XML/generic_xml.py index 6355ce74685..00f7fdad84b 100644 --- a/CIME/XML/generic_xml.py +++ b/CIME/XML/generic_xml.py @@ -6,7 +6,8 @@ from CIME.utils import safe_copy, get_src_root import xml.etree.ElementTree as ET -#pylint: disable=import-error + +# pylint: disable=import-error from distutils.spawn import find_executable import getpass import CIME.six @@ -15,18 +16,20 @@ logger = logging.getLogger(__name__) -class _Element(object): # private class, don't want users constructing directly or calling methods on it +class _Element( + object +): # private class, don't want users constructing directly or calling methods on it def __init__(self, xml_element): self.xml_element = xml_element def __eq__(self, rhs): expect(isinstance(rhs, _Element), "Wrong type") - return self.xml_element == rhs.xml_element # pylint: disable=protected-access + return self.xml_element == rhs.xml_element # pylint: disable=protected-access def __ne__(self, rhs): expect(isinstance(rhs, _Element), "Wrong type") - return self.xml_element != rhs.xml_element # pylint: disable=protected-access + return self.xml_element != rhs.xml_element # pylint: disable=protected-access def __hash__(self): return hash(self.xml_element) @@ -34,6 +37,7 @@ def __hash__(self): def __deepcopy__(self, _): return _Element(deepcopy(self.xml_element)) + class GenericXML(object): _FILEMAP = {} @@ -45,7 +49,14 @@ def invalidate(cls, filename): if filename in cls._FILEMAP: del cls._FILEMAP[filename] - def __init__(self, infile=None, 
schema=None, root_name_override=None, root_attrib_override=None, read_only=True): + def __init__( + self, + infile=None, + schema=None, + root_name_override=None, + root_attrib_override=None, + read_only=True, + ): """ Initialize an object """ @@ -59,22 +70,35 @@ def __init__(self, infile=None, schema=None, root_name_override=None, root_attri if infile is None: return - if os.path.isfile(infile) and os.access(infile, os.R_OK) and os.stat(infile).st_size > 0: + if ( + os.path.isfile(infile) + and os.access(infile, os.R_OK) + and os.stat(infile).st_size > 0 + ): # If file is defined and exists, read it self.read(infile, schema) else: # if file does not exist create a root xml element # and set its id to file - expect(not self.read_only, "Makes no sense to have empty read-only file: {}".format(infile)) + expect( + not self.read_only, + "Makes no sense to have empty read-only file: {}".format(infile), + ) logger.debug("File {} does not exist.".format(infile)) - expect("$" not in infile,"File path not fully resolved: {}".format(infile)) + expect("$" not in infile, "File path not fully resolved: {}".format(infile)) root = _Element(ET.Element("xml")) if root_name_override: - self.root = self.make_child(root_name_override, root=root, attributes=root_attrib_override) + self.root = self.make_child( + root_name_override, root=root, attributes=root_attrib_override + ) else: - self.root = self.make_child("file", root=root, attributes={"id":os.path.basename(infile), "version":"2.0"}) + self.root = self.make_child( + "file", + root=root, + attributes={"id": os.path.basename(infile), "version": "2.0"}, + ) self.tree = ET.ElementTree(root) @@ -87,17 +111,25 @@ def read(self, infile, schema=None): cached_read = False if not self.DISABLE_CACHING and infile in self._FILEMAP: timestamp_cache = self._FILEMAP[infile].modtime - timestamp_file = os.path.getmtime(infile) + timestamp_file = os.path.getmtime(infile) if timestamp_file == timestamp_cache: logger.debug("read (cached): {}".format(infile)) - expect(self.read_only or not self.filename or not self.needsrewrite, - "Reading into object marked for rewrite, file {}".format(self.filename)) + expect( + self.read_only or not self.filename or not self.needsrewrite, + "Reading into object marked for rewrite, file {}".format( + self.filename + ), + ) self.tree, self.root, _ = self._FILEMAP[infile] cached_read = True if not cached_read: logger.debug("read: {}".format(infile)) - file_open = (lambda x: open(x, 'r', encoding='utf-8')) if CIME.six.PY3 else (lambda x: open(x, 'r')) + file_open = ( + (lambda x: open(x, "r", encoding="utf-8")) + if CIME.six.PY3 + else (lambda x: open(x, "r")) + ) with file_open(infile) as fd: self.read_fd(fd) @@ -106,11 +138,15 @@ def read(self, infile, schema=None): logger.debug("File version is {}".format(str(self.get_version()))) - self._FILEMAP[infile] = self.CacheEntry(self.tree, self.root, os.path.getmtime(infile)) + self._FILEMAP[infile] = self.CacheEntry( + self.tree, self.root, os.path.getmtime(infile) + ) def read_fd(self, fd): - expect(self.read_only or not self.filename or not self.needsrewrite, - "Reading into object marked for rewrite, file {}".format(self.filename)) + expect( + self.read_only or not self.filename or not self.needsrewrite, + "Reading into object marked for rewrite, file {}".format(self.filename), + ) read_only = self.read_only if self.tree: addroot = _Element(ET.parse(fd).getroot()) @@ -134,8 +170,10 @@ def read_fd(self, fd): # Then recursively add the included files.
for elem in include_elems: path = os.path.abspath( - os.path.join(os.getcwd(), os.path.dirname(self.filename), - self.get(elem, "href"))) + os.path.join( + os.getcwd(), os.path.dirname(self.filename), self.get(elem, "href") + ) + ) logger.debug("Include file {}".format(path)) self.read(path) @@ -172,31 +210,61 @@ def has(self, node, attrib_name): def set(self, node, attrib_name, value): if self.get(node, attrib_name) != value: - expect(not self.read_only, "read_only: cannot set attrib[{}]={} for node {} in file {}".format(attrib_name, value, self.name(node), self.filename)) + expect( + not self.read_only, + "read_only: cannot set attrib[{}]={} for node {} in file {}".format( + attrib_name, value, self.name(node), self.filename + ), + ) if attrib_name == "id": - expect(not self.locked, "locked: cannot set attrib[{}]={} for node {} in file {}".format(attrib_name, value, self.name(node), self.filename)) + expect( + not self.locked, + "locked: cannot set attrib[{}]={} for node {} in file {}".format( + attrib_name, value, self.name(node), self.filename + ), + ) self.needsrewrite = True return node.xml_element.set(attrib_name, value) def pop(self, node, attrib_name): - expect(not self.read_only, "read_only: cannot pop attrib[{}] for node {} in file {}".format(attrib_name, self.name(node), self.filename)) + expect( + not self.read_only, + "read_only: cannot pop attrib[{}] for node {} in file {}".format( + attrib_name, self.name(node), self.filename + ), + ) if attrib_name == "id": - expect(not self.locked, "locked: cannot pop attrib[{}] for node {} in file {}".format(attrib_name, self.name(node), self.filename)) + expect( + not self.locked, + "locked: cannot pop attrib[{}] for node {} in file {}".format( + attrib_name, self.name(node), self.filename + ), + ) self.needsrewrite = True return node.xml_element.attrib.pop(attrib_name) def attrib(self, node): # Return a COPY. 
We do not want clients making changes directly - return None if node.xml_element.attrib is None else dict(node.xml_element.attrib) + return ( + None if node.xml_element.attrib is None else dict(node.xml_element.attrib) + ) def set_name(self, node, name): - expect(not self.read_only, "read_only: set node name {} in file {}".format(name, self.filename)) + expect( + not self.read_only, + "read_only: set node name {} in file {}".format(name, self.filename), + ) if node.xml_element.tag != name: self.needsrewrite = True node.xml_element.tag = name def set_text(self, node, text): - expect(not self.read_only, "read_only: set node text {} for node {} in file {}".format(text, self.name(node), self.filename)) + expect( + not self.read_only, + "read_only: set node text {} for node {} in file {}".format( + text, self.name(node), self.filename + ), + ) if node.xml_element.text != text: node.xml_element.text = text self.needsrewrite = True @@ -211,7 +279,14 @@ def add_child(self, node, root=None, position=None): """ Add element node to self at root """ - expect(not self.locked and not self.read_only, "{}: cannot add child {} in file {}".format("read_only" if self.read_only else "locked", self.name(node), self.filename)) + expect( + not self.locked and not self.read_only, + "{}: cannot add child {} in file {}".format( + "read_only" if self.read_only else "locked", + self.name(node), + self.filename, + ), + ) self.needsrewrite = True root = root if root is not None else self.root if position is not None: @@ -223,13 +298,25 @@ def copy(self, node): return deepcopy(node) def remove_child(self, node, root=None): - expect(not self.locked and not self.read_only, "{}: cannot remove child {} in file {}".format("read_only" if self.read_only else "locked", self.name(node), self.filename)) + expect( + not self.locked and not self.read_only, + "{}: cannot remove child {} in file {}".format( + "read_only" if self.read_only else "locked", + self.name(node), + self.filename, + ), + ) self.needsrewrite = True root = root if root is not None else self.root root.xml_element.remove(node.xml_element) def make_child(self, name, attributes=None, root=None, text=None): - expect(not self.locked and not self.read_only, "{}: cannot make child {} in file {}".format("read_only" if self.read_only else "locked", name, self.filename)) + expect( + not self.locked and not self.read_only, + "{}: cannot make child {} in file {}".format( + "read_only" if self.read_only else "locked", name, self.filename + ), + ) root = root if root is not None else self.root self.needsrewrite = True if attributes is None: @@ -243,7 +330,12 @@ def make_child(self, name, attributes=None, root=None, text=None): return node def make_child_comment(self, root=None, text=None): - expect(not self.locked and not self.read_only, "{}: cannot make child {} in file {}".format("read_only" if self.read_only else "locked", text, self.filename)) + expect( + not self.locked and not self.read_only, + "{}: cannot make child {} in file {}".format( + "read_only" if self.read_only else "locked", text, self.filename + ), + ) root = root if root is not None else self.root self.needsrewrite = True et_comment = ET.Comment(text) @@ -287,8 +379,17 @@ def get_children(self, name=None, attributes=None, root=None): return children def get_child(self, name=None, attributes=None, root=None, err_msg=None): - child = self.get_optional_child(root=root, name=name, attributes=attributes, err_msg=err_msg) - expect(child, err_msg if err_msg else "Expected one child, found None with name '{}' and 
attribs '{}' in file {}".format(name, attributes, self.filename)) + child = self.get_optional_child( + root=root, name=name, attributes=attributes, err_msg=err_msg + ) + expect( + child, + err_msg + if err_msg + else "Expected one child, found None with name '{}' and attribs '{}' in file {}".format( + name, attributes, self.filename + ), + ) return child def get_optional_child(self, name=None, attributes=None, root=None, err_msg=None): @@ -301,17 +402,28 @@ def get_optional_child(self, name=None, attributes=None, root=None, err_msg=None attlen = len(attributes) children = [c for c in children if len(c.xml_element.attrib) == attlen] - expect(len(children) <= 1, err_msg if err_msg else "Multiple matches for name '{}' and attribs '{}' in file {}".format(name, attributes, self.filename)) + expect( + len(children) <= 1, + err_msg + if err_msg + else "Multiple matches for name '{}' and attribs '{}' in file {}".format( + name, attributes, self.filename + ), + ) return children[0] if children else None def get_element_text(self, element_name, attributes=None, root=None): - element_node = self.get_optional_child(name=element_name, attributes=attributes, root=root) + element_node = self.get_optional_child( + name=element_name, attributes=attributes, root=root + ) if element_node is not None: return self.text(element_node) return None def set_element_text(self, element_name, new_text, attributes=None, root=None): - element_node = self.get_optional_child(name=element_name, attributes=attributes, root=root) + element_node = self.get_optional_child( + name=element_name, attributes=attributes, root=root + ) if element_node is not None: self.set_text(element_node, new_text) return new_text @@ -335,15 +447,19 @@ def check_timestamp(self): """ timestamp_cache = self._FILEMAP[self.filename].modtime if timestamp_cache != 0.0: - timestamp_file = os.path.getmtime(self.filename) + timestamp_file = os.path.getmtime(self.filename) return timestamp_file == timestamp_cache else: return True def validate_timestamp(self): timestamp_ok = self.check_timestamp() - expect(timestamp_ok, - "File {} appears to have changed without a corresponding invalidation.".format(self.filename)) + expect( + timestamp_ok, + "File {} appears to have changed without a corresponding invalidation.".format( + self.filename + ), + ) def write(self, outfile=None, force_write=False): """ @@ -357,7 +473,10 @@ def write(self, outfile=None, force_write=False): if outfile is None: outfile = self.filename - logger.debug("write: " + (outfile if isinstance(outfile, CIME.six.string_types) else str(outfile))) + logger.debug( + "write: " + + (outfile if isinstance(outfile, CIME.six.string_types) else str(outfile)) + ) xmlstr = self.get_raw_record() @@ -366,15 +485,22 @@ def write(self, outfile=None, force_write=False): if xmllint is not None: if isinstance(outfile, CIME.six.string_types): - run_cmd_no_fail("{} --format --output {} -".format(xmllint, outfile), input_str=xmlstr) + run_cmd_no_fail( + "{} --format --output {} -".format(xmllint, outfile), + input_str=xmlstr, + ) else: - outfile.write(run_cmd_no_fail("{} --format -".format(xmllint), input_str=xmlstr)) + outfile.write( + run_cmd_no_fail("{} --format -".format(xmllint), input_str=xmlstr) + ) else: - with open(outfile,'w') as xmlout: + with open(outfile, "w") as xmlout: xmlout.write(xmlstr) - self._FILEMAP[self.filename] = self.CacheEntry(self.tree, self.root, os.path.getmtime(self.filename)) + self._FILEMAP[self.filename] = self.CacheEntry( + self.tree, self.root, os.path.getmtime(self.filename) 
+ ) self.needsrewrite = False @@ -387,7 +513,12 @@ def scan_child(self, nodename, attributes=None, root=None): nodes = self.scan_children(nodename, attributes=attributes, root=root) - expect(len(nodes) == 1, "Incorrect number of matches, {:d}, for nodename '{}' and attrs '{}' in file '{}'".format(len(nodes), nodename, attributes, self.filename)) + expect( + len(nodes) == 1, + "Incorrect number of matches, {:d}, for nodename '{}' and attrs '{}' in file '{}'".format( + len(nodes), nodename, attributes, self.filename + ), + ) return nodes[0] def scan_optional_child(self, nodename, attributes=None, root=None): @@ -398,18 +529,27 @@ def scan_optional_child(self, nodename, attributes=None, root=None): """ nodes = self.scan_children(nodename, attributes=attributes, root=root) - expect(len(nodes) <= 1, "Multiple matches for nodename '{}' and attrs '{}' in file '{}', found {} matches".format(nodename, attributes, self.filename, len(nodes))) + expect( + len(nodes) <= 1, + "Multiple matches for nodename '{}' and attrs '{}' in file '{}', found {} matches".format( + nodename, attributes, self.filename, len(nodes) + ), + ) return nodes[0] if nodes else None def scan_children(self, nodename, attributes=None, root=None): - logger.debug("(get_nodes) Input values: {}, {}, {}, {}".format(self.__class__.__name__, nodename, attributes, root)) + logger.debug( + "(get_nodes) Input values: {}, {}, {}, {}".format( + self.__class__.__name__, nodename, attributes, root + ) + ) if root is None: root = self.root nodes = [] - namespace = {"xi" : "http://www.w3.org/2001/XInclude"} + namespace = {"xi": "http://www.w3.org/2001/XInclude"} xpath = ".//" + (nodename if nodename else "") @@ -422,14 +562,16 @@ def scan_children(self, nodename, attributes=None, root=None): if value is None: xpath = ".//{}[@{}]".format(nodename, key) else: - xpath = ".//{}[@{}=\'{}\']".format(nodename, key, value) + xpath = ".//{}[@{}='{}']".format(nodename, key, value) logger.debug("xpath is {}".format(xpath)) try: newnodes = root.xml_element.findall(xpath, namespace) except Exception as e: - expect(False, "Bad xpath search term '{}', error: {}".format(xpath, e)) + expect( + False, "Bad xpath search term '{}', error: {}".format(xpath, e) + ) if not nodes: nodes = newnodes @@ -448,7 +590,9 @@ def scan_children(self, nodename, attributes=None, root=None): return [_Element(node) for node in nodes] - def get_value(self, item, attribute=None, resolved=True, subgroup=None): # pylint: disable=unused-argument + def get_value( + self, item, attribute=None, resolved=True, subgroup=None + ): # pylint: disable=unused-argument """ get_value is expected to be defined by the derived classes, if you get here the value was not found in the class. 
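The read/write caching exercised in the hunks above keys GenericXML._FILEMAP on the file path and stores the parsed tree together with the file's modification time; a cached parse is reused only while the on-disk mtime still matches the recorded one, and invalidate() drops an entry explicitly. A minimal standalone sketch of the same mtime-keyed pattern follows (the _FileCache name is hypothetical and plain text stands in for the parsed XML tree; this illustrates the idea, it is not CIME code):

import os
from collections import namedtuple

CacheEntry = namedtuple("CacheEntry", ["data", "modtime"])


class _FileCache:
    # Hypothetical illustration of the GenericXML._FILEMAP scheme.
    _filemap = {}

    def read(self, path):
        mtime = os.path.getmtime(path)
        entry = self._filemap.get(path)
        if entry is not None and entry.modtime == mtime:
            return entry.data  # cache hit: file unchanged since it was read
        with open(path, "r", encoding="utf-8") as fd:
            data = fd.read()  # the real class parses XML here
        self._filemap[path] = CacheEntry(data, mtime)
        return data

    def invalidate(self, path):
        # Counterpart of GenericXML.invalidate(): drop a stale entry.
        self._filemap.pop(path, None)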
@@ -456,11 +600,15 @@ def get_value(self, item, attribute=None, resolved=True, subgroup=None): # pylin logger.debug("Get Value for " + item) return None - def get_values(self, vid, attribute=None, resolved=True, subgroup=None):# pylint: disable=unused-argument + def get_values( + self, vid, attribute=None, resolved=True, subgroup=None + ): # pylint: disable=unused-argument logger.debug("Get Values for " + vid) return [] - def set_value(self, vid, value, subgroup=None, ignore_type=True): # pylint: disable=unused-argument + def set_value( + self, vid, value, subgroup=None, ignore_type=True + ): # pylint: disable=unused-argument """ ignore_type is not used in this flavor """ @@ -489,10 +637,10 @@ def get_resolved_value(self, raw_value, allow_unresolved_envvars=False): True """ logger.debug("raw_value {}".format(raw_value)) - reference_re = re.compile(r'\${?(\w+)}?') - env_ref_re = re.compile(r'\$ENV\{(\w+)\}') - shell_ref_re = re.compile(r'\$SHELL\{([^}]+)\}') - math_re = re.compile(r'\s[+-/*]\s') + reference_re = re.compile(r"\${?(\w+)}?") + env_ref_re = re.compile(r"\$ENV\{(\w+)\}") + shell_ref_re = re.compile(r"\$SHELL\{([^}]+)\}") + math_re = re.compile(r"\s[+-/*]\s") item_data = raw_value if item_data is None: @@ -520,11 +668,13 @@ def get_resolved_value(self, raw_value, allow_unresolved_envvars=False): logger.debug("find: {}".format(var)) # The overridden versions of this method do not simply return None # so the pylint should not be flagging this - ref = self.get_value(var) # pylint: disable=assignment-from-none + ref = self.get_value(var) # pylint: disable=assignment-from-none if ref is not None: logger.debug("resolve: " + str(ref)) - item_data = item_data.replace(m.group(), self.get_resolved_value(str(ref))) + item_data = item_data.replace( + m.group(), self.get_resolved_value(str(ref)) + ) elif var == "CIMEROOT": cimeroot = get_cime_root() item_data = item_data.replace(m.group(), cimeroot) @@ -547,13 +697,20 @@ def validate_xml_file(self, filename, schema): """ validate an XML file against a provided schema file using xmllint """ - expect(os.path.isfile(filename),"xml file not found {}".format(filename)) - expect(os.path.isfile(schema),"schema file not found {}".format(schema)) + expect(os.path.isfile(filename), "xml file not found {}".format(filename)) + expect(os.path.isfile(schema), "schema file not found {}".format(schema)) xmllint = find_executable("xmllint") - expect(os.path.isfile(xmllint), " xmllint not found in PATH, xmllint is required for cime. PATH={}".format(os.environ["PATH"])) + expect( + os.path.isfile(xmllint), + " xmllint not found in PATH, xmllint is required for cime.
PATH={}".format( + os.environ["PATH"] + ), + ) logger.debug("Checking file {} against schema {}".format(filename, schema)) - run_cmd_no_fail("{} --xinclude --noout --schema {} {}".format(xmllint, schema, filename)) + run_cmd_no_fail( + "{} --xinclude --noout --schema {} {}".format(xmllint, schema, filename) + ) def get_raw_record(self, root=None): logger.debug("writing file {}".format(self.filename)) @@ -563,7 +720,12 @@ def get_raw_record(self, root=None): xmlstr = ET.tostring(root.xml_element) except ET.ParseError as e: ET.dump(root.xml_element) - expect(False, "Could not write file {}, xml formatting error '{}'".format(self.filename, e)) + expect( + False, + "Could not write file {}, xml formatting error '{}'".format( + self.filename, e + ), + ) return xmlstr def get_id(self): diff --git a/CIME/XML/grids.py b/CIME/XML/grids.py index 28dc5e292eb..cbd1ef8ecce 100644 --- a/CIME/XML/grids.py +++ b/CIME/XML/grids.py @@ -14,13 +14,13 @@ # for GLC when there are multiple ice sheet grids). It is important that this character # NOT appear in any file names - or anywhere in the path of directories holding input # data. -GRID_SEP = ':' +GRID_SEP = ":" -class Grids(GenericXML): - def __init__(self, infile=None, files=None): +class Grids(GenericXML): + def __init__(self, infile=None, files=None, comp_interface=None): if files is None: - files = Files() + files = Files(comp_interface=comp_interface) if infile is None: infile = files.get_value("GRIDS_SPEC_FILE") logger.debug(" Grid specification file is {}".format(infile)) @@ -57,19 +57,19 @@ def get_grid_info(self, name, compset, driver): atmnlev = None lndnlev = None - #mechanism to specify atm levels + # mechanism to specify atm levels atmlevregex = re.compile(r"([^_]+)z(\d+)(.*)$") levmatch = re.match(atmlevregex, name) - if levmatch: + if levmatch: atmnlev = levmatch.group(2) - name = levmatch.group(1)+levmatch.group(3) + name = levmatch.group(1) + levmatch.group(3) - #mechanism to specify lnd levels + # mechanism to specify lnd levels lndlevregex = re.compile(r"(.*_)([^_]+)z(\d+)(_[^m].*)$") levmatch = re.match(lndlevregex, name) - if levmatch: + if levmatch: lndnlev = levmatch.group(3) - name = levmatch.group(1)+levmatch.group(2)+levmatch.group(4) + name = levmatch.group(1) + levmatch.group(2) + levmatch.group(4) # determine component_grids dictionary and grid longname lname = self._read_config_grids(name, compset, atmnlev, lndnlev) @@ -77,7 +77,7 @@ def get_grid_info(self, name, compset, driver): component_grids = _ComponentGrids(lname) # determine domains given component_grids - domains = self._get_domains(component_grids, atmlevregex, lndlevregex, driver) + domains = self._get_domains(component_grids, atmlevregex, lndlevregex, driver) gridinfo.update(domains) @@ -129,24 +129,33 @@ def _read_config_grids(self, name, compset, atmnlev, lndnlev): if compset_match is not None and not_compset_match is None: foundcompset = True model_gridnode = node - logger.debug("Found match for {} with compset_match {} and not_compset_match {}" - .format(alias, compset_attrib, not_compset_attrib)) + logger.debug( + "Found match for {} with compset_match {} and not_compset_match {}".format( + alias, compset_attrib, not_compset_attrib + ) + ) break elif compset_attrib: compset_match = re.search(compset_attrib, compset) if compset_match is not None: foundcompset = True model_gridnode = node - logger.debug("Found match for {} with compset_match {}" - .format(alias, compset_attrib)) + logger.debug( + "Found match for {} with compset_match {}".format( + alias, 
compset_attrib + ) + ) break elif not_compset_attrib: not_compset_match = re.search(not_compset_attrib, compset) if not_compset_match is None: foundcompset = True model_gridnode = node - logger.debug("Found match for {} with not_compset_match {}" - .format(alias, not_compset_attrib)) + logger.debug( + "Found match for {} with not_compset_match {}".format( + alias, not_compset_attrib + ) + ) break else: foundcompset = True @@ -155,7 +164,9 @@ def _read_config_grids(self, name, compset, atmnlev, lndnlev): break expect(foundalias, "no alias {} defined".format(name)) # if no match is found in config_grids.xml - exit - expect(foundcompset, "grid alias {} not valid for compset {}".format(name, compset)) + expect( + foundcompset, "grid alias {} not valid for compset {}".format(name, compset) + ) # for the match - find all of the component grid settings grid_nodes = self.get_children("grid", root=model_gridnode) @@ -164,7 +175,7 @@ def _read_config_grids(self, name, compset, atmnlev, lndnlev): value = self.text(grid_node) if model_grid[name] != "null": model_grid[name] = value - mask_node = self.get_optional_child("mask",root=model_gridnode) + mask_node = self.get_optional_child("mask", root=model_gridnode) if mask_node is not None: model_grid["mask"] = self.text(mask_node) else: @@ -172,7 +183,16 @@ def _read_config_grids(self, name, compset, atmnlev, lndnlev): # determine component grids and associated required domains and gridmaps # TODO: this should be in XML, not here - prefix = {"atm":"a%", "lnd":"l%", "ocnice":"oi%", "rof":"r%", "wav":"w%", "glc":"g%", "mask":"m%", "iac":"z%"} + prefix = { + "atm": "a%", + "lnd": "l%", + "ocnice": "oi%", + "rof": "r%", + "wav": "w%", + "glc": "g%", + "mask": "m%", + "iac": "z%", + } lname = "" for component_gridname in self._comp_gridnames: if lname: @@ -181,57 +201,61 @@ def _read_config_grids(self, name, compset, atmnlev, lndnlev): lname = prefix[component_gridname] if model_grid[component_gridname] is not None: lname += model_grid[component_gridname] - if component_gridname == 'atm' and atmnlev is not None: + if component_gridname == "atm" and atmnlev is not None: if not ("a{:n}ull" in lname): lname += "z" + atmnlev - elif component_gridname == 'lnd' and lndnlev is not None: + elif component_gridname == "lnd" and lndnlev is not None: if not ("l{:n}ull" in lname): lname += "z" + lndnlev else: - lname += 'null' + lname += "null" return lname def _get_domains(self, component_grids, atmlevregex, lndlevregex, driver): - """ determine domains dictionary for config_grids.xml v2 schema""" + """determine domains dictionary for config_grids.xml v2 schema""" domains = {} - mask_name = component_grids.get_comp_gridname('mask') + mask_name = component_grids.get_comp_gridname("mask") for comp_name in component_grids.get_compnames(include_mask=True): for grid_name in component_grids.get_comp_gridlist(comp_name): # Determine grid name with no nlev suffix if there is one grid_name_nonlev = grid_name levmatch = re.match(atmlevregex, grid_name) - if levmatch: - grid_name_nonlev = levmatch.group(1)+levmatch.group(3) + if levmatch: + grid_name_nonlev = levmatch.group(1) + levmatch.group(3) levmatch = re.match(lndlevregex, grid_name) - if levmatch: - grid_name_nonlev = levmatch.group(1)+levmatch.group(2)+levmatch.group(4) - self._get_domains_for_one_grid(domains=domains, - comp_name=comp_name.upper(), - grid_name=grid_name, - grid_name_nonlev=grid_name_nonlev, - mask_name=mask_name, - driver=driver) + if levmatch: + grid_name_nonlev = ( + levmatch.group(1) + levmatch.group(2) 
+ levmatch.group(4) + ) + self._get_domains_for_one_grid( + domains=domains, + comp_name=comp_name.upper(), + grid_name=grid_name, + grid_name_nonlev=grid_name_nonlev, + mask_name=mask_name, + driver=driver, + ) if driver == "nuopc": - mask_domain_node = self.get_optional_child("domain", attributes={"name":domains["MASK_GRID"]}, - root=self.get_child("domains")) - mesh_nodes = self.get_children("mesh", root=mask_domain_node) - for mesh_node in mesh_nodes: - driver_attrib = self.get(mesh_node, "driver") - if driver == driver_attrib: - domains["MASK_MESH"] = self.text(mesh_node) - if "LND_DOMAIN_FILE" in domains: - if domains["LND_DOMAIN_FILE"] != 'UNSET': - domains["PTS_DOMAINFILE"] = os.path.join("$DIN_LOC_ROOT/share/domains",domains["LND_DOMAIN_FILE"]) + # Obtain the root node for the domain entry that sets the mask + if domains["MASK_GRID"] != "null": + mask_domain_node = self.get_optional_child( + "domain", + attributes={"name": domains["MASK_GRID"]}, + root=self.get_child("domains"), + ) + # Now obtain the mesh for the mask for the domain node for that component grid + mesh_node = self.get_child("mesh", root=mask_domain_node) + domains["MASK_MESH"] = self.text(mesh_node) return domains - def _get_domains_for_one_grid(self, domains, comp_name, - grid_name, grid_name_nonlev, - mask_name, driver): + def _get_domains_for_one_grid( + self, domains, comp_name, grid_name, grid_name_nonlev, mask_name, driver + ): """Get domain information for the given grid, adding elements to the domains dictionary Args: @@ -242,29 +266,39 @@ def _get_domains_for_one_grid(self, domains, comp_name, - mask_name: the mask being used in this case - driver: the name of the driver being used in this case """ - domain_node = self.get_optional_child("domain", attributes={"name":grid_name_nonlev}, - root=self.get_child("domains")) + domain_node = self.get_optional_child( + "domain", + attributes={"name": grid_name_nonlev}, + root=self.get_child("domains"), + ) if not domain_node: - domain_root = self.get_optional_child("domains",{"driver":driver}) + domain_root = self.get_optional_child("domains", {"driver": driver}) if domain_root: - domain_node = self.get_optional_child("domain", attributes={"name":grid_name_nonlev}, - root=domain_root) + domain_node = self.get_optional_child( + "domain", attributes={"name": grid_name_nonlev}, root=domain_root + ) if domain_node: # determine xml variable name if not "PTS_LAT" in domains: - domains["PTS_LAT"] = '-999.99' + domains["PTS_LAT"] = "-999.99" if not "PTS_LON" in domains: - domains["PTS_LON"] = '-999.99' + domains["PTS_LON"] = "-999.99" if not comp_name == "MASK": if self.get_element_text("nx", root=domain_node): # If there are multiple grids for this component, then the component # _NX and _NY values won't end up being used, so we simply set them to 1 - _add_grid_info(domains, comp_name + "_NX", - int(self.get_element_text("nx", root=domain_node)), - value_for_multiple=1) - _add_grid_info(domains, comp_name + "_NY", - int(self.get_element_text("ny", root=domain_node)), - value_for_multiple=1) + _add_grid_info( + domains, + comp_name + "_NX", + int(self.get_element_text("nx", root=domain_node)), + value_for_multiple=1, + ) + _add_grid_info( + domains, + comp_name + "_NY", + int(self.get_element_text("ny", root=domain_node)), + value_for_multiple=1, + ) elif self.get_element_text("lon", root=domain_node): # No need to call _add_grid_info here because, for multiple grids, the # end result will be the same as the hard-coded 1 used here @@ -281,43 +315,54 @@ def 
_get_domains_for_one_grid(self, domains, comp_name, # set up dictionary of domain files for every component _add_grid_info(domains, comp_name + "_GRID", grid_name) - file_nodes = self.get_children("file", root=domain_node) - domain_file = "" - for file_node in file_nodes: - grid_attrib = self.get(file_node, "grid") - mask_attrib = self.get(file_node, "mask") - if grid_attrib is not None and mask_attrib is not None: - grid_match = re.search(comp_name.lower(), grid_attrib) - mask_match = False - if mask_name is not None: + if driver == "mct": + # mct + file_nodes = self.get_children("file", root=domain_node) + domain_file = "" + for file_node in file_nodes: + grid_attrib = self.get(file_node, "grid") + mask_attrib = self.get(file_node, "mask") + if grid_attrib is not None and mask_attrib is not None: + grid_match = re.search(comp_name.lower(), grid_attrib) + mask_match = False + if mask_name is not None: + mask_match = mask_name == mask_attrib + if grid_match is not None and mask_match: + domain_file = self.text(file_node) + elif grid_attrib is not None: + grid_match = re.search(comp_name.lower(), grid_attrib) + if grid_match is not None: + domain_file = self.text(file_node) + elif mask_attrib is not None: mask_match = mask_name == mask_attrib - if grid_match is not None and mask_match: - domain_file = self.text(file_node) - elif grid_attrib is not None: - grid_match = re.search(comp_name.lower(), grid_attrib) - if grid_match is not None: - domain_file = self.text(file_node) - elif mask_attrib is not None: - mask_match = mask_name == mask_attrib - if mask_match: - domain_file = self.text(file_node) - - if domain_file: - _add_grid_info(domains, comp_name + "_DOMAIN_FILE", os.path.basename(domain_file)) - path = os.path.dirname(domain_file) - if len(path) > 0: - _add_grid_info(domains, comp_name + "_DOMAIN_PATH", path) - - if not comp_name == "MASK": - mesh_nodes = self.get_children("mesh", root=domain_node) - mesh_file = "" - for mesh_node in mesh_nodes: - driver_attrib = self.get(mesh_node, "driver") - if driver == driver_attrib: + if mask_match: + domain_file = self.text(file_node) + if domain_file: + _add_grid_info( + domains, + comp_name + "_DOMAIN_FILE", + os.path.basename(domain_file), + ) + path = os.path.dirname(domain_file) + if len(path) > 0: + _add_grid_info(domains, comp_name + "_DOMAIN_PATH", path) + + if driver == "nuopc": + if not comp_name == "MASK": + mesh_nodes = self.get_children("mesh", root=domain_node) + mesh_file = "" + for mesh_node in mesh_nodes: mesh_file = self.text(mesh_node) - - if mesh_file: - _add_grid_info(domains, comp_name + "_DOMAIN_MESH", mesh_file) + if mesh_file: + _add_grid_info(domains, comp_name + "_DOMAIN_MESH", mesh_file) + if comp_name == "LND": + # Note: ONLY want to define PTS_DOMAINFILE for land + file_node = self.get_optional_child("file", root=domain_node) + if file_node is not None: + domain_file = self.text(file_node) + domains["PTS_DOMAINFILE"] = os.path.join( + "$DIN_LOC_ROOT/share/domains", domain_file + ) def _get_gridmaps(self, component_grids, driver, compset): """Set all mapping files for config_grids.xml v2 schema @@ -341,34 +386,47 @@ def _get_gridmaps(self, component_grids, driver, compset): # # Exclude the ice component from the list of compnames because it is assumed to be # on the same grid as ocn, so doesn't have any gridmaps of its own - compnames = component_grids.get_compnames(include_mask=False, - exclude_comps=['ice']) + compnames = component_grids.get_compnames( + include_mask=False, exclude_comps=["ice"] + ) for idx, 
compname in enumerate(compnames): - for other_compname in compnames[idx+1:]: + for other_compname in compnames[idx + 1 :]: for gridvalue in component_grids.get_comp_gridlist(compname): - for other_gridvalue in component_grids.get_comp_gridlist(other_compname): - self._get_gridmaps_for_one_grid_pair(gridmaps=gridmaps, - driver=driver, - compname=compname, - other_compname=other_compname, - gridvalue=gridvalue, - other_gridvalue=other_gridvalue) + for other_gridvalue in component_grids.get_comp_gridlist( + other_compname + ): + self._get_gridmaps_for_one_grid_pair( + gridmaps=gridmaps, + driver=driver, + compname=compname, + other_compname=other_compname, + gridvalue=gridvalue, + other_gridvalue=other_gridvalue, + ) # (2) set all possibly required gridmaps to 'idmap' for mct and 'unset/idmap' for # nuopc, if they aren't already set required_gridmaps_node = self.get_child("required_gridmaps") - tmp_gridmap_nodes = self.get_children("required_gridmap", root=required_gridmaps_node) + tmp_gridmap_nodes = self.get_children( + "required_gridmap", root=required_gridmaps_node + ) required_gridmap_nodes = [] for node in tmp_gridmap_nodes: - compset_att = self.get(node,"compset") - not_compset_att = self.get(node,"not_compset") - if compset_att and not compset_att in compset or \ - not_compset_att and not_compset_att in compset: + compset_att = self.get(node, "compset") + not_compset_att = self.get(node, "not_compset") + if ( + compset_att + and not compset_att in compset + or not_compset_att + and not_compset_att in compset + ): continue required_gridmap_nodes.append(node) mapname = self.text(node) if mapname not in gridmaps: - gridmaps[mapname] = _get_unset_gridmap_value(mapname, component_grids, driver) + gridmaps[mapname] = _get_unset_gridmap_value( + mapname, component_grids, driver + ) # (3) check that all necessary maps are not set to idmap # @@ -382,21 +440,36 @@ def _get_gridmaps(self, component_grids, driver, compset): grid1_value = component_grids.get_comp_gridname(comp1_name) grid2_value = component_grids.get_comp_gridname(comp2_name) if grid1_value is not None and grid2_value is not None: - if grid1_value != grid2_value and grid1_value != 'null' and grid2_value != 'null': + if ( + grid1_value != grid2_value + and grid1_value != "null" + and grid2_value != "null" + ): map_ = gridmaps[self.text(node)] - if map_ == 'idmap': + if map_ == "idmap": if comp1_name == "ocn" and grid1_value == atm_gridvalue: - logger.debug('ocn_grid == atm_grid so this is not an idmap error') + logger.debug( + "ocn_grid == atm_grid so this is not an idmap error" + ) else: if driver == "nuopc": - gridmaps[self.text(node)] = 'unset' + gridmaps[self.text(node)] = "unset" else: - logger.warning("Warning: missing non-idmap {} for {}, {} and {} {} ". 
- format(self.text(node), comp1_name, grid1_value, comp2_name, grid2_value)) + logger.warning( + "Warning: missing non-idmap {} for {}, {} and {} {} ".format( + self.text(node), + comp1_name, + grid1_value, + comp2_name, + grid2_value, + ) + ) return gridmaps - def _get_gridmaps_for_one_grid_pair(self, gridmaps, driver, compname, other_compname, gridvalue, other_gridvalue): + def _get_gridmaps_for_one_grid_pair( + self, gridmaps, driver, compname, other_compname, gridvalue, other_gridvalue + ): """Get gridmap information for one pair of grids, adding elements to the gridmaps dictionary Args: @@ -414,8 +487,16 @@ def _get_gridmaps_for_one_grid_pair(self, gridmaps, driver, compname, other_comp if gmdriver is None or gmdriver == driver: gridname = compname + "_grid" other_gridname = other_compname + "_grid" - gridmap_nodes.extend(self.get_children("gridmap", root=root, - attributes={gridname:gridvalue, other_gridname:other_gridvalue})) + gridmap_nodes.extend( + self.get_children( + "gridmap", + root=root, + attributes={ + gridname: gridvalue, + other_gridname: other_gridvalue, + }, + ) + ) # We first create a dictionary of gridmaps just for this pair of grids, then later # add these grids to the main gridmaps dict using _add_grid_info. The reason for @@ -427,16 +508,17 @@ def _get_gridmaps_for_one_grid_pair(self, gridmaps, driver, compname, other_comp # leaving this possibility in place.) these_gridmaps = {} for gridmap_node in gridmap_nodes: - expect(len(self.attrib(gridmap_node)) == 2, - " Bad attribute count in gridmap node %s"%self.attrib(gridmap_node)) - map_nodes = self.get_children("map",root=gridmap_node) + expect( + len(self.attrib(gridmap_node)) == 2, + " Bad attribute count in gridmap node %s" % self.attrib(gridmap_node), + ) + map_nodes = self.get_children("map", root=gridmap_node) for map_node in map_nodes: name = self.get(map_node, "name") value = self.text(map_node) if name is not None and value is not None: these_gridmaps[name] = value - logger.debug(" gridmap name,value are {}: {}" - .format(name,value)) + logger.debug(" gridmap name,value are {}: {}".format(name, value)) for name, value in these_gridmaps.items(): _add_grid_info(gridmaps, name, value) @@ -446,11 +528,21 @@ def print_values(self, long_output=None): helptext = self.get_element_text("help") logger.info("{} ".format(helptext)) - logger.info("{:5s}-------------------------------------------------------------".format("")) + logger.info( + "{:5s}-------------------------------------------------------------".format( + "" + ) + ) logger.info("{:10s} default component grids:\n".format("")) - logger.info(" component compset value " ) - logger.info("{:5s}-------------------------------------------------------------".format("")) - default_nodes = self.get_children("model_grid_defaults", root=self.get_child("grids")) + logger.info(" component compset value ") + logger.info( + "{:5s}-------------------------------------------------------------".format( + "" + ) + ) + default_nodes = self.get_children( + "model_grid_defaults", root=self.get_child("grids") + ) for default_node in default_nodes: grid_nodes = self.get_children("grid", root=default_node) for grid_node in grid_nodes: @@ -458,14 +550,18 @@ def print_values(self, long_output=None): compset = self.get(grid_node, "compset") value = self.text(grid_node) logger.info(" {:6s} {:15s} {:10s}".format(name, compset, value)) - logger.info("{:5s}-------------------------------------------------------------".format("")) + logger.info( + 
"{:5s}-------------------------------------------------------------".format( + "" + ) + ) domains = {} if long_output is not None: - domain_nodes = self.get_children("domain",root=self.get_child("domains")) + domain_nodes = self.get_children("domain", root=self.get_child("domains")) for domain_node in domain_nodes: - name = self.get(domain_node, 'name') - if name == 'null': + name = self.get(domain_node, "name") + if name == "null": continue desc = self.text(self.get_child("desc", root=domain_node)) files = "" @@ -483,7 +579,9 @@ def print_values(self, long_output=None): files += " grid match: " + grid_attrib if mask_attrib or grid_attrib: files += ")" - domains[name] = "\n {} with domain file(s): {} ".format(desc, files) + domains[name] = "\n {} with domain file(s): {} ".format( + desc, files + ) model_grid_nodes = self.get_children("model_grid", root=self.get_child("grids")) for model_grid_node in model_grid_nodes: @@ -496,7 +594,7 @@ def print_values(self, long_output=None): if not_compset: restriction += "only for compsets that are not {} ".format(not_compset) if restriction: - logger.info("\n alias: {} ({})".format(alias,restriction)) + logger.info("\n alias: {} ({})".format(alias, restriction)) else: logger.info("\n alias: {}".format(alias)) grid_nodes = self.get_children("grid", root=model_grid_node) @@ -513,12 +611,14 @@ def print_values(self, long_output=None): gridnames = set(gridnames) for gridname in gridnames: if gridname != "null": - logger.info (" {}".format(domains[gridname])) + logger.info(" {}".format(domains[gridname])) + # ------------------------------------------------------------------------ # Helper class: _ComponentGrids # ------------------------------------------------------------------------ + class _ComponentGrids(object): """This class stores the grid names for each component and allows retrieval in a variety of formats @@ -531,15 +631,19 @@ class _ComponentGrids(object): # iterations. 
# # TODO: this should be in XML, not here - _COMP_NAMES = OrderedDict([('atm', 'a'), - ('lnd', 'l'), - ('ocn', 'o'), - ('ice', 'i'), - ('rof', 'r'), - ('glc', 'g'), - ('wav', 'w'), - ('iac', 'z'), - ('mask', 'm')]) + _COMP_NAMES = OrderedDict( + [ + ("atm", "a"), + ("lnd", "l"), + ("ocn", "o"), + ("ice", "i"), + ("rof", "r"), + ("glc", "g"), + ("wav", "w"), + ("iac", "z"), + ("mask", "m"), + ] + ) def __init__(self, grid_longname): self._comp_gridnames = self._get_component_grids_from_longname(grid_longname) @@ -548,7 +652,7 @@ def _get_component_grids_from_longname(self, name): """Return a dictionary mapping each compname to its gridname""" grid_re = re.compile(r"[_]{0,1}[a-z]{1,2}%") grids = grid_re.split(name)[1:] - prefixes = re.findall("[a-z]+%",name) + prefixes = re.findall("[a-z]+%", name) component_grids = {} i = 0 while i < len(grids): @@ -559,7 +663,7 @@ def _get_component_grids_from_longname(self, name): i += 1 component_grids["i"] = component_grids["oi"] component_grids["o"] = component_grids["oi"] - del component_grids['oi'] + del component_grids["oi"] result = {} for compname, prefix in self._COMP_NAMES.items(): @@ -584,9 +688,8 @@ def get_compnames(self, include_mask=True, exclude_comps=None): else: all_exclude_comps = exclude_comps if not include_mask: - all_exclude_comps.append('mask') - result = [k for k in self._COMP_NAMES - if k not in all_exclude_comps] + all_exclude_comps.append("mask") + result = [k for k in self._COMP_NAMES if k not in all_exclude_comps] return result def get_comp_gridname(self, compname): @@ -663,14 +766,19 @@ def check_num_elements(self, gridinfo): # component with multiple grids continue num_elements = len(value.split(GRID_SEP)) - expect(num_elements == expected_num_elements, - "Unexpected number of colon-delimited elements in {}: {} (expected {} elements)".format( - name, value, expected_num_elements)) + expect( + num_elements == expected_num_elements, + "Unexpected number of colon-delimited elements in {}: {} (expected {} elements)".format( + name, value, expected_num_elements + ), + ) + # ------------------------------------------------------------------------ # Some helper functions # ------------------------------------------------------------------------ + def _get_compnames_from_mapname(mapname): """Given a mapname like ATM2OCN_FMAPNAME, return the two component names @@ -682,16 +790,17 @@ def _get_compnames_from_mapname(mapname): comp2_name = mapname[4:7].lower() return comp1_name, comp2_name + def _strip_grid_from_name(name): """Given some string 'name', strip trailing '_grid' from name and return result Raises an exception if 'name' doesn't end with '_grid' """ - expect(name.endswith('_grid'), "{} does not end with _grid".format(name)) - return name[:-len('_grid')] + expect(name.endswith("_grid"), "{} does not end with _grid".format(name)) + return name[: -len("_grid")] -def _add_grid_info(info_dict, key, value, - value_for_multiple=None): + +def _add_grid_info(info_dict, key, value, value_for_multiple=None): """Add a value to info_dict, handling the possibility of multiple grids for a component In the basic case, where key is not yet present in info_dict, this is equivalent to @@ -708,8 +817,10 @@ def _add_grid_info(info_dict, key, value, """ if not isinstance(value, str): - expect(value_for_multiple is not None, - "_add_grid_info: value_for_multiple must be specified if value is not a string") + expect( + value_for_multiple is not None, + "_add_grid_info: value_for_multiple must be specified if value is not a string", + ) if key in 
info_dict: if value_for_multiple is not None: info_dict[key] = value_for_multiple @@ -718,17 +829,18 @@ def _add_grid_info(info_dict, key, value, else: info_dict[key] = value + def _get_unset_gridmap_value(mapname, component_grids, driver): """Return the appropriate setting for a given gridmap that has not been explicitly set This will be 'unset' or 'idmap' depending on various parameters. """ - if driver == 'nuopc': + if driver == "nuopc": comp1_name, comp2_name = _get_compnames_from_mapname(mapname) grid1 = component_grids.get_comp_gridname(comp1_name) grid2 = component_grids.get_comp_gridname(comp2_name) if grid1 == grid2: - if grid1 != 'null' and grid2 != 'null': + if grid1 != "null" and grid2 != "null": gridmap = "idmap" else: gridmap = "unset" diff --git a/CIME/XML/headers.py b/CIME/XML/headers.py index d2e98ca9c68..656d43dc8d4 100644 --- a/CIME/XML/headers.py +++ b/CIME/XML/headers.py @@ -8,8 +8,9 @@ logger = logging.getLogger(__name__) + class Headers(GenericXML): - def __init__(self,infile=None): + def __init__(self, infile=None): """ initialize an object @@ -19,10 +20,10 @@ def __init__(self,infile=None): """ if infile is None: files = Files() - infile = files.get_value('CASEFILE_HEADERS', resolved=True) + infile = files.get_value("CASEFILE_HEADERS", resolved=True) super(Headers, self).__init__(infile) def get_header_node(self, fname): - fnode = self.get_child("file", attributes={"name" : fname}) + fnode = self.get_child("file", attributes={"name": fname}) headernode = self.get_child("header", root=fnode) return headernode diff --git a/CIME/XML/inputdata.py b/CIME/XML/inputdata.py index f703f1eb7fa..18b71dca4dd 100644 --- a/CIME/XML/inputdata.py +++ b/CIME/XML/inputdata.py @@ -8,8 +8,8 @@ logger = logging.getLogger(__name__) -class Inputdata(GenericXML): +class Inputdata(GenericXML): def __init__(self, infile=None, files=None): """ initialize a files object given input pes specification file @@ -27,8 +27,8 @@ def __init__(self, infile=None, files=None): def get_next_server(self, attributes=None): protocol = None address = None - user = '' - passwd = '' + user = "" + passwd = "" chksum_file = None ic_filepath = None servernodes = self.get_children("server", attributes=attributes) @@ -46,30 +46,30 @@ def get_next_server(self, attributes=None): else: prevserver = self._servernode for i, node in enumerate(servernodes): - if self._servernode == node and len(servernodes)>i+1: - self._servernode = servernodes[i+1] + if self._servernode == node and len(servernodes) > i + 1: + self._servernode = servernodes[i + 1] break if prevserver is not None and self._servernode == prevserver: self._servernode = None if self._servernode: - protocol = self.text(self.get_child("protocol", root = self._servernode)) - address = self.text(self.get_child("address", root = self._servernode)) - unode = self.get_optional_child("user", root = self._servernode) + protocol = self.text(self.get_child("protocol", root=self._servernode)) + address = self.text(self.get_child("address", root=self._servernode)) + unode = self.get_optional_child("user", root=self._servernode) if unode: - user = self.text(unode) - invnode = self.get_optional_child("inventory", root = self._servernode) + user = self.text(unode) + invnode = self.get_optional_child("inventory", root=self._servernode) if invnode: inventory = self.text(invnode) - pnode = self.get_optional_child("password", root = self._servernode) + pnode = self.get_optional_child("password", root=self._servernode) if pnode: - passwd = self.text(pnode) - csnode = 
self.get_optional_child("checksum", root = self._servernode) + passwd = self.text(pnode) + csnode = self.get_optional_child("checksum", root=self._servernode) if csnode: - chksum_file = self.text(csnode) - icnode = self.get_optional_child("ic_filepath", root = self._servernode) + chksum_file = self.text(csnode) + icnode = self.get_optional_child("ic_filepath", root=self._servernode) if icnode: - ic_filepath = self.text(icnode) + ic_filepath = self.text(icnode) return protocol, address, user, passwd, chksum_file, ic_filepath, inventory diff --git a/CIME/XML/machines.py b/CIME/XML/machines.py index 305c8d4c15e..91e457a7085 100644 --- a/CIME/XML/machines.py +++ b/CIME/XML/machines.py @@ -10,8 +10,8 @@ logger = logging.getLogger(__name__) -class Machines(GenericXML): +class Machines(GenericXML): def __init__(self, infile=None, files=None, machine=None, extra_machines_dir=None): """ initialize an object @@ -50,7 +50,9 @@ def __init__(self, infile=None, files=None, machine=None, extra_machines_dir=Non # extra_machines_dir, if present. # # This could cause problems if node matches are repeated when only one is expected. - local_infile = os.path.join(os.environ.get("HOME"),".cime","config_machines.xml") + local_infile = os.path.join( + os.environ.get("HOME"), ".cime", "config_machines.xml" + ) logger.debug("Infile: {}".format(local_infile)) if os.path.exists(local_infile): GenericXML.read(self, local_infile, schema) @@ -71,14 +73,26 @@ def __init__(self, infile=None, files=None, machine=None, extra_machines_dir=Non machine = self.probe_machine_name() if machine is None: for potential_model in get_all_cime_models(): - local_infile = os.path.join(get_cime_root(), "CIME", "config",potential_model,"machines","config_machines.xml") + local_infile = os.path.join( + get_cime_root(), + "CIME", + "config", + potential_model, + "machines", + "config_machines.xml", + ) if local_infile != infile: GenericXML.read(self, local_infile, schema) if self.probe_machine_name() is not None: supported_models.append(potential_model) GenericXML.change_file(self, infile, schema) - expect(machine is not None, "Could not initialize machine object from {} or {}. This machine is not available for the target CIME_MODEL. The supported CIME_MODELS that can be used are: {}".format(infile, local_infile, supported_models)) + expect( + machine is not None, + "Could not initialize machine object from {} or {}. This machine is not available for the target CIME_MODEL. 
The supported CIME_MODELS that can be used are: {}".format( + infile, local_infile, supported_models + ), + ) self.set_machine(machine) def get_child(self, name=None, attributes=None, root=None, err_msg=None): @@ -123,7 +137,7 @@ def list_available_machines(self): Return a list of machines defined for a given CIME_MODEL """ machines = [] - nodes = self.get_children("machine") + nodes = self.get_children("machine") for node in nodes: mach = self.get(node, "MACH") machines.append(mach) @@ -150,9 +164,13 @@ def probe_machine_name(self, warn=True): names_not_found.append(nametomatch) names_not_found_quoted = ["'" + name + "'" for name in names_not_found] - names_not_found_str = ' or '.join(names_not_found_quoted) + names_not_found_str = " or ".join(names_not_found_quoted) if warn: - logger.warning("Could not find machine match for {}".format(names_not_found_str)) + logger.warning( + "Could not find machine match for {}".format( + names_not_found_str + ) + ) return machine @@ -169,25 +187,38 @@ def _probe_machine_name_one_guess(self, nametomatch): machtocheck = self.get(node, "MACH") logger.debug("machine is " + machtocheck) regex_str_node = self.get_optional_child("NODENAME_REGEX", root=node) - regex_str = machtocheck if regex_str_node is None else self.text(regex_str_node) - + regex_str = ( + machtocheck if regex_str_node is None else self.text(regex_str_node) + ) + if regex_str is not None: logger.debug("machine regex string is " + regex_str) # an environment variable can be used if regex_str.startswith("$ENV"): - machine = self.get_resolved_value(regex_str, allow_unresolved_envvars=True) - if not machine.startswith("$ENV"): + machine_value = self.get_resolved_value( + regex_str, allow_unresolved_envvars=True + ) + if not machine_value.startswith("$ENV"): try: - match,machine = machine.split(":") + match, this_machine = machine_value.split(":") except ValueError: - expect(False,"Bad formation of NODENAME_REGEX. Expected envvar:value, found {}".format(regex_str)) - if match == machine: + expect( + False, + "Bad formation of NODENAME_REGEX. 
Expected envvar:value, found {}".format( + regex_str + ), + ) + if match == this_machine: machine = machtocheck break - else: + else: regex = re.compile(regex_str) if regex.match(nametomatch): - logger.debug("Found machine: {} matches {}".format(machtocheck, nametomatch)) + logger.debug( + "Found machine: {} matches {}".format( + machtocheck, nametomatch + ) + ) machine = machtocheck break @@ -208,12 +239,16 @@ def set_machine(self, machine): if machine == "Query": self.machine = machine elif self.machine != machine or self.machine_node is None: - self.machine_node = super(Machines,self).get_child("machine", {"MACH" : machine}, err_msg="No machine {} found".format(machine)) + self.machine_node = super(Machines, self).get_child( + "machine", + {"MACH": machine}, + err_msg="No machine {} found".format(machine), + ) self.machine = machine return machine - #pylint: disable=arguments-differ + # pylint: disable=arguments-differ def get_value(self, name, attributes=None, resolved=True, subgroup=None): """ Get Value of fields in the config_machines.xml file @@ -235,7 +270,9 @@ def get_value(self, name, attributes=None, resolved=True, subgroup=None): elif name == "MPILIB": value = self.get_default_MPIlib(attributes) else: - node = self.get_optional_child(name, root=self.machine_node, attributes=attributes) + node = self.get_optional_child( + name, root=self.machine_node, attributes=attributes + ) if node is not None: value = self.text(node) @@ -261,9 +298,11 @@ def get_field_from_list(self, listname, reqval=None, attributes=None): if supported_values is None: supported_values = self.get_value(listname, attributes=None) - expect(supported_values is not None, - "No list found for " + listname + " on machine " + self.machine) - supported_values = supported_values.split(",") #pylint: disable=no-member + expect( + supported_values is not None, + "No list found for " + listname + " on machine " + self.machine, + ) + supported_values = supported_values.split(",") # pylint: disable=no-member if reqval is None or reqval == "UNSET": return supported_values[0] @@ -278,9 +317,14 @@ def get_default_compiler(self): Get the compiler to use from the list of COMPILERS """ cime_config = get_cime_config() - if cime_config.has_option('main','COMPILER'): - value = cime_config.get('main', 'COMPILER') - expect(self.is_valid_compiler(value), "User-selected compiler {} is not supported on machine {}".format(value, self.machine)) + if cime_config.has_option("main", "COMPILER"): + value = cime_config.get("main", "COMPILER") + expect( + self.is_valid_compiler(value), + "User-selected compiler {} is not supported on machine {}".format( + value, self.machine + ), + ) else: value = self.get_field_from_list("COMPILERS") return value @@ -291,7 +335,7 @@ def get_default_MPIlib(self, attributes=None): """ return self.get_field_from_list("MPILIBS", attributes=attributes) - def is_valid_compiler(self,compiler): + def is_valid_compiler(self, compiler): """ Check the compiler is valid for the current machine @@ -315,8 +359,11 @@ def is_valid_MPIlib(self, mpilib, attributes=None): >>> machobj.is_valid_MPIlib("fake-mpi") False """ - return mpilib == "mpi-serial" or \ - self.get_field_from_list("MPILIBS", reqval=mpilib, attributes=attributes) is not None + return ( + mpilib == "mpi-serial" + or self.get_field_from_list("MPILIBS", reqval=mpilib, attributes=attributes) + is not None + ) def has_batch_system(self): """ @@ -333,7 +380,10 @@ def has_batch_system(self): result = False batch_system = self.get_optional_child("BATCH_SYSTEM", 
root=self.machine_node) if batch_system is not None: - result = (self.text(batch_system) is not None and self.text(batch_system) != "none") + result = ( + self.text(batch_system) is not None + and self.text(batch_system) != "none" + ) logger.debug("Machine {} has batch: {}".format(self.machine, result)) return result @@ -357,24 +407,26 @@ def print_values(self): for machine in machines: name = self.get(machine, "MACH") desc = self.get_child("DESC", root=machine) - os_ = self.get_child("OS", root=machine) + os_ = self.get_child("OS", root=machine) compilers = self.get_child("COMPILERS", root=machine) max_tasks_per_node = self.get_child("MAX_TASKS_PER_NODE", root=machine) - max_mpitasks_per_node = self.get_child("MAX_MPITASKS_PER_NODE", root=machine) + max_mpitasks_per_node = self.get_child( + "MAX_MPITASKS_PER_NODE", root=machine + ) max_gpus_per_node = self.get_child("MAX_GPUS_PER_NODE", root=machine) - print( " {} : {} ".format(name , self.text(desc))) - print( " os ", self.text(os_)) - print( " compilers ",self.text(compilers)) + print(" {} : {} ".format(name, self.text(desc))) + print(" os ", self.text(os_)) + print(" compilers ", self.text(compilers)) if max_mpitasks_per_node is not None: - print(" pes/node ",self.text(max_mpitasks_per_node)) + print(" pes/node ", self.text(max_mpitasks_per_node)) if max_tasks_per_node is not None: - print(" max_tasks/node ",self.text(max_tasks_per_node)) + print(" max_tasks/node ", self.text(max_tasks_per_node)) if max_gpus_per_node is not None: - print(" max_gpus/node ",self.text(max_gpus_per_node)) + print(" max_gpus/node ", self.text(max_gpus_per_node)) def return_values(self): - """ return a dictionary of machine info + """return a dictionary of machine info This routine is used by external tools in https://github.com/NCAR/CESM_xml2html """ machines = self.get_children("machine") @@ -383,16 +435,20 @@ def return_values(self): for machine in machines: name = self.get(machine, "MACH") desc = self.get_child("DESC", root=machine) - mach_dict[(name,"description")] = self.text(desc) - os_ = self.get_child("OS", root=machine) - mach_dict[(name,"os")] = self.text(os_) + mach_dict[(name, "description")] = self.text(desc) + os_ = self.get_child("OS", root=machine) + mach_dict[(name, "os")] = self.text(os_) compilers = self.get_child("COMPILERS", root=machine) - mach_dict[(name,"compilers")] = self.text(compilers) + mach_dict[(name, "compilers")] = self.text(compilers) max_tasks_per_node = self.get_child("MAX_TASKS_PER_NODE", root=machine) - mach_dict[(name,"max_tasks_per_node")] = self.text(max_tasks_per_node) - max_mpitasks_per_node = self.get_child("MAX_MPITASKS_PER_NODE", root=machine) - mach_dict[(name,"max_mpitasks_per_node")] = self.text(max_mpitasks_per_node) + mach_dict[(name, "max_tasks_per_node")] = self.text(max_tasks_per_node) + max_mpitasks_per_node = self.get_child( + "MAX_MPITASKS_PER_NODE", root=machine + ) + mach_dict[(name, "max_mpitasks_per_node")] = self.text( + max_mpitasks_per_node + ) max_gpus_per_node = self.get_child("MAX_GPUS_PER_NODE", root=machine) - mach_dict[(name,"max_gpus_per_node")] = self.text(max_gpus_per_node) + mach_dict[(name, "max_gpus_per_node")] = self.text(max_gpus_per_node) return mach_dict diff --git a/CIME/XML/namelist_definition.py b/CIME/XML/namelist_definition.py index 2d90549b521..d3a4d035dd3 100644 --- a/CIME/XML/namelist_definition.py +++ b/CIME/XML/namelist_definition.py @@ -13,9 +13,14 @@ import re import collections -from CIME.namelist import fortran_namelist_base_value, \ -
is_valid_fortran_namelist_literal, character_literal_to_string, \ - expand_literal_list, Namelist, get_fortran_name_only +from CIME.namelist import ( + fortran_namelist_base_value, + is_valid_fortran_namelist_literal, + character_literal_to_string, + expand_literal_list, + Namelist, + get_fortran_name_only, +) from CIME.XML.standard_module_setup import * from CIME.XML.entry_id import EntryID @@ -23,12 +28,13 @@ logger = logging.getLogger(__name__) -_array_size_re = re.compile(r'^(?P[^(]+)\((?P[^)]+)\)$') +_array_size_re = re.compile(r"^(?P[^(]+)\((?P[^)]+)\)$") + class CaseInsensitiveDict(dict): """Basic case insensitive dict with strings only keys. - From https://stackoverflow.com/a/27890005 """ + From https://stackoverflow.com/a/27890005""" proxy = {} @@ -57,6 +63,7 @@ def __setitem__(self, k, v): super(CaseInsensitiveDict, self).__setitem__(k, v) self.proxy[k.lower()] = k + class NamelistDefinition(EntryID): """Class representing variable definitions for a namelist. @@ -126,31 +133,31 @@ def set_nodes(self, skip_groups=None): def get_group_name(self, node=None): if self.get_version() == 1.0: - group = self.get(node, 'group') + group = self.get(node, "group") elif self.get_version() >= 2.0: group = self.get_element_text("group", root=node) - return(group) + return group def _get_type(self, node): if self.get_version() == 1.0: - type_info = self.get(node, 'type') + type_info = self.get(node, "type") elif self.get_version() >= 2.0: type_info = self._get_type_info(node) - return(type_info) + return type_info def _get_valid_values(self, node): # The "valid_values" attribute is not required, and an empty string has # the same effect as not specifying it. # Returns a list from a comma seperated string in xml - valid_values = '' + valid_values = "" if self.get_version() == 1.0: - valid_values = self.get(node, 'valid_values') + valid_values = self.get(node, "valid_values") elif self.get_version() >= 2.0: valid_values = self._get_node_element_info(node, "valid_values") - if valid_values == '': + if valid_values == "": valid_values = None if valid_values is not None: - valid_values = valid_values.split(',') + valid_values = valid_values.split(",") return valid_values def get_group(self, name): @@ -202,11 +209,15 @@ def get_value_match(self, vid, attributes=None, exact_match=True, entry_node=Non entry_node = self._nodes[vid] # NOTE(wjs, 2021-06-04) In the following call, replacement_for_none='' may not # actually be needed, but I'm setting it to maintain some old logic, to be safe. - value = super(NamelistDefinition, self).get_value_match(vid.lower(),attributes=all_attributes, exact_match=exact_match, - entry_node=entry_node, - replacement_for_none='') + value = super(NamelistDefinition, self).get_value_match( + vid.lower(), + attributes=all_attributes, + exact_match=exact_match, + entry_node=entry_node, + replacement_for_none="", + ) if value is not None: - value = self._split_defaults_text(value) + value = self._split_defaults_text(value) return value @@ -225,10 +236,10 @@ def _split_defaults_text(string): if char in ('"', "'"): # if we have a quote character, start a string. delim = char - elif char == ',': + elif char == ",": # if we have a comma, this is a new value. value.append(string[pos:i].strip()) - pos = i+1 + pos = i + 1 else: # If inside a string, the only thing that can happen is the end # of the string. @@ -250,35 +261,45 @@ def split_type_string(self, name): type_string = self._entry_types[name] # 'char' is frequently used as an abbreviation of 'character'. 
- type_string = type_string.replace('char', 'character') + type_string = type_string.replace("char", "character") # Separate into a size and the rest of the type. size_match = _array_size_re.search(type_string) if size_match: - type_string = size_match.group('type') - size_string = size_match.group('size') + type_string = size_match.group("type") + size_string = size_match.group("size") try: size = int(size_string) except ValueError: - expect(False, - "In namelist definition, variable {} had the non-integer string {!r} specified as an array size.".format(name, size_string)) + expect( + False, + "In namelist definition, variable {} had the non-integer string {!r} specified as an array size.".format( + name, size_string + ), + ) else: size = 1 # Separate into a type and an optional length. - type_, star, length = type_string.partition('*') - if star == '*': + type_, star, length = type_string.partition("*") + if star == "*": # Length allowed only for character variables. - expect(type_ == 'character', - "In namelist definition, length specified for non-character " - "variable {}.".format(name)) + expect( + type_ == "character", + "In namelist definition, length specified for non-character " + "variable {}.".format(name), + ) # Check that the length is actually an integer, to make the error # message a bit cleaner if the xml input is bad. try: max_len = int(length) except ValueError: - expect(False, - "In namelist definition, character variable {} had the non-integer string {!r} specified as a length.".format(name, length)) + expect( + False, + "In namelist definition, character variable {} had the non-integer string {!r} specified as a length.".format( + name, length + ), + ) else: max_len = None return type_, max_len, size @@ -286,13 +307,13 @@ def split_type_string(self, name): @staticmethod def _canonicalize_value(type_, value): """Create 'canonical' version of a value for comparison purposes.""" - canonical_value = [fortran_namelist_base_value(scalar) - for scalar in value] - canonical_value = [scalar for scalar in canonical_value if scalar != ''] - if type_ == 'character': - canonical_value = [character_literal_to_string(scalar) - for scalar in canonical_value] - elif type_ == 'integer': + canonical_value = [fortran_namelist_base_value(scalar) for scalar in value] + canonical_value = [scalar for scalar in canonical_value if scalar != ""] + if type_ == "character": + canonical_value = [ + character_literal_to_string(scalar) for scalar in canonical_value + ] + elif type_ == "integer": canonical_value = [int(scalar) for scalar in canonical_value] return canonical_value @@ -328,9 +349,13 @@ def is_valid_value(self, name, value): # Check valid value constraints (if applicable). valid_values = self._valid_values[name] if valid_values is not None: - expect(type_ in ('integer', 'character'), - "Found valid_values attribute for variable {} with type {}, but valid_values only allowed for character and integer variables.".format(name, type_)) - if type_ == 'integer': + expect( + type_ in ("integer", "character"), + "Found valid_values attribute for variable {} with type {}, but valid_values only allowed for character and integer variables.".format( + name, type_ + ), + ) + if type_ == "integer": compare_list = [int(vv) for vv in valid_values] else: compare_list = valid_values @@ -343,27 +368,43 @@ def is_valid_value(self, name, value): # Check size of input array. 
if len(expand_literal_list(value)) > size: - expect(False, "Value index exceeds variable size for variable {}, allowed array length is {} value array size is {}".format(name, size, len(expand_literal_list(value)))) + expect( + False, + "Value index exceeds variable size for variable {}, allowed array length is {} value array size is {}".format( + name, size, len(expand_literal_list(value)) + ), + ) return True def _expect_variable_in_definition(self, name, variable_template): """Used to get a better error message for an unexpected variable. - case insensitve match""" + case insensitive match""" - expect(name in self._entry_ids, - (variable_template + " is not in the namelist definition.").format(str(name))) + expect( + name in self._entry_ids, + (variable_template + " is not in the namelist definition.").format( + str(name) + ), + ) def _user_modifiable_in_variable_definition(self, name): # Is name user modifiable? - node = self.get_optional_child("entry", attributes={'id': name}) - user_modifiable_only_by_xml = self.get(node, 'modify_via_xml') + node = self.get_optional_child("entry", attributes={"id": name}) + user_modifiable_only_by_xml = self.get(node, "modify_via_xml") if user_modifiable_only_by_xml is not None: - expect(False, - "Cannot change {} in user_nl file: set via xml variable {}".format(name, user_modifiable_only_by_xml)) - user_cannot_modify = self.get(node, 'cannot_modify_by_user_nl') + expect( + False, + "Cannot change {} in user_nl file: set via xml variable {}".format( + name, user_modifiable_only_by_xml + ), + ) + user_cannot_modify = self.get(node, "cannot_modify_by_user_nl") if user_cannot_modify is not None: - expect(False, - "Cannot change {} in user_nl file: {}".format(name, user_cannot_modify)) + expect( + False, + "Cannot change {} in user_nl file: {}".format(name, user_cannot_modify), + ) + def _generate_variable_template(self, filename): # Improve error reporting when a file name is provided. if filename is None: @@ -378,7 +419,7 @@ def _generate_variable_template(self, filename): variable_template = "Variable {!r} from file " + repr(str(msgfn)) return variable_template - def validate(self, namelist,filename=None): + def validate(self, namelist, filename=None): """Validate a namelist object against this definition. The optional `filename` argument can be used to assist in error @@ -391,21 +432,34 @@ def validate(self, namelist,filename=None): for variable_name in namelist.get_variable_names(group_name): # Check that the variable is defined... qualified_variable_name = get_fortran_name_only(variable_name) - self._expect_variable_in_definition(qualified_variable_name, variable_template) + self._expect_variable_in_definition( + qualified_variable_name, variable_template + ) # Check if can actually change this variable via filename change if filename is not None: - self._user_modifiable_in_variable_definition(qualified_variable_name) + self._user_modifiable_in_variable_definition( + qualified_variable_name + ) # and has the right group name... var_group = self.get_group(qualified_variable_name) - expect(var_group == group_name, - (variable_template + " is in a group named {!r}, but should be in {!r}.").format(str(variable_name), str(group_name), str(var_group))) + expect( + var_group == group_name, + ( + variable_template + + " is in a group named {!r}, but should be in {!r}." + ).format(str(variable_name), str(group_name), str(var_group)), + ) # and has a valid value.
value = namelist.get_variable_value(group_name, variable_name) - expect(self.is_valid_value(qualified_variable_name, value), - (variable_template + " has invalid value {!r}.").format(str(variable_name), [str(scalar) for scalar in value])) + expect( + self.is_valid_value(qualified_variable_name, value), + (variable_template + " has invalid value {!r}.").format( + str(variable_name), [str(scalar) for scalar in value] + ), + ) def dict_to_namelist(self, dict_, filename=None): """Converts a dictionary of name-value pairs to a `Namelist`. @@ -426,7 +480,9 @@ def dict_to_namelist(self, dict_, filename=None): qualified_varname = get_fortran_name_only(variable_lc) self._expect_variable_in_definition(qualified_varname, variable_template) group_name = self.get_group(qualified_varname) - expect (group_name is not None, "No group found for var {}".format(variable_lc)) + expect( + group_name is not None, "No group found for var {}".format(variable_lc) + ) if group_name not in groups: groups[group_name] = collections.OrderedDict() groups[group_name][variable_lc] = dict_[variable_name] @@ -435,10 +491,10 @@ def dict_to_namelist(self, dict_, filename=None): def get_input_pathname(self, name): node = self._nodes[name] if self.get_version() == 1.0: - input_pathname = self.get(node, 'input_pathname') + input_pathname = self.get(node, "input_pathname") elif self.get_version() >= 2.0: input_pathname = self._get_node_element_info(node, "input_pathname") - return(input_pathname) + return input_pathname # pylint: disable=arguments-differ def get_default_value(self, item, attribute=None): diff --git a/CIME/XML/pes.py b/CIME/XML/pes.py index 05762261b34..fadadff2b9e 100644 --- a/CIME/XML/pes.py +++ b/CIME/XML/pes.py @@ -8,8 +8,8 @@ logger = logging.getLogger(__name__) -class Pes(GenericXML): +class Pes(GenericXML): def __init__(self, infile, files=None): """ initialize a files object given input pes specification file @@ -20,7 +20,7 @@ def __init__(self, infile, files=None): logger.debug("DEBUG: infile is {}".format(infile)) GenericXML.__init__(self, infile, schema=schema) - def find_pes_layout(self, grid, compset, machine, pesize_opts='M', mpilib=None): + def find_pes_layout(self, grid, compset, machine, pesize_opts="M", mpilib=None): opes_ntasks = {} opes_nthrds = {} opes_rootpe = {} @@ -33,8 +33,17 @@ def find_pes_layout(self, grid, compset, machine, pesize_opts='M', mpilib=None): overrides = self.get_optional_child("overrides") ocomments = None if overrides is not None: - o_grid_nodes = self.get_children("grid", root = overrides) - opes_ntasks, opes_nthrds, opes_rootpe, opes_pstrid, oother_settings, ocomments = self._find_matches(o_grid_nodes, grid, compset, machine, pesize_opts, True) + o_grid_nodes = self.get_children("grid", root=overrides) + ( + opes_ntasks, + opes_nthrds, + opes_rootpe, + opes_pstrid, + oother_settings, + ocomments, + ) = self._find_matches( + o_grid_nodes, grid, compset, machine, pesize_opts, True + ) # Get all the nodes grid_nodes = self.get_children("grid") @@ -44,8 +53,14 @@ def find_pes_layout(self, grid, compset, machine, pesize_opts='M', mpilib=None): gn_set.difference_update(ogn_set) grid_nodes = list(gn_set) - - pes_ntasks, pes_nthrds, pes_rootpe, pes_pstrid, other_settings, comments = self._find_matches(grid_nodes, grid, compset, machine, pesize_opts, False) + ( + pes_ntasks, + pes_nthrds, + pes_rootpe, + pes_pstrid, + other_settings, + comments, + ) = self._find_matches(grid_nodes, grid, compset, machine, pesize_opts, False) pes_ntasks.update(opes_ntasks) 
pes_nthrds.update(opes_nthrds) pes_rootpe.update(opes_rootpe) @@ -54,7 +69,6 @@ def find_pes_layout(self, grid, compset, machine, pesize_opts='M', mpilib=None): if ocomments is not None: comments = ocomments - if mpilib == "mpi-serial": for i in iter(pes_ntasks): pes_ntasks[i] = 1 @@ -75,19 +89,27 @@ def find_pes_layout(self, grid, compset, machine, pesize_opts='M', mpilib=None): return pes_ntasks, pes_nthrds, pes_rootpe, pes_pstrid, other_settings, comments - def _find_matches(self, grid_nodes, grid, compset, machine, pesize_opts, override=False): + def _find_matches( + self, grid_nodes, grid, compset, machine, pesize_opts, override=False + ): grid_choice = None mach_choice = None compset_choice = None pesize_choice = None max_points = -1 - pes_ntasks, pes_nthrds, pes_rootpe, pes_pstrid, other_settings = {}, {}, {}, {}, {} + pes_ntasks, pes_nthrds, pes_rootpe, pes_pstrid, other_settings = ( + {}, + {}, + {}, + {}, + {}, + ) pe_select = None comment = None for grid_node in grid_nodes: grid_match = self.get(grid_node, "name") - if grid_match == "any" or re.search(grid_match,grid): - mach_nodes = self.get_children("mach",root=grid_node) + if grid_match == "any" or re.search(grid_match, grid): + mach_nodes = self.get_children("mach", root=grid_node) for mach_node in mach_nodes: mach_match = self.get(mach_node, "name") if mach_match == "any" or re.search(mach_match, machine): @@ -95,13 +117,23 @@ def _find_matches(self, grid_nodes, grid, compset, machine, pesize_opts, overrid for pes_node in pes_nodes: pesize_match = self.get(pes_node, "pesize") compset_match = self.get(pes_node, "compset") - if (pesize_match == "any" or (pesize_opts is not None and \ - pesize_match == pesize_opts)) and \ - (compset_match == "any" or \ - re.search(compset_match,compset)): + if ( + pesize_match == "any" + or ( + pesize_opts is not None + and pesize_match == pesize_opts + ) + ) and ( + compset_match == "any" + or re.search(compset_match, compset) + ): - points = int(grid_match!="any")*3+int(mach_match!="any")*7+\ - int(compset_match!="any")*2+int(pesize_match!="any") + points = ( + int(grid_match != "any") * 3 + + int(mach_match != "any") * 7 + + int(compset_match != "any") * 2 + + int(pesize_match != "any") + ) if override and points > 0: for node in self.get_children(root=pes_node): vid = self.name(node) @@ -110,17 +142,25 @@ def _find_matches(self, grid_nodes, grid, compset, machine, pesize_opts, overrid comment = self.text(node) elif "ntasks" in vid: for child in self.get_children(root=node): - pes_ntasks[self.name(child).upper()] = int(self.text(child)) + pes_ntasks[ + self.name(child).upper() + ] = int(self.text(child)) elif "nthrds" in vid: for child in self.get_children(root=node): - pes_nthrds[self.name(child).upper()] = int(self.text(child)) + pes_nthrds[ + self.name(child).upper() + ] = int(self.text(child)) elif "rootpe" in vid: for child in self.get_children(root=node): - pes_rootpe[self.name(child).upper()] = int(self.text(child)) + pes_rootpe[ + self.name(child).upper() + ] = int(self.text(child)) elif "pstrid" in vid: for child in self.get_children(root=node): - pes_pstrid[self.name(child).upper()] = int(self.text(child)) - # if the value is already upper case its something else we are trying to set + pes_pstrid[ + self.name(child).upper() + ] = int(self.text(child)) + # if the value is already upper case it's something else we are trying to set elif vid == self.name(node): other_settings[vid] = self.text(node) @@ -133,12 +173,31 @@ def _find_matches(self, grid_nodes, grid, compset, machine,
pesize_opts, overrid compset_choice = compset_match pesize_choice = pesize_match elif points == max_points: - logger.warning("mach_choice {} mach_match {}".format(mach_choice, mach_match)) - logger.warning("grid_choice {} grid_match {}".format(grid_choice, grid_match)) - logger.warning("compset_choice {} compset_match {}".format(compset_choice, compset_match)) - logger.warning("pesize_choice {} pesize_match {}".format(pesize_choice, pesize_match)) + logger.warning( + "mach_choice {} mach_match {}".format( + mach_choice, mach_match + ) + ) + logger.warning( + "grid_choice {} grid_match {}".format( + grid_choice, grid_match + ) + ) + logger.warning( + "compset_choice {} compset_match {}".format( + compset_choice, compset_match + ) + ) + logger.warning( + "pesize_choice {} pesize_match {}".format( + pesize_choice, pesize_match + ) + ) logger.warning("points = {:d}".format(points)) - expect(False, "More than one PE layout matches given PE specs") + expect( + False, + "More than one PE layout matches given PE specs", + ) if not override: for node in self.get_children(root=pe_select): vid = self.name(node) @@ -157,18 +216,18 @@ def _find_matches(self, grid_nodes, grid, compset, machine, pesize_opts, overrid elif "pstrid" in vid: for child in self.get_children(root=node): pes_pstrid[self.name(child).upper()] = int(self.text(child)) - # if the value is already upper case its something else we are trying to set + # if the value is already upper case it's something else we are trying to set elif vid == self.name(node): text = self.text(node).strip() if len(text): other_settings[vid] = self.text(node) - if grid_choice != 'any' or logger.isEnabledFor(logging.DEBUG): - logger.info("Pes setting: grid match is {} ".format(grid_choice )) - if mach_choice != 'any' or logger.isEnabledFor(logging.DEBUG): + if grid_choice != "any" or logger.isEnabledFor(logging.DEBUG): + logger.info("Pes setting: grid match is {} ".format(grid_choice)) + if mach_choice != "any" or logger.isEnabledFor(logging.DEBUG): logger.info("Pes setting: machine match is {} ".format(mach_choice)) - if compset_choice != 'any' or logger.isEnabledFor(logging.DEBUG): + if compset_choice != "any" or logger.isEnabledFor(logging.DEBUG): logger.info("Pes setting: compset_match is {} ".format(compset_choice)) - if pesize_choice != 'any' or logger.isEnabledFor(logging.DEBUG): + if pesize_choice != "any" or logger.isEnabledFor(logging.DEBUG): logger.info("Pes setting: pesize match is {} ".format(pesize_choice)) return pes_ntasks, pes_nthrds, pes_rootpe, pes_pstrid, other_settings, comment diff --git a/CIME/XML/pio.py b/CIME/XML/pio.py index 18e91a7831f..54af6112bf0 100644 --- a/CIME/XML/pio.py +++ b/CIME/XML/pio.py @@ -9,8 +9,8 @@ logger = logging.getLogger(__name__) -class PIO(EntryID): +class PIO(EntryID): def __init__(self, comp_classes, infile=None, files=None): if infile is None: if files is None: @@ -25,19 +25,21 @@ def check_if_comp_var(self, vid, attribute=None, node=None): comp = None new_vid = None for comp in self._components: - if vid.endswith('_'+comp): - new_vid = vid.replace('_'+comp, '', 1) - elif vid.startswith(comp+'_'): - new_vid = vid.replace(comp+'_', '', 1) - elif '_' + comp + '_' in vid: - new_vid = vid.replace(comp+'_','', 1) + if vid.endswith("_" + comp): + new_vid = vid.replace("_" + comp, "", 1) + elif vid.startswith(comp + "_"): + new_vid = vid.replace(comp + "_", "", 1) + elif "_" + comp + "_" in vid: + new_vid = vid.replace(comp + "_", "", 1) if new_vid is not None: return new_vid, comp, True return vid, None, False -
def get_defaults(self, grid=None, compset=None, mach=None, compiler=None, mpilib=None): # pylint: disable=unused-argument + def get_defaults( + self, grid=None, compset=None, mach=None, compiler=None, mpilib=None + ): # pylint: disable=unused-argument # should we have a env_pio file defaults = OrderedDict() save_for_last = [] @@ -55,7 +57,7 @@ def get_defaults(self, grid=None, compset=None, mach=None, compiler=None, mpilib myid = self.get(node, "id") iscompvar = self.check_if_comp_var(myid)[-1] if iscompvar: - save_for_last.append( (myid, value) ) + save_for_last.append((myid, value)) else: defaults[myid] = value diff --git a/CIME/XML/standard_module_setup.py b/CIME/XML/standard_module_setup.py index d22ba32c4ac..1c934407da5 100644 --- a/CIME/XML/standard_module_setup.py +++ b/CIME/XML/standard_module_setup.py @@ -1,4 +1,3 @@ - # pragma pylint: disable=unused-import import logging, os, sys, re diff --git a/CIME/XML/stream.py b/CIME/XML/stream.py index 8729b916c0f..95dfa30ef7d 100644 --- a/CIME/XML/stream.py +++ b/CIME/XML/stream.py @@ -10,8 +10,8 @@ logger = logging.getLogger(__name__) -class Stream(GenericXML): +class Stream(GenericXML): def __init__(self, infile=None, files=None): """ initialize an object @@ -28,7 +28,7 @@ def get_value(self, item, attribute=None, resolved=True, subgroup=None): expect(subgroup is None, "This class does not support subgroups") value = None node = None - names = item.split('/') + names = item.split("/") node = None for name in names: node = self.scan_child(name, root=node) @@ -37,7 +37,7 @@ def get_value(self, item, attribute=None, resolved=True, subgroup=None): if value is None: # if all else fails - #pylint: disable=assignment-from-none + # pylint: disable=assignment-from-none value = GenericXML.get_value(self, item, attribute, resolved, subgroup) if resolved: diff --git a/CIME/XML/test_reporter.py b/CIME/XML/test_reporter.py index 1394b33880d..fc65defda09 100644 --- a/CIME/XML/test_reporter.py +++ b/CIME/XML/test_reporter.py @@ -2,70 +2,89 @@ Interface to the testreporter xml. This class inherits from GenericXML.py """ -#pylint: disable=import-error +# pylint: disable=import-error import urllib.parse import urllib.request from CIME.XML.standard_module_setup import * from CIME.XML.generic_xml import GenericXML -from CIME.utils import expect,get_model -#pylint: disable=protected-access +from CIME.utils import expect, get_model +import ssl + +# pylint: disable=protected-access +ssl._create_default_https_context = ssl._create_unverified_context -class TestReporter(GenericXML): +class TestReporter(GenericXML): def __init__(self): """ initialize an object """ - expect(get_model() == 'cesm', "testreport is only meant to populate the CESM test database." 
) + expect( + get_model() == "cesm", + "testreport is only meant to populate the CESM test database.", + ) self.root = None - GenericXML.__init__(self, root_name_override="testrecord", read_only=False, infile="TestRecord.xml") + GenericXML.__init__( + self, + root_name_override="testrecord", + read_only=False, + infile="TestRecord.xml", + ) - def setup_header(self, tagname,machine,compiler,mpilib,testroot,testtype,baseline): + def setup_header( + self, tagname, machine, compiler, mpilib, testroot, testtype, baseline + ): # # Create the XML header that the testdb is expecting to recieve # - for name, text, attribs in [ ("tag_name" , tagname , None), - ("mach" , machine , None), - ("compiler" , compiler, {"version":""}), - ("mpilib" , mpilib , {"version":""}), - ("testroot" , testroot, None), - ("testtype" , testtype, None), - ("baselinetag", baseline, None) ]: + for name, text, attribs in [ + ("tag_name", tagname, None), + ("mach", machine, None), + ("compiler", compiler, {"version": ""}), + ("mpilib", mpilib, {"version": ""}), + ("testroot", testroot, None), + ("testtype", testtype, None), + ("baselinetag", baseline, None), + ]: self.make_child(name, attributes=attribs, text=text) - def add_result(self,test_name,test_status): + def add_result(self, test_name, test_status): # # Add a test result to the XML structure. # - tlelem = self.make_child("tests", {"testname":test_name}) + tlelem = self.make_child("tests", {"testname": test_name}) - for attrib_name, text in [ ("casestatus", None), - ("comment", test_status["COMMENT"]), - ("compare", test_status["BASELINE"]), - ("memcomp", test_status["MEMCOMP"]), - ("memleak", test_status["MEMLEAK"]), - ("nlcomp", test_status["NLCOMP"]), - ("status", test_status["STATUS"]), - ("tputcomp", test_status["TPUTCOMP"]) ]: + for attrib_name, text in [ + ("casestatus", None), + ("comment", test_status["COMMENT"]), + ("compare", test_status["BASELINE"]), + ("memcomp", test_status["MEMCOMP"]), + ("memleak", test_status["MEMLEAK"]), + ("nlcomp", test_status["NLCOMP"]), + ("status", test_status["STATUS"]), + ("tputcomp", test_status["TPUTCOMP"]), + ]: - self.make_child("category", attributes={"name": attrib_name}, text=text, root=tlelem) + self.make_child( + "category", attributes={"name": attrib_name}, text=text, root=tlelem + ) def push2testdb(self): # # Post test result XML to CESM test database # xmlstr = self.get_raw_record() - username=input("Username:") + username = input("Username:") os.system("stty -echo") - password=input("Password:") + password = input("Password:") os.system("stty echo") print() - params={'username':username,'password':password,'testXML':xmlstr} - url="https://csegweb.cgd.ucar.edu/testdb/cgi-bin/processXMLtest.cgi" + params = {"username": username, "password": password, "testXML": xmlstr} + url = "https://csegweb.cgd.ucar.edu/testdb/cgi-bin/processXMLtest.cgi" data = urllib.parse.urlencode(params) - data = data.encode('ascii') + data = data.encode("ascii") req = urllib.request.Request(url, data) - result = urllib.request.urlopen(req) + result = urllib.request.urlopen(req) print(result.read()) diff --git a/CIME/XML/testlist.py b/CIME/XML/testlist.py index 17d00c8ec5e..6dbe79b8f98 100644 --- a/CIME/XML/testlist.py +++ b/CIME/XML/testlist.py @@ -42,9 +42,9 @@ logger = logging.getLogger(__name__) -class Testlist(GenericXML): - def __init__(self,infile, files=None): +class Testlist(GenericXML): + def __init__(self, infile, files=None): """ initialize an object """ @@ -53,34 +53,53 @@ def __init__(self,infile, files=None): files = Files() 
schema = files.get_schema("TESTS_SPEC_FILE") GenericXML.__init__(self, infile, schema=schema) - expect(self.get_version() >= 2.0, - "{} is an unsupported version of the testfile format and will be ignored".format(infile)) - - def get_tests(self, machine=None, category=None, compiler=None, compset=None, grid=None, supported_only=False): + expect( + self.get_version() >= 2.0, + "{} is an unsupported version of the testfile format and will be ignored".format( + infile + ), + ) + + def get_tests( + self, + machine=None, + category=None, + compiler=None, + compset=None, + grid=None, + supported_only=False, + ): tests = [] attributes = {} if compset is not None: - attributes['compset'] = compset + attributes["compset"] = compset if grid is not None: - attributes['grid'] = grid + attributes["grid"] = grid testnodes = self.get_children("test", attributes=attributes) machatts = {} if machine is not None: - machatts["name"] = machine + machatts["name"] = machine if category is not None: - machatts["category"] = category + machatts["category"] = category if compiler is not None: - machatts["compiler"] = compiler - + machatts["compiler"] = compiler for tnode in testnodes: - if supported_only and self.has(tnode, "supported") and self.get(tnode, "supported") == 'false': + if ( + supported_only + and self.has(tnode, "supported") + and self.get(tnode, "supported") == "false" + ): continue machnode = self.get_optional_child("machines", root=tnode) - machnodes = None if machnode is None else self.get_children("machine",machatts,root=machnode) + machnodes = ( + None + if machnode is None + else self.get_children("machine", machatts, root=machnode) + ) if machnodes: this_test_node = {} for key, value in self.attrib(tnode).items(): @@ -89,8 +108,6 @@ def get_tests(self, machine=None, category=None, compiler=None, compset=None, gr else: this_test_node[key] = value - - # Get options that apply to all machines/compilers for this test options = self.get_children("options", root=tnode) if len(options) > 0: @@ -108,13 +125,17 @@ def get_tests(self, machine=None, category=None, compiler=None, compset=None, gr this_test["options"] = {} for onode in optionnodes: - this_test['options'][self.get(onode, 'name')] = self.text(onode) + this_test["options"][self.get(onode, "name")] = self.text(onode) # Now get options specific to this machine/compiler options = self.get_optional_child("options", root=mach) - optionnodes = [] if options is None else self.get_children("option", root=options) + optionnodes = ( + [] + if options is None + else self.get_children("option", root=options) + ) for onode in optionnodes: - this_test['options'][self.get(onode, 'name')] = self.text(onode) + this_test["options"][self.get(onode, "name")] = self.text(onode) tests.append(this_test) diff --git a/CIME/XML/tests.py b/CIME/XML/tests.py index 86b9c965e5d..297659b2c33 100644 --- a/CIME/XML/tests.py +++ b/CIME/XML/tests.py @@ -8,9 +8,9 @@ logger = logging.getLogger(__name__) -class Tests(GenericXML): - def __init__(self, infile=None, files=None): +class Tests(GenericXML): + def __init__(self, infile=None, files=None): """ initialize an object interface to file config_tests.xml """ @@ -18,18 +18,18 @@ def __init__(self, infile=None, files=None): if files is None: files = Files() infile = files.get_value("CONFIG_TESTS_FILE") - GenericXML.__init__(self, infile) + GenericXML.__init__(self, infile) # append any component specific config_tests.xml files for comp in files.get_components("CONFIG_TESTS_FILE"): if comp is None: continue - infile = 
files.get_value("CONFIG_TESTS_FILE", attribute={"component":comp}) + infile = files.get_value("CONFIG_TESTS_FILE", attribute={"component": comp}) if os.path.isfile(infile): self.read(infile) def get_test_node(self, testname): logger.debug("Get settings for {}".format(testname)) - node = self.get_child("test",{"NAME":testname}) + node = self.get_child("test", {"NAME": testname}) logger.debug("Found {}".format(self.text(node))) return node @@ -48,8 +48,10 @@ def print_values(self, skip_infrastructure_tests=True): for one_test in all_tests: if skip_infrastructure_tests: infrastructure_test = self.get(one_test, "INFRASTRUCTURE_TEST") - if (infrastructure_test is not None and - infrastructure_test.upper() == "TRUE"): + if ( + infrastructure_test is not None + and infrastructure_test.upper() == "TRUE" + ): continue name = self.get(one_test, "NAME") desc = self.get_element_text("DESC", root=one_test) diff --git a/CIME/XML/testspec.py b/CIME/XML/testspec.py index a3647d1dd42..9b4e7c37724 100644 --- a/CIME/XML/testspec.py +++ b/CIME/XML/testspec.py @@ -7,8 +7,8 @@ logger = logging.getLogger(__name__) -class TestSpec(GenericXML): +class TestSpec(GenericXML): def __init__(self, infile): """ initialize an object @@ -17,34 +17,50 @@ def __init__(self, infile): self._testnodes = {} self._testlist_node = None if os.path.isfile(infile): - testnodes = self.get_children('test') + testnodes = self.get_children("test") for node in testnodes: self._testnodes[self.get(node, "name")] = node - def set_header(self, testroot, machine, testid, baselinetag=None, baselineroot=None): + def set_header( + self, testroot, machine, testid, baselinetag=None, baselineroot=None + ): tlelem = self.make_child("testlist") - for name, text in [ ("testroot", testroot), ("machine", machine), ("testid", testid), ("baselinetag", baselinetag), ("baselineroot", baselineroot) ]: + for name, text in [ + ("testroot", testroot), + ("machine", machine), + ("testid", testid), + ("baselinetag", baselinetag), + ("baselineroot", baselineroot), + ]: if text is not None: self.make_child(name, root=tlelem, text=text) self._testlist_node = tlelem def add_test(self, compiler, mpilib, testname): - expect(testname not in self._testnodes, "Test {} already in testlist".format(testname)) + expect( + testname not in self._testnodes, + "Test {} already in testlist".format(testname), + ) - telem = self.make_child("test", attributes={"name":testname}, root=self._testlist_node) + telem = self.make_child( + "test", attributes={"name": testname}, root=self._testlist_node + ) - for name, text in [ ("compiler", compiler), ("mpilib", mpilib) ]: + for name, text in [("compiler", compiler), ("mpilib", mpilib)]: self.make_child(name, root=telem, text=text) self._testnodes[testname] = telem def update_test_status(self, testname, phase, status): - expect(testname in self._testnodes, "Test {} not defined in testlist".format(testname)) + expect( + testname in self._testnodes, + "Test {} not defined in testlist".format(testname), + ) root = self._testnodes[testname] - pnode = self.get_optional_child("section", {"name":phase}, root=root) + pnode = self.get_optional_child("section", {"name": phase}, root=root) if pnode is not None: self.set(pnode, "status", status) else: - self.make_child("section", {"name":phase, "status":status}, root=root) + self.make_child("section", {"name": phase, "status": status}, root=root) diff --git a/CIME/XML/workflow.py b/CIME/XML/workflow.py index 8bdadbc3551..4824e03b666 100644 --- a/CIME/XML/workflow.py +++ b/CIME/XML/workflow.py @@ -9,8 
+9,8 @@ logger = logging.getLogger(__name__) -class Workflow(GenericXML): +class Workflow(GenericXML): def __init__(self, infile=None, files=None): """ initialize an object @@ -25,9 +25,9 @@ def __init__(self, infile=None, files=None): GenericXML.__init__(self, infile, schema=schema) - #Append the contents of $HOME/.cime/config_workflow.xml if it exists - #This could cause problems if node matchs are repeated when only one is expected - infile = os.path.join(os.environ.get("HOME"),".cime","config_workflow.xml") + # Append the contents of $HOME/.cime/config_workflow.xml if it exists + # This could cause problems if node matches are repeated when only one is expected + infile = os.path.join(os.environ.get("HOME"), ".cime", "config_workflow.xml") if os.path.exists(infile): GenericXML.read(self, infile) @@ -41,8 +41,13 @@ def get_workflow_jobs(self, machine, workflowid="default"): findmore = True prepend = False while findmore: - bnode = self.get_optional_child("workflow_jobs", attributes={"id":workflowid}) - expect(bnode,"No workflow {} found in file {}".format(workflowid, self.filename)) + bnode = self.get_optional_child( + "workflow_jobs", attributes={"id": workflowid} + ) + expect( + bnode, + "No workflow {} found in file {}".format(workflowid, self.filename), + ) if prepend: bnodes = [bnode] + bnodes else: @@ -64,7 +69,7 @@ def get_workflow_jobs(self, machine, workflowid="default"): for child in self.get_children(root=jnode): if self.name(child) == "runtime_parameters": attrib = self.attrib(child) - if attrib and attrib == {'MACH' : machine}: + if attrib and attrib == {"MACH": machine}: for rtchild in self.get_children(root=child): jdict[self.name(rtchild)] = self.text(rtchild) elif not attrib: diff --git a/CIME/aprun.py b/CIME/aprun.py index 30c053c07d0..835cdb15f06 100755 --- a/CIME/aprun.py +++ b/CIME/aprun.py @@ -10,11 +10,20 @@ logger = logging.getLogger(__name__) ############################################################################### -def _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, - max_tasks_per_node, max_mpitasks_per_node, - pio_numtasks, pio_async_interface, - compiler, machine, run_exe): -############################################################################### +def _get_aprun_cmd_for_case_impl( + ntasks, + nthreads, + rootpes, + pstrids, + max_tasks_per_node, + max_mpitasks_per_node, + pio_numtasks, + pio_async_interface, + compiler, + machine, + run_exe, +): + ############################################################################### """ No one really understands this code, but we can at least test it.
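A minimal sketch of what the aprun construction in this file boils down to: one "-n/-N/-d" group per contiguous run of MPI ranks that share a thread count, joined by ":". The function and tuple format below are illustrative assumptions, not the CIME API; the next hunk shows the real bookkeeping that derives those groups.

def sketch_aprun_args(groups, run_exe):
    # groups: assumed list of (task_count, tasks_per_node, thread_count)
    # tuples, one per contiguous run of ranks with equal thread counts.
    parts = [
        "-n {:d} -N {:d} -d {:d} {}".format(n, npn, d, run_exe)
        for n, npn, d in groups
    ]
    return "aprun " + " : ".join(parts)

# Example: sketch_aprun_args([(128, 32, 4), (16, 16, 1)], "./cesm.exe")
# -> 'aprun -n 128 -N 32 -d 4 ./cesm.exe : -n 16 -N 16 -d 1 ./cesm.exe'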
@@ -70,13 +79,23 @@ def _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, maxt[c1] = 1 # Compute task and thread settings for batch commands - tasks_per_node, min_tasks_per_node, task_count, thread_count, max_thread_count, total_node_count, total_task_count, aprun_args = \ - 0, max_mpitasks_per_node, 1, maxt[0], maxt[0], 0, 0, "" + ( + tasks_per_node, + min_tasks_per_node, + task_count, + thread_count, + max_thread_count, + total_node_count, + total_task_count, + aprun_args, + ) = (0, max_mpitasks_per_node, 1, maxt[0], maxt[0], 0, 0, "") c1list = list(range(1, total_tasks)) c1list.append(None) for c1 in c1list: if c1 is None or maxt[c1] != thread_count: - tasks_per_node = min(max_mpitasks_per_node, int(max_tasks_per_node / thread_count)) + tasks_per_node = min( + max_mpitasks_per_node, int(max_tasks_per_node / thread_count) + ) tasks_per_node = min(task_count, tasks_per_node) @@ -88,7 +107,13 @@ def _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, if compiler == "intel": aprun_args += " -cc numa_node" - aprun_args += " -n {:d} -N {:d} -d {:d} {} {}".format(task_count, tasks_per_node, thread_count, run_exe, "" if c1 is None else ":") + aprun_args += " -n {:d} -N {:d} -d {:d} {} {}".format( + task_count, + tasks_per_node, + thread_count, + run_exe, + "" if c1 is None else ":", + ) node_count = int(math.ceil(float(task_count) / tasks_per_node)) total_node_count += node_count @@ -105,11 +130,18 @@ def _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, else: task_count += 1 - return aprun_args, total_node_count, total_task_count, min_tasks_per_node, max_thread_count + return ( + aprun_args, + total_node_count, + total_task_count, + min_tasks_per_node, + max_thread_count, + ) + ############################################################################### def get_aprun_cmd_for_case(case, run_exe, overrides=None): -############################################################################### + ############################################################################### """ Given a case, construct and return the aprun command and optimized node count """ @@ -117,25 +149,30 @@ def get_aprun_cmd_for_case(case, run_exe, overrides=None): ntasks, nthreads, rootpes, pstrids = [], [], [], [] for model in models: model = "CPL" if model == "DRV" else model - for the_list, item_name in zip([ntasks, nthreads, rootpes, pstrids], - ["NTASKS", "NTHRDS", "ROOTPE", "PSTRID"]): + for the_list, item_name in zip( + [ntasks, nthreads, rootpes, pstrids], + ["NTASKS", "NTHRDS", "ROOTPE", "PSTRID"], + ): the_list.append(case.get_value("_".join([item_name, model]))) max_tasks_per_node = case.get_value("MAX_TASKS_PER_NODE") if overrides: - if 'max_tasks_per_node' in overrides: - max_tasks_per_node = overrides['max_tasks_per_node'] - if 'total_tasks' in overrides: - ntasks = [overrides['total_tasks'] if x > 1 else x for x in ntasks] - if 'thread_count' in overrides: - nthreads = [overrides['thread_count'] if x > 1 else x for x in nthreads] - - - - return _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, - max_tasks_per_node, - case.get_value("MAX_MPITASKS_PER_NODE"), - case.get_value("PIO_NUMTASKS"), - case.get_value("PIO_ASYNC_INTERFACE"), - case.get_value("COMPILER"), - case.get_value("MACH"), - run_exe) + if "max_tasks_per_node" in overrides: + max_tasks_per_node = overrides["max_tasks_per_node"] + if "total_tasks" in overrides: + ntasks = [overrides["total_tasks"] if x > 1 else x for x in ntasks] + if "thread_count" in overrides: + nthreads = 
[overrides["thread_count"] if x > 1 else x for x in nthreads] + + return _get_aprun_cmd_for_case_impl( + ntasks, + nthreads, + rootpes, + pstrids, + max_tasks_per_node, + case.get_value("MAX_MPITASKS_PER_NODE"), + case.get_value("PIO_NUMTASKS"), + case.get_value("PIO_ASYNC_INTERFACE"), + case.get_value("COMPILER"), + case.get_value("MACH"), + run_exe, + ) diff --git a/CIME/bless_test_results.py b/CIME/bless_test_results.py index f33c77f86f6..9d6c5141f70 100644 --- a/CIME/bless_test_results.py +++ b/CIME/bless_test_results.py @@ -1,16 +1,31 @@ import CIME.compare_namelists, CIME.simple_compare from CIME.test_scheduler import NAMELIST_PHASE -from CIME.utils import run_cmd, get_scripts_root, get_model, EnvironmentContext, parse_test_name +from CIME.utils import ( + run_cmd, + get_scripts_root, + get_model, + EnvironmentContext, + parse_test_name, +) from CIME.test_status import * from CIME.hist_utils import generate_baseline, compare_baseline from CIME.case import Case from CIME.test_utils import get_test_status_files import os, time + logger = logging.getLogger(__name__) ############################################################################### -def bless_namelists(test_name, report_only, force, baseline_name, baseline_root, new_test_root=None, new_test_id=None): -############################################################################### +def bless_namelists( + test_name, + report_only, + force, + baseline_name, + baseline_root, + new_test_root=None, + new_test_id=None, +): + ############################################################################### # Be aware that restart test will overwrite the original namelist files # with versions of the files that should not be blessed. This forces us to # re-run create_test. @@ -20,13 +35,24 @@ def bless_namelists(test_name, report_only, force, baseline_name, baseline_root, if (not report_only and (force or input("Update namelists (y/n)? 
").upper() in ["Y", "YES"])): - create_test_gen_args = " -g {} ".format(baseline_name if get_model() == "cesm" else " -g -b {} ".format(baseline_name)) + create_test_gen_args = " -g {} ".format( + baseline_name + if get_model() == "cesm" + else " -g -b {} ".format(baseline_name) + ) if new_test_root is not None: - create_test_gen_args += " --test-root={0} --output-root={0} ".format(new_test_root) + create_test_gen_args += " --test-root={0} --output-root={0} ".format( + new_test_root + ) if new_test_id is not None: create_test_gen_args += " -t {}".format(new_test_id) - stat, out, _ = run_cmd("{}/create_test {} -n {} --baseline-root {} -o".format(get_scripts_root(), test_name, create_test_gen_args, baseline_root), combine_output=True) + stat, out, _ = run_cmd( + "{}/create_test {} -n {} --baseline-root {} -o".format( + get_scripts_root(), test_name, create_test_gen_args, baseline_root + ), + combine_output=True, + ) if stat != 0: return False, "Namelist regen failed: '{}'".format(out) else: @@ -34,15 +60,20 @@ def bless_namelists(test_name, report_only, force, baseline_name, baseline_root, else: return True, None + ############################################################################### def bless_history(test_name, case, baseline_name, baseline_root, report_only, force): -############################################################################### + ############################################################################### real_user = case.get_value("REALUSER") with EnvironmentContext(USER=real_user): - baseline_full_dir = os.path.join(baseline_root, baseline_name, case.get_value("CASEBASEID")) + baseline_full_dir = os.path.join( + baseline_root, baseline_name, case.get_value("CASEBASEID") + ) - cmp_result, cmp_comments = compare_baseline(case, baseline_dir=baseline_full_dir, outfile_suffix=None) + cmp_result, cmp_comments = compare_baseline( + case, baseline_dir=baseline_full_dir, outfile_suffix=None + ) if cmp_result: logger.info("Diff appears to have been already resolved.") return True, None @@ -52,7 +83,9 @@ def bless_history(test_name, case, baseline_name, baseline_root, report_only, fo (force or input("Update this diff (y/n)? 
").upper() in ["Y", "YES"])): gen_result, gen_comments = generate_baseline(case, baseline_dir=baseline_full_dir) if not gen_result: - logger.warning("Hist file bless FAILED for test {}".format(test_name)) + logger.warning( + "Hist file bless FAILED for test {}".format(test_name) + ) return False, "Generate baseline failed: {}".format(gen_comments) else: logger.info(gen_comments) @@ -60,10 +93,24 @@ def bless_history(test_name, case, baseline_name, baseline_root, report_only, fo else: return True, None + ############################################################################### -def bless_test_results(baseline_name, baseline_root, test_root, compiler, test_id=None, namelists_only=False, hist_only=False, - report_only=False, force=False, bless_tests=None, no_skip_pass=False, new_test_root=None, new_test_id=None): -############################################################################### +def bless_test_results( + baseline_name, + baseline_root, + test_root, + compiler, + test_id=None, + namelists_only=False, + hist_only=False, + report_only=False, + force=False, + bless_tests=None, + no_skip_pass=False, + new_test_root=None, + new_test_id=None, +): + ############################################################################### test_status_files = get_test_status_files(test_root, compiler, test_id=test_id) # auto-adjust test-id if multiple rounds of tests were matched @@ -72,8 +119,10 @@ def bless_test_results(baseline_name, baseline_root, test_root, compiler, test_i timestamp = os.path.basename(os.path.dirname(test_status_file)).split(".")[-1] timestamps.add(timestamp) - if (len(timestamps) > 1): - logger.warning("Multiple sets of tests were matched! Selected only most recent tests.") + if len(timestamps) > 1: + logger.warning( + "Multiple sets of tests were matched! Selected only most recent tests." 
+ ) most_recent = sorted(timestamps)[-1] logger.info("Matched test batch is {}".format(most_recent)) @@ -93,17 +142,26 @@ def bless_test_results(baseline_name, baseline_root, test_root, compiler, test_i if test_name is None: case_dir = os.path.basename(test_dir) test_name = CIME.utils.normalize_case_id(case_dir) - if (bless_tests in [[], None] or CIME.utils.match_any(test_name, bless_tests)): - broken_blesses.append(("unknown", "test had invalid TestStatus file: '{}'".format(test_status_file))) + if bless_tests in [[], None] or CIME.utils.match_any( + test_name, bless_tests + ): + broken_blesses.append( + ( + "unknown", + "test had invalid TestStatus file: '{}'".format( + test_status_file + ), + ) + ) continue else: continue - if (bless_tests in [[], None] or CIME.utils.match_any(test_name, bless_tests)): + if bless_tests in [[], None] or CIME.utils.match_any(test_name, bless_tests): overall_result = ts.get_overall_test_status()[0] # See if we need to bless namelist - if (not hist_only): + if not hist_only: if no_skip_pass: nl_bless = True else: @@ -112,15 +170,21 @@ def bless_test_results(baseline_name, baseline_root, test_root, compiler, test_i nl_bless = False # See if we need to bless baselines - if (not namelists_only and not build_only): + if not namelists_only and not build_only: run_result = ts.get_status(RUN_PHASE) - if (run_result is None): + if run_result is None: broken_blesses.append((test_name, "no run phase")) - logger.warning("Test '{}' did not make it to run phase".format(test_name)) + logger.warning( + "Test '{}' did not make it to run phase".format(test_name) + ) hist_bless = False - elif (run_result != TEST_PASS_STATUS): + elif run_result != TEST_PASS_STATUS: broken_blesses.append((test_name, "test did not pass")) - logger.warning("Test '{}' did not pass, not safe to bless, test status = {}".format(test_name, ts.phase_statuses_dump())) + logger.warning( + "Test '{}' did not pass, not safe to bless, test status = {}".format( + test_name, ts.phase_statuses_dump() + ) + ) hist_bless = False elif no_skip_pass: hist_bless = True @@ -131,13 +195,25 @@ def bless_test_results(baseline_name, baseline_root, test_root, compiler, test_i # Now, do the bless if not nl_bless and not hist_bless: - logger.info("Nothing to bless for test: {}, overall status: {}".format(test_name, overall_result)) + logger.info( + "Nothing to bless for test: {}, overall status: {}".format( + test_name, overall_result + ) + ) else: - logger.info("###############################################################################") - logger.info("Blessing results for test: {}, most recent result: {}".format(test_name, overall_result)) + logger.info( + "###############################################################################" + ) + logger.info( + "Blessing results for test: {}, most recent result: {}".format( + test_name, overall_result + ) + ) logger.info("Case dir: {}".format(test_dir)) - logger.info("###############################################################################") + logger.info( + "###############################################################################" + ) if not force: time.sleep(2) @@ -146,7 +222,9 @@ def bless_test_results(baseline_name, baseline_root, test_root, compiler, test_i if baseline_name is None: baseline_name_resolved = case.get_value("BASELINE_NAME_CMP") if not baseline_name_resolved: - baseline_name_resolved = CIME.utils.get_current_branch(repo=CIME.utils.get_cime_root()) + baseline_name_resolved = CIME.utils.get_current_branch( + repo=CIME.utils.get_cime_root() 
+ ) else: baseline_name_resolved = baseline_name @@ -156,16 +234,28 @@ def bless_test_results(baseline_name, baseline_root, test_root, compiler, test_i baseline_root_resolved = baseline_root if baseline_name_resolved is None: - broken_blesses.append((test_name, "Could not determine baseline name")) + broken_blesses.append( + (test_name, "Could not determine baseline name") + ) continue if baseline_root_resolved is None: - broken_blesses.append((test_name, "Could not determine baseline root")) + broken_blesses.append( + (test_name, "Could not determine baseline root") + ) continue # Bless namelists if nl_bless: - success, reason = bless_namelists(test_name, report_only, force, baseline_name_resolved, baseline_root_resolved, new_test_root=new_test_root, new_test_id=new_test_id) + success, reason = bless_namelists( + test_name, + report_only, + force, + baseline_name_resolved, + baseline_root_resolved, + new_test_root=new_test_root, + new_test_id=new_test_id, + ) if not success: broken_blesses.append((test_name, reason)) @@ -173,17 +263,28 @@ def bless_test_results(baseline_name, baseline_root, test_root, compiler, test_i if hist_bless: if "HOMME" in test_name: success = False - reason = "HOMME tests cannot be blessed with bless_for_tests" + reason = ( + "HOMME tests cannot be blessed with bless_for_tests" + ) else: - success, reason = bless_history(test_name, case, baseline_name_resolved, baseline_root_resolved, report_only, force) + success, reason = bless_history( + test_name, + case, + baseline_name_resolved, + baseline_root_resolved, + report_only, + force, + ) - if (not success): + if not success: broken_blesses.append((test_name, reason)) # Make sure user knows that some tests were not blessed success = True for broken_bless, reason in broken_blesses: - logger.warning("FAILED TO BLESS TEST: {}, reason {}".format(broken_bless, reason)) + logger.warning( + "FAILED TO BLESS TEST: {}, reason {}".format(broken_bless, reason) + ) success = False return success diff --git a/CIME/build.py b/CIME/build.py index a5b1d54b0f0..cb1876db733 100644 --- a/CIME/build.py +++ b/CIME/build.py @@ -3,24 +3,89 @@ """ import glob, shutil, time, threading, subprocess from pathlib import Path -from CIME.XML.standard_module_setup import * -from CIME.utils import get_model, analyze_build_log, \ - stringify_bool, run_and_log_case_status, get_timestamp, run_sub_or_cmd, \ - run_cmd, get_batch_script_for_job, gzip_existing_file, safe_copy, \ - is_python_executable, get_logging_options, import_from_file -from CIME.provenance import save_build_provenance as save_build_provenance_sub -from CIME.locked_files import lock_file, unlock_file -from CIME.XML.files import Files +from CIME.XML.standard_module_setup import * +from CIME.utils import ( + get_model, + analyze_build_log, + stringify_bool, + run_and_log_case_status, + get_timestamp, + run_sub_or_cmd, + run_cmd, + get_batch_script_for_job, + gzip_existing_file, + safe_copy, + is_python_executable, + get_logging_options, + import_from_file, +) +from CIME.provenance import save_build_provenance as save_build_provenance_sub +from CIME.locked_files import lock_file, unlock_file +from CIME.XML.files import Files logger = logging.getLogger(__name__) -_CMD_ARGS_FOR_BUILD = \ - ("CASEROOT", "CASETOOLS", "CIMEROOT", "SRCROOT", "COMP_INTERFACE", - "COMPILER", "DEBUG", "EXEROOT", "INCROOT", "LIBROOT", - "MACH", "MPILIB", "NINST_VALUE", "OS", "PIO_VERSION", - "SHAREDLIBROOT", "SMP_PRESENT", "USE_ESMF_LIB", "USE_MOAB", - "CAM_CONFIG_OPTS", "COMP_LND", "COMPARE_TO_NUOPC", 
"HOMME_TARGET", - "OCN_SUBMODEL", "CISM_USE_TRILINOS", "USE_TRILINOS", "USE_ALBANY", "USE_PETSC") +_CMD_ARGS_FOR_BUILD = ( + "CASEROOT", + "CASETOOLS", + "CIMEROOT", + "SRCROOT", + "COMP_INTERFACE", + "COMPILER", + "DEBUG", + "EXEROOT", + "INCROOT", + "LIBROOT", + "MACH", + "MPILIB", + "NINST_VALUE", + "OS", + "PIO_VERSION", + "SHAREDLIBROOT", + "SMP_PRESENT", + "USE_ESMF_LIB", + "USE_MOAB", + "CAM_CONFIG_OPTS", + "COMP_LND", + "COMPARE_TO_NUOPC", + "HOMME_TARGET", + "OCN_SUBMODEL", + "CISM_USE_TRILINOS", + "USE_TRILINOS", + "USE_ALBANY", + "USE_PETSC", +) + + +def get_makefile_vars(case, caseroot, comp=None): + """ + Run cmake and process output to a list of variable settings + """ + cmake_args = get_standard_cmake_args(case, "DO_NOT_USE", shared_lib=True) + dcomp = "-DCOMP_NAME={}".format(comp) if comp else "" + output = run_cmd_no_fail( + "cmake -DCONVERT_TO_MAKE=ON {dcomp} {cmake_args} .".format( + dcomp=dcomp, cmake_args=cmake_args + ), + combine_output=True, + from_dir=os.path.join(caseroot, "cmaketmp"), + ) + + lines_to_keep = [] + for line in output.splitlines(): + if "CIME_SET_MAKEFILE_VAR" in line and "BUILD_INTERNAL_IGNORE" not in line: + lines_to_keep.append(line) + + output_to_keep = "\n".join(lines_to_keep) + "\n" + output_to_keep = ( + output_to_keep.replace("CIME_SET_MAKEFILE_VAR ", "") + .replace("CPPDEFS := ", "CPPDEFS := $(CPPDEFS) ") + .replace("SLIBS := ", "SLIBS := $(SLIBS) ") + + "\n" + ) + + return output_to_keep + def generate_makefile_macro(case, caseroot): """ @@ -33,55 +98,74 @@ def generate_makefile_macro(case, caseroot): if not os.path.isdir(new_cmake_dir): return cmake_lists = os.path.join(new_cmake_dir, "CMakeLists.txt") - Path(os.path.join(caseroot,"cmaketmp")).mkdir(parents=False, exist_ok=True) + Path(os.path.join(caseroot, "cmaketmp")).mkdir(parents=False, exist_ok=True) safe_copy(cmake_lists, "cmaketmp") # Append CMakeLists.txt with compset specific stuff comps = _get_compset_comps(case) - comps.extend(["mct", "pio{}".format(case.get_value("PIO_VERSION")), "gptl", "csm_share", "csm_share_cpl7"]) + comps.extend( + [ + "mct", + "pio{}".format(case.get_value("PIO_VERSION")), + "gptl", + "csm_share", + "csm_share_cpl7", + ] + ) cmake_macro = os.path.join(caseroot, "Macros.cmake") - expect(os.path.exists(cmake_macro), "Cannot generate Makefile macro without {}".format(cmake_macro)) + expect( + os.path.exists(cmake_macro), + "Cannot generate Makefile macro without {}".format(cmake_macro), + ) - cmake_args = get_standard_cmake_args(case, "DO_NOT_USE", shared_lib=True) # run once with no COMP_NAME - output = run_cmd_no_fail("cmake -DCONVERT_TO_MAKE=ON {} . 2>&1 | grep CIME_SET_MAKEFILE_VAR | grep -v BUILD_INTERNAL_IGNORE".format(cmake_args), from_dir=os.path.join(caseroot,"cmaketmp")) - real_output = output.replace("CIME_SET_MAKEFILE_VAR ", "").\ - replace("CPPDEFS := ", "CPPDEFS := $(CPPDEFS) ").\ - replace("SLIBS := ", "SLIBS := $(SLIBS) ") + "\n" - base_output = real_output.splitlines() + no_comp_output = get_makefile_vars(case, caseroot) + all_output = no_comp_output + no_comp_lines = no_comp_output.splitlines() + for comp in comps: - output = run_cmd_no_fail("cmake -DCOMP_NAME={comp} -DCONVERT_TO_MAKE=ON {cmake_args} . 
2>&1 | grep CIME_SET_MAKEFILE_VAR | grep -v BUILD_INTERNAL_IGNORE".format(comp=comp, cmake_args=cmake_args), from_dir=os.path.join(caseroot,"cmaketmp"))
+        comp_output = get_makefile_vars(case, caseroot, comp=comp)

         # The Tools/Makefile may have already added things to CPPDEFS and SLIBS
-        comp_output = (output.replace("CIME_SET_MAKEFILE_VAR ", "").\
-                       replace("CPPDEFS := ", "CPPDEFS := $(CPPDEFS) ").\
-                       replace("SLIBS := ", "SLIBS := $(SLIBS) ")).splitlines()
-        for line in comp_output:
-            if line not in base_output:
-                real_output += "ifeq \"$(COMP_NAME)\" \"{}\"\n".format(comp)
-                real_output += " "+line+"\n"
-                real_output += "\nendif\n"
+        comp_lines = comp_output.splitlines()
+        first = True
+        for comp_line in comp_lines:
+            if comp_line not in no_comp_lines:
+                if first:
+                    all_output += 'ifeq "$(COMP_NAME)" "{}"\n'.format(comp)
+                    first = False
+
+                all_output += " " + comp_line + "\n"
+
+        if not first:
+            all_output += "endif\n"

     with open(os.path.join(caseroot, "Macros.make"), "w") as fd:
         fd.write(
-"""
+            """
 # This file is auto-generated, do not edit. If you want to change
 # sharedlib flags, you can edit the cmake_macros in this case. You
 # can change flags for specific sharedlibs only by checking COMP_NAME.
-""")
-        fd.write(real_output)
+"""
+        )
+        fd.write(all_output)
+
+    shutil.rmtree(os.path.join(caseroot, "cmaketmp"))

-    shutil.rmtree(os.path.join(caseroot,"cmaketmp"))

 def get_standard_makefile_args(case, shared_lib=False):
     make_args = "CIME_MODEL={} ".format(case.get_value("MODEL"))
     make_args += " SMP={} ".format(stringify_bool(case.get_build_threaded()))
-    expect(not (uses_kokkos(case) and not shared_lib), "Kokkos is not supported for classic Makefile build system")
+    expect(
+        not (uses_kokkos(case) and not shared_lib),
+        "Kokkos is not supported for classic Makefile build system",
+    )
     for var in _CMD_ARGS_FOR_BUILD:
         make_args += xml_to_make_variable(case, var)

     return make_args

+
 def _get_compset_comps(case):
     comps = []
     driver = case.get_value("COMP_INTERFACE")
@@ -95,17 +179,22 @@ def _get_compset_comps(case):
             comps.append(comp)
     return comps

+
 def get_standard_cmake_args(case, sharedpath, shared_lib=False):
     cmake_args = "-DCIME_MODEL={} ".format(case.get_value("MODEL"))
     cmake_args += "-DSRC_ROOT={} ".format(case.get_value("SRCROOT"))
-    cmake_args += " -Dcompile_threaded={} ".format(stringify_bool(case.get_build_threaded()))
+    cmake_args += " -Dcompile_threaded={} ".format(
+        stringify_bool(case.get_build_threaded())
+    )
     ocn_model = case.get_value("COMP_OCN")
     atm_model = case.get_value("COMP_ATM")
-    if ocn_model == 'mom' or atm_model == "fv3gfs":
+    if ocn_model == "mom" or atm_model == "fv3gfs":
         cmake_args += " -DUSE_FMS=TRUE "

-    cmake_args += " -DINSTALL_SHAREDPATH={} ".format(os.path.join(case.get_value("EXEROOT"), sharedpath))
+    cmake_args += " -DINSTALL_SHAREDPATH={} ".format(
+        os.path.join(case.get_value("EXEROOT"), sharedpath)
+    )

     if not shared_lib:
         cmake_args += " -DUSE_KOKKOS={} ".format(stringify_bool(uses_kokkos(case)))
@@ -123,6 +212,7 @@ def get_standard_cmake_args(case, sharedpath, shared_lib=False):

     return cmake_args

+
 def xml_to_make_variable(case, varname, cmake=False):
     varvalue = case.get_value(varname)
     if varvalue is None:
@@ -131,22 +221,38 @@ def xml_to_make_variable(case, varname, cmake=False):
         varvalue = stringify_bool(varvalue)

     if cmake or isinstance(varvalue, str):
-        return "{}{}=\"{}\" ".format("-D" if cmake else "", varname, varvalue)
+        return '{}{}="{}" '.format("-D" if cmake else "", varname, varvalue)
     else:
         return "{}={} ".format(varname, varvalue)

+
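[Editor's note: the COMP_NAME guard that generate_makefile_macro writes into Macros.make is easier to see outside the diff. The sketch below is an editor's illustration only, not code from this change; the helper name wrap_comp_specific_vars and the component "foo" are hypothetical. It restates the logic above: any line produced only by a component's cmake pass gets wrapped in an ifeq so the generic settings stay unconditional.]

def wrap_comp_specific_vars(comp, no_comp_lines, comp_lines):
    """Wrap Makefile settings unique to one component in a COMP_NAME guard."""
    # Keep only lines the generic (no-COMP_NAME) cmake pass did not produce.
    extra = [line for line in comp_lines if line not in no_comp_lines]
    if not extra:
        return ""
    body = "\n".join(" " + line for line in extra)
    return 'ifeq "$(COMP_NAME)" "{}"\n{}\nendif\n'.format(comp, body)

# Example:
#   wrap_comp_specific_vars("foo",
#       ["CPPDEFS := $(CPPDEFS) -DSHARED"],
#       ["CPPDEFS := $(CPPDEFS) -DSHARED", "SLIBS := $(SLIBS) -lfoo"])
# returns a block that only takes effect when make runs with COMP_NAME=foo:
#   ifeq "$(COMP_NAME)" "foo"
#    SLIBS := $(SLIBS) -lfoo
#   endif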
############################################################################### def uses_kokkos(case): -############################################################################### + ############################################################################### cam_target = case.get_value("CAM_TARGET") # atm_comp = case.get_value("COMP_ATM") # scream does not use the shared kokkoslib for now - return get_model() == "e3sm" and cam_target in ("preqx_kokkos", "theta-l", "theta-l_kokkos") + return get_model() == "e3sm" and cam_target in ( + "preqx_kokkos", + "theta-l", + "theta-l_kokkos", + ) + ############################################################################### -def _build_model(build_threaded, exeroot, incroot, complist, - lid, caseroot, cimeroot, compiler, buildlist, comp_interface): -############################################################################### +def _build_model( + build_threaded, + exeroot, + incroot, + complist, + lid, + caseroot, + cimeroot, + compiler, + buildlist, + comp_interface, +): + ############################################################################### logs = [] thread_bad_results = [] @@ -187,15 +293,28 @@ def _build_model(build_threaded, exeroot, incroot, complist, # build the component library # thread_bad_results captures error output from thread (expected to be empty) # logs is a list of log files to be compressed and added to the case logs/bld directory - t = threading.Thread(target=_build_model_thread, - args=(config_dir, model, comp, caseroot, libroot, bldroot, incroot, file_build, - thread_bad_results, smp, compiler)) + t = threading.Thread( + target=_build_model_thread, + args=( + config_dir, + model, + comp, + caseroot, + libroot, + bldroot, + incroot, + file_build, + thread_bad_results, + smp, + compiler, + ), + ) t.start() logs.append(file_build) # Wait for threads to finish - while(threading.active_count() > 1): + while threading.active_count() > 1: time.sleep(1) expect(not thread_bad_results, "\n".join(thread_bad_results)) @@ -209,49 +328,78 @@ def _build_model(build_threaded, exeroot, incroot, complist, file_build = os.path.join(exeroot, "{}.bldlog.{}".format(cime_model, lid)) ufs_driver = os.environ.get("UFS_DRIVER") - if cime_model == 'ufs' and ufs_driver == 'nems': - config_dir = os.path.join(cimeroot,os.pardir,"src","model","NEMS","cime","cime_config") + if cime_model == "ufs" and ufs_driver == "nems": + config_dir = os.path.join( + cimeroot, os.pardir, "src", "model", "NEMS", "cime", "cime_config" + ) else: files = Files(comp_interface=comp_interface) if comp_interface == "nuopc": - config_dir = os.path.join(os.path.dirname(files.get_value("BUILD_LIB_FILE",{"lib":"CMEPS"}))) + config_dir = os.path.join( + os.path.dirname(files.get_value("BUILD_LIB_FILE", {"lib": "CMEPS"})) + ) else: - config_dir = os.path.join(files.get_value("COMP_ROOT_DIR_CPL"),"cime_config") - - expect(os.path.exists(config_dir), "Config directory not found {}".format(config_dir)) + config_dir = os.path.join( + files.get_value("COMP_ROOT_DIR_CPL"), "cime_config" + ) + + expect( + os.path.exists(config_dir), + "Config directory not found {}".format(config_dir), + ) if "cpl" in complist: bldroot = os.path.join(exeroot, "cpl", "obj") if not os.path.isdir(bldroot): os.makedirs(bldroot) - logger.info("Building {} from {}/buildexe with output to {} ".format(cime_model, config_dir, file_build)) + logger.info( + "Building {} from {}/buildexe with output to {} ".format( + cime_model, config_dir, file_build + ) + ) with open(file_build, "w") as fd: - stat = 
run_cmd("{}/buildexe {} {} {} " - .format(config_dir, caseroot, libroot, bldroot), - from_dir=bldroot, arg_stdout=fd, - arg_stderr=subprocess.STDOUT)[0] + stat = run_cmd( + "{}/buildexe {} {} {} ".format(config_dir, caseroot, libroot, bldroot), + from_dir=bldroot, + arg_stdout=fd, + arg_stderr=subprocess.STDOUT, + )[0] analyze_build_log("{} exe".format(cime_model), file_build, compiler) expect(stat == 0, "BUILD FAIL: buildexe failed, cat {}".format(file_build)) # Copy the just-built ${MODEL}.exe to ${MODEL}.exe.$LID - safe_copy("{}/{}.exe".format(exeroot, cime_model), "{}/{}.exe.{}".format(exeroot, cime_model, lid)) + safe_copy( + "{}/{}.exe".format(exeroot, cime_model), + "{}/{}.exe.{}".format(exeroot, cime_model, lid), + ) logs.append(file_build) return logs + ############################################################################### -def _build_model_cmake(exeroot, complist, lid, buildlist, comp_interface, - sharedpath, separate_builds, ninja, dry_run, case): -############################################################################### +def _build_model_cmake( + exeroot, + complist, + lid, + buildlist, + comp_interface, + sharedpath, + separate_builds, + ninja, + dry_run, + case, +): + ############################################################################### cime_model = get_model() - bldroot = os.path.join(exeroot, "cmake-bld") - libroot = os.path.join(exeroot, "lib") - bldlog = os.path.join(exeroot, "{}.bldlog.{}".format(cime_model, lid)) - srcroot = case.get_value("SRCROOT") - gmake_j = case.get_value("GMAKE_J") - gmake = case.get_value("GMAKE") + bldroot = os.path.join(exeroot, "cmake-bld") + libroot = os.path.join(exeroot, "lib") + bldlog = os.path.join(exeroot, "{}.bldlog.{}".format(cime_model, lid)) + srcroot = case.get_value("SRCROOT") + gmake_j = case.get_value("GMAKE_J") + gmake = case.get_value("GMAKE") # make sure bldroot and libroot exist for build_dir in [bldroot, libroot]: @@ -266,9 +414,13 @@ def _build_model_cmake(exeroot, complist, lid, buildlist, comp_interface, for model, _, _, _, config_dir in complist: # Create the Filepath and CIME_cppdefs files if model == "cpl": - config_dir = os.path.join(files.get_value("COMP_ROOT_DIR_CPL"),"cime_config") + config_dir = os.path.join( + files.get_value("COMP_ROOT_DIR_CPL"), "cime_config" + ) - cmp_cmake_args += _create_build_metadata_for_component(config_dir, libroot, bldroot, case) + cmp_cmake_args += _create_build_metadata_for_component( + config_dir, libroot, bldroot, case + ) all_models.append(model) # Call CMake @@ -285,12 +437,18 @@ def _build_model_cmake(exeroot, complist, lid, buildlist, comp_interface, # - component-specific cmake args # - path to src folder do_timing = "/usr/bin/time -p " if os.path.exists("/usr/bin/time") else "" - cmake_cmd = "{} {}cmake {} {} {}/components".format(cmake_env, do_timing, cmake_args, cmp_cmake_args, srcroot) + cmake_cmd = "{} {}cmake {} {} {}/components".format( + cmake_env, do_timing, cmake_args, cmp_cmake_args, srcroot + ) stat = 0 if dry_run: logger.info("CMake cmd:\ncd {} && {}\n\n".format(bldroot, cmake_cmd)) else: - logger.info("Configuring full {} model with output to file {}".format(cime_model, bldlog)) + logger.info( + "Configuring full {} model with output to file {}".format( + cime_model, bldlog + ) + ) logger.info(" Calling cmake directly, see top of log file for specific call") with open(bldlog, "w") as fd: fd.write("Configuring with cmake cmd:\n{}\n\n".format(cmake_cmd)) @@ -298,7 +456,10 @@ def _build_model_cmake(exeroot, complist, lid, buildlist, 
comp_interface,
             # Add logging before running
             cmake_cmd = "({}) >> {} 2>&1".format(cmake_cmd, bldlog)
             stat = run_cmd(cmake_cmd, from_dir=bldroot)[0]
-    expect(stat == 0, "BUILD FAIL: cmake config {} failed, cat {}".format(cime_model, bldlog))
+    expect(
+        stat == 0,
+        "BUILD FAIL: cmake config {} failed, cat {}".format(cime_model, bldlog),
+    )

     # Set up buildlist
     if not buildlist:
@@ -309,14 +470,18 @@ def _build_model_cmake(exeroot, complist, lid, buildlist, comp_interface,
         if "cpl" in buildlist:
             buildlist.remove("cpl")
-            buildlist.append("cpl") # must come at end
+            buildlist.append("cpl")  # must come at end

     # Call Make
     logs = []

     for model in buildlist:
         t1 = time.time()
-        make_cmd = "{}{} -j {}".format(do_timing, gmake if not ninja else "{} -v".format(os.path.join(ninja_path, "ninja")), gmake_j)
+        make_cmd = "{}{} -j {}".format(
+            do_timing,
+            gmake if not ninja else "{} -v".format(os.path.join(ninja_path, "ninja")),
+            gmake_j,
+        )
         if model != "cpl":
             make_cmd += " {}".format(model)
         curr_log = os.path.join(exeroot, "{}.bldlog.{}".format(model, lid))
@@ -328,7 +493,9 @@ def _build_model_cmake(exeroot, complist, lid, buildlist, comp_interface,
         if dry_run:
             logger.info("Build cmd:\ncd {} && {}\n\n".format(bldroot, make_cmd))
         else:
-            logger.info("Building {} model with output to file {}".format(model_name, curr_log))
+            logger.info(
+                "Building {} model with output to file {}".format(model_name, curr_log)
+            )
             logger.info(" Calling make, see top of log file for specific call")
             with open(curr_log, "a") as fd:
                 fd.write("\n\nBuilding with cmd:\n{}\n\n".format(make_cmd))
@@ -336,7 +503,10 @@ def _build_model_cmake(exeroot, complist, lid, buildlist, comp_interface,
                 # Add logging before running
                 make_cmd = "({}) >> {} 2>&1".format(make_cmd, curr_log)
                 stat = run_cmd(make_cmd, from_dir=bldroot)[0]
-        expect(stat == 0, "BUILD FAIL: build {} failed, cat {}".format(model_name, curr_log))
+        expect(
+            stat == 0,
+            "BUILD FAIL: build {} failed, cat {}".format(model_name, curr_log),
+        )

         t2 = time.time()
         if separate_builds:
@@ -348,23 +518,41 @@ def _build_model_cmake(exeroot, complist, lid, buildlist, comp_interface,

     # Copy the just-built ${MODEL}.exe to ${MODEL}.exe.$LID
     if "cpl" in buildlist:
-        safe_copy("{}/{}.exe".format(exeroot, cime_model), "{}/{}.exe.{}".format(exeroot, cime_model, lid))
+        safe_copy(
+            "{}/{}.exe".format(exeroot, cime_model),
+            "{}/{}.exe.{}".format(exeroot, cime_model, lid),
+        )

     return logs

+
 ###############################################################################
-def _build_checks(case, build_threaded, comp_interface,
-                  debug, compiler, mpilib, complist, ninst_build, smp_value,
-                  model_only, buildlist):
-###############################################################################
+def _build_checks(
+    case,
+    build_threaded,
+    comp_interface,
+    debug,
+    compiler,
+    mpilib,
+    complist,
+    ninst_build,
+    smp_value,
+    model_only,
+    buildlist,
+):
+    ###############################################################################
    """
    check if a build needs to be done and warn if a clean is warranted first
    returns the relative sharedpath directory for sharedlibraries
    """
-    smp_build    = case.get_value("SMP_BUILD")
+    smp_build = case.get_value("SMP_BUILD")
     build_status = case.get_value("BUILD_STATUS")

-    expect(comp_interface in ("mct", "moab", "nuopc"),
-           "Only supporting mct, nuopc, or moab comp_interfaces at this time, found {}".format(comp_interface))
+    expect(
+        comp_interface in ("mct", "moab", "nuopc"),
+        "Only supporting mct, nuopc, or moab comp_interfaces at this time, found 
{}".format( + comp_interface + ), + ) smpstr = "" ninst_value = "" for model, _, nthrds, ninst, _ in complist: @@ -374,7 +562,7 @@ def _build_checks(case, build_threaded, comp_interface, smpstr += "{}1".format(model[0]) else: smpstr += "{}0".format(model[0]) - ninst_value += "{}{:d}".format((model[0]),ninst) + ninst_value += "{}{:d}".format((model[0]), ninst) case.set_value("SMP_VALUE", smpstr) case.set_value("NINST_VALUE", ninst_value) @@ -383,11 +571,15 @@ def _build_checks(case, build_threaded, comp_interface, threaddir = "threads" if build_threaded else "nothreads" sharedpath = os.path.join(compiler, mpilib, debugdir, threaddir, comp_interface) - logger.debug("compiler={} mpilib={} debugdir={} threaddir={}" - .format(compiler,mpilib,debugdir,threaddir)) + logger.debug( + "compiler={} mpilib={} debugdir={} threaddir={}".format( + compiler, mpilib, debugdir, threaddir + ) + ) - expect(ninst_build == ninst_value or ninst_build == "0", - """ + expect( + ninst_build == ninst_value or ninst_build == "0", + """ ERROR, NINST VALUES HAVE CHANGED NINST_BUILD = {} NINST_VALUE = {} @@ -399,10 +591,14 @@ def _build_checks(case, build_threaded, comp_interface, You can override this error message at your own risk by executing: ./xmlchange -file env_build.xml -id NINST_BUILD -val 0 Then rerun the build script interactively -""".format(ninst_build, ninst_value)) - - expect(smp_build == smpstr or smp_build == "0", - """ +""".format( + ninst_build, ninst_value + ), + ) + + expect( + smp_build == smpstr or smp_build == "0", + """ ERROR, SMP VALUES HAVE CHANGED SMP_BUILD = {} SMP_VALUE = {} @@ -415,15 +611,20 @@ def _build_checks(case, build_threaded, comp_interface, You can override this error message at your own risk by executing: ./xmlchange -file env_build.xml -id SMP_BUILD -val 0 Then rerun the build script interactively -""".format(smp_build, smp_value, smpstr)) - - expect(build_status == 0, - """ +""".format( + smp_build, smp_value, smpstr + ), + ) + + expect( + build_status == 0, + """ ERROR env_build HAS CHANGED A manual clean of your obj directories is required You should execute the following: ./case.build --clean-all -""") +""", + ) case.set_value("BUILD_COMPLETE", False) @@ -437,14 +638,27 @@ def _build_checks(case, build_threaded, comp_interface, return sharedpath + ############################################################################### -def _build_libraries(case, exeroot, sharedpath, caseroot, cimeroot, libroot, lid, compiler, buildlist, comp_interface, complist): -############################################################################### +def _build_libraries( + case, + exeroot, + sharedpath, + caseroot, + cimeroot, + libroot, + lid, + compiler, + buildlist, + comp_interface, + complist, +): + ############################################################################### shared_lib = os.path.join(exeroot, sharedpath, "lib") shared_inc = os.path.join(exeroot, sharedpath, "include") for shared_item in [shared_lib, shared_inc]: - if (not os.path.exists(shared_item)): + if not os.path.exists(shared_item): os.makedirs(shared_item) mpilib = case.get_value("MPILIB") @@ -455,7 +669,7 @@ def _build_libraries(case, exeroot, sharedpath, caseroot, cimeroot, libroot, lid cpl_in_complist = True if ufs_driver: logger.info("UFS_DRIVER is set to {}".format(ufs_driver)) - if ufs_driver and ufs_driver == 'nems' and not cpl_in_complist: + if ufs_driver and ufs_driver == "nems" and not cpl_in_complist: libs = [] elif case.get_value("MODEL") == "cesm" and comp_interface == "nuopc": libs = ["gptl", 
"mct", "pio", "csm_share"] @@ -472,17 +686,17 @@ def _build_libraries(case, exeroot, sharedpath, caseroot, cimeroot, libroot, lid # Build shared code of CDEPS nuopc data models build_script = {} - if (comp_interface == "nuopc" and (not ufs_driver or ufs_driver != 'nems')): + if comp_interface == "nuopc" and (not ufs_driver or ufs_driver != "nems"): libs.append("CDEPS") ocn_model = case.get_value("COMP_OCN") atm_model = case.get_value("COMP_ATM") - if ocn_model == 'mom' or atm_model == "fv3gfs": + if ocn_model == "mom" or atm_model == "fv3gfs": libs.append("FMS") files = Files(comp_interface=comp_interface) for lib in libs: - build_script[lib] = files.get_value("BUILD_LIB_FILE",{"lib":lib}) + build_script[lib] = files.get_value("BUILD_LIB_FILE", {"lib": lib}) sharedlibroot = os.path.abspath(case.get_value("SHAREDLIBROOT")) # Check if we need to build our own cprnc @@ -493,14 +707,13 @@ def _build_libraries(case, exeroot, sharedpath, caseroot, cimeroot, libroot, lid case.set_value("CCSM_CPRNC", os.path.join(full_lib_path, "cprnc")) if not os.path.isdir(full_lib_path): os.makedirs(full_lib_path) - libs.insert(0,"cprnc") + libs.insert(0, "cprnc") logs = [] # generate Makefile macro generate_makefile_macro(case, caseroot) - for lib in libs: if buildlist is not None and lib not in buildlist: continue @@ -516,19 +729,29 @@ def _build_libraries(case, exeroot, sharedpath, caseroot, cimeroot, libroot, lid full_lib_path = os.path.join(sharedlibroot, sharedpath, lib) # pio build creates its own directory - if (lib != "pio" and not os.path.isdir(full_lib_path)): + if lib != "pio" and not os.path.isdir(full_lib_path): os.makedirs(full_lib_path) file_build = os.path.join(exeroot, "{}.bldlog.{}".format(lib, lid)) if lib in build_script.keys(): my_file = build_script[lib] else: - my_file = os.path.join(cimeroot, "src", "build_scripts", "buildlib.{}".format(lib)) - expect(os.path.exists(my_file),"Build script {} for component {} not found.".format(my_file, lib)) - logger.info("Building {} with output to file {}".format(lib,file_build)) - - run_sub_or_cmd(my_file, [full_lib_path, os.path.join(exeroot, sharedpath), caseroot], 'buildlib', - [full_lib_path, os.path.join(exeroot, sharedpath), case], logfile=file_build) + my_file = os.path.join( + cimeroot, "src", "build_scripts", "buildlib.{}".format(lib) + ) + expect( + os.path.exists(my_file), + "Build script {} for component {} not found.".format(my_file, lib), + ) + logger.info("Building {} with output to file {}".format(lib, file_build)) + + run_sub_or_cmd( + my_file, + [full_lib_path, os.path.join(exeroot, sharedpath), caseroot], + "buildlib", + [full_lib_path, os.path.join(exeroot, sharedpath), case], + logfile=file_build, + ) analyze_build_log(lib, file_build, compiler) logs.append(file_build) @@ -544,32 +767,59 @@ def _build_libraries(case, exeroot, sharedpath, caseroot, cimeroot, libroot, lid if comp_lnd == "clm": logging.info(" - Building clm library ") esmfdir = "esmf" if case.get_value("USE_ESMF_LIB") else "noesmf" - bldroot = os.path.join(sharedlibroot, sharedpath, comp_interface, esmfdir, "clm","obj" ) + bldroot = os.path.join( + sharedlibroot, sharedpath, comp_interface, esmfdir, "clm", "obj" + ) libroot = os.path.join(exeroot, sharedpath, comp_interface, esmfdir, "lib") - incroot = os.path.join(exeroot, sharedpath, comp_interface, esmfdir, "include") - file_build = os.path.join(exeroot, "lnd.bldlog.{}".format( lid)) + incroot = os.path.join( + exeroot, sharedpath, comp_interface, esmfdir, "include" + ) + file_build = os.path.join(exeroot, 
"lnd.bldlog.{}".format(lid)) config_lnd_dir = os.path.dirname(case.get_value("CONFIG_LND_FILE")) for ndir in [bldroot, libroot, incroot]: - if (not os.path.isdir(ndir)): + if not os.path.isdir(ndir): os.makedirs(ndir) smp = "SMP" in os.environ and os.environ["SMP"] == "TRUE" # thread_bad_results captures error output from thread (expected to be empty) # logs is a list of log files to be compressed and added to the case logs/bld directory thread_bad_results = [] - _build_model_thread(config_lnd_dir, "lnd", comp_lnd, caseroot, libroot, bldroot, incroot, - file_build, thread_bad_results, smp, compiler) + _build_model_thread( + config_lnd_dir, + "lnd", + comp_lnd, + caseroot, + libroot, + bldroot, + incroot, + file_build, + thread_bad_results, + smp, + compiler, + ) logs.append(file_build) expect(not thread_bad_results, "\n".join(thread_bad_results)) - case.flush() # python sharedlib subs may have made XML modifications + case.flush() # python sharedlib subs may have made XML modifications return logs + ############################################################################### -def _build_model_thread(config_dir, compclass, compname, caseroot, libroot, bldroot, incroot, file_build, - thread_bad_results, smp, compiler): -############################################################################### +def _build_model_thread( + config_dir, + compclass, + compname, + caseroot, + libroot, + bldroot, + incroot, + file_build, + thread_bad_results, + smp, + compiler, +): + ############################################################################### logger.info("Building {} with output to {}".format(compclass, file_build)) t1 = time.time() cmd = os.path.join(caseroot, "SourceMods", "src." + compname, "buildlib") @@ -579,8 +829,14 @@ def _build_model_thread(config_dir, compclass, compname, caseroot, libroot, bldr cmd = os.path.join(config_dir, "buildlib") expect(os.path.isfile(cmd), "Could not find buildlib for {}".format(compname)) - compile_cmd = "COMP_CLASS={compclass} COMP_NAME={compname} {cmd} {caseroot} {libroot} {bldroot} ".\ - format(compclass=compclass, compname=compname, cmd=cmd, caseroot=caseroot, libroot=libroot, bldroot=bldroot) + compile_cmd = "COMP_CLASS={compclass} COMP_NAME={compname} {cmd} {caseroot} {libroot} {bldroot} ".format( + compclass=compclass, + compname=compname, + cmd=cmd, + caseroot=caseroot, + libroot=libroot, + bldroot=bldroot, + ) if get_model() != "ufs": compile_cmd = "SMP={} {}".format(stringify_bool(smp), compile_cmd) @@ -590,12 +846,14 @@ def _build_model_thread(config_dir, compclass, compname, caseroot, libroot, bldr compile_cmd = compile_cmd + logging_options with open(file_build, "w") as fd: - stat = run_cmd(compile_cmd, - from_dir=bldroot, arg_stdout=fd, - arg_stderr=subprocess.STDOUT)[0] + stat = run_cmd( + compile_cmd, from_dir=bldroot, arg_stdout=fd, arg_stderr=subprocess.STDOUT + )[0] if stat != 0: - thread_bad_results.append("BUILD FAIL: {}.buildlib failed, cat {}".format(compname, file_build)) + thread_bad_results.append( + "BUILD FAIL: {}.buildlib failed, cat {}".format(compname, file_build) + ) analyze_build_log(compclass, file_build, compiler) @@ -605,42 +863,48 @@ def _build_model_thread(config_dir, compclass, compname, caseroot, libroot, bldr t2 = time.time() logger.info("{} built in {:f} seconds".format(compname, (t2 - t1))) + ############################################################################### def _create_build_metadata_for_component(config_dir, libroot, bldroot, case): 
-############################################################################### + ############################################################################### """ Ensure that crucial Filepath and CIME_CPPDEFS files exist for this component. In many cases, the bld/configure script will have already created these. """ bc_path = os.path.join(config_dir, "buildlib_cmake") expect(os.path.exists(bc_path), "Missing: {}".format(bc_path)) - buildlib = import_from_file("buildlib_cmake", os.path.join(config_dir, - "buildlib_cmake")) + buildlib = import_from_file( + "buildlib_cmake", os.path.join(config_dir, "buildlib_cmake") + ) cmake_args = buildlib.buildlib(bldroot, libroot, case) return "" if cmake_args is None else cmake_args + ############################################################################### def _clean_impl(case, cleanlist, clean_all, clean_depends): -############################################################################### + ############################################################################### exeroot = os.path.abspath(case.get_value("EXEROOT")) case.load_env() if clean_all: # If cleanlist is empty just remove the bld directory - expect(exeroot is not None,"No EXEROOT defined in case") + expect(exeroot is not None, "No EXEROOT defined in case") if os.path.isdir(exeroot): logging.info("cleaning directory {}".format(exeroot)) shutil.rmtree(exeroot) # if clean_all is True also remove the sharedlibpath sharedlibroot = os.path.abspath(case.get_value("SHAREDLIBROOT")) - expect(sharedlibroot is not None,"No SHAREDLIBROOT defined in case") + expect(sharedlibroot is not None, "No SHAREDLIBROOT defined in case") if sharedlibroot != exeroot and os.path.isdir(sharedlibroot): logging.warning("cleaning directory {}".format(sharedlibroot)) shutil.rmtree(sharedlibroot) else: - expect((cleanlist is not None and len(cleanlist) > 0) or - (clean_depends is not None and len(clean_depends)),"Empty cleanlist not expected") + expect( + (cleanlist is not None and len(cleanlist) > 0) + or (clean_depends is not None and len(clean_depends)), + "Empty cleanlist not expected", + ) gmake = case.get_value("GMAKE") cleanlist = [] if cleanlist is None else cleanlist @@ -649,8 +913,11 @@ def _clean_impl(case, cleanlist, clean_all, clean_depends): cmake_comp_root = os.path.join(exeroot, "cmake-bld", "cmake") casetools = case.get_value("CASETOOLS") - classic_cmd = "{} -f {} {}".format(gmake, os.path.join(casetools, "Makefile"), - get_standard_makefile_args(case, shared_lib=True)) + classic_cmd = "{} -f {} {}".format( + gmake, + os.path.join(casetools, "Makefile"), + get_standard_makefile_args(case, shared_lib=True), + ) for clean_item in things_to_clean: logging.info("Cleaning {}".format(clean_item)) @@ -662,7 +929,11 @@ def _clean_impl(case, cleanlist, clean_all, clean_depends): # Item was created by classic build system # do I need this? 
generate_makefile_macro(case, caseroot, clean_item) - clean_cmd = "{} {}{}".format(classic_cmd, "clean" if clean_item in cleanlist else "clean_depends", clean_item) + clean_cmd = "{} {}{}".format( + classic_cmd, + "clean" if clean_item in cleanlist else "clean_depends", + clean_item, + ) logger.info("calling {}".format(clean_cmd)) run_cmd_no_fail(clean_cmd) @@ -671,23 +942,37 @@ def _clean_impl(case, cleanlist, clean_all, clean_depends): unlock_file("env_build.xml") # reset following values in xml files - case.set_value("SMP_BUILD",str(0)) - case.set_value("NINST_BUILD",str(0)) - case.set_value("BUILD_STATUS",str(0)) - case.set_value("BUILD_COMPLETE","FALSE") + case.set_value("SMP_BUILD", str(0)) + case.set_value("NINST_BUILD", str(0)) + case.set_value("BUILD_STATUS", str(0)) + case.set_value("BUILD_COMPLETE", "FALSE") case.flush() + ############################################################################### -def _case_build_impl(caseroot, case, sharedlib_only, model_only, buildlist, - save_build_provenance, separate_builds, ninja, dry_run): -############################################################################### +def _case_build_impl( + caseroot, + case, + sharedlib_only, + model_only, + buildlist, + save_build_provenance, + separate_builds, + ninja, + dry_run, +): + ############################################################################### t1 = time.time() - expect(not (sharedlib_only and model_only), - "Contradiction: both sharedlib_only and model_only") - expect(not (dry_run and not model_only), - "Dry-run is only for model builds, please build sharedlibs first") + expect( + not (sharedlib_only and model_only), + "Contradiction: both sharedlib_only and model_only", + ) + expect( + not (dry_run and not model_only), + "Dry-run is only for model builds, please build sharedlibs first", + ) logger.info("Building case in directory {}".format(caseroot)) logger.info("sharedlib_only is {}".format(sharedlib_only)) logger.info("model_only is {}".format(model_only)) @@ -695,8 +980,10 @@ def _case_build_impl(caseroot, case, sharedlib_only, model_only, buildlist, expect(os.path.isdir(caseroot), "'{}' is not a valid directory".format(caseroot)) os.chdir(caseroot) - expect(os.path.exists(get_batch_script_for_job(case.get_primary_job())), - "ERROR: must invoke case.setup script before calling build script ") + expect( + os.path.exists(get_batch_script_for_job(case.get_primary_job())), + "ERROR: must invoke case.setup script before calling build script ", + ) cimeroot = case.get_value("CIMEROOT") @@ -709,46 +996,61 @@ def _case_build_impl(caseroot, case, sharedlib_only, model_only, buildlist, # needs to be unset before building again. 
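# Editor's aside, not part of this diff: the MODEL guard that follows is
# ordinary environment hygiene. An equivalent one-liner would be
#     os.environ.pop("MODEL", None)
# which removes the variable if present and is a no-op otherwise.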
if "MODEL" in os.environ: del os.environ["MODEL"] - build_threaded = case.get_build_threaded() - exeroot = os.path.abspath(case.get_value("EXEROOT")) - incroot = os.path.abspath(case.get_value("INCROOT")) - libroot = os.path.abspath(case.get_value("LIBROOT")) + build_threaded = case.get_build_threaded() + exeroot = os.path.abspath(case.get_value("EXEROOT")) + incroot = os.path.abspath(case.get_value("INCROOT")) + libroot = os.path.abspath(case.get_value("LIBROOT")) multi_driver = case.get_value("MULTI_DRIVER") complist = [] ninst = 1 - comp_interface = case.get_value("COMP_INTERFACE") + comp_interface = case.get_value("COMP_INTERFACE") for comp_class in comp_classes: if comp_class == "CPL": config_dir = None if multi_driver: ninst = case.get_value("NINST_MAX") else: - config_dir = os.path.dirname(case.get_value("CONFIG_{}_FILE".format(comp_class))) + config_dir = os.path.dirname( + case.get_value("CONFIG_{}_FILE".format(comp_class)) + ) if multi_driver: ninst = 1 else: ninst = case.get_value("NINST_{}".format(comp_class)) comp = case.get_value("COMP_{}".format(comp_class)) - if comp_interface == 'nuopc' and comp in ('satm', 'slnd', 'sesp', 'sglc', 'srof', 'sice', 'socn', 'swav', 'siac'): + if comp_interface == "nuopc" and comp in ( + "satm", + "slnd", + "sesp", + "sglc", + "srof", + "sice", + "socn", + "swav", + "siac", + ): continue - thrds = case.get_value("NTHRDS_{}".format(comp_class)) - expect(ninst is not None,"Failed to get ninst for comp_class {}".format(comp_class)) - complist.append((comp_class.lower(), comp, thrds, ninst, config_dir )) + thrds = case.get_value("NTHRDS_{}".format(comp_class)) + expect( + ninst is not None, + "Failed to get ninst for comp_class {}".format(comp_class), + ) + complist.append((comp_class.lower(), comp, thrds, ninst, config_dir)) os.environ["COMP_{}".format(comp_class)] = comp - compiler = case.get_value("COMPILER") - mpilib = case.get_value("MPILIB") - debug = case.get_value("DEBUG") - ninst_build = case.get_value("NINST_BUILD") - smp_value = case.get_value("SMP_VALUE") - clm_use_petsc = case.get_value("CLM_USE_PETSC") - cism_use_trilinos = case.get_value("CISM_USE_TRILINOS") - mali_use_albany = case.get_value("MALI_USE_ALBANY") - mach = case.get_value("MACH") + compiler = case.get_value("COMPILER") + mpilib = case.get_value("MPILIB") + debug = case.get_value("DEBUG") + ninst_build = case.get_value("NINST_BUILD") + smp_value = case.get_value("SMP_VALUE") + clm_use_petsc = case.get_value("CLM_USE_PETSC") + cism_use_trilinos = case.get_value("CISM_USE_TRILINOS") + mali_use_albany = case.get_value("MALI_USE_ALBANY") + mach = case.get_value("MACH") # Load some params into env - os.environ["BUILD_THREADED"] = stringify_bool(build_threaded) + os.environ["BUILD_THREADED"] = stringify_bool(build_threaded) cime_model = get_model() if cime_model == "e3sm" and mach == "titan" and compiler == "pgiacc": @@ -757,7 +1059,7 @@ def _case_build_impl(caseroot, case, sharedlib_only, model_only, buildlist, # This is a timestamp for the build , not the same as the testid, # and this case may not be a test anyway. For a production # experiment there may be many builds of the same case. 
- lid = get_timestamp("%y%m%d-%H%M%S") + lid = get_timestamp("%y%m%d-%H%M%S") os.environ["LID"] = lid # Set the overall USE_PETSC variable to TRUE if any of the @@ -790,32 +1092,83 @@ def _case_build_impl(caseroot, case, sharedlib_only, model_only, buildlist, # Load modules case.load_env() - sharedpath = _build_checks(case, build_threaded, comp_interface, - debug, compiler, mpilib, complist, ninst_build, smp_value, - model_only, buildlist) + sharedpath = _build_checks( + case, + build_threaded, + comp_interface, + debug, + compiler, + mpilib, + complist, + ninst_build, + smp_value, + model_only, + buildlist, + ) logs = [] if not model_only: - logs = _build_libraries(case, exeroot, sharedpath, caseroot, - cimeroot, libroot, lid, compiler, buildlist, comp_interface, complist) + logs = _build_libraries( + case, + exeroot, + sharedpath, + caseroot, + cimeroot, + libroot, + lid, + compiler, + buildlist, + comp_interface, + complist, + ) if not sharedlib_only: if get_model() == "e3sm": - logs.extend(_build_model_cmake(exeroot, complist, lid, buildlist, comp_interface, - sharedpath, separate_builds, ninja, dry_run, case)) + logs.extend( + _build_model_cmake( + exeroot, + complist, + lid, + buildlist, + comp_interface, + sharedpath, + separate_builds, + ninja, + dry_run, + case, + ) + ) else: - os.environ["INSTALL_SHAREDPATH"] = os.path.join(exeroot, sharedpath) # for MPAS makefile generators - logs.extend(_build_model(build_threaded, exeroot, incroot, complist, - lid, caseroot, cimeroot, compiler, buildlist, comp_interface)) + os.environ["INSTALL_SHAREDPATH"] = os.path.join( + exeroot, sharedpath + ) # for MPAS makefile generators + logs.extend( + _build_model( + build_threaded, + exeroot, + incroot, + complist, + lid, + caseroot, + cimeroot, + compiler, + buildlist, + comp_interface, + ) + ) if not buildlist: # in case component build scripts updated the xml files, update the case object case.read_xml() # Note, doing buildlists will never result in the system thinking the build is complete - post_build(case, logs, build_complete=not (buildlist or sharedlib_only), - save_build_provenance=save_build_provenance) + post_build( + case, + logs, + build_complete=not (buildlist or sharedlib_only), + save_build_provenance=save_build_provenance, + ) t2 = time.time() @@ -825,15 +1178,18 @@ def _case_build_impl(caseroot, case, sharedlib_only, model_only, buildlist, return True + ############################################################################### def post_build(case, logs, build_complete=False, save_build_provenance=True): -############################################################################### + ############################################################################### for log in logs: gzip_existing_file(log) if build_complete: # must ensure there's an lid - lid = os.environ["LID"] if "LID" in os.environ else get_timestamp("%y%m%d-%H%M%S") + lid = ( + os.environ["LID"] if "LID" in os.environ else get_timestamp("%y%m%d-%H%M%S") + ) if save_build_provenance: save_build_provenance_sub(case, lid=lid) # Set XML to indicate build complete @@ -846,20 +1202,43 @@ def post_build(case, logs, build_complete=False, save_build_provenance=True): lock_file("env_build.xml", caseroot=case.get_value("CASEROOT")) + ############################################################################### -def case_build(caseroot, case, sharedlib_only=False, model_only=False, buildlist=None, save_build_provenance=True, separate_builds=False, ninja=False, dry_run=False): 
-############################################################################### - functor = lambda: _case_build_impl(caseroot, case, sharedlib_only, model_only, buildlist, - save_build_provenance, separate_builds, ninja, dry_run) +def case_build( + caseroot, + case, + sharedlib_only=False, + model_only=False, + buildlist=None, + save_build_provenance=True, + separate_builds=False, + ninja=False, + dry_run=False, +): + ############################################################################### + functor = lambda: _case_build_impl( + caseroot, + case, + sharedlib_only, + model_only, + buildlist, + save_build_provenance, + separate_builds, + ninja, + dry_run, + ) cb = "case.build" - if (sharedlib_only == True): + if sharedlib_only == True: cb = cb + " (SHAREDLIB_BUILD)" - if (model_only == True): + if model_only == True: cb = cb + " (MODEL_BUILD)" return run_and_log_case_status(functor, cb, caseroot=caseroot) + ############################################################################### def clean(case, cleanlist=None, clean_all=False, clean_depends=None): -############################################################################### + ############################################################################### functor = lambda: _clean_impl(case, cleanlist, clean_all, clean_depends) - return run_and_log_case_status(functor, "build.clean", caseroot=case.get_value("CASEROOT")) + return run_and_log_case_status( + functor, "build.clean", caseroot=case.get_value("CASEROOT") + ) diff --git a/CIME/build_scripts/buildlib.cprnc b/CIME/build_scripts/buildlib.cprnc index 5db5b43a4ff..1683a9a6296 100755 --- a/CIME/build_scripts/buildlib.cprnc +++ b/CIME/build_scripts/buildlib.cprnc @@ -1,7 +1,8 @@ #!/usr/bin/env python3 -import sys,os +import sys, os + _CIMEROOT = os.getenv("CIMEROOT") -sys.path.append(os.path.join(_CIMEROOT,"scripts","Tools")) +sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools")) from standard_script_setup import * from CIME.utils import run_bld_cmd_ensure_logging @@ -10,8 +11,9 @@ from CIME.build import get_standard_cmake_args logger = logging.getLogger(__name__) + def parse_command_line(args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( usage="""\n{0} [--debug] OR @@ -22,52 +24,65 @@ OR \033[1mEXAMPLES:\033[0m \033[1;32m# Run \033[0m > {0} -""" .format (os.path.basename(args[0])), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter -) +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument("buildroot", - help="build path root") + parser.add_argument("buildroot", help="build path root") - parser.add_argument("installpath", - help="install path ") + parser.add_argument("installpath", help="install path ") - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory to build") + parser.add_argument( + "caseroot", nargs="?", default=os.getcwd(), help="Case directory to build" + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.buildroot, args.installpath, args.caseroot + ############################################################################### def buildlib(bldroot, installpath, case): 
-############################################################################### + ############################################################################### cimeroot = case.get_value("CIMEROOT") # Generate macros and environment compiler = case.get_value("COMPILER") - run_bld_cmd_ensure_logging("{}/tools/configure --mpilib=mpi-serial --macros-format=CMake --machine={} --compiler={}".format(cimeroot, case.get_value("MACH"), compiler), logger, from_dir=bldroot) + run_bld_cmd_ensure_logging( + "{}/tools/configure --mpilib=mpi-serial --macros-format=CMake --machine={} --compiler={}".format( + cimeroot, case.get_value("MACH"), compiler + ), + logger, + from_dir=bldroot, + ) cmake_args = get_standard_cmake_args(case, "ignore_sharedpath", shared_lib=True) os.environ["CIMEROOT"] = cimeroot - cmake_cmd = ". ./.env_mach_specific.sh && NETCDF=$(dirname $(dirname $(which nf-config))) cmake {cmake_args} -DMPILIB=mpi-serial -DDEBUG=FALSE -C Macros.cmake {cimeroot}/tools/cprnc -DCMAKE_PREFIX_PATH={dest_path} -DBLDROOT={bldroot}".\ - format(cimeroot=cimeroot, dest_path=installpath, cmake_args=cmake_args, bldroot=bldroot) + cmake_cmd = ". ./.env_mach_specific.sh && NETCDF=$(dirname $(dirname $(which nf-config))) cmake {cmake_args} -DMPILIB=mpi-serial -DDEBUG=FALSE -C Macros.cmake {cimeroot}/tools/cprnc -DCMAKE_PREFIX_PATH={dest_path} -DBLDROOT={bldroot}".format( + cimeroot=cimeroot, dest_path=installpath, cmake_args=cmake_args, bldroot=bldroot + ) run_bld_cmd_ensure_logging(cmake_cmd, logger, from_dir=bldroot) gmake_cmd = case.get_value("GMAKE") gmake_j = case.get_value("GMAKE_J") - run_bld_cmd_ensure_logging(". ./.env_mach_specific.sh && {} VERBOSE=1 -j {}".format(gmake_cmd, gmake_j), logger, from_dir=bldroot) + run_bld_cmd_ensure_logging( + ". ./.env_mach_specific.sh && {} VERBOSE=1 -j {}".format(gmake_cmd, gmake_j), + logger, + from_dir=bldroot, + ) + def _main(argv, documentation): bldroot, installpath, caseroot = parse_command_line(argv, documentation) with Case(caseroot, read_only=False) as case: buildlib(bldroot, installpath, case) -if (__name__ == "__main__"): + +if __name__ == "__main__": _main(sys.argv, __doc__) diff --git a/CIME/build_scripts/buildlib.gptl b/CIME/build_scripts/buildlib.gptl index 8f637aa03d5..14d84ef2329 100755 --- a/CIME/build_scripts/buildlib.gptl +++ b/CIME/build_scripts/buildlib.gptl @@ -1,7 +1,8 @@ #!/usr/bin/env python3 -import sys,os +import sys, os + cimeroot = os.getenv("CIMEROOT") -sys.path.append(os.path.join(cimeroot,"scripts","Tools")) +sys.path.append(os.path.join(cimeroot, "scripts", "Tools")) from standard_script_setup import * from CIME.utils import run_bld_cmd_ensure_logging @@ -10,8 +11,9 @@ from CIME.build import get_standard_makefile_args logger = logging.getLogger(__name__) + def parse_command_line(args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( usage="""\n{0} [--debug] OR @@ -22,38 +24,42 @@ OR \033[1mEXAMPLES:\033[0m \033[1;32m# Run \033[0m > {0} -""" .format (os.path.basename(args[0])), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter -) +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument("buildroot", - help="build path root") + parser.add_argument("buildroot", help="build path root") - 
parser.add_argument("installpath", - help="install path ") + parser.add_argument("installpath", help="install path ") - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory to build") + parser.add_argument( + "caseroot", nargs="?", default=os.getcwd(), help="Case directory to build" + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.buildroot, args.installpath, args.caseroot + def buildlib(bldroot, installpath, case): -############################################################################### + ############################################################################### caseroot = case.get_value("CASEROOT") comp_interface = case.get_value("COMP_INTERFACE") gptl_dir = os.path.join(case.get_value("CIMEROOT"), "src", "share", "timing") - gmake_opts = "-f {gptl}/Makefile install -C {bldroot} MACFILE={macfile} COMP_NAME=gptl GPTL_DIR={gptl} GPTL_LIBDIR={bldroot}"\ - " SHAREDPATH={install} COMP_INTERFACE={comp_interface} {stdargs} "\ - .format(gptl=gptl_dir, bldroot=bldroot, macfile=os.path.join(caseroot,"Macros.make"), - install=installpath, comp_interface=comp_interface, stdargs=get_standard_makefile_args(case, shared_lib=True)) + gmake_opts = "-f {gptl}/Makefile install -C {bldroot} MACFILE={macfile} COMP_NAME=gptl GPTL_DIR={gptl} GPTL_LIBDIR={bldroot}" " SHAREDPATH={install} COMP_INTERFACE={comp_interface} {stdargs} ".format( + gptl=gptl_dir, + bldroot=bldroot, + macfile=os.path.join(caseroot, "Macros.make"), + install=installpath, + comp_interface=comp_interface, + stdargs=get_standard_makefile_args(case, shared_lib=True), + ) gmake_cmd = case.get_value("GMAKE") @@ -61,10 +67,12 @@ def buildlib(bldroot, installpath, case): cmd = "{} {}".format(gmake_cmd, gmake_opts) run_bld_cmd_ensure_logging(cmd, logger) + def _main(argv, documentation): bldroot, installpath, caseroot = parse_command_line(argv, documentation) with Case(caseroot) as case: buildlib(bldroot, installpath, case) -if (__name__ == "__main__"): + +if __name__ == "__main__": _main(sys.argv, __doc__) diff --git a/CIME/build_scripts/buildlib.internal_components b/CIME/build_scripts/buildlib.internal_components index f7a71981183..6bc08770d54 100755 --- a/CIME/build_scripts/buildlib.internal_components +++ b/CIME/build_scripts/buildlib.internal_components @@ -16,6 +16,7 @@ from standard_script_setup import * from CIME.buildlib import build_cime_component_lib, parse_input from CIME.case import Case + def buildlib(bldroot, libroot, case, compname=None): if compname is None: thisdir = os.path.dirname(os.path.abspath(__file__)) @@ -24,13 +25,15 @@ def buildlib(bldroot, libroot, case, compname=None): if dir1 == "cime_config": compname = dir2 else: - compname = dir1.split('.')[1] + compname = dir1.split(".")[1] build_cime_component_lib(case, compname, libroot, bldroot) + def _main_func(args): caseroot, libroot, bldroot = parse_input(args) with Case(caseroot) as case: buildlib(bldroot, libroot, case) + if __name__ == "__main__": _main_func(sys.argv) diff --git a/CIME/build_scripts/buildlib.kokkos b/CIME/build_scripts/buildlib.kokkos index 258162f3868..b9d3d545239 100755 --- a/CIME/build_scripts/buildlib.kokkos +++ b/CIME/build_scripts/buildlib.kokkos @@ -7,8 +7,9 @@ from CIME.build import get_standard_makefile_args logger = logging.getLogger(__name__) + def parse_command_line(args, description): -############################################################################### + ############################################################################### 
parser = argparse.ArgumentParser( usage="""\n{0} [--debug] OR @@ -19,31 +20,31 @@ OR \033[1mEXAMPLES:\033[0m \033[1;32m# Run \033[0m > {0} -""" .format (os.path.basename(args[0])), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter -) +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument("buildroot", - help="build path root") + parser.add_argument("buildroot", help="build path root") - parser.add_argument("installpath", - help="install path ") + parser.add_argument("installpath", help="install path ") - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory to build") + parser.add_argument( + "caseroot", nargs="?", default=os.getcwd(), help="Case directory to build" + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.buildroot, args.installpath, args.caseroot + ############################################################################### def buildlib(bldroot, installpath, case): -############################################################################### + ############################################################################### srcroot = case.get_value("SRCROOT") kokkos_dir = os.path.join(srcroot, "externals", "kokkos") expect(os.path.isdir(kokkos_dir), "Missing kokkos submodule") @@ -52,7 +53,9 @@ def buildlib(bldroot, installpath, case): # (generated from config_compilers.xml), but we want to otherwise # let kokkos control flags make_args = get_standard_makefile_args(case, shared_lib=True) - stat, output, _ = run_cmd("make -f Macros.make {} -p | grep KOKKOS_OPTIONS".format(make_args)) + stat, output, _ = run_cmd( + "make -f Macros.make {} -p | grep KOKKOS_OPTIONS".format(make_args) + ) if stat == 0: kokkos_options = output.split(":=")[-1].strip() else: @@ -61,28 +64,43 @@ def buildlib(bldroot, installpath, case): build_threaded = case.get_build_threaded() if build_threaded: kokkos_options += " --with-openmp" - logger.warning("Failed to find custom kokkos options, using default: {:s}.". 
- format(kokkos_options)) + logger.warning( + "Failed to find custom kokkos options, using default: {:s}.".format( + kokkos_options + ) + ) if "--with-cuda" in kokkos_options: cxx = os.path.join(kokkos_dir, "bin/nvcc_wrapper") else: - cxx = run_cmd_no_fail("make -f Macros.make {} -p | grep SCXX".format(make_args)).split(":=")[-1].strip() + cxx = ( + run_cmd_no_fail("make -f Macros.make {} -p | grep SCXX".format(make_args)) + .split(":=")[-1] + .strip() + ) gmake_cmd = case.get_value("GMAKE") gmake_j = case.get_value("GMAKE_J") - gen_makefile_cmd = "{kokkos_dir}/generate_makefile.bash {kokkos_options} --disable-tests --compiler={cxx} --prefix={installpath}"\ - .format(kokkos_dir=kokkos_dir, kokkos_options=kokkos_options, cxx=cxx, installpath=installpath) + gen_makefile_cmd = "{kokkos_dir}/generate_makefile.bash {kokkos_options} --disable-tests --compiler={cxx} --prefix={installpath}".format( + kokkos_dir=kokkos_dir, + kokkos_options=kokkos_options, + cxx=cxx, + installpath=installpath, + ) run_bld_cmd_ensure_logging(gen_makefile_cmd, logger, from_dir=bldroot) - run_bld_cmd_ensure_logging("{} -j {}".format(gmake_cmd, gmake_j), logger, from_dir=bldroot) + run_bld_cmd_ensure_logging( + "{} -j {}".format(gmake_cmd, gmake_j), logger, from_dir=bldroot + ) run_bld_cmd_ensure_logging("{} install".format(gmake_cmd), logger, from_dir=bldroot) + def _main(argv, documentation): bldroot, installpath, caseroot = parse_command_line(argv, documentation) with Case(caseroot, read_only=False) as case: buildlib(bldroot, installpath, case) -if (__name__ == "__main__"): + +if __name__ == "__main__": _main(sys.argv, __doc__) diff --git a/CIME/build_scripts/buildlib.mct b/CIME/build_scripts/buildlib.mct index dda0c947c5a..0a83e076f51 100755 --- a/CIME/build_scripts/buildlib.mct +++ b/CIME/build_scripts/buildlib.mct @@ -1,7 +1,8 @@ #!/usr/bin/env python3 -import sys,os +import sys, os + _CIMEROOT = os.getenv("CIMEROOT") -sys.path.append(os.path.join(_CIMEROOT,"scripts","Tools")) +sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools")) from standard_script_setup import * from CIME.utils import copyifnewer, run_bld_cmd_ensure_logging, get_model, expect @@ -11,8 +12,9 @@ import glob logger = logging.getLogger(__name__) + def parse_command_line(args, description): -############################################################################### + ############################################################################### parser = argparse.ArgumentParser( usage="""\n{0} [--debug] OR @@ -23,50 +25,56 @@ OR \033[1mEXAMPLES:\033[0m \033[1;32m# Run \033[0m > {0} -""" .format (os.path.basename(args[0])), - -description=description, - -formatter_class=argparse.ArgumentDefaultsHelpFormatter -) +""".format( + os.path.basename(args[0]) + ), + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument("buildroot", - help="build path root") + parser.add_argument("buildroot", help="build path root") - parser.add_argument("installpath", - help="install path ") + parser.add_argument("installpath", help="install path ") - parser.add_argument("caseroot", nargs="?", default=os.getcwd(), - help="Case directory to build") + parser.add_argument( + "caseroot", nargs="?", default=os.getcwd(), help="Case directory to build" + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) return args.buildroot, args.installpath, args.caseroot + def buildlib(bldroot, installpath, case): 
-###############################################################################
+    ###############################################################################
     caseroot = case.get_value("CASEROOT")
     cimeroot = case.get_value("CIMEROOT")
-    expect(os.path.abspath(os.path.realpath(cimeroot)) == os.path.abspath(os.path.realpath(_CIMEROOT)), "CIMEROOT mismatch {} vs {}".format(_CIMEROOT,cimeroot))
-    srcroot = case.get_value("SRCROOT")
+    expect(
+        os.path.abspath(os.path.realpath(cimeroot))
+        == os.path.abspath(os.path.realpath(_CIMEROOT)),
+        "CIMEROOT mismatch {} vs {}".format(_CIMEROOT, cimeroot),
+    )
+    srcroot = case.get_value("SRCROOT")
     if get_model() == "cesm":
-        mct_dir = os.path.join(srcroot,"libraries","mct")
+        mct_dir = os.path.join(srcroot, "libraries", "mct")
     else:
-        mct_dir = os.path.join(srcroot,"externals","mct")
+        mct_dir = os.path.join(srcroot, "externals", "mct")
     for _dir in ("mct", "mpeu"):
-        if not os.path.isdir(os.path.join(bldroot,_dir)):
-            os.makedirs(os.path.join(bldroot,_dir))
-        copyifnewer(os.path.join(mct_dir,_dir,"Makefile"),
-                    os.path.join(bldroot,_dir,"Makefile"))
-
-    gmake_opts = "-f {} ".format(os.path.join(caseroot,"Tools","Makefile"))
+        if not os.path.isdir(os.path.join(bldroot, _dir)):
+            os.makedirs(os.path.join(bldroot, _dir))
+        copyifnewer(
+            os.path.join(mct_dir, _dir, "Makefile"),
+            os.path.join(bldroot, _dir, "Makefile"),
+        )
+
+    gmake_opts = "-f {} ".format(os.path.join(caseroot, "Tools", "Makefile"))
     gmake_opts += " -C {} ".format(bldroot)
     gmake_opts += get_standard_makefile_args(case, shared_lib=True)
-    gmake_opts += "COMP_NAME=mct {}".format(os.path.join(bldroot,"Makefile.conf"))
+    gmake_opts += "COMP_NAME=mct {}".format(os.path.join(bldroot, "Makefile.conf"))

     gmake_cmd = case.get_value("GMAKE")
@@ -75,7 +83,7 @@ def buildlib(bldroot, installpath, case):
     run_bld_cmd_ensure_logging(cmd, logger)

     # Now we run the mct make command
-    gmake_opts = "-f {} ".format(os.path.join(mct_dir,"Makefile"))
+    gmake_opts = "-f {} ".format(os.path.join(mct_dir, "Makefile"))
     gmake_opts += " -C {} ".format(bldroot)
     gmake_opts += " -j {} ".format(case.get_value("GMAKE_J"))
     gmake_opts += " SRCDIR={} ".format(os.path.join(mct_dir))
@@ -84,17 +92,23 @@ def buildlib(bldroot, installpath, case):
     run_bld_cmd_ensure_logging(cmd, logger)

     for _dir in ("mct", "mpeu"):
-        for _file in glob.iglob(os.path.join(bldroot,_dir,"*.a")):
-            logger.info("Installing {} to {}".format(_file,installpath))
-            copyifnewer(_file, os.path.join(installpath, "lib", os.path.basename(_file)))
-        for _file in glob.iglob(os.path.join(bldroot,_dir,"*.mod")):
-            logger.info("Installing {} to {}".format(_file,installpath))
-            copyifnewer(_file, os.path.join(installpath, "include", os.path.basename(_file)))
+        for _file in glob.iglob(os.path.join(bldroot, _dir, "*.a")):
+            logger.info("Installing {} to {}".format(_file, installpath))
+            copyifnewer(
+                _file, os.path.join(installpath, "lib", os.path.basename(_file))
+            )
+        for _file in glob.iglob(os.path.join(bldroot, _dir, "*.mod")):
+            logger.info("Installing {} to {}".format(_file, installpath))
+            copyifnewer(
+                _file, os.path.join(installpath, "include", os.path.basename(_file))
+            )
+

 def _main(argv, documentation):
     bldroot, installpath, caseroot = parse_command_line(argv, documentation)
     with Case(caseroot) as case:
         buildlib(bldroot, installpath, case)

-if (__name__ == "__main__"):
+
+if __name__ == "__main__":
     _main(sys.argv, __doc__)
diff --git a/CIME/build_scripts/buildlib.mpi-serial b/CIME/build_scripts/buildlib.mpi-serial
index b789711f9a8..8a6f90306ad 100755
--- a/CIME/build_scripts/buildlib.mpi-serial
+++ b/CIME/build_scripts/buildlib.mpi-serial
@@ -7,8 +7,9 @@ import glob

 logger = logging.getLogger(__name__)

+
 def parse_command_line(args, description):
-###############################################################################
+    ###############################################################################
     parser = argparse.ArgumentParser(
         usage="""\n{0} [--debug]
 OR
@@ -19,45 +20,47 @@ OR
 \033[1mEXAMPLES:\033[0m
     \033[1;32m# Run \033[0m
     > {0}
-""" .format (os.path.basename(args[0])),
-
-description=description,
-
-formatter_class=argparse.ArgumentDefaultsHelpFormatter
-)
+""".format(
+            os.path.basename(args[0])
+        ),
+        description=description,
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+    )

     CIME.utils.setup_standard_logging_options(parser)

-    parser.add_argument("buildroot",
-                        help="build path root")
+    parser.add_argument("buildroot", help="build path root")

-    parser.add_argument("installpath",
-                        help="install path ")
+    parser.add_argument("installpath", help="install path ")

-    parser.add_argument("caseroot", nargs="?", default=os.getcwd(),
-                        help="Case directory to build")
+    parser.add_argument(
+        "caseroot", nargs="?", default=os.getcwd(), help="Case directory to build"
+    )

     args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser)

     return args.buildroot, args.installpath, args.caseroot

+
 def buildlib(bldroot, installpath, case):
-###############################################################################
+    ###############################################################################
     caseroot = case.get_value("CASEROOT")
     srcroot = case.get_value("SRCROOT")
     if get_model() == "cesm":
-        mct_dir = os.path.join(srcroot,"libraries","mct")
+        mct_dir = os.path.join(srcroot, "libraries", "mct")
     else:
-        mct_dir = os.path.join(srcroot,"externals","mct")
+        mct_dir = os.path.join(srcroot, "externals", "mct")

-    for _file in glob.iglob(os.path.join(mct_dir, "mpi-serial","*.h")):
-        copyifnewer(_file, os.path.join(bldroot,os.path.basename(_file)))
+    for _file in glob.iglob(os.path.join(mct_dir, "mpi-serial", "*.h")):
+        copyifnewer(_file, os.path.join(bldroot, os.path.basename(_file)))

-    gmake_opts = "-f {} ".format(os.path.join(caseroot,"Tools","Makefile"))
+    gmake_opts = "-f {} ".format(os.path.join(caseroot, "Tools", "Makefile"))
     gmake_opts += " -C {} ".format(bldroot)
     gmake_opts += " {} ".format(get_standard_makefile_args(case, shared_lib=True))
-    gmake_opts += "COMP_NAME=mpi-serial {}".format(os.path.join(bldroot,"Makefile.conf"))
+    gmake_opts += "COMP_NAME=mpi-serial {}".format(
+        os.path.join(bldroot, "Makefile.conf")
+    )

     gmake_cmd = case.get_value("GMAKE")
@@ -66,7 +69,7 @@ def buildlib(bldroot, installpath, case):
     run_bld_cmd_ensure_logging(cmd, logger)

     # Now we run the mpi-serial make command
-    gmake_opts = "-f {} ".format(os.path.join(mct_dir,"mpi-serial","Makefile"))
+    gmake_opts = "-f {} ".format(os.path.join(mct_dir, "mpi-serial", "Makefile"))
     gmake_opts += " -C {} ".format(bldroot)
     gmake_opts += " -j {} ".format(case.get_value("GMAKE_J"))
     gmake_opts += " SRCDIR={} ".format(os.path.join(mct_dir))
@@ -74,15 +77,23 @@ def buildlib(bldroot, installpath, case):
     cmd = "{} {}".format(gmake_cmd, gmake_opts)
     run_bld_cmd_ensure_logging(cmd, logger)

-    copyifnewer(os.path.join(bldroot, "libmpi-serial.a"), os.path.join(installpath, "lib", "libmpi-serial.a"))
+    copyifnewer(
+        os.path.join(bldroot, "libmpi-serial.a"),
+        os.path.join(installpath, "lib", "libmpi-serial.a"),
+    )
     for _file in ("mpi.h", "mpif.h", "mpi.mod", "MPI.mod"):
-        if os.path.isfile(os.path.join(bldroot,_file)):
-            copyifnewer(os.path.join(bldroot, _file), os.path.join(installpath, "include", _file))
+        if os.path.isfile(os.path.join(bldroot, _file)):
+            copyifnewer(
+                os.path.join(bldroot, _file),
+                os.path.join(installpath, "include", _file),
+            )
+

 def _main(argv, documentation):
     bldroot, installpath, caseroot = parse_command_line(argv, documentation)
     with Case(caseroot) as case:
         buildlib(bldroot, installpath, case)

-if (__name__ == "__main__"):
+
+if __name__ == "__main__":
     _main(sys.argv, __doc__)
diff --git a/CIME/build_scripts/buildlib.pio b/CIME/build_scripts/buildlib.pio
index f55c3777565..d02af9484f2 100755
--- a/CIME/build_scripts/buildlib.pio
+++ b/CIME/build_scripts/buildlib.pio
@@ -1,7 +1,8 @@
 #!/usr/bin/env python3
-import sys,os
+import sys, os
+
 cimeroot = os.getenv("CIMEROOT")
-sys.path.append(os.path.join(cimeroot,"scripts","Tools"))
+sys.path.append(os.path.join(cimeroot, "scripts", "Tools"))
 import glob, re
 from standard_script_setup import *
@@ -11,8 +12,9 @@ from CIME.case import Case

 logger = logging.getLogger(__name__)

+
 def parse_command_line(args, description):
-###############################################################################
+    ###############################################################################
     parser = argparse.ArgumentParser(
         usage="""\n{0} [--debug]
 OR
@@ -23,31 +25,31 @@ OR
 \033[1mEXAMPLES:\033[0m
     \033[1;32m# Run \033[0m
     > {0}
-""" .format (os.path.basename(args[0])),
-
-description=description,
-
-formatter_class=argparse.ArgumentDefaultsHelpFormatter
-)
+""".format(
+            os.path.basename(args[0])
+        ),
+        description=description,
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+    )

     CIME.utils.setup_standard_logging_options(parser)

-    parser.add_argument("buildroot",
-                        help="build path root")
+    parser.add_argument("buildroot", help="build path root")

-    parser.add_argument("installpath",
-                        help="install path ")
+    parser.add_argument("installpath", help="install path ")

-    parser.add_argument("caseroot", nargs="?", default=os.getcwd(),
-                        help="Case directory to build")
+    parser.add_argument(
+        "caseroot", nargs="?", default=os.getcwd(), help="Case directory to build"
+    )

     args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser)

     return args.buildroot, args.installpath, args.caseroot

+
 ###############################################################################
 def buildlib(bldroot, installpath, case):
-###############################################################################
+    ###############################################################################
     cime_model = case.get_value("MODEL")
     caseroot = case.get_value("CASEROOT")
     pio_version = case.get_value("PIO_VERSION")
@@ -59,12 +61,15 @@ def buildlib(bldroot, installpath, case):
         scorpio_classic_dir = "scorpio_classic"
         # Scorpio is derived from PIO2
         scorpio_dir = "scorpio"
-        scorpio_classic_src_dir = os.path.join(scorpio_src_root_dir,
-                                               scorpio_classic_dir)
+        scorpio_classic_src_dir = os.path.join(
+            scorpio_src_root_dir, scorpio_classic_dir
+        )
         scorpio_src_dir = os.path.join(scorpio_src_root_dir, scorpio_dir)
-        if (not os.path.isdir(scorpio_src_root_dir) or
-            not os.path.isdir(scorpio_classic_src_dir) or
-            not os.path.isdir(scorpio_src_dir)):
+        if (
+            not os.path.isdir(scorpio_src_root_dir)
+            or not os.path.isdir(scorpio_classic_src_dir)
+            or not os.path.isdir(scorpio_src_dir)
+        ):
             scorpio_src_root_dir = None

     # If variable PIO_VERSION_MAJOR is defined in the environment then
@@ -74,7 +79,11 @@ def buildlib(bldroot, installpath, case):
     # also defined in the environment. In this case we
     # will use the installed pio and not build it here.
     installed_pio_version = os.environ.get("PIO_VERSION_MAJOR")
-    logger.info("pio_version_major = {} pio_version = {}".format(installed_pio_version, pio_version))
+    logger.info(
+        "pio_version_major = {} pio_version = {}".format(
+            installed_pio_version, pio_version
+        )
+    )
     if installed_pio_version is not None and int(installed_pio_version) == pio_version:
         logger.info("Using installed PIO library")
         _set_pio_valid_values(case, os.environ.get("PIO_TYPENAME_VALID_VALUES"))
@@ -86,33 +95,42 @@ def buildlib(bldroot, installpath, case):
         os.makedirs(pio_dir)
     casetools = case.get_value("CASETOOLS")
     if pio_version == 1 or scorpio_src_root_dir:
-        cmake_opts = "\"-D GENF90_PATH=$CIMEROOT/CIME/data/genf90 \""
+        cmake_opts = '"-D GENF90_PATH=$CIMEROOT/CIME/data/genf90 "'
     else:
-        cmake_opts = "\"-D GENF90_PATH="+srcroot+"/libraries/parallelio/scripts/ \""
-
+        cmake_opts = '"-D GENF90_PATH=' + srcroot + '/libraries/parallelio/scripts/ "'
     stdargs = get_standard_makefile_args(case, shared_lib=True)

-    gmake_vars = "CASEROOT={caseroot} COMP_NAME={pio_model} "\
-                 "USER_CMAKE_OPTS={cmake_opts} "\
-                 "PIO_LIBDIR={pio_dir} CASETOOLS={casetools} "\
-                 "USER_CPPDEFS=-DTIMING"\
-                 .format(caseroot=caseroot, pio_model=pio_model,
-                         cmake_opts=cmake_opts, pio_dir=pio_dir,
-                         casetools=casetools)
+    gmake_vars = (
+        "CASEROOT={caseroot} COMP_NAME={pio_model} "
+        "USER_CMAKE_OPTS={cmake_opts} "
+        "PIO_LIBDIR={pio_dir} CASETOOLS={casetools} "
+        "USER_CPPDEFS=-DTIMING".format(
+            caseroot=caseroot,
+            pio_model=pio_model,
+            cmake_opts=cmake_opts,
+            pio_dir=pio_dir,
+            casetools=casetools,
+        )
+    )

     if scorpio_src_root_dir is not None:
-        gmake_vars += " IO_LIB_SRCROOT={scorpio_src_root_dir} "\
-                      " IO_LIB_v1_SRCDIR={scorpio_classic_dir} "\
-                      " IO_LIB_v2_SRCDIR={scorpio_dir} "\
-                      .format(scorpio_src_root_dir=scorpio_src_root_dir,
-                              scorpio_classic_dir=scorpio_classic_dir,
-                              scorpio_dir=scorpio_dir)
-
-    gmake_opts = "{pio_dir}/Makefile -C {pio_dir} "\
-                 " {gmake_vars} {stdargs} -f {casetools}/Makefile"\
-                 .format(pio_dir=pio_dir, gmake_vars=gmake_vars,
-                         casetools=casetools, stdargs=stdargs)
+        gmake_vars += (
+            " IO_LIB_SRCROOT={scorpio_src_root_dir} "
+            " IO_LIB_v1_SRCDIR={scorpio_classic_dir} "
+            " IO_LIB_v2_SRCDIR={scorpio_dir} ".format(
+                scorpio_src_root_dir=scorpio_src_root_dir,
+                scorpio_classic_dir=scorpio_classic_dir,
+                scorpio_dir=scorpio_dir,
+            )
+        )
+
+    gmake_opts = (
+        "{pio_dir}/Makefile -C {pio_dir} "
+        " {gmake_vars} {stdargs} -f {casetools}/Makefile".format(
+            pio_dir=pio_dir, gmake_vars=gmake_vars, casetools=casetools, stdargs=stdargs
+        )
+    )

     gmake_cmd = case.get_value("GMAKE")
@@ -121,53 +139,60 @@ def buildlib(bldroot, installpath, case):
     run_bld_cmd_ensure_logging(cmd, logger, from_dir=pio_dir)

     # This runs the pio make command from the cmake generated Makefile
-    run_bld_cmd_ensure_logging("{} -j {}".format(gmake_cmd, case.get_value("GMAKE_J")), logger, from_dir=pio_dir)
+    run_bld_cmd_ensure_logging(
+        "{} -j {}".format(gmake_cmd, case.get_value("GMAKE_J")),
+        logger,
+        from_dir=pio_dir,
+    )

     if pio_version == 1:
-        installed_lib = os.path.join(installpath,"lib","libpio.a")
+        installed_lib = os.path.join(installpath, "lib", "libpio.a")
         installed_lib_time = 0
         if os.path.isfile(installed_lib):
             installed_lib_time = os.path.getmtime(installed_lib)
-        newlib = os.path.join(pio_dir,"pio","libpio.a")
+        newlib = os.path.join(pio_dir, "pio", "libpio.a")
         newlib_time = os.path.getmtime(newlib)
         if newlib_time > installed_lib_time:
             logger.info("Installing pio version 1")
             safe_copy(newlib, installed_lib)
             for glob_to_copy in ("*.h", "*.mod"):
-                for item in glob.glob(os.path.join(pio_dir,"pio",glob_to_copy)):
+                for item in glob.glob(os.path.join(pio_dir, "pio", glob_to_copy)):
                     safe_copy(item, "{}/include".format(installpath))
         expect_string = "D_NETCDF;"
         pnetcdf_string = "D_PNETCDF"
         netcdf4_string = "D_NETCDF4"
     else:
-        globs_to_copy = (os.path.join("src","clib","libpioc.*"),
-                         os.path.join("src","flib","libpiof.*"),
-                         os.path.join("src","clib","*.h"),
-                         os.path.join("src","flib","*.mod"))
+        globs_to_copy = (
+            os.path.join("src", "clib", "libpioc.*"),
+            os.path.join("src", "flib", "libpiof.*"),
+            os.path.join("src", "clib", "*.h"),
+            os.path.join("src", "flib", "*.mod"),
+        )
         for glob_to_copy in globs_to_copy:
             installed_file_time = 0
-            for item in glob.glob(os.path.join(pio_dir,glob_to_copy)):
+            for item in glob.glob(os.path.join(pio_dir, glob_to_copy)):
                 if item.endswith(".a") or item.endswith(".so"):
                     installdir = "lib"
                 else:
                     installdir = "include"
-                installed_file = os.path.join(installpath,installdir,os.path.basename(item))
+                installed_file = os.path.join(
+                    installpath, installdir, os.path.basename(item)
+                )
                 item_time = os.path.getmtime(item)
                 if os.path.isfile(installed_file):
                     installed_file_time = os.path.getmtime(installed_file)
-               if item_time > installed_file_time:
+                if item_time > installed_file_time:
                     safe_copy(item, installed_file)
         expect_string = "NetCDF_C_LIBRARY-ADVANCED"
         pnetcdf_string = "WITH_PNETCDF:BOOL=ON"
         netcdf4_string = "NetCDF_C_HAS_PARALLEL:BOOL=TRUE"
-
     # make sure case pio_typename valid_values is set correctly
     expect_string_found = False
     pnetcdf_found = False
     netcdf4_parallel_found = False

-    cache_file = open(os.path.join(pio_dir,"CMakeCache.txt"), "r")
+    cache_file = open(os.path.join(pio_dir, "CMakeCache.txt"), "r")
     for line in cache_file:
         if re.search(expect_string, line):
             expect_string_found = True
@@ -191,19 +216,23 @@ def _set_pio_valid_values(case, valid_values):
         valid_values += ",nothing"
     logger.warning("Updating valid_values for PIO_TYPENAME: {}".format(valid_values))
     env_run = case.get_env("run")
-    env_run.set_valid_values("PIO_TYPENAME",valid_values)
+    env_run.set_valid_values("PIO_TYPENAME", valid_values)
     for comp in case.get_values("COMP_CLASSES"):
         comp_pio_typename = "{}_PIO_TYPENAME".format(comp)
         current_value = case.get_value(comp_pio_typename)
         if current_value not in valid_values:
-            logger.warning("Resetting PIO_TYPENAME=netcdf for component {}".format(comp))
-            env_run.set_value(comp_pio_typename,"netcdf")
+            logger.warning(
+                "Resetting PIO_TYPENAME=netcdf for component {}".format(comp)
+            )
+            env_run.set_value(comp_pio_typename, "netcdf")
+

 def _main(argv, documentation):
     bldroot, installpath, caseroot = parse_command_line(argv, documentation)
     with Case(caseroot, read_only=False) as case:
         buildlib(bldroot, installpath, case)

-if (__name__ == "__main__"):
+
+if __name__ == "__main__":
     _main(sys.argv, __doc__)
diff --git a/CIME/build_scripts/buildlib_cmake.internal_components b/CIME/build_scripts/buildlib_cmake.internal_components
index f2f3a611221..3546c940e63 100755
--- a/CIME/build_scripts/buildlib_cmake.internal_components
+++ b/CIME/build_scripts/buildlib_cmake.internal_components
@@ -9,13 +9,16 @@ import sys, os

 _CIMEROOT = os.environ.get("CIMEROOT")
 if _CIMEROOT == None:
-    raise ValueError("ERROR: CIMEROOT not defined in buildlib_cmake.internal_components.")
+    raise ValueError(
+        "ERROR: CIMEROOT not defined in buildlib_cmake.internal_components."
+    )
 sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools"))

 from standard_script_setup import *
 from CIME.buildlib import build_cime_component_lib, parse_input
 from CIME.case import Case

+
 def buildlib(bldroot, libroot, case, compname=None):
     if compname is None:
         thisdir = os.path.dirname(os.path.abspath(__file__))
@@ -24,13 +27,15 @@ def buildlib(bldroot, libroot, case, compname=None):
         if dir1 == "cime_config":
             compname = dir2
         else:
-            compname = dir1.split('.')[1]
+            compname = dir1.split(".")[1]
     build_cime_component_lib(case, compname, libroot, bldroot)

+
 def _main_func(args):
     caseroot, libroot, bldroot = parse_input(args)
     with Case(caseroot) as case:
         buildlib(bldroot, libroot, case)

+
 if __name__ == "__main__":
     _main_func(sys.argv)
diff --git a/CIME/buildlib.py b/CIME/buildlib.py
index e7d192c4e7c..510ef301125 100644
--- a/CIME/buildlib.py
+++ b/CIME/buildlib.py
@@ -4,29 +4,32 @@

 from CIME.XML.standard_module_setup import *
 from CIME.case import Case
-from CIME.utils import parse_args_and_handle_standard_logging_options, setup_standard_logging_options, get_model, safe_copy
+from CIME.utils import (
+    parse_args_and_handle_standard_logging_options,
+    setup_standard_logging_options,
+    get_model,
+    safe_copy,
+)
 from CIME.build import get_standard_makefile_args
-from CIME.XML.files import Files 
+from CIME.XML.files import Files
 import sys, os, argparse

+
 logger = logging.getLogger(__name__)

 ###############################################################################
 def parse_input(argv):
-###############################################################################
+    ###############################################################################

     parser = argparse.ArgumentParser()

     setup_standard_logging_options(parser)

-    parser.add_argument("caseroot", default=os.getcwd(),
-                        help="Case directory")
+    parser.add_argument("caseroot", default=os.getcwd(), help="Case directory")

-    parser.add_argument("libroot",
-                        help="root for creating the library")
+    parser.add_argument("libroot", help="root for creating the library")

-    parser.add_argument("bldroot",
-                        help="root for building library")
+    parser.add_argument("bldroot", help="root for building library")

     args = parse_args_and_handle_standard_logging_options(argv, parser)

@@ -37,34 +40,40 @@ def parse_input(argv):
     with Case(args.caseroot) as case:
         os.environ["EXEROOT"] = os.path.relpath(case.get_value("EXEROOT"), args.bldroot)
-
     return args.caseroot, args.libroot, args.bldroot

+
 ###############################################################################
 def build_cime_component_lib(case, compname, libroot, bldroot):
-###############################################################################
+    ###############################################################################
     casebuild = case.get_value("CASEBUILD")
-    compclass = compname[1:] # This very hacky
     comp_interface = case.get_value("COMP_INTERFACE")
-    confdir   = os.path.join(casebuild, "{}conf".format(compname))
+    compclass = compname[1:]  # This is very hacky
+    confdir = os.path.join(casebuild, "{}conf".format(compname))

     if not os.path.exists(confdir):
         os.mkdir(confdir)

-    with open(os.path.join(confdir, 'Filepath'), 'w') as out:
-        out.write(os.path.join(case.get_value('CASEROOT'), "SourceMods",
-                               "src.{}\n".format(compname)) + "\n")
+    with open(os.path.join(confdir, "Filepath"), "w") as out:
+        out.write(
+            os.path.join(
+                case.get_value("CASEROOT"), "SourceMods", "src.{}\n".format(compname)
+            )
+            + "\n"
+        )
         files = Files(comp_interface=comp_interface)
-        compdir = files.get_value("COMP_ROOT_DIR_"+compclass.upper(),{"component":compname})
-        if compname.startswith('d'):
-            out.write(os.path.join(compdir,"src") + "\n")
+        compdir = files.get_value(
+            "COMP_ROOT_DIR_" + compclass.upper(), {"component": compname}
+        )
+        if compname.startswith("d"):
+            out.write(os.path.join(compdir, "src") + "\n")
             out.write(os.path.join(compdir) + "\n")
-        elif compname.startswith('x'):
-            out.write(os.path.join(compdir,"..","xshare") + "\n")
-            out.write(os.path.join(compdir,"src") + "\n")
-        elif compname.startswith('s'):
-            out.write(os.path.join(compdir,"src") + "\n")
+        elif compname.startswith("x"):
+            out.write(os.path.join(compdir, "..", "xshare") + "\n")
+            out.write(os.path.join(compdir, "src") + "\n")
+        elif compname.startswith("s"):
+            out.write(os.path.join(compdir, "src") + "\n")

     with open(os.path.join(confdir, "CIME_cppdefs"), "w") as out:
         out.write("")
@@ -78,30 +87,39 @@ def build_cime_component_lib(case, compname, libroot, bldroot):
         safe_copy(os.path.join(confdir, "CCSM_cppdefs"), bldroot)
     run_gmake(case, compclass, compname, libroot, bldroot)

+
 ###############################################################################
 def run_gmake(case, compclass, compname, libroot, bldroot, libname="", user_cppdefs=""):
-###############################################################################
+    ###############################################################################
     gmake_args = get_standard_makefile_args(case)
-    gmake_j    = case.get_value("GMAKE_J")
-    gmake      = case.get_value("GMAKE")
+    gmake_j = case.get_value("GMAKE_J")
+    gmake = case.get_value("GMAKE")

     complib = ""
     if libname:
-        complib  = os.path.join(libroot, "lib{}.a".format(libname))
+        complib = os.path.join(libroot, "lib{}.a".format(libname))
     else:
-        complib  = os.path.join(libroot, "lib{}.a".format(compclass))
+        complib = os.path.join(libroot, "lib{}.a".format(compclass))

     makefile = os.path.join(case.get_value("CASETOOLS"), "Makefile")

-    cmd = "{gmake} complib -j {gmake_j:d} COMP_CLASS={compclass} COMP_NAME={compname} COMPLIB={complib} {gmake_args} -f {makefile} -C {bldroot} " \
-        .format(gmake=gmake, gmake_j=gmake_j, compclass=compclass, compname=compname, complib=complib, gmake_args=gmake_args, makefile=makefile, bldroot=bldroot)
+    cmd = "{gmake} complib -j {gmake_j:d} COMP_CLASS={compclass} COMP_NAME={compname} COMPLIB={complib} {gmake_args} -f {makefile} -C {bldroot} ".format(
+        gmake=gmake,
+        gmake_j=gmake_j,
+        compclass=compclass,
+        compname=compname,
+        complib=complib,
+        gmake_args=gmake_args,
+        makefile=makefile,
+        bldroot=bldroot,
+    )
     if user_cppdefs:
-        cmd = cmd + "USER_CPPDEFS='{}'".format(user_cppdefs )
+        cmd = cmd + "USER_CPPDEFS='{}'".format(user_cppdefs)

     stat, out, err = run_cmd(cmd, combine_output=True)
     print(out)
     if stat:
-        logger.info("buildlib stat={} err={}".format(stat,err))
+        logger.info("buildlib stat={} err={}".format(stat, err))
         os.unlink(complib)
     return stat
diff --git a/CIME/buildnml.py b/CIME/buildnml.py
index 379321ff0d7..69cbc5f7dca 100644
--- a/CIME/buildnml.py
+++ b/CIME/buildnml.py
@@ -5,7 +5,11 @@
 """

 from CIME.XML.standard_module_setup import *
-from CIME.utils import expect, parse_args_and_handle_standard_logging_options, setup_standard_logging_options
+from CIME.utils import (
+    expect,
+    parse_args_and_handle_standard_logging_options,
+    setup_standard_logging_options,
+)
 from CIME.utils import safe_copy
 import sys, os, argparse, glob

@@ -13,35 +17,37 @@
 ###############################################################################
 def parse_input(argv):
-###############################################################################
+    ###############################################################################

     parser = argparse.ArgumentParser()

     setup_standard_logging_options(parser)

-    parser.add_argument("caseroot", default=os.getcwd(),
-                        help="Case directory")
+    parser.add_argument("caseroot", default=os.getcwd(), help="Case directory")

     args = parse_args_and_handle_standard_logging_options(argv, parser)

     return args.caseroot

+
 ###############################################################################
-#pylint: disable=unused-argument
+# pylint: disable=unused-argument
 def build_xcpl_nml(case, caseroot, compname):
-###############################################################################
+    ###############################################################################
     compclasses = case.get_values("COMP_CLASSES")
     compclass = None
     for compclass in compclasses:
         if case.get_value("COMP_{}".format(compclass)) == compname:
             break
-    expect(compclass is not None,
-           "Could not identify compclass for compname {}".format(compname))
+    expect(
+        compclass is not None,
+        "Could not identify compclass for compname {}".format(compname),
+    )
     rundir = case.get_value("RUNDIR")
     comp_interface = case.get_value("COMP_INTERFACE")
     if comp_interface != "nuopc":
-        ninst = case.get_value("NINST_{}".format(compclass.upper())) 
+        ninst = case.get_value("NINST_{}".format(compclass.upper()))
     else:
         ninst = case.get_value("NINST")
     if not ninst:
@@ -52,7 +58,7 @@ def build_xcpl_nml(case, caseroot, compname):

     if comp_interface != "nuopc":
         if compname == "xrof":
-            flood_mode = case.get_value('XROF_FLOOD_MODE')
+            flood_mode = case.get_value("XROF_FLOOD_MODE")

     extras = []
     dtype = 1
     npes = 0
@@ -60,10 +66,10 @@ def build_xcpl_nml(case, caseroot, compname):
     if compname == "xatm":
         if ny == 1:
             dtype = 2
-        extras = [["24",
-                   "ncpl  number of communications w/coupler per dat"],
-                  ["0.0",
-                   "simul time proxy (secs): time between cpl comms"]]
+        extras = [
+            ["24", "ncpl  number of communications w/coupler per dat"],
+            ["0.0", "simul time proxy (secs): time between cpl comms"],
+        ]
     elif compname == "xglc" or compname == "xice":
         dtype = 2
     elif compname == "xlnd":
@@ -85,25 +91,34 @@ def build_xcpl_nml(case, caseroot, compname):
         else:
             filename = os.path.join(rundir, "{}_in_{:04d}".format(compname, i))

-        with open(filename, 'w') as infile:
+        with open(filename, "w") as infile:
             infile.write("{:<20d} ! i-direction global dimension\n".format(nx))
             infile.write("{:<20d} ! j-direction global dimension\n".format(ny))
             if comp_interface != "nuopc":
-                infile.write("{:<20d} ! decomp_type  1=1d-by-lat, 2=1d-by-lon, 3=2d, 4=2d evensquare, 11=segmented\n".format(dtype))
+                infile.write(
+                    "{:<20d} ! decomp_type  1=1d-by-lat, 2=1d-by-lon, 3=2d, 4=2d evensquare, 11=segmented\n".format(
+                        dtype
+                    )
+                )
                 infile.write("{:<20d} ! num of pes for i (type 3 only)\n".format(npes))
-                infile.write("{:<20d} ! length of segments (type 4 only)\n".format(length))
+                infile.write(
+                    "{:<20d} ! length of segments (type 4 only)\n".format(length)
+                )
                 for extra in extras:
                     infile.write("{:<20s} ! {}\n".format(extra[0], extra[1]))

+
 ###############################################################################
 def create_namelist_infile(case, user_nl_file, namelist_infile, infile_text=""):
-###############################################################################
+    ###############################################################################
     lines_input = []
     if os.path.isfile(user_nl_file):
         with open(user_nl_file, "r") as file_usernl:
             lines_input = file_usernl.readlines()
     else:
-        logger.warning("WARNING: No file {} found in case directory".format(user_nl_file))
+        logger.warning(
+            "WARNING: No file {} found in case directory".format(user_nl_file)
+        )

     lines_output = []
     lines_output.append("&comp_inparm \n")
@@ -123,18 +138,19 @@ def create_namelist_infile(case, user_nl_file, namelist_infile, infile_text=""):
     with open(namelist_infile, "w") as file_infile:
         file_infile.write("\n".join(lines_output))

+
 def copy_inputs_to_rundir(caseroot, compname, confdir, rundir, inst_string):
     if os.path.isdir(rundir):
         filename = compname + "_in"
-        file_src  = os.path.join(confdir, filename)
+        file_src = os.path.join(confdir, filename)
         file_dest = os.path.join(rundir, filename)
         if inst_string:
             file_dest += inst_string
-        safe_copy(file_src,file_dest)
+        safe_copy(file_src, file_dest)

         for xmlfile in glob.glob(os.path.join(confdir, "*streams*.xml")):
-            casexml = os.path.join(caseroot,os.path.basename(xmlfile))
+            casexml = os.path.join(caseroot, os.path.basename(xmlfile))
             if os.path.exists(casexml):
                 logger.info("Using {} for {} streams".format(casexml, compname))
                 safe_copy(casexml, rundir)
diff --git a/CIME/case/README b/CIME/case/README
index de33f9e01b9..529c7f8cdd5 100644
--- a/CIME/case/README
+++ b/CIME/case/README
@@ -1 +1 @@
-Files in this directory are members of the class Case defined in file case.py and should not be directly imported.
\ No newline at end of file
+Files in this directory are members of the class Case defined in file case.py and should not be directly imported.
diff --git a/CIME/case/case.py b/CIME/case/case.py
index 9cbf4ee7844..3295e0e2185 100644
--- a/CIME/case/case.py
+++ b/CIME/case/case.py
@@ -9,40 +9,41 @@ import sys
 import glob, os, shutil, math, CIME.six, time, hashlib, socket, getpass

 from CIME.XML.standard_module_setup import *
-#pylint: disable=import-error,redefined-builtin
-from CIME.six.moves import input
-from CIME import utils
-from CIME.utils import expect, get_cime_root, append_status
-from CIME.utils import convert_to_type, get_model, set_model
-from CIME.utils import get_project, get_charge_account, check_name
-from CIME.utils import get_current_commit, safe_copy, get_cime_default_driver
-from CIME.locked_files import LOCKED_DIR, lock_file
-from CIME.XML.machines import Machines
-from CIME.XML.pes import Pes
-from CIME.XML.files import Files
-from CIME.XML.testlist import Testlist
-from CIME.XML.component import Component
-from CIME.XML.compsets import Compsets
-from CIME.XML.grids import Grids
-from CIME.XML.batch import Batch
-from CIME.XML.workflow import Workflow
-from CIME.XML.pio import PIO
-from CIME.XML.archive import Archive
-from CIME.XML.env_test import EnvTest
-from CIME.XML.env_mach_specific import EnvMachSpecific
-from CIME.XML.env_case import EnvCase
-from CIME.XML.env_mach_pes import EnvMachPes
-from CIME.XML.env_build import EnvBuild
-from CIME.XML.env_run import EnvRun
-from CIME.XML.env_archive import EnvArchive
-from CIME.XML.env_batch import EnvBatch
-from CIME.XML.env_workflow import EnvWorkflow
-from CIME.XML.generic_xml import GenericXML
-from CIME.user_mod_support import apply_user_mods
+
+# pylint: disable=import-error,redefined-builtin
+from six.moves import input
+from CIME.utils import expect, get_cime_root, append_status
+from CIME.utils import convert_to_type, get_model, set_model
+from CIME.utils import get_project, get_charge_account, check_name
+from CIME.utils import get_current_commit, safe_copy, get_cime_default_driver
+from CIME.locked_files import LOCKED_DIR, lock_file
+from CIME.XML.machines import Machines
+from CIME.XML.pes import Pes
+from CIME.XML.files import Files
+from CIME.XML.testlist import Testlist
+from CIME.XML.component import Component
+from CIME.XML.compsets import Compsets
+from CIME.XML.grids import Grids
+from CIME.XML.batch import Batch
+from CIME.XML.workflow import Workflow
+from CIME.XML.pio import PIO
+from CIME.XML.archive import Archive
+from CIME.XML.env_test import EnvTest
+from CIME.XML.env_mach_specific import EnvMachSpecific
+from CIME.XML.env_case import EnvCase
+from CIME.XML.env_mach_pes import EnvMachPes
+from CIME.XML.env_build import EnvBuild
+from CIME.XML.env_run import EnvRun
+from CIME.XML.env_archive import EnvArchive
+from CIME.XML.env_batch import EnvBatch
+from CIME.XML.env_workflow import EnvWorkflow
+from CIME.XML.generic_xml import GenericXML
+from CIME.user_mod_support import apply_user_mods
 from CIME.aprun import get_aprun_cmd_for_case

 logger = logging.getLogger(__name__)

+
 class Case(object):
     """
     https://github.com/ESMCI/cime/wiki/Developers-Introduction
@@ -71,23 +72,43 @@ class Case(object):
     This class extends across multiple files, class members external to this
     file are listed in the following imports
     """
+
     from CIME.case.case_setup import case_setup
     from CIME.case.case_clone import create_clone, _copy_user_modified_to_clone
-    from CIME.case.case_test import case_test 
+    from CIME.case.case_test import case_test
     from CIME.case.case_submit import check_DA_settings, check_case, submit
-    from CIME.case.case_st_archive import case_st_archive, restore_from_archive, \
-        archive_last_restarts, test_st_archive, test_env_archive
+    from CIME.case.case_st_archive import (
+        case_st_archive,
+        restore_from_archive,
+        archive_last_restarts,
+        test_st_archive,
+        test_env_archive,
+    )
     from CIME.case.case_run import case_run
     from CIME.case.case_cmpgen_namelists import case_cmpgen_namelists
-    from CIME.case.check_lockedfiles import check_lockedfile, check_lockedfiles, check_pelayouts_require_rebuild
+    from CIME.case.check_lockedfiles import (
+        check_lockedfile,
+        check_lockedfiles,
+        check_pelayouts_require_rebuild,
+    )
     from CIME.case.preview_namelists import create_dirs, create_namelists
-    from CIME.case.check_input_data import check_all_input_data, stage_refcase, check_input_data
+    from CIME.case.check_input_data import (
+        check_all_input_data,
+        stage_refcase,
+        check_input_data,
+    )

     def __init__(self, case_root=None, read_only=True, record=False):
         if case_root is None:
             case_root = os.getcwd()
-        expect(not os.path.isdir(case_root) or os.path.isfile(os.path.join(case_root,"env_case.xml")), "Directory {} does not appear to be a valid case directory".format(case_root))
+        expect(
+            not os.path.isdir(case_root)
+            or os.path.isfile(os.path.join(case_root, "env_case.xml")),
+            "Directory {} does not appear to be a valid case directory".format(
+                case_root
+            ),
+        )
         self._caseroot = case_root
         logger.debug("Initializing Case.")
@@ -127,7 +148,7 @@ def __init__(self, case_root=None, read_only=True, record=False):
         self.lookups = {}
         self.set_lookup_value('CIMEROOT', cimeroot)
         self._cime_model = get_model()
-        self.set_lookup_value('MODEL', self._cime_model)
+        self.set_lookup_value("MODEL", self._cime_model)
         self._compsetname = None
         self._gridname = None
         self._pesfile = None
@@ -172,36 +193,48 @@ def initialize_derived_attributes(self):
         for variable substitution using the {{ var }} syntax
         """
         set_model(self.get_value("MODEL"))
-        env_mach_pes  = self.get_env("mach_pes")
-        env_mach_spec = self.get_env('mach_specific')
-        comp_classes  = self.get_values("COMP_CLASSES")
-        max_mpitasks_per_node  = self.get_value("MAX_MPITASKS_PER_NODE")
+        env_mach_pes = self.get_env("mach_pes")
+        env_mach_spec = self.get_env("mach_specific")
+        comp_classes = self.get_values("COMP_CLASSES")
+        max_mpitasks_per_node = self.get_value("MAX_MPITASKS_PER_NODE")
         self.async_io = self.get_value("PIO_ASYNC_INTERFACE")
         if self.async_io:
-            self.iotasks = max(1,self.get_value("PIO_NUMTASKS_CPL"))
+            self.iotasks = max(1, self.get_value("PIO_NUMTASKS_CPL"))

         self.thread_count = env_mach_pes.get_max_thread_count(comp_classes)

         mpi_attribs = {
-            "compiler" : self.get_value("COMPILER"),
-            "mpilib"   : self.get_value("MPILIB"),
-            "threaded" : self.get_build_threaded(),
-            }
+            "compiler": self.get_value("COMPILER"),
+            "mpilib": self.get_value("MPILIB"),
+            "threaded": self.get_build_threaded(),
+        }

         job = self.get_primary_job()
         executable = env_mach_spec.get_mpirun(self, mpi_attribs, job, exe_only=True)[0]
         if executable is not None and "aprun" in executable:
-            _, self.num_nodes, self.total_tasks, self.tasks_per_node, self.thread_count = get_aprun_cmd_for_case(self, "e3sm.exe")
+            (
+                _,
+                self.num_nodes,
+                self.total_tasks,
+                self.tasks_per_node,
+                self.thread_count,
+            ) = get_aprun_cmd_for_case(self, "e3sm.exe")
             self.spare_nodes = env_mach_pes.get_spare_nodes(self.num_nodes)
             self.num_nodes += self.spare_nodes
         else:
             self.total_tasks = env_mach_pes.get_total_tasks(comp_classes) + self.iotasks
-            self.tasks_per_node = env_mach_pes.get_tasks_per_node(self.total_tasks, self.thread_count)
+            self.tasks_per_node = env_mach_pes.get_tasks_per_node(
+                self.total_tasks, self.thread_count
+            )

-            self.num_nodes, self.spare_nodes = env_mach_pes.get_total_nodes(self.total_tasks, self.thread_count)
+            self.num_nodes, self.spare_nodes = env_mach_pes.get_total_nodes(
+                self.total_tasks, self.thread_count
+            )
             self.num_nodes += self.spare_nodes

-        logger.debug("total_tasks {} thread_count {}".format(self.total_tasks, self.thread_count))
+        logger.debug(
+            "total_tasks {} thread_count {}".format(self.total_tasks, self.thread_count)
+        )

         max_gpus_per_node = self.get_value("MAX_GPUS_PER_NODE")
@@ -209,16 +242,22 @@ def initialize_derived_attributes(self):
             self.ngpus_per_node = self.get_value("NGPUS_PER_NODE")

         self.tasks_per_numa = int(math.ceil(self.tasks_per_node / 2.0))
-        smt_factor = max(1,int(self.get_value("MAX_TASKS_PER_NODE") / max_mpitasks_per_node))
+        smt_factor = max(
+            1, int(self.get_value("MAX_TASKS_PER_NODE") / max_mpitasks_per_node)
+        )

         threads_per_node = self.tasks_per_node * self.thread_count
-        threads_per_core = 1 if (threads_per_node <= max_mpitasks_per_node) else smt_factor
+        threads_per_core = (
+            1 if (threads_per_node <= max_mpitasks_per_node) else smt_factor
+        )
         self.cores_per_task = self.thread_count / threads_per_core

         os.environ["OMP_NUM_THREADS"] = str(self.thread_count)

-        self.srun_binding = math.floor(smt_factor*max_mpitasks_per_node / self.tasks_per_node)
-        self.srun_binding = max(1,int(self.srun_binding))
+        self.srun_binding = math.floor(
+            smt_factor * max_mpitasks_per_node / self.tasks_per_node
+        )
+        self.srun_binding = max(1, int(self.srun_binding))

     # Define __enter__ and __exit__ so that we can use this as a context manager
     # and force a flush on exit.
@@ -234,26 +273,62 @@ def __exit__(self, *_):

     def read_xml(self):
         for env_file in self._files:
-            expect(not env_file.needsrewrite, "Potential loss of unflushed changes in {}".format(env_file.filename))
+            expect(
+                not env_file.needsrewrite,
+                "Potential loss of unflushed changes in {}".format(env_file.filename),
+            )

         self._env_entryid_files = []
-        self._env_entryid_files.append(EnvCase(self._caseroot, components=None, read_only=self._force_read_only))
+        self._env_entryid_files.append(
+            EnvCase(self._caseroot, components=None, read_only=self._force_read_only)
+        )
         components = self._env_entryid_files[0].get_values("COMP_CLASSES")
-        self._env_entryid_files.append(EnvRun(self._caseroot, components=components, read_only=self._force_read_only))
-        self._env_entryid_files.append(EnvBuild(self._caseroot, components=components, read_only=self._force_read_only))
+        self._env_entryid_files.append(
+            EnvRun(
+                self._caseroot, components=components, read_only=self._force_read_only
+            )
+        )
+        self._env_entryid_files.append(
+            EnvBuild(
+                self._caseroot, components=components, read_only=self._force_read_only
+            )
+        )
         self._comp_interface = self._env_entryid_files[-1].get_value("COMP_INTERFACE")
-        self._env_entryid_files.append(EnvMachPes(self._caseroot, components=components, read_only=self._force_read_only,
-                                                  comp_interface=self._comp_interface))
-        self._env_entryid_files.append(EnvBatch(self._caseroot, read_only=self._force_read_only))
-        self._env_entryid_files.append(EnvWorkflow(self._caseroot, read_only=self._force_read_only))
-
-        if os.path.isfile(os.path.join(self._caseroot,"env_test.xml")):
-            self._env_entryid_files.append(EnvTest(self._caseroot, components=components, read_only=self._force_read_only))
+        self._env_entryid_files.append(
+            EnvMachPes(
+                self._caseroot,
+                components=components,
+                read_only=self._force_read_only,
+                comp_interface=self._comp_interface,
+            )
+        )
+        self._env_entryid_files.append(
+            EnvBatch(self._caseroot, read_only=self._force_read_only)
+        )
+        self._env_entryid_files.append(
+            EnvWorkflow(self._caseroot, read_only=self._force_read_only)
+        )
+
+        if os.path.isfile(os.path.join(self._caseroot, "env_test.xml")):
+            self._env_entryid_files.append(
+                EnvTest(
+                    self._caseroot,
+                    components=components,
+                    read_only=self._force_read_only,
+                )
+            )
         self._env_generic_files = []
-        self._env_generic_files.append(EnvMachSpecific(self._caseroot, read_only=self._force_read_only,
-                                                       comp_interface=self._comp_interface))
-        self._env_generic_files.append(EnvArchive(self._caseroot, read_only=self._force_read_only))
+        self._env_generic_files.append(
+            EnvMachSpecific(
+                self._caseroot,
+                read_only=self._force_read_only,
+                comp_interface=self._comp_interface,
+            )
+        )
+        self._env_generic_files.append(
+            EnvArchive(self._caseroot, read_only=self._force_read_only)
+        )
         self._files = self._env_entryid_files + self._env_generic_files

     def get_case_root(self):
@@ -267,7 +342,7 @@ def get_env(self, short_name, allow_missing=False):
                 return env_file
         if allow_missing:
             return None
-        expect(False,"Could not find object for {} in case".format(full_name))
+        expect(False, "Could not find object for {} in case".format(full_name))

     def check_timestamps(self, short_name=None):
         if short_name is not None:
@@ -279,7 +354,7 @@ def check_timestamps(self, short_name=None):

     def copy(self, newcasename, newcaseroot, newcimeroot=None, newsrcroot=None):
         newcase = deepcopy(self)
-        for env_file in newcase._files: # pylint: disable=protected-access
+        for env_file in newcase._files:  # pylint: disable=protected-access
             basename = os.path.basename(env_file.filename)
             newfile = os.path.join(newcaseroot, basename)
             env_file.change_file(newfile, copy=True)
@@ -290,16 +365,16 @@ def copy(self, newcasename, newcaseroot, newcimeroot=None, newsrcroot=None):
         if newsrcroot is not None:
             newcase.set_value("SRCROOT", newsrcroot)

-        newcase.set_value("CASE",newcasename)
-        newcase.set_value("CASEROOT",newcaseroot)
-        newcase.set_value("CONTINUE_RUN","FALSE")
-        newcase.set_value("RESUBMIT",0)
+        newcase.set_value("CASE", newcasename)
+        newcase.set_value("CASEROOT", newcaseroot)
+        newcase.set_value("CONTINUE_RUN", "FALSE")
+        newcase.set_value("RESUBMIT", 0)
         newcase.set_value("CASE_HASH", newcase.new_hash())

         # Important, and subtle: Writability should NOT be copied because
         # this allows the copy to be modified without needing a "with" statement
         # which opens the door to tricky errors such as unflushed writes.
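        # A short usage sketch (hypothetical values) of the pattern the comment
        # above protects: writable Case objects are meant to live only inside a
        # "with" block, so __exit__ can flush pending XML changes:
        #
        #     with Case("/path/to/case", read_only=False) as case:
        #         case.set_value("RESUBMIT", 0)  # flushed when the block exits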
-        newcase._read_only_mode = True # pylint: disable=protected-access
+        newcase._read_only_mode = True  # pylint: disable=protected-access

         return newcase
@@ -314,7 +389,9 @@ def flush(self, flushall=False):
     def get_values(self, item, attribute=None, resolved=True, subgroup=None):
         for env_file in self._files:
             # Wait and resolve in self rather than in env_file
-            results = env_file.get_values(item, attribute, resolved=False, subgroup=subgroup)
+            results = env_file.get_values(
+                item, attribute, resolved=False, subgroup=subgroup
+            )
             if len(results) > 0:
                 new_results = []
                 if resolved:
@@ -342,7 +419,9 @@ def get_value(self, item, attribute=None, resolved=True, subgroup=None):
         result = None
         for env_file in self._files:
             # Wait and resolve in self rather than in env_file
-            result = env_file.get_value(item, attribute, resolved=False, subgroup=subgroup)
+            result = env_file.get_value(
+                item, attribute, resolved=False, subgroup=subgroup
+            )

             if result is not None:
                 if resolved and isinstance(result, CIME.six.string_types):
@@ -357,14 +436,16 @@ def get_value(self, item, attribute=None, resolved=True, subgroup=None):
         return result

     def get_record_fields(self, variable, field):
-        """ get_record_fields gets individual requested field from an entry_id file
-        this routine is used only by xmlquery """
+        """get_record_fields gets individual requested field from an entry_id file
+        this routine is used only by xmlquery"""
         # Empty result
         result = []

         for env_file in self._env_entryid_files:
             # Wait and resolve in self rather than in env_file
-            logger.debug("(get_record_field) Searching in {}".format(env_file.__class__.__name__))
+            logger.debug(
+                "(get_record_field) Searching in {}".format(env_file.__class__.__name__)
+            )
             if field == "varid":
                 roots = env_file.scan_children("entry")
             else:
@@ -414,27 +495,42 @@ def get_type_info(self, item):
     def get_resolved_value(self, item, recurse=0, allow_unresolved_envvars=False):
         num_unresolved = item.count("$") if item else 0
         recurse_limit = 10
-        if (num_unresolved > 0 and recurse < recurse_limit ):
+        if num_unresolved > 0 and recurse < recurse_limit:
             for env_file in self._env_entryid_files:
-                item = env_file.get_resolved_value(item,
-                                                   allow_unresolved_envvars=allow_unresolved_envvars)
-            if ("$" not in item):
+                item = env_file.get_resolved_value(
+                    item, allow_unresolved_envvars=allow_unresolved_envvars
+                )
+            if "$" not in item:
                 return item
             else:
-                item = self.get_resolved_value(item, recurse=recurse+1,
-                                               allow_unresolved_envvars=allow_unresolved_envvars)
+                item = self.get_resolved_value(
+                    item,
+                    recurse=recurse + 1,
+                    allow_unresolved_envvars=allow_unresolved_envvars,
+                )

         return item

-    def set_value(self, item, value, subgroup=None, ignore_type=False, allow_undefined=False, return_file=False):
+    def set_value(
+        self,
+        item,
+        value,
+        subgroup=None,
+        ignore_type=False,
+        allow_undefined=False,
+        return_file=False,
+    ):
         """
         If a file has been defined, and the variable is in the file,
         then that value will be set in the file object and the resolved value
         is returned unless return_file is True, in which case (resolved_value, filename)
         is returned where filename is the name of the modified file.
         """
-        expect(not self._read_only_mode, "Cannot modify case, read_only. "
-               "Case must be opened with read_only=False and can only be modified within a context manager")
+        expect(
+            not self._read_only_mode,
+            "Cannot modify case, read_only. "
+            "Case must be opened with read_only=False and can only be modified within a context manager",
+        )

         if item == "CASEROOT":
             self._caseroot = value
@@ -442,43 +538,54 @@ def set_value(self, item, value, subgroup=None, ignore_type=False, allow_undefin
         for env_file in self._files:
             result = env_file.set_value(item, value, subgroup, ignore_type)
-            if (result is not None):
+            if result is not None:
                 logger.debug("Will rewrite file {} {}".format(env_file.filename, item))
                 return (result, env_file.filename) if return_file else result

         if len(self._files) == 1:
-            expect(allow_undefined or result is not None,
-                   "No variable {} found in file {}".format(item, self._files[0].filename))
+            expect(
+                allow_undefined or result is not None,
+                "No variable {} found in file {}".format(item, self._files[0].filename),
+            )
         else:
-            expect(allow_undefined or result is not None,
-                   "No variable {} found in case".format(item))
+            expect(
+                allow_undefined or result is not None,
+                "No variable {} found in case".format(item),
+            )

     def set_valid_values(self, item, valid_values):
         """
         Update or create a valid_values entry for item and populate it
         """
-        expect(not self._read_only_mode, "Cannot modify case, read_only. "
-               "Case must be opened with read_only=False and can only be modified within a context manager")
+        expect(
+            not self._read_only_mode,
+            "Cannot modify case, read_only. "
+            "Case must be opened with read_only=False and can only be modified within a context manager",
+        )

         result = None
         for env_file in self._env_entryid_files:
             result = env_file.set_valid_values(item, valid_values)
-            if (result is not None):
+            if result is not None:
                 logger.debug("Will rewrite file {} {}".format(env_file.filename, item))
                 return result

     def set_lookup_value(self, item, value):
         if item in self.lookups and self.lookups[item] is not None:
-            logger.warning("Item {} already in lookups with value {}".format(item,self.lookups[item]))
+            logger.warning(
+                "Item {} already in lookups with value {}".format(
+                    item, self.lookups[item]
+                )
+            )
         else:
-            logger.debug("Setting in lookups: item {}, value {}".format(item,value))
+            logger.debug("Setting in lookups: item {}, value {}".format(item, value))
             self.lookups[item] = value

     def clean_up_lookups(self, allow_undefined=False):
         # put anything in the lookups table into existing env objects
-        for key,value in list(self.lookups.items()):
+        for key, value in list(self.lookups.items()):
             logger.debug("lookup key {} value {}".format(key, value))
-            result = self.set_value(key,value, allow_undefined=allow_undefined)
+            result = self.set_value(key, value, allow_undefined=allow_undefined)
             if result is not None:
                 del self.lookups[key]

@@ -497,48 +604,69 @@ def _set_compset(self, compset_name, files):
         science_support = []
         compset_alias = None
         components = files.get_components("COMPSETS_SPEC_FILE")
-        logger.debug(" Possible components for COMPSETS_SPEC_FILE are {}".format(components))
+        logger.debug(
+            " Possible components for COMPSETS_SPEC_FILE are {}".format(components)
+        )

         self.set_lookup_value("COMP_INTERFACE", self._comp_interface)
-        if self._cime_model == 'ufs':
+        if self._cime_model == "ufs":
             ufs_driver = os.environ.get("UFS_DRIVER")
             attribute = None
             if ufs_driver:
-                attribute = {"component":"nems"}
-            comp_root_dir_cpl = files.get_value("COMP_ROOT_DIR_CPL", attribute=attribute)
-        elif self._cime_model == 'cesm':
+                attribute = {"component": "nems"}
+            comp_root_dir_cpl = files.get_value(
+                "COMP_ROOT_DIR_CPL", attribute=attribute
+            )
+        elif self._cime_model == "cesm":
             comp_root_dir_cpl = files.get_value("COMP_ROOT_DIR_CPL")
-        if self._cime_model in ('cesm','ufs'):
-            self.set_lookup_value("COMP_ROOT_DIR_CPL",comp_root_dir_cpl)
+        if self._cime_model in ("cesm", "ufs"):
+            self.set_lookup_value("COMP_ROOT_DIR_CPL", comp_root_dir_cpl)

         # Loop through all of the files listed in COMPSETS_SPEC_FILE and find the file
         # that has a match for either the alias or the longname in that order
         for component in components:

             # Determine the compsets file for this component
-            compsets_filename = files.get_value("COMPSETS_SPEC_FILE", {"component":component})
+            compsets_filename = files.get_value(
+                "COMPSETS_SPEC_FILE", {"component": component}
+            )

             # If the file exists, read it and see if there is a match for the compset alias or longname
-            if (os.path.isfile(compsets_filename)):
+            if os.path.isfile(compsets_filename):
                 compsets = Compsets(compsets_filename)
-                match, compset_alias, science_support = compsets.get_compset_match(name=compset_name)
+                match, compset_alias, science_support = compsets.get_compset_match(
+                    name=compset_name
+                )
                 if match is not None:
                     self._compsetname = match
                     logger.info("Compset longname is {}".format(match))
-                    logger.info("Compset specification file is {}".format(compsets_filename))
+                    logger.info(
+                        "Compset specification file is {}".format(compsets_filename)
+                    )
                     break

         if compset_alias is None:
-            logger.info("Did not find an alias or longname compset match for {} ".format(compset_name))
+            logger.info(
+                "Did not find an alias or longname compset match for {} ".format(
+                    compset_name
+                )
+            )
             self._compsetname = compset_name

         # Fill in compset name
-        self._compsetname, self._components = self.valid_compset(self._compsetname, compset_alias, files)
+        self._compsetname, self._components = self.valid_compset(
+            self._compsetname, compset_alias, files
+        )

         # if this is a valid compset longname there will be at least 7 components.
         components = self.get_compset_components()
-        expect(len(components) > 6, "No compset alias {} found and this does not appear to be a compset longname.".format(compset_name))
+        expect(
+            len(components) > 6,
+            "No compset alias {} found and this does not appear to be a compset longname.".format(
+                compset_name
+            ),
+        )

         return compset_alias, science_support
@@ -558,15 +686,24 @@ def _find_primary_component(self):
             if comp == "CPL":
                 continue
             spec[comp] = self.get_value("COMP_{}".format(comp))
-            notprogcomps = ("D{}".format(comp),"X{}".format(comp),"S{}".format(comp))
+            notprogcomps = ("D{}".format(comp), "X{}".format(comp), "S{}".format(comp))
             if spec[comp].upper() in notprogcomps:
                 progcomps[comp] = False
             else:
                 progcomps[comp] = True
-        expect("ATM" in progcomps and "LND" in progcomps and "OCN" in progcomps and \
-               "ICE" in progcomps, " Not finding expected components in {}".format(self._component_classes))
-        if progcomps["ATM"] and progcomps["LND"] and progcomps["OCN"] and \
-           progcomps["ICE"]:
+        expect(
+            "ATM" in progcomps
+            and "LND" in progcomps
+            and "OCN" in progcomps
+            and "ICE" in progcomps,
+            " Not finding expected components in {}".format(self._component_classes),
+        )
+        if (
+            progcomps["ATM"]
+            and progcomps["LND"]
+            and progcomps["OCN"]
+            and progcomps["ICE"]
+        ):
             primary_component = "allactive"
         elif progcomps["LND"] and progcomps["OCN"] and progcomps["ICE"]:
             # this is a "J" compset
@@ -616,6 +753,10 @@ def _valid_compset_impl(self, compset_name, compset_alias, comp_classes, comp_ha
         >>> caseroot = os.path.join(workdir, 'caseroot') # use non-existent caseroot to avoid error about not being a valid case directory in Case __init__ method
         >>> Case(caseroot, read_only=False)._valid_compset_impl('2000_DATM%NYF_SLND_DICE%SSMI_DOCN%DOM_DROF%NYF_SGLC_SWAV', None, ['CPL', 'ATM', 'LND', 'ICE', 'OCN', 'ROF', 'GLC', 'WAV', 'ESP'], {'datm':1,'satm':1,'dlnd':2,'slnd':2,'dice':3,'sice':3,'docn':4,'socn':4,'drof':5,'srof':5,'sglc':6,'swav':7,'ww3':7,'sesp':8})
         ('2000_DATM%NYF_SLND_DICE%SSMI_DOCN%DOM_DROF%NYF_SGLC_SWAV_SESP', ['2000', 'DATM%NYF', 'SLND', 'DICE%SSMI', 'DOCN%DOM', 'DROF%NYF', 'SGLC', 'SWAV', 'SESP'])
+        >>> Case(caseroot, read_only=False)._valid_compset_impl('2000_DATM%NYF_SLND_DICE%SSMI_DOCN%DOM_DROF%NYF_SGLC_SWAV', None, ['CPL', 'ATM', 'LND', 'ICE', 'OCN', 'ROF', 'GLC', 'WAV', 'ESP'], {'datm':1,'satm':1,'dlnd':2,'slnd':2,'dice':3,'sice':3,'docn':4,'socn':4,'drof':5,'srof':5,'sglc':6,'swav':7,'ww3':7,'sesp':8})
+        ('2000_DATM%NYF_SLND_DICE%SSMI_DOCN%DOM_DROF%NYF_SGLC_SWAV_SESP', ['2000', 'DATM%NYF', 'SLND', 'DICE%SSMI', 'DOCN%DOM', 'DROF%NYF', 'SGLC', 'SWAV', 'SESP'])
+        >>> Case(caseroot, read_only=False)._valid_compset_impl('atm:DATM%NYF_rof:DROF%NYF_scn:2000_ice:DICE%SSMI_ocn:DOCN%DOM', None, ['CPL', 'ATM', 'LND', 'ICE', 'OCN', 'ROF', 'GLC', 'WAV', 'ESP'], {'datm':1,'satm':1,'dlnd':2,'slnd':2,'dice':3,'sice':3,'docn':4,'socn':4,'drof':5,'srof':5,'sglc':6,'swav':7,'ww3':7,'sesp':8})
+        ('2000_DATM%NYF_SLND_DICE%SSMI_DOCN%DOM_DROF%NYF_SGLC_SWAV_SESP', ['2000', 'DATM%NYF', 'SLND', 'DICE%SSMI', 'DOCN%DOM', 'DROF%NYF', 'SGLC', 'SWAV', 'SESP'])
         >>> Case(caseroot, read_only=False)._valid_compset_impl('2000_DATM%NYF_DICE%SSMI_DOCN%DOM_DROF%NYF', None, ['CPL', 'ATM', 'LND', 'ICE', 'OCN', 'ROF', 'GLC', 'WAV', 'ESP'], {'datm':1,'satm':1,'dlnd':2,'slnd':2,'dice':3,'sice':3,'docn':4,'socn':4,'drof':5,'srof':5,'sglc':6,'swav':7,'ww3':7,'sesp':8})
         ('2000_DATM%NYF_SLND_DICE%SSMI_DOCN%DOM_DROF%NYF_SGLC_SWAV_SESP', ['2000', 'DATM%NYF', 'SLND', 'DICE%SSMI', 'DOCN%DOM', 'DROF%NYF', 'SGLC', 'SWAV', 'SESP'])
         >>> Case(caseroot, read_only=False)._valid_compset_impl('2000_DICE%SSMI_DOCN%DOM_DATM%NYF_DROF%NYF', None, ['CPL', 'ATM', 'LND', 'ICE', 'OCN', 'ROF', 'GLC', 'WAV', 'ESP'], {'datm':1,'satm':1,'dlnd':2,'slnd':2,'dice':3,'sice':3,'docn':4,'socn':4,'drof':5,'srof':5,'sglc':6,'swav':7,'ww3':7,'sesp':8})
@@ -635,21 +776,36 @@ def _valid_compset_impl(self, compset_name, compset_alias, comp_classes, comp_ha
        >>> shutil.rmtree(workdir, ignore_errors=True)
         """
         # Find the models declared in the compset
-        model_set = [None]*len(comp_classes)
-        components = compset_name.split('_')
-        model_set[0] = components[0]
+        model_set = [None] * len(comp_classes)
+        components = compset_name.split("_")
         noncomps = []
         allstubs = True
+        colonformat = ":" in compset_name
+        if colonformat:
+            # make sure that scn: is component[0] as expected
+            for i in range(1, len(components)):
+                if components[i].startswith("scn:"):
+                    tmp = components[0]
+                    components[0] = components[i]
+                    components[i] = tmp
+                    break
+
+            model_set[0] = components[0][4:]
+        else:
+            model_set[0] = components[0]
+
         for model in components[1:]:
             match = Case.__mod_match_re__.match(model.lower())
             expect(match is not None, "No model match for {}".format(model))
             mod_match = match.group(1)
             # Check for noncomponent appends (BGC & TEST)
-            if mod_match in ('bgc', 'test'):
+            if mod_match in ("bgc", "test"):
                 noncomps.append(model)
+            elif ":" in mod_match:
+                comp_ind = comp_hash[mod_match[4:]]
+                model_set[comp_ind] = model
             else:
-                expect(mod_match in comp_hash,
-                       "Unknown model type, {}".format(model))
+                expect(mod_match in comp_hash, "Unknown model type, {}".format(model))
                 comp_ind = comp_hash[mod_match]
                 model_set[comp_ind] = model
@@ -657,19 +813,25 @@ def _valid_compset_impl(self, compset_name, compset_alias, comp_classes, comp_ha
         for comp_ind in range(1, len(model_set)):
             if model_set[comp_ind] is None:
                 comp_class = comp_classes[comp_ind]
-                stub = 'S' + comp_class
+                stub = "S" + comp_class
                 logger.info("Automatically adding {} to compset".format(stub))
                 model_set[comp_ind] = stub
-            elif model_set[comp_ind][0] != 'S':
+            elif ":" in model_set[comp_ind]:
+                model_set[comp_ind] = model_set[comp_ind][4:]
+
+            if model_set[comp_ind][0] != "S":
                 allstubs = False

-        expect((compset_alias is not None) or (not allstubs),
-               'Invalid compset name, {}, all stub components generated'.format(compset_name))
+        expect(
+            (compset_alias is not None) or (not allstubs),
+            "Invalid compset name, {}, all stub components generated".format(
+                compset_name
+            ),
+        )
         # Return the completed compset
-        compsetname = '_'.join(model_set)
+        compsetname = "_".join(model_set)
         for noncomp in noncomps:
-            compsetname = compsetname + '_' + noncomp
-
+            compsetname = compsetname + "_" + noncomp
         return compsetname, model_set

     # RE to match component type name without optional piece (stuff after %).
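    # The doctests above exercise the new colon-prefixed compset form
    # (e.g. "atm:DATM%NYF_rof:DROF%NYF_scn:2000_..."). A rough sketch of the
    # normalization idea, assuming three-letter class prefixes so that
    # stripping through the ":" matches the model[4:] slices in the code above
    # (simplified; the real logic also maps models to classes via comp_hash):
    #
    #     def sketch_normalize(components):
    #         # put the "scn:" entry first, mirroring the swap in the code above
    #         for i, comp in enumerate(components):
    #             if comp.startswith("scn:"):
    #                 components[0], components[i] = comp, components[0]
    #                 break
    #         return [c.split(":", 1)[1] if ":" in c else c for c in components]
    #
    #     sketch_normalize(["atm:DATM%NYF", "scn:2000", "ocn:DOCN%DOM"])
    #     # -> ["2000", "DATM%NYF", "DOCN%DOM"]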
@@ -691,28 +853,30 @@ def valid_compset(self, compset_name, compset_alias, files): drv_config_file = files.get_value("CONFIG_CPL_FILE") drv_comp = Component(drv_config_file, "CPL") comp_classes = drv_comp.get_valid_model_components() - comp_hash = {} # Hash model name to component class index + comp_hash = {} # Hash model name to component class index for comp_ind in range(1, len(comp_classes)): comp = comp_classes[comp_ind] # Find list of models for component class # List can be in different locations, check CONFIG_XXX_FILE - node_name = 'CONFIG_{}_FILE'.format(comp) + node_name = "CONFIG_{}_FILE".format(comp) models = files.get_components(node_name) if (models is None) or (None in models): # Backup, check COMP_ROOT_DIR_XXX - node_name = 'COMP_ROOT_DIR_' + comp + node_name = "COMP_ROOT_DIR_" + comp models = files.get_components(node_name) - expect((models is not None) and (None not in models), - "Unable to find list of supported components") + expect( + (models is not None) and (None not in models), + "Unable to find list of supported components", + ) for model in models: mod_match = Case.__mod_match_re__.match(model.lower()).group(1) comp_hash[mod_match] = comp_ind - return self._valid_compset_impl(compset_name, compset_alias, - comp_classes, comp_hash) - + return self._valid_compset_impl( + compset_name, compset_alias, comp_classes, comp_hash + ) def _set_info_from_primary_component(self, files, pesfile=None): """ @@ -723,55 +887,70 @@ def _set_info_from_primary_component(self, files, pesfile=None): """ component = self.get_primary_component() - compset_spec_file = files.get_value("COMPSETS_SPEC_FILE", - {"component":component}, resolved=False) + compset_spec_file = files.get_value( + "COMPSETS_SPEC_FILE", {"component": component}, resolved=False + ) - self.set_lookup_value("COMPSETS_SPEC_FILE" ,compset_spec_file) + self.set_lookup_value("COMPSETS_SPEC_FILE", compset_spec_file) if pesfile is None: - self._pesfile = files.get_value("PES_SPEC_FILE", {"component":component}) - pesfile_unresolved = files.get_value("PES_SPEC_FILE", {"component":component}, resolved=False) + self._pesfile = files.get_value("PES_SPEC_FILE", {"component": component}) + pesfile_unresolved = files.get_value( + "PES_SPEC_FILE", {"component": component}, resolved=False + ) logger.info("Pes specification file is {}".format(self._pesfile)) else: self._pesfile = pesfile pesfile_unresolved = pesfile - expect(self._pesfile is not None,"No pesfile found for component {}".format(component)) + expect( + self._pesfile is not None, + "No pesfile found for component {}".format(component), + ) self.set_lookup_value("PES_SPEC_FILE", pesfile_unresolved) - tests_filename = files.get_value("TESTS_SPEC_FILE", {"component":component}, resolved=False) - tests_mods_dir = files.get_value("TESTS_MODS_DIR" , {"component":component}, resolved=False) - user_mods_dir = files.get_value("USER_MODS_DIR" , {"component":component}, resolved=False) + tests_filename = files.get_value( + "TESTS_SPEC_FILE", {"component": component}, resolved=False + ) + tests_mods_dir = files.get_value( + "TESTS_MODS_DIR", {"component": component}, resolved=False + ) + user_mods_dir = files.get_value( + "USER_MODS_DIR", {"component": component}, resolved=False + ) self.set_lookup_value("TESTS_SPEC_FILE", tests_filename) - self.set_lookup_value("TESTS_MODS_DIR" , tests_mods_dir) - self.set_lookup_value("USER_MODS_DIR" , user_mods_dir) - + self.set_lookup_value("TESTS_MODS_DIR", tests_mods_dir) + self.set_lookup_value("USER_MODS_DIR", user_mods_dir) def 
get_compset_components(self): - #If are doing a create_clone then, self._compsetname is not set yet + # If we are doing a create_clone, then self._compsetname is not set yet components = [] compset = self.get_value("COMPSET") if compset is None: compset = self._compsetname - expect(compset is not None, - "compset is not set") + expect(compset is not None, "compset is not set") # the first element is always the date operator - skip it - elements = compset.split('_')[1:] # pylint: disable=maybe-no-member + elements = compset.split("_")[1:] # pylint: disable=maybe-no-member for element in elements: + if ":" in element: + element = element[4:] # ignore the possible BGC or TEST modifier if element.startswith("BGC%") or element.startswith("TEST"): continue else: - element_component = element.split('%')[0].lower() - if "ww" not in element_component and "fv3" not in element_component and "cice" not in element_component: - element_component = re.sub(r'[0-9]*',"",element_component) + element_component = element.split("%")[0].lower() + if ( + "ww" not in element_component + and "fv3" not in element_component + and "cice" not in element_component + ): + element_component = re.sub(r"[0-9]*", "", element_component) components.append(element_component) return components - def __iter__(self): for entryid_file in self._env_entryid_files: for key, val in entryid_file: - if isinstance(val, CIME.six.string_types) and '$' in val: + if isinstance(val, CIME.six.string_types) and "$" in val: yield key, self.get_resolved_value(val) else: yield key, val @@ -784,7 +963,11 @@ def set_comp_classes(self, comp_classes): def _get_component_config_data(self, files): # attributes used for multi valued defaults # attlist is a dictionary used to determine the value element that has the most matches - attlist = {"compset":self._compsetname, "grid":self._gridname, "cime_model":self._cime_model} + attlist = { + "compset": self._compsetname, + "grid": self._gridname, + "cime_model": self._cime_model, + } # Determine list of component classes that this coupler/driver knows how # to deal with. This list follows the same order as compset longnames follow.
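For reference, the component-name extraction in get_compset_components above reduces to the following standalone sketch (a hypothetical helper mirroring the rules in the hunk: skip the leading date token and any BGC/TEST modifier, drop the %physics suffix, and strip version digits except for ww/fv3/cice model names):

import re

def compset_components(compset):
    """Extract lower-case component names from a compset longname."""
    components = []
    for element in compset.split("_")[1:]:  # first token is the date operator
        if element.startswith("BGC%") or element.startswith("TEST"):
            continue  # non-component modifiers
        name = element.split("%")[0].lower()
        if not any(k in name for k in ("ww", "fv3", "cice")):
            name = re.sub(r"[0-9]*", "", name)  # e.g. cam60 -> cam
        components.append(name)
    return components

print(compset_components("2000_DATM%NYF_SLND_DICE%SSMI_DOCN%DOM_DROF%NYF_SGLC_SWAV"))
# -> ['datm', 'slnd', 'dice', 'docn', 'drof', 'sglc', 'swav']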
@@ -798,14 +981,26 @@ def _get_component_config_data(self, files): for env_file in self._env_entryid_files: env_file.add_elements_by_group(drv_comp, attributes=attlist) - drv_config_file_model_specific = files.get_value("CONFIG_CPL_FILE_MODEL_SPECIFIC") - expect(os.path.isfile(drv_config_file_model_specific), - "No {} specific file found for driver {}".format(get_model(),self._comp_interface)) - drv_comp_model_specific = Component(drv_config_file_model_specific, 'CPL') - - self._component_description["forcing"] = drv_comp_model_specific.get_forcing_description(self._compsetname) - logger.info("Compset forcing is {}".format(self._component_description["forcing"])) - self._component_description["CPL"] = drv_comp_model_specific.get_description(self._compsetname) + drv_config_file_model_specific = files.get_value( + "CONFIG_CPL_FILE_MODEL_SPECIFIC" + ) + expect( + os.path.isfile(drv_config_file_model_specific), + "No {} specific file found for driver {}".format( + get_model(), self._comp_interface + ), + ) + drv_comp_model_specific = Component(drv_config_file_model_specific, "CPL") + + self._component_description[ + "forcing" + ] = drv_comp_model_specific.get_forcing_description(self._compsetname) + logger.info( + "Compset forcing is {}".format(self._component_description["forcing"]) + ) + self._component_description["CPL"] = drv_comp_model_specific.get_description( + self._compsetname + ) if len(self._component_description["CPL"]) > 0: logger.info("Com forcing is {}".format(self._component_description["CPL"])) for env_file in self._env_entryid_files: @@ -818,18 +1013,22 @@ def _get_component_config_data(self, files): self.set_comp_classes(drv_comp.get_valid_model_components()) # will need a change here for new cpl components - root_dir_node_name = 'COMP_ROOT_DIR_CPL' - comp_root_dir = files.get_value(root_dir_node_name, {"component":self._comp_interface}, resolved=False) + root_dir_node_name = "COMP_ROOT_DIR_CPL" + comp_root_dir = files.get_value( + root_dir_node_name, {"component": self._comp_interface}, resolved=False + ) if comp_root_dir is not None: self.set_value(root_dir_node_name, comp_root_dir) - for i in range(1,len(self._component_classes)): + for i in range(1, len(self._component_classes)): comp_class = self._component_classes[i] - comp_name = self._components[i-1] - root_dir_node_name = 'COMP_ROOT_DIR_' + comp_class - node_name = 'CONFIG_' + comp_class + '_FILE' - compatt = {"component":comp_name} + comp_name = self._components[i - 1] + if ":" in comp_name: + comp_name = comp_name[4:] + root_dir_node_name = "COMP_ROOT_DIR_" + comp_class + node_name = "CONFIG_" + comp_class + "_FILE" + compatt = {"component": comp_name} comp_root_dir = files.get_value(root_dir_node_name, compatt, resolved=False) if comp_root_dir is not None: self.set_value(root_dir_node_name, comp_root_dir) @@ -837,47 +1036,73 @@ def _get_component_config_data(self, files): # Add the group and elements for the config_files.xml comp_config_file = files.get_value(node_name, compatt, resolved=False) - expect(comp_config_file is not None,"No component {} found for class {}".format(comp_name, comp_class)) + expect( + comp_config_file is not None, + "No component {} found for class {}".format(comp_name, comp_class), + ) self.set_value(node_name, comp_config_file) - comp_config_file = files.get_value(node_name, compatt) - - expect(comp_config_file is not None and os.path.isfile(comp_config_file), - "Config file {} for component {} not found.".format(comp_config_file, comp_name)) + comp_config_file = 
files.get_value(node_name, compatt) + + expect( + comp_config_file is not None and os.path.isfile(comp_config_file), + "Config file {} for component {} not found.".format( + comp_config_file, comp_name + ), + ) compobj = Component(comp_config_file, comp_class) # For files following version 3 schema this also checks the compsetname validity - self._component_description[comp_class] = compobj.get_description(self._compsetname) - expect(self._component_description[comp_class] is not None, - "No description found in file {} for component {} in comp_class {}".format(comp_config_file, comp_name, comp_class)) - logger.info("{} component is {}".format(comp_class, self._component_description[comp_class])) + self._component_description[comp_class] = compobj.get_description( + self._compsetname + ) + expect( + self._component_description[comp_class] is not None, + "No description found in file {} for component {} in comp_class {}".format( + comp_config_file, comp_name, comp_class + ), + ) + logger.info( + "{} component is {}".format( + comp_class, self._component_description[comp_class] + ) + ) for env_file in self._env_entryid_files: env_file.add_elements_by_group(compobj, attributes=attlist) - self.clean_up_lookups(allow_undefined=self._comp_interface=='nuopc') + self.clean_up_lookups(allow_undefined=self._comp_interface == "nuopc") def _setup_mach_pes(self, pecount, multi_driver, ninst, machine_name, mpilib): - #-------------------------------------------- + # -------------------------------------------- # pe layout - #-------------------------------------------- + # -------------------------------------------- mach_pes_obj = None # self._pesfile may already be env_mach_pes.xml if so we can just return gfile = GenericXML(infile=self._pesfile) ftype = gfile.get_id() - expect(ftype == "env_mach_pes.xml" or ftype == "config_pes", " Do not recognize {} as a valid CIME pes file {}".format(self._pesfile, ftype)) + expect( + ftype == "env_mach_pes.xml" or ftype == "config_pes", + " Do not recognize {} as a valid CIME pes file {}".format( + self._pesfile, ftype + ), + ) if ftype == "env_mach_pes.xml": - new_mach_pes_obj = EnvMachPes(infile=self._pesfile, components=self._component_classes, comp_interface=self._comp_interface) + new_mach_pes_obj = EnvMachPes( + infile=self._pesfile, + components=self._component_classes, + comp_interface=self._comp_interface, + ) self.update_env(new_mach_pes_obj, "mach_pes", blow_away=True) return new_mach_pes_obj.get_value("TOTALPES") pesobj = Pes(self._pesfile) - match1 = re.match('(.+)x([0-9]+)', "" if pecount is None else pecount) - match2 = re.match('([0-9]+)', "" if pecount is None else pecount) + match1 = re.match("(.+)x([0-9]+)", "" if pecount is None else pecount) + match2 = re.match("([0-9]+)", "" if pecount is None else pecount) pes_ntasks = {} pes_nthrds = {} pes_rootpe = {} pes_pstrid = {} - other = {} + other = {} comment = None force_tasks = None force_thrds = None @@ -887,15 +1112,34 @@ def _setup_mach_pes(self, pecount, multi_driver, ninst, machine_name, mpilib): if opti_tasks.isdigit(): force_tasks = int(opti_tasks) else: - pes_ntasks = pesobj.find_pes_layout(self._gridname, self._compsetname, machine_name, - pesize_opts=opti_tasks, mpilib=mpilib)[0] + pes_ntasks = pesobj.find_pes_layout( + self._gridname, + self._compsetname, + machine_name, + pesize_opts=opti_tasks, + mpilib=mpilib, + )[0] force_thrds = int(match1.group(2)) elif match2: force_tasks = int(match2.group(1)) - pes_nthrds = pesobj.find_pes_layout(self._gridname, self._compsetname, 
machine_name, mpilib=mpilib)[1] + pes_nthrds = pesobj.find_pes_layout( + self._gridname, self._compsetname, machine_name, mpilib=mpilib + )[1] else: - pes_ntasks, pes_nthrds, pes_rootpe, pes_pstrid, other, comment = pesobj.find_pes_layout(self._gridname, self._compsetname, - machine_name, pesize_opts=pecount, mpilib=mpilib) + ( + pes_ntasks, + pes_nthrds, + pes_rootpe, + pes_pstrid, + other, + comment, + ) = pesobj.find_pes_layout( + self._gridname, + self._compsetname, + machine_name, + pesize_opts=pecount, + mpilib=mpilib, + ) if match1 or match2: for component_class in self._component_classes: @@ -931,7 +1175,7 @@ def _setup_mach_pes(self, pecount, multi_driver, ninst, machine_name, mpilib): rootpe = pes_rootpe[rootpe_str] if rootpe_str in pes_rootpe else 0 pstrid = pes_pstrid[pstrid_str] if pstrid_str in pes_pstrid else 1 - totaltasks.append( (ntasks + rootpe) * nthrds ) + totaltasks.append((ntasks + rootpe) * nthrds) mach_pes_obj.set_value(ntasks_str, ntasks) mach_pes_obj.set_value(nthrds_str, nthrds) @@ -948,54 +1192,76 @@ def _setup_mach_pes(self, pecount, multi_driver, ninst, machine_name, mpilib): key = "NTASKS_{}".format(compclass) if key not in pes_ntasks: - mach_pes_obj.set_value(key,1) + mach_pes_obj.set_value(key, 1) key = "NTHRDS_{}".format(compclass) if compclass not in pes_nthrds: - mach_pes_obj.set_value(compclass,1) + mach_pes_obj.set_value(compclass, 1) if multi_driver: mach_pes_obj.set_value("MULTI_DRIVER", True) - - def configure(self, compset_name, grid_name, machine_name=None, - project=None, pecount=None, compiler=None, mpilib=None, - pesfile=None, gridfile=None, - multi_driver=False, ninst=1, test=False, - walltime=None, queue=None, output_root=None, - run_unsupported=False, answer=None, - input_dir=None, driver=None, workflowid="default", - non_local=False, extra_machines_dir=None, case_group=None, - ngpus_per_node=0): - - expect(check_name(compset_name, additional_chars='.'), "Invalid compset name {}".format(compset_name)) + def configure( + self, + compset_name, + grid_name, + machine_name=None, + project=None, + pecount=None, + compiler=None, + mpilib=None, + pesfile=None, + gridfile=None, + multi_driver=False, + ninst=1, + test=False, + walltime=None, + queue=None, + output_root=None, + run_unsupported=False, + answer=None, + input_dir=None, + driver=None, + workflowid="default", + non_local=False, + extra_machines_dir=None, + case_group=None, + ngpus_per_node=0, + ): + + expect( + check_name(compset_name, additional_chars="."), + "Invalid compset name {}".format(compset_name), + ) self._comp_interface = driver - #-------------------------------------------- + # -------------------------------------------- # compset, pesfile, and compset components - #-------------------------------------------- + # -------------------------------------------- files = Files(comp_interface=self._comp_interface) - #-------------------------------------------- + # -------------------------------------------- # find and/or fill out compset name - #-------------------------------------------- + # -------------------------------------------- compset_alias, science_support = self._set_compset(compset_name, files) self._components = self.get_compset_components() - #-------------------------------------------- + # -------------------------------------------- # grid - #-------------------------------------------- - grids = Grids(gridfile) + # -------------------------------------------- + grids = Grids(gridfile, comp_interface=driver) - gridinfo = grids.get_grid_info(name=grid_name, 
compset=self._compsetname, driver=self._comp_interface) + gridinfo = grids.get_grid_info( + name=grid_name, compset=self._compsetname, driver=self._comp_interface + ) self._gridname = gridinfo["GRID"] - for key,value in list(gridinfo.items()): - logger.debug("Set grid {} {}".format(key,value)) - self.set_lookup_value(key,value) + for key, value in list(gridinfo.items()): + logger.debug("Set grid {} {}".format(key, value)) + self.set_lookup_value(key, value) - #-------------------------------------------- + # -------------------------------------------- # component config data - #-------------------------------------------- + # -------------------------------------------- self._get_component_config_data(files) @@ -1011,9 +1277,9 @@ def configure(self, compset_name, grid_name, machine_name=None, self.clean_up_lookups(allow_undefined=True) - #-------------------------------------------- + # -------------------------------------------- # machine - #-------------------------------------------- + # -------------------------------------------- # set machine values in env_xxx files if extra_machines_dir: self.set_value("EXTRA_MACHDIR", extra_machines_dir) @@ -1022,15 +1288,27 @@ def configure(self, compset_name, grid_name, machine_name=None, machine_name = machobj.get_machine_name() self.set_value("MACH", machine_name) if probed_machine != machine_name and probed_machine is not None: - logger.warning("WARNING: User-selected machine '{}' does not match probed machine '{}'".format(machine_name, probed_machine)) + logger.warning( + "WARNING: User-selected machine '{}' does not match probed machine '{}'".format( + machine_name, probed_machine + ) + ) else: logger.info("Machine is {}".format(machine_name)) nodenames = machobj.get_node_names() - nodenames = [x for x in nodenames if - '_system' not in x and '_variables' not in x and 'mpirun' not in x and\ - 'COMPILER' not in x and 'MPILIB' not in x and 'MAX_MPITASKS_PER_NODE' not in x and\ - 'MAX_TASKS_PER_NODE' not in x and 'MAX_GPUS_PER_NODE' not in x] + nodenames = [ + x + for x in nodenames + if "_system" not in x + and "_variables" not in x + and "mpirun" not in x + and "COMPILER" not in x + and "MPILIB" not in x + and "MAX_MPITASKS_PER_NODE" not in x + and "MAX_TASKS_PER_NODE" not in x + and "MAX_GPUS_PER_NODE" not in x + ] for nodename in nodenames: value = machobj.get_value(nodename, resolved=False) @@ -1043,59 +1321,80 @@ def configure(self, compset_name, grid_name, machine_name=None, if compiler is None: compiler = machobj.get_default_compiler() else: - expect(machobj.is_valid_compiler(compiler), - "compiler {} is not supported on machine {}".format(compiler, machine_name)) + expect( + machobj.is_valid_compiler(compiler), + "compiler {} is not supported on machine {}".format( + compiler, machine_name + ), + ) - self.set_value("COMPILER",compiler) + self.set_value("COMPILER", compiler) if mpilib is None: - mpilib = machobj.get_default_MPIlib({"compiler":compiler}) + mpilib = machobj.get_default_MPIlib({"compiler": compiler}) else: - expect(machobj.is_valid_MPIlib(mpilib, {"compiler":compiler}), - "MPIlib {} is not supported on machine {}".format(mpilib, machine_name)) - self.set_value("MPILIB",mpilib) - for name in ("MAX_TASKS_PER_NODE","MAX_MPITASKS_PER_NODE","MAX_GPUS_PER_NODE"): - dmax = machobj.get_value(name,{'compiler':compiler}) + expect( + machobj.is_valid_MPIlib(mpilib, {"compiler": compiler}), + "MPIlib {} is not supported on machine {}".format(mpilib, machine_name), + ) + self.set_value("MPILIB", mpilib) + for name in ( + 
"MAX_TASKS_PER_NODE", + "MAX_MPITASKS_PER_NODE", + "MAX_GPUS_PER_NODE", + ): + dmax = machobj.get_value(name, {"compiler": compiler}) if not dmax: dmax = machobj.get_value(name) if dmax: self.set_value(name, dmax) else: - logger.warning("Variable {} not defined for machine {}".format(name, machine_name)) + logger.warning( + "Variable {} not defined for machine {}".format(name, machine_name) + ) machdir = machobj.get_machines_dir() self.set_value("MACHDIR", machdir) # Create env_mach_specific settings from machine info. env_mach_specific_obj = self.get_env("mach_specific") - env_mach_specific_obj.populate(machobj, attributes={"mpilib":mpilib, "compiler":compiler,"threaded":self.get_build_threaded()}) + env_mach_specific_obj.populate( + machobj, + attributes={ + "mpilib": mpilib, + "compiler": compiler, + "threaded": self.get_build_threaded(), + }, + ) self._setup_mach_pes(pecount, multi_driver, ninst, machine_name, mpilib) - if multi_driver and int(ninst)>1: + if multi_driver and int(ninst) > 1: logger.info(" Driver/Coupler has %s instances" % ninst) - #-------------------------------------------- + # -------------------------------------------- # archiving system - #-------------------------------------------- + # -------------------------------------------- env_archive = self.get_env("archive") - infile_node = files.get_child("entry", {"id":"ARCHIVE_SPEC_FILE"}) + infile_node = files.get_child("entry", {"id": "ARCHIVE_SPEC_FILE"}) infile = files.get_default_value(infile_node) infile = self.get_resolved_value(infile) logger.debug("archive defaults located in {}".format(infile)) archive = Archive(infile=infile, files=files) archive.setup(env_archive, self._components, files=files) - self.set_value("COMPSET",self._compsetname) + self.set_value("COMPSET", self._compsetname) self._set_pio_xml() logger.info(" Compset is: {} ".format(self._compsetname)) - logger.info(" Grid is: {} ".format(self._gridname )) + logger.info(" Grid is: {} ".format(self._gridname)) logger.info(" Components in compset are: {} ".format(self._components)) if not test and not run_unsupported and self._cime_model == "cesm": if grid_name in science_support: - logger.info("\nThis is a CESM scientifically supported compset at this resolution.\n") + logger.info( + "\nThis is a CESM scientifically supported compset at this resolution.\n" + ) else: self._check_testlists(compset_alias, grid_name, files) @@ -1120,8 +1419,12 @@ def configure(self, compset_name, grid_name, machine_name=None, output_root = os.path.abspath(output_root) self.set_value("CIME_OUTPUT_ROOT", output_root) if non_local: - self.set_value("EXEROOT", os.path.join(output_root, self.get_value("CASE"), "bld")) - self.set_value("RUNDIR", os.path.join(output_root, self.get_value("CASE"), "run")) + self.set_value( + "EXEROOT", os.path.join(output_root, self.get_value("CASE"), "bld") + ) + self.set_value( + "RUNDIR", os.path.join(output_root, self.get_value("CASE"), "run") + ) self.set_value("NONLOCAL", True) # Overwriting an existing exeroot or rundir can cause problems @@ -1130,9 +1433,15 @@ def configure(self, compset_name, grid_name, machine_name=None, for wdir in (exeroot, rundir): logging.debug("wdir is {}".format(wdir)) if os.path.exists(wdir): - expect(not test, "Directory {} already exists, aborting test".format(wdir)) + expect( + not test, "Directory {} already exists, aborting test".format(wdir) + ) if answer is None: - response = input("\nDirectory {} already exists, (r)eplace, (a)bort, or (u)se existing?".format(wdir)) + response = input( + "\nDirectory 
{} already exists, (r)eplace, (a)bort, or (u)se existing?".format( + wdir + ) + ) else: response = answer @@ -1142,7 +1451,7 @@ def configure(self, compset_name, grid_name, machine_name=None, expect(response.startswith("u"), "Aborting by user request") # miscellaneous settings - if self.get_value("RUN_TYPE") == 'hybrid': + if self.get_value("RUN_TYPE") == "hybrid": self.set_value("GET_REFCASE", True) if case_group: @@ -1152,13 +1461,13 @@ def configure(self, compset_name, grid_name, machine_name=None, model = get_model() self.set_model_version(model) if model == "cesm" and not test: - self.set_value("DOUT_S",True) + self.set_value("DOUT_S", True) self.set_value("TIMER_LEVEL", 4) if test: - self.set_value("TEST",True) + self.set_value("TEST", True) - #---------------------------------------------------------------------------------------------------------- + # ---------------------------------------------------------------------------------------------------------- # Sanity check: # 1. We assume that there is always a string "gpu" in the compiler name if we want to enable GPU # 2. For compilers without the string "gpu" in the name: @@ -1171,31 +1480,52 @@ def configure(self, compset_name, grid_name, machine_name=None, # 3.2. if ngpus_per_node argument is larger than the value of MAX_GPUS_PER_NODE, the NGPUS_PER_NODE # XML variable in the env_mach_pes.xml file would be set to MAX_GPUS_PER_NODE automatically. # 3.3. if ngpus-per-node argument is equal to 0, it will be updated to 1 automatically. - #---------------------------------------------------------------------------------------------------------- + # ---------------------------------------------------------------------------------------------------------- max_gpus_per_node = self.get_value("MAX_GPUS_PER_NODE") if max_gpus_per_node: - if "gpu" in compiler: + if "gpu" in compiler: if not ngpus_per_node: ngpus_per_node = 1 - logger.warning("Setting ngpus_per_node to 1 for compiler {}".format(compiler)) - expect(ngpus_per_node > 0," ngpus_per_node is expected > 0 for compiler {}; current value is {}".format(compiler, ngpus_per_node)) + logger.warning( + "Setting ngpus_per_node to 1 for compiler {}".format(compiler) + ) + expect( + ngpus_per_node > 0, + " ngpus_per_node is expected > 0 for compiler {}; current value is {}".format( + compiler, ngpus_per_node + ), + ) else: - expect(ngpus_per_node == 0," ngpus_per_node is expected = 0 for compiler {}; current value is {}".format(compiler, ngpus_per_node)) + expect( + ngpus_per_node == 0, + " ngpus_per_node is expected = 0 for compiler {}; current value is {}".format( + compiler, ngpus_per_node + ), + ) if ngpus_per_node >= 0: - self.set_value("NGPUS_PER_NODE", ngpus_per_node if ngpus_per_node <= max_gpus_per_node else max_gpus_per_node) + self.set_value( + "NGPUS_PER_NODE", + ngpus_per_node + if ngpus_per_node <= max_gpus_per_node + else max_gpus_per_node, + ) self.initialize_derived_attributes() - #-------------------------------------------- + # -------------------------------------------- # batch system (must come after initialize_derived_attributes) - #-------------------------------------------- + # -------------------------------------------- env_batch = self.get_env("batch") batch_system_type = machobj.get_value("BATCH_SYSTEM") logger.info("Batch_system_type is {}".format(batch_system_type)) - batch = Batch(batch_system=batch_system_type, machine=machine_name, files=files, - extra_machines_dir=extra_machines_dir) + batch = Batch( + batch_system=batch_system_type, + 
machine=machine_name, + files=files, + extra_machines_dir=extra_machines_dir, + ) workflow = Workflow(files=files) @@ -1206,9 +1536,13 @@ def configure(self, compset_name, grid_name, machine_name=None, env_workflow.create_job_groups(bjobs, test) if walltime: - self.set_value("USER_REQUESTED_WALLTIME", walltime, subgroup=self.get_primary_job()) + self.set_value( + "USER_REQUESTED_WALLTIME", walltime, subgroup=self.get_primary_job() + ) if queue: - self.set_value("USER_REQUESTED_QUEUE", queue, subgroup=self.get_primary_job()) + self.set_value( + "USER_REQUESTED_QUEUE", queue, subgroup=self.get_primary_job() + ) env_batch.set_job_defaults(bjobs, self) @@ -1224,13 +1558,20 @@ def configure(self, compset_name, grid_name, machine_name=None, self.set_value("DIN_LOC_ROOT", os.path.abspath(input_dir)) def get_compset_var_settings(self, files): - infile=files.get_value("COMPSETS_SPEC_FILE", - attribute={"component":self._primary_component}) + infile = files.get_value( + "COMPSETS_SPEC_FILE", attribute={"component": self._primary_component} + ) compset_obj = Compsets(infile=infile, files=files) - matches = compset_obj.get_compset_var_settings(self._compsetname, self._gridname) + matches = compset_obj.get_compset_var_settings( + self._compsetname, self._gridname + ) for name, value in matches: if len(value) > 0: - logger.info("Compset specific settings: name is {} and value is {}".format(name, value)) + logger.info( + "Compset specific settings: name is {} and value is {}".format( + name, value + ) + ) self.set_lookup_value(name, value) def set_initial_test_values(self): @@ -1249,10 +1590,12 @@ def _set_pio_xml(self): compset = self.get_value("COMPSET") mpilib = self.get_value("MPILIB") - defaults = pioobj.get_defaults(grid=grid, compset=compset, mach=mach, compiler=compiler, mpilib=mpilib) + defaults = pioobj.get_defaults( + grid=grid, compset=compset, mach=mach, compiler=compiler, mpilib=mpilib + ) for vid, value in list(defaults.items()): - self.set_value(vid,value) + self.set_value(vid, value) def _create_caseroot_tools(self): machines_dir = os.path.abspath(self.get_value("MACHDIR")) @@ -1260,51 +1603,65 @@ def _create_caseroot_tools(self): toolsdir = os.path.join(self.get_value("CIMEROOT"),"CIME","Tools") casetools = os.path.join(self._caseroot, "Tools") # setup executable files in caseroot/ - exefiles = (os.path.join(toolsdir, "case.setup"), - os.path.join(toolsdir, "case.build"), - os.path.join(toolsdir, "case.submit"), - os.path.join(toolsdir, "case.qstatus"), - os.path.join(toolsdir, "case.cmpgen_namelists"), - os.path.join(toolsdir, "preview_namelists"), - os.path.join(toolsdir, "preview_run"), - os.path.join(toolsdir, "check_input_data"), - os.path.join(toolsdir, "check_case"), - os.path.join(toolsdir, "xmlchange"), - os.path.join(toolsdir, "xmlquery"), - os.path.join(toolsdir, "pelayout")) + exefiles = ( + os.path.join(toolsdir, "case.setup"), + os.path.join(toolsdir, "case.build"), + os.path.join(toolsdir, "case.submit"), + os.path.join(toolsdir, "case.qstatus"), + os.path.join(toolsdir, "case.cmpgen_namelists"), + os.path.join(toolsdir, "preview_namelists"), + os.path.join(toolsdir, "preview_run"), + os.path.join(toolsdir, "check_input_data"), + os.path.join(toolsdir, "check_case"), + os.path.join(toolsdir, "xmlchange"), + os.path.join(toolsdir, "xmlquery"), + os.path.join(toolsdir, "pelayout"), + ) try: for exefile in exefiles: - destfile = os.path.join(self._caseroot,os.path.basename(exefile)) + destfile = os.path.join(self._caseroot, os.path.basename(exefile)) os.symlink(exefile, 
destfile) except Exception as e: logger.warning("FAILED to set up exefiles: {}".format(str(e))) - toolfiles = [os.path.join(toolsdir, "check_lockedfiles"), - os.path.join(toolsdir, "get_standard_makefile_args"), - os.path.join(toolsdir, "getTiming"), - os.path.join(toolsdir, "save_provenance"), - os.path.join(toolsdir, "Makefile"), - os.path.join(toolsdir, "mkSrcfiles"), - os.path.join(toolsdir, "mkDepends")] + toolfiles = [ + os.path.join(toolsdir, "check_lockedfiles"), + os.path.join(toolsdir, "get_standard_makefile_args"), + os.path.join(toolsdir, "getTiming"), + os.path.join(toolsdir, "save_provenance"), + os.path.join(toolsdir, "Makefile"), + os.path.join(toolsdir, "mkSrcfiles"), + os.path.join(toolsdir, "mkDepends"), + ] # used on Titan - if os.path.isfile( os.path.join(toolsdir,"mdiag_reduce.csh") ): - toolfiles.append( os.path.join(toolsdir,"mdiag_reduce.csh") ) - toolfiles.append( os.path.join(toolsdir,"mdiag_reduce.pl") ) + if os.path.isfile(os.path.join(toolsdir, "mdiag_reduce.csh")): + toolfiles.append(os.path.join(toolsdir, "mdiag_reduce.csh")) + toolfiles.append(os.path.join(toolsdir, "mdiag_reduce.pl")) for toolfile in toolfiles: destfile = os.path.join(casetools, os.path.basename(toolfile)) - expect(os.path.isfile(toolfile)," File {} does not exist".format(toolfile)) + expect(os.path.isfile(toolfile), " File {} does not exist".format(toolfile)) try: os.symlink(toolfile, destfile) except Exception as e: - logger.warning("FAILED to set up toolfiles: {} {} {}".format(str(e), toolfile, destfile)) + logger.warning( + "FAILED to set up toolfiles: {} {} {}".format( + str(e), toolfile, destfile + ) + ) if get_model() == "e3sm": if os.path.exists(os.path.join(machines_dir, "syslog.{}".format(machine))): - safe_copy(os.path.join(machines_dir, "syslog.{}".format(machine)), os.path.join(casetools, "mach_syslog")) + safe_copy( + os.path.join(machines_dir, "syslog.{}".format(machine)), + os.path.join(casetools, "mach_syslog"), + ) else: - safe_copy(os.path.join(machines_dir, "syslog.noop"), os.path.join(casetools, "mach_syslog")) + safe_copy( + os.path.join(machines_dir, "syslog.noop"), + os.path.join(casetools, "mach_syslog"), + ) safe_copy(os.path.join(toolsdir, "e3sm_compile_wrap.py"), casetools) @@ -1312,16 +1669,16 @@ def _create_caseroot_tools(self): if get_model() == "cesm": try: exefile = os.path.join(toolsdir, "archive_metadata") - destfile = os.path.join(self._caseroot,os.path.basename(exefile)) + destfile = os.path.join(self._caseroot, os.path.basename(exefile)) os.symlink(exefile, destfile) except Exception as e: logger.warning("FAILED to set up exefiles: {}".format(str(e))) def _create_caseroot_sourcemods(self): components = self.get_compset_components() - components.extend(['share', 'drv']) - if self._comp_interface == 'nuopc': - components.extend(['cdeps']) + components.extend(["share", "drv"]) + if self._comp_interface == "nuopc": + components.extend(["cdeps"]) readme_message = """Put source mods for the {component} library in this directory. 
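_create_caseroot_tools above is essentially a fault-tolerant symlink farm: each script is linked from the CIME Tools directory into the case, and failures are logged as warnings rather than raised so that a partially provisioned case can still be inspected. A condensed sketch of the pattern (function name and paths hypothetical):

import logging
import os

logger = logging.getLogger(__name__)

def link_tools(toolsdir, caseroot, names):
    """Symlink each tool into caseroot, warning instead of aborting on failure."""
    for name in names:
        src = os.path.join(toolsdir, name)
        dst = os.path.join(caseroot, name)
        try:
            os.symlink(src, dst)
        except OSError as err:  # e.g. dst already exists, or FS lacks symlinks
            logger.warning("FAILED to set up %s: %s", dst, err)

link_tools("/path/to/cime/CIME/Tools", "/path/to/caseroot",
           ["case.setup", "case.build", "case.submit"])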
@@ -1335,9 +1692,19 @@ def _create_caseroot_sourcemods(self): """ for component in components: - directory = os.path.join(self._caseroot,"SourceMods","src.{}".format(component)) + directory = os.path.join( + self._caseroot, "SourceMods", "src.{}".format(component) + ) # don't make SourceMods for stub components - if not os.path.exists(directory) and component not in ('satm','slnd','sice','socn','sesp','sglc','swav'): + if not os.path.exists(directory) and component not in ( + "satm", + "slnd", + "sice", + "socn", + "sesp", + "sglc", + "swav", + ): os.makedirs(directory) # Besides giving some information on SourceMods, this # README file serves one other important purpose: By @@ -1350,9 +1717,11 @@ def _create_caseroot_sourcemods(self): fd.write(readme_message.format(component=component)) if get_model() == "cesm": - # Note: this is CESM specific, given that we are referencing cism explitly + # Note: this is CESM specific, given that we are referencing cism explicitly if "cism" in components: - directory = os.path.join(self._caseroot, "SourceMods", "src.cism", "source_cism") + directory = os.path.join( + self._caseroot, "SourceMods", "src.cism", "source_cism" + ) if not os.path.exists(directory): os.makedirs(directory) readme_file = os.path.join(directory, "README") @@ -1382,28 +1751,51 @@ def create_caseroot(self, clone=False): # Open a new README.case file in $self._caseroot append_status(" ".join(sys.argv), "README.case", caseroot=self._caseroot) compset_info = "Compset longname is {}".format(self.get_value("COMPSET")) - append_status(compset_info, - "README.case", caseroot=self._caseroot) - append_status("Compset specification file is {}".format(self.get_value("COMPSETS_SPEC_FILE")), - "README.case", caseroot=self._caseroot) - append_status("Pes specification file is {}".format(self.get_value("PES_SPEC_FILE")), - "README.case", caseroot=self._caseroot) + append_status(compset_info, "README.case", caseroot=self._caseroot) + append_status( + "Compset specification file is {}".format( + self.get_value("COMPSETS_SPEC_FILE") + ), + "README.case", + caseroot=self._caseroot, + ) + append_status( + "Pes specification file is {}".format(self.get_value("PES_SPEC_FILE")), + "README.case", + caseroot=self._caseroot, + ) if "forcing" in self._component_description: - append_status("Forcing is {}".format(self._component_description["forcing"]) ,"README.case", caseroot=self._caseroot) + append_status( + "Forcing is {}".format(self._component_description["forcing"]), + "README.case", + caseroot=self._caseroot, + ) for component_class in self._component_classes: - if component_class in self._component_description and \ - len(self._component_description[component_class])>0: - append_status("Component {} is {}".format(component_class, self._component_description[component_class]),"README.case", caseroot=self._caseroot) + if ( + component_class in self._component_description + and len(self._component_description[component_class]) > 0 + ): + append_status( + "Component {} is {}".format( + component_class, self._component_description[component_class] + ), + "README.case", + caseroot=self._caseroot, + ) if component_class == "CPL": - append_status("Using %s coupler instances" % - (self.get_value("NINST_CPL")), - "README.case", caseroot=self._caseroot) + append_status( + "Using %s coupler instances" % (self.get_value("NINST_CPL")), + "README.case", + caseroot=self._caseroot, + ) continue comp_grid = "{}_GRID".format(component_class) - append_status("{} is {}".format(comp_grid,self.get_value(comp_grid)), -
"README.case", caseroot=self._caseroot) + append_status( + "{} is {}".format(comp_grid, self.get_value(comp_grid)), + "README.case", + caseroot=self._caseroot, + ) comp = str(self.get_value("COMP_{}".format(component_class))) user_mods = self._get_comp_user_mods(comp) if user_mods is not None: @@ -1443,7 +1835,7 @@ def apply_user_mods(self, user_mods_dirs=None): if os.path.isabs(user_mods): user_mods_path = user_mods else: - user_mods_path = self.get_value('USER_MODS_DIR') + user_mods_path = self.get_value("USER_MODS_DIR") user_mods_path = os.path.join(user_mods_path, user_mods) apply_user_mods(self._caseroot, user_mods_path) @@ -1458,21 +1850,41 @@ def _get_comp_user_mods(self, component): Returns None if no value was found, or if the value is an empty string. """ comp_user_mods = self.get_value("{}_USER_MODS".format(component.upper())) - #pylint: disable=no-member + # pylint: disable=no-member if comp_user_mods is None or comp_user_mods == "" or comp_user_mods.isspace(): return None else: return comp_user_mods - def submit_jobs(self, no_batch=False, job=None, skip_pnl=None, prereq=None, allow_fail=False, - resubmit_immediate=False, mail_user=None, mail_type=None, batch_args=None, - dry_run=False, workflow=True): - env_batch = self.get_env('batch') - result = env_batch.submit_jobs(self, no_batch=no_batch, skip_pnl=skip_pnl, - job=job, user_prereq=prereq, allow_fail=allow_fail, - resubmit_immediate=resubmit_immediate, - mail_user=mail_user, mail_type=mail_type, - batch_args=batch_args, dry_run=dry_run, workflow=workflow) + def submit_jobs( + self, + no_batch=False, + job=None, + skip_pnl=None, + prereq=None, + allow_fail=False, + resubmit_immediate=False, + mail_user=None, + mail_type=None, + batch_args=None, + dry_run=False, + workflow=True, + ): + env_batch = self.get_env("batch") + result = env_batch.submit_jobs( + self, + no_batch=no_batch, + skip_pnl=skip_pnl, + job=job, + user_prereq=prereq, + allow_fail=allow_fail, + resubmit_immediate=resubmit_immediate, + mail_user=mail_user, + mail_type=mail_type, + batch_args=batch_args, + dry_run=dry_run, + workflow=workflow, + ) return result def get_job_info(self): @@ -1484,7 +1896,7 @@ def get_job_info(self): return {} else: result = {} - job_infos = xml_job_ids.split(", ") # pylint: disable=no-member + job_infos = xml_job_ids.split(", ") # pylint: disable=no-member for job_info in job_infos: jobname, jobid = job_info.split(":") result[jobname] = jobid @@ -1494,17 +1906,23 @@ def get_job_info(self): def report_job_status(self): jobmap = self.get_job_info() if not jobmap: - logger.info("No job ids associated with this case. Either case.submit was not run or was run with no-batch") + logger.info( + "No job ids associated with this case. Either case.submit was not run or was run with no-batch" + ) else: for jobname, jobid in list(jobmap.items()): status = self.get_env("batch").get_status(jobid) if status: logger.info("{}: {}".format(jobname, status)) else: - logger.info("{}: Unable to get status. Job may be complete already.".format(jobname)) + logger.info( + "{}: Unable to get status. 
Job may be complete already.".format( + jobname + ) + ) def cancel_batch_jobs(self, jobids): - env_batch = self.get_env('batch') + env_batch = self.get_env("batch") for jobid in jobids: success = env_batch.cancel_job(jobid) if not success: @@ -1514,40 +1932,61 @@ def get_mpirun_cmd(self, job=None, allow_unresolved_envvars=True, overrides=None if job is None: job = self.get_primary_job() - env_mach_specific = self.get_env('mach_specific') + env_mach_specific = self.get_env("mach_specific") run_exe = env_mach_specific.get_value("run_exe") run_misc_suffix = env_mach_specific.get_value("run_misc_suffix") run_misc_suffix = "" if run_misc_suffix is None else run_misc_suffix mpirun_cmd_override = self.get_value("MPI_RUN_COMMAND") if mpirun_cmd_override not in ["", None, "UNSET"]: - return self.get_resolved_value(mpirun_cmd_override + " " + run_exe + " " + run_misc_suffix) + return self.get_resolved_value( + mpirun_cmd_override + " " + run_exe + " " + run_misc_suffix + ) queue = self.get_value("JOB_QUEUE", subgroup=job) # Things that will have to be matched against mpirun element attributes mpi_attribs = { - "compiler" : self.get_value("COMPILER"), - "mpilib" : self.get_value("MPILIB"), - "threaded" : self.get_build_threaded(), - "queue" : queue, - "unit_testing" : False, - "comp_interface" : self._comp_interface - } - - executable, mpi_arg_list, custom_run_exe, custom_run_misc_suffix = env_mach_specific.get_mpirun(self, mpi_attribs, job) + "compiler": self.get_value("COMPILER"), + "mpilib": self.get_value("MPILIB"), + "threaded": self.get_build_threaded(), + "queue": queue, + "unit_testing": False, + "comp_interface": self._comp_interface, + } + + ( + executable, + mpi_arg_list, + custom_run_exe, + custom_run_misc_suffix, + ) = env_mach_specific.get_mpirun(self, mpi_attribs, job) if custom_run_exe: - logger.info('Using a custom run_exe {}'.format(custom_run_exe)) + logger.info("Using a custom run_exe {}".format(custom_run_exe)) run_exe = custom_run_exe if custom_run_misc_suffix: - logger.info('Using a custom run_misc_suffix {}'.format(custom_run_misc_suffix)) + logger.info( + "Using a custom run_misc_suffix {}".format(custom_run_misc_suffix) + ) run_misc_suffix = custom_run_misc_suffix # special case for aprun - if executable is not None and "aprun" in executable and not "theta" in self.get_value("MACH"): - aprun_args, num_nodes = get_aprun_cmd_for_case(self, run_exe, overrides=overrides)[0:2] - if job in ("case.run","case.test"): - expect( (num_nodes + self.spare_nodes) == self.num_nodes, "Not using optimized num nodes") - return self.get_resolved_value(executable + aprun_args + " " + run_misc_suffix, allow_unresolved_envvars=allow_unresolved_envvars) + if ( + executable is not None + and "aprun" in executable + and not "theta" in self.get_value("MACH") + ): + aprun_args, num_nodes = get_aprun_cmd_for_case( + self, run_exe, overrides=overrides + )[0:2] + if job in ("case.run", "case.test"): + expect( + (num_nodes + self.spare_nodes) == self.num_nodes, + "Not using optimized num nodes", + ) + return self.get_resolved_value( + executable + aprun_args + " " + run_misc_suffix, + allow_unresolved_envvars=allow_unresolved_envvars, + ) else: mpi_arg_string = " ".join(mpi_arg_list) @@ -1560,15 +1999,23 @@ def get_mpirun_cmd(self, job=None, allow_unresolved_envvars=True, overrides=None # 1. this setting is tested on Casper only and may not work on other machines # 2. 
need to be revisited in the future for a more adaptable implementation rundir = self.get_value("RUNDIR") - output_name = rundir+'/set_device_rank.sh' + output_name = rundir + "/set_device_rank.sh" mpi_arg_string = mpi_arg_string + " " + output_name + " " - return self.get_resolved_value("{} {} {} {}".format(executable if executable is not None else "", mpi_arg_string, run_exe, run_misc_suffix), allow_unresolved_envvars=allow_unresolved_envvars) + return self.get_resolved_value( + "{} {} {} {}".format( + executable if executable is not None else "", + mpi_arg_string, + run_exe, + run_misc_suffix, + ), + allow_unresolved_envvars=allow_unresolved_envvars, + ) def set_model_version(self, model): version = "unknown" srcroot = self.get_value("SRCROOT") - version = get_current_commit(True, srcroot, tag=(model=="cesm")) + version = get_current_commit(True, srcroot, tag=(model == "cesm")) self.set_value("MODEL_VERSION", version) @@ -1584,8 +2031,7 @@ def load_env(self, reset=False, job=None, verbose=False): os.environ["OMP_NUM_THREADS"] = str(self.thread_count) env_module = self.get_env("mach_specific") self._loaded_envs = env_module.load_env(self, job=job, verbose=verbose) - self._loaded_envs.append(("OMP_NUM_THREADS", - os.environ["OMP_NUM_THREADS"])) + self._loaded_envs.append(("OMP_NUM_THREADS", os.environ["OMP_NUM_THREADS"])) self._is_env_loaded = True return self._loaded_envs @@ -1620,20 +2066,36 @@ def _check_testlists(self, compset_alias, grid_name, files): # Only collect supported tests as this _check_testlists is only # called if run_unsupported is False. tests = Testlist(tests_spec_file, files) - testlist = tests.get_tests(compset=compset_alias, grid=grid_name, supported_only=True) + testlist = tests.get_tests( + compset=compset_alias, grid=grid_name, supported_only=True + ) test_categories = ["prealpha", "prebeta", "test_release"] for test in testlist: - if test["category"] in test_categories or "aux_" in test["category"] \ - or get_cime_default_driver() in test["category"]: + if ( + test["category"] in test_categories + or "aux_" in test["category"] + or get_cime_default_driver() in test["category"] + ): testcnt += 1 if testcnt > 0: - logger.warning("\n*********************************************************************************************************************************") - logger.warning("This compset and grid combination is not scientifically supported, however it is used in {:d} tests.".format(testcnt)) - logger.warning("*********************************************************************************************************************************\n") + logger.warning( + "\n*********************************************************************************************************************************" + ) + logger.warning( + "This compset and grid combination is not scientifically supported, however it is used in {:d} tests.".format( + testcnt + ) + ) + logger.warning( + "*********************************************************************************************************************************\n" + ) else: - expect(False, "\nThis compset and grid combination is untested in CESM. " - "Override this warning with the --run-unsupported option to create_newcase.", - error_prefix="STOP: ") + expect( + False, + "\nThis compset and grid combination is untested in CESM. 
" + "Override this warning with the --run-unsupported option to create_newcase.", + error_prefix="STOP: ", + ) def set_file(self, xmlfile): """ @@ -1659,7 +2121,11 @@ def set_file(self, xmlfile): elif ftype == "env_case.xml": new_env_file = EnvCase(infile=xmlfile, components=components) elif ftype == "env_mach_pes.xml": - new_env_file = EnvMachPes(infile=xmlfile, components=components, comp_interface=self._comp_interface) + new_env_file = EnvMachPes( + infile=xmlfile, + components=components, + comp_interface=self._comp_interface, + ) elif ftype == "env_batch.xml": new_env_file = EnvBatch(infile=xmlfile) elif ftype == "env_workflow.xml": @@ -1669,7 +2135,9 @@ def set_file(self, xmlfile): elif ftype == "env_archive.xml": new_env_file = EnvArchive(infile=xmlfile) elif ftype == "env_mach_specific.xml": - new_env_file = EnvMachSpecific(infile=xmlfile, comp_interface=self._comp_interface) + new_env_file = EnvMachSpecific( + infile=xmlfile, comp_interface=self._comp_interface + ) else: expect(False, "No match found for file type {}".format(ftype)) @@ -1683,7 +2151,9 @@ def set_file(self, xmlfile): break - expect(new_env_file is not None, "No match found for file type {}".format(ftype)) + expect( + new_env_file is not None, "No match found for file type {}".format(ftype) + ) self._files = [new_env_file] def update_env(self, new_object, env_file, blow_away=False): @@ -1692,7 +2162,10 @@ def update_env(self, new_object, env_file, blow_away=False): """ old_object = self.get_env(env_file) if not blow_away: - expect(not old_object.needsrewrite, "Potential loss of unflushed changes in {}".format(env_file)) + expect( + not old_object.needsrewrite, + "Potential loss of unflushed changes in {}".format(env_file), + ) new_object.filename = old_object.filename if old_object in self._env_entryid_files: @@ -1712,7 +2185,7 @@ def get_latest_cpl_log(self, coupler_log_path=None, cplname="cpl"): if coupler_log_path is None: coupler_log_path = self.get_value("RUNDIR") cpllog = None - cpllogs = glob.glob(os.path.join(coupler_log_path, '{}.log.*'.format(cplname))) + cpllogs = glob.glob(os.path.join(coupler_log_path, "{}.log.*".format(cplname))) if cpllogs: cpllog = max(cpllogs, key=os.path.getctime) return cpllog @@ -1735,21 +2208,28 @@ def record_cmd(self, cmd=None, init=False): # and continuing to execute commands lines.append("set -e\n\n") lines.append("# Created {}\n\n".format(ctime)) - lines.append("CASEDIR=\"{}\"\n\n".format(caseroot)) - lines.append("cd \"${CASEDIR}\"\n\n") + lines.append('CASEDIR="{}"\n\n'.format(caseroot)) + lines.append('cd "${CASEDIR}"\n\n') # Ensure program path is absolute cmd[0] = re.sub("^./", "{}/scripts/".format(cimeroot), cmd[0]) else: - expect(caseroot and os.path.isdir(caseroot) and os.path.isfile(os.path.join(caseroot,"env_case.xml")), "Directory {} does not appear to be a valid case directory".format(caseroot)) + expect( + caseroot + and os.path.isdir(caseroot) + and os.path.isfile(os.path.join(caseroot, "env_case.xml")), + "Directory {} does not appear to be a valid case directory".format( + caseroot + ), + ) cmd = " ".join(cmd) # Replace instances of caseroot with variable - cmd = re.sub(caseroot, "\"${CASEDIR}\"", cmd) + cmd = re.sub(caseroot, '"${CASEDIR}"', cmd) lines_len = len(lines) - lines.insert(lines_len-1 if init else lines_len, "{}\n\n".format(cmd)) + lines.insert(lines_len - 1 if init else lines_len, "{}\n\n".format(cmd)) try: with open(os.path.join(caseroot, "replay.sh"), "a") as fd: @@ -1757,15 +2237,36 @@ def record_cmd(self, cmd=None, init=False): except 
PermissionError: logger.warning("Could not write to 'replay.sh' script") - def create(self, casename, srcroot, compset_name, grid_name, - user_mods_dirs=None, machine_name=None, - project=None, pecount=None, compiler=None, mpilib=None, - pesfile=None, gridfile=None, - multi_driver=False, ninst=1, test=False, - walltime=None, queue=None, output_root=None, - run_unsupported=False, answer=None, - input_dir=None, driver=None, workflowid="default", non_local=False, - extra_machines_dir=None, case_group=None, ngpus_per_node=0): + def create( + self, + casename, + srcroot, + compset_name, + grid_name, + user_mods_dirs=None, + machine_name=None, + project=None, + pecount=None, + compiler=None, + mpilib=None, + pesfile=None, + gridfile=None, + multi_driver=False, + ninst=1, + test=False, + walltime=None, + queue=None, + output_root=None, + run_unsupported=False, + answer=None, + input_dir=None, + driver=None, + workflowid="default", + non_local=False, + extra_machines_dir=None, + case_group=None, + ngpus_per_node=0, + ): try: # Set values for env_case.xml self.set_lookup_value("CASE", os.path.basename(casename)) @@ -1784,35 +2285,57 @@ def create(self, casename, srcroot, compset_name, grid_name, if user_mods_dirs: found_um_config_grids = False for this_user_mods_dir in user_mods_dirs: - um_config_grids = os.path.join(this_user_mods_dir,"config_grids.xml") + um_config_grids = os.path.join( + this_user_mods_dir, "config_grids.xml" + ) if os.path.exists(um_config_grids): if gridfile: # Either a gridfile was found in an earlier user_mods # directory or a gridfile was given on the command line. The # first case (which would set found_um_config_grids to True) # is an error; the second case just generates a warning. - expect(not found_um_config_grids, - "Cannot handle multiple usermods directories with config_grids.xml files: {} and {}".format( - gridfile, um_config_grids)) - logger.warning("A config_grids file was found in {} but also provided on the command line {}, command line takes precedence".format(um_config_grids, gridfile)) + expect( + not found_um_config_grids, + "Cannot handle multiple usermods directories with config_grids.xml files: {} and {}".format( + gridfile, um_config_grids + ), + ) + logger.warning( + "A config_grids file was found in {} but also provided on the command line {}, command line takes precedence".format( + um_config_grids, gridfile + ) + ) else: gridfile = um_config_grids found_um_config_grids = True - # Configure the Case - self.configure(compset_name, grid_name, machine_name=machine_name, - project=project, - pecount=pecount, compiler=compiler, mpilib=mpilib, - pesfile=pesfile, gridfile=gridfile, - multi_driver=multi_driver, ninst=ninst, test=test, - walltime=walltime, queue=queue, - output_root=output_root, - run_unsupported=run_unsupported, answer=answer, - input_dir=input_dir, driver=driver, - workflowid=workflowid, non_local=non_local, - extra_machines_dir=extra_machines_dir, case_group=case_group, - ngpus_per_node=ngpus_per_node) + self.configure( + compset_name, + grid_name, + machine_name=machine_name, + project=project, + pecount=pecount, + compiler=compiler, + mpilib=mpilib, + pesfile=pesfile, + gridfile=gridfile, + multi_driver=multi_driver, + ninst=ninst, + test=test, + walltime=walltime, + queue=queue, + output_root=output_root, + run_unsupported=run_unsupported, + answer=answer, + input_dir=input_dir, + driver=driver, + workflowid=workflowid, + non_local=non_local, + extra_machines_dir=extra_machines_dir, + case_group=case_group, + 
ngpus_per_node=ngpus_per_node, + ) self.create_caseroot() @@ -1825,7 +2348,11 @@ def create(self, casename, srcroot, compset_name, grid_name, except Exception: if os.path.exists(self._caseroot): if not logger.isEnabledFor(logging.DEBUG) and not test: - logger.warning("Failed to setup case, removing {}\nUse --debug to force me to keep caseroot".format(self._caseroot)) + logger.warning( + "Failed to setup case, removing {}\nUse --debug to force me to keep caseroot".format( + self._caseroot + ) + ) shutil.rmtree(self._caseroot) else: logger.warning("Leaving broken case dir {}".format(self._caseroot)) @@ -1833,8 +2360,7 @@ def create(self, casename, srcroot, compset_name, grid_name, raise def new_hash(self): - """ Creates a hash - """ + """Creates a hash""" args = "".join(sys.argv) ctime = time.strftime("%Y-%m-%d %H:%M:%S") hostname = socket.getfqdn() @@ -1844,7 +2370,7 @@ def new_hash(self): return hashlib.sha256(data.encode()).hexdigest() - def is_save_timing_dir_project(self,project): + def is_save_timing_dir_project(self, project): """ Check whether the project is permitted to archive performance data in the location specified for the current machine @@ -1853,7 +2379,9 @@ def is_save_timing_dir_project(self,project): if not save_timing_dir_projects: return False else: - save_timing_dir_projects = save_timing_dir_projects.split(",") # pylint: disable=no-member + save_timing_dir_projects = save_timing_dir_projects.split( + "," + ) # pylint: disable=no-member for save_timing_dir_project in save_timing_dir_projects: regex = re.compile(save_timing_dir_project) if regex.match(project): @@ -1883,7 +2411,7 @@ def preview_run(self, write, job): job = self.get_first_job() job_id_to_cmd = self.submit_jobs(dry_run=True, job=job) - env_batch = self.get_env('batch') + env_batch = self.get_env("batch") for job_id, cmd in job_id_to_cmd: write(" FOR JOB: {}".format(job_id)) write(" ENV:") @@ -1901,5 +2429,5 @@ def preview_run(self, write, job): # env vars. 
overrides = env_batch.get_job_overrides(job_id, self) write(" MPIRUN (job={}):".format(job_id)) - write (" {}".format(self.get_resolved_value(overrides["mpirun"]))) + write(" {}".format(self.get_resolved_value(overrides["mpirun"]))) write("") diff --git a/CIME/case/case_clone.py b/CIME/case/case_clone.py index fcf7fe320fb..f829e13993c 100644 --- a/CIME/case/case_clone.py +++ b/CIME/case/case_clone.py @@ -4,15 +4,24 @@ import os, glob, shutil from CIME.XML.standard_module_setup import * from CIME.utils import expect, check_name, safe_copy, get_model -from CIME.simple_compare import compare_files -from CIME.locked_files import lock_file +from CIME.simple_compare import compare_files +from CIME.locked_files import lock_file from CIME.user_mod_support import apply_user_mods logger = logging.getLogger(__name__) -def create_clone(self, newcaseroot, keepexe=False, mach_dir=None, project=None, - cime_output_root=None, exeroot=None, rundir=None, - user_mods_dirs=None): + +def create_clone( + self, + newcaseroot, + keepexe=False, + mach_dir=None, + project=None, + cime_output_root=None, + exeroot=None, + rundir=None, + user_mods_dirs=None, +): """ Create a case clone @@ -24,11 +33,12 @@ def create_clone(self, newcaseroot, keepexe=False, mach_dir=None, project=None, cime_output_root = self.get_value("CIME_OUTPUT_ROOT") newcaseroot = os.path.abspath(newcaseroot) - expect(not os.path.isdir(newcaseroot), - "New caseroot directory {} already exists".format(newcaseroot)) + expect( + not os.path.isdir(newcaseroot), + "New caseroot directory {} already exists".format(newcaseroot), + ) newcasename = os.path.basename(newcaseroot) - expect(check_name(newcasename), - "New case name invalid {} ".format(newcasename)) + expect(check_name(newcasename), "New case name invalid {} ".format(newcasename)) newcase_cimeroot = os.path.abspath(get_cime_root()) # create clone from case to case @@ -36,13 +46,15 @@ def create_clone(self, newcaseroot, keepexe=False, mach_dir=None, project=None, if newcase_cimeroot != clone_cimeroot: logger.warning(" case CIMEROOT is {} ".format(newcase_cimeroot)) logger.warning(" clone CIMEROOT is {} ".format(clone_cimeroot)) - logger.warning(" It is NOT recommended to clone cases from different versions of CIME.") + logger.warning( + " It is NOT recommended to clone cases from different versions of CIME." + ) # *** create case object as deepcopy of clone object *** - if os.path.isdir(os.path.join(newcase_cimeroot,'share')) and get_model() == "cesm": - srcroot = newcase_cimeroot + if os.path.isdir(os.path.join(newcase_cimeroot, "share")) and get_model() == "cesm": + srcroot = newcase_cimeroot else: - srcroot = os.path.join(newcase_cimeroot,"..") + srcroot = os.path.join(newcase_cimeroot, "..") newcase = self.copy(newcasename, newcaseroot, newsrcroot=srcroot) with newcase: newcase.set_value("CIMEROOT", newcase_cimeroot) @@ -58,9 +70,12 @@ def create_clone(self, newcaseroot, keepexe=False, mach_dir=None, project=None, # try to make the new output directory and raise an exception # on any error other than directory already exists. if os.path.isdir(cime_output_root): - expect(os.access(cime_output_root, os.W_OK), "Directory {} is not writable " - "by this user. Use the --cime-output-root flag to provide a writable " - "scratch directory".format(cime_output_root)) + expect( + os.access(cime_output_root, os.W_OK), + "Directory {} is not writable " + "by this user. 
Use the --cime-output-root flag to provide a writable " + "scratch directory".format(cime_output_root), + ) else: if not os.path.isdir(cime_output_root): os.makedirs(cime_output_root) @@ -69,13 +84,17 @@ def create_clone(self, newcaseroot, keepexe=False, mach_dir=None, project=None, if keepexe: orig_exeroot = self.get_value("EXEROOT") newcase.set_value("EXEROOT", orig_exeroot) - newcase.set_value("BUILD_COMPLETE","TRUE") + newcase.set_value("BUILD_COMPLETE", "TRUE") orig_bld_complete = self.get_value("BUILD_COMPLETE") if not orig_bld_complete: - logger.warning("\nWARNING: Creating a clone with --keepexe before building the original case may cause PIO_TYPENAME to be invalid in the clone") - logger.warning("Avoid this message by building case one before you clone.\n") + logger.warning( + "\nWARNING: Creating a clone with --keepexe before building the original case may cause PIO_TYPENAME to be invalid in the clone" + ) + logger.warning( + "Avoid this message by building case one before you clone.\n" + ) else: - newcase.set_value("BUILD_COMPLETE","FALSE") + newcase.set_value("BUILD_COMPLETE", "FALSE") # set machdir if mach_dir is not None: @@ -83,8 +102,10 @@ def create_clone(self, newcaseroot, keepexe=False, mach_dir=None, project=None, # set exeroot and rundir if requested if exeroot is not None: - expect(not keepexe, "create_case_clone: if keepexe is True, " - "then exeroot cannot be set") + expect( + not keepexe, + "create_case_clone: if keepexe is True, " "then exeroot cannot be set", + ) newcase.set_value("EXEROOT", exeroot) if rundir is not None: newcase.set_value("RUNDIR", rundir) @@ -106,15 +127,19 @@ def create_clone(self, newcaseroot, keepexe=False, mach_dir=None, project=None, # but users may have broken links to modify files locally. In this case # copy the locally modified file. We only want to do this for files that # already exist in the clone. 
- #pylint: disable=protected-access - self._copy_user_modified_to_clone(self.get_value("CASEROOT"), newcase.get_value("CASEROOT")) - self._copy_user_modified_to_clone(self.get_value("CASETOOLS"), newcase.get_value("CASETOOLS")) + # pylint: disable=protected-access + self._copy_user_modified_to_clone( + self.get_value("CASEROOT"), newcase.get_value("CASEROOT") + ) + self._copy_user_modified_to_clone( + self.get_value("CASETOOLS"), newcase.get_value("CASETOOLS") + ) newcase.flush(flushall=True) # copy user_ files cloneroot = self.get_case_root() - files = glob.glob(cloneroot + '/user_*') + files = glob.glob(cloneroot + "/user_*") for item in files: safe_copy(item, newcaseroot) @@ -122,17 +147,19 @@ def create_clone(self, newcaseroot, keepexe=False, mach_dir=None, project=None, # copy SourceMod and Buildconf files # if symlinks exist, copy rather than follow links for casesub in ("SourceMods", "Buildconf"): - shutil.copytree(os.path.join(cloneroot, casesub), - os.path.join(newcaseroot, casesub), - symlinks=True) + shutil.copytree( + os.path.join(cloneroot, casesub), + os.path.join(newcaseroot, casesub), + symlinks=True, + ) # copy the postprocessing directory if it exists if os.path.isdir(os.path.join(cloneroot, "postprocess")): - shutil.copytree(os.path.join(cloneroot, "postprocess"), - os.path.join(newcaseroot, "postprocess"), - symlinks=True) - - + shutil.copytree( + os.path.join(cloneroot, "postprocess"), + os.path.join(newcaseroot, "postprocess"), + symlinks=True, + ) # lock env_case.xml in new case lock_file("env_case.xml", newcaseroot) @@ -143,8 +170,10 @@ def create_clone(self, newcaseroot, keepexe=False, mach_dir=None, project=None, if keepexe: # If keepexe CANNOT change any env_build.xml variables - so make a temporary copy of # env_build.xml and verify that it has not been modified - safe_copy(os.path.join(newcaseroot, "env_build.xml"), - os.path.join(newcaseroot, "LockedFiles", "env_build.xml")) + safe_copy( + os.path.join(newcaseroot, "env_build.xml"), + os.path.join(newcaseroot, "LockedFiles", "env_build.xml"), + ) # Now apply contents of all specified user_mods directories for one_user_mods_dir in user_mods_dirs: @@ -152,34 +181,46 @@ def create_clone(self, newcaseroot, keepexe=False, mach_dir=None, project=None, # Determine if env_build.xml has changed if keepexe: - success, comment = compare_files(os.path.join(newcaseroot, "env_build.xml"), - os.path.join(newcaseroot, "LockedFiles", "env_build.xml")) + success, comment = compare_files( + os.path.join(newcaseroot, "env_build.xml"), + os.path.join(newcaseroot, "LockedFiles", "env_build.xml"), + ) if not success: logger.warning(comment) shutil.rmtree(newcase_root) - expect(False, "env_build.xml cannot be changed via usermods if keepexe is an option: \n " - "Failed to clone case, removed {}\n".format(newcase_root)) + expect( + False, + "env_build.xml cannot be changed via usermods if keepexe is an option: \n " + "Failed to clone case, removed {}\n".format(newcase_root), + ) # if keep executable, then remove the new case SourceMods directory and link SourceMods to # the clone directory if keepexe: shutil.rmtree(os.path.join(newcase_root, "SourceMods")) - os.symlink(os.path.join(cloneroot, "SourceMods"), - os.path.join(newcase_root, "SourceMods")) + os.symlink( + os.path.join(cloneroot, "SourceMods"), + os.path.join(newcase_root, "SourceMods"), + ) # Update README.case - fclone = open(cloneroot + "/README.case", "r") - fnewcase = open(newcaseroot + "/README.case", "a") + fclone = open(cloneroot + "/README.case", "r") + fnewcase = 
open(newcaseroot + "/README.case", "a") fnewcase.write("\n *** original clone README follows ****") - fnewcase.write("\n " + fclone.read()) + fnewcase.write("\n " + fclone.read()) clonename = self.get_value("CASE") - logger.info(" Successfully created new case {} from clone case {} ".format(newcasename, clonename)) + logger.info( + " Successfully created new case {} from clone case {} ".format( + newcasename, clonename + ) + ) newcase.case_setup() return newcase + # pylint: disable=unused-argument def _copy_user_modified_to_clone(self, origpath, newpath): """ @@ -187,9 +228,11 @@ def _copy_user_modified_to_clone(self, origpath, newpath): link in origpath, copy origpath file to newpath """ for file_ in os.listdir(newpath): - if (os.path.islink(os.path.join(newpath, file_)) and - os.path.isfile(os.path.join(origpath, file_)) and - not os.path.islink(os.path.join(origpath, file_))): + if ( + os.path.islink(os.path.join(newpath, file_)) + and os.path.isfile(os.path.join(origpath, file_)) + and not os.path.islink(os.path.join(origpath, file_)) + ): logger.info("Copying user modified file {} to clone".format(file_)) os.unlink(os.path.join(newpath, file_)) safe_copy(os.path.join(origpath, file_), newpath) diff --git a/CIME/case/case_cmpgen_namelists.py b/CIME/case/case_cmpgen_namelists.py index 5252c858592..c76ce826519 100644 --- a/CIME/case/case_cmpgen_namelists.py +++ b/CIME/case/case_cmpgen_namelists.py @@ -15,58 +15,80 @@ logger = logging.getLogger(__name__) -def _do_full_nl_comp(case, test, compare_name, baseline_root=None): - test_dir = case.get_value("CASEROOT") - casedoc_dir = os.path.join(test_dir, "CaseDocs") - baseline_root = case.get_value("BASELINE_ROOT") if baseline_root is None else baseline_root - all_match = True - baseline_dir = os.path.join(baseline_root, compare_name, test) +def _do_full_nl_comp(case, test, compare_name, baseline_root=None): + test_dir = case.get_value("CASEROOT") + casedoc_dir = os.path.join(test_dir, "CaseDocs") + baseline_root = ( + case.get_value("BASELINE_ROOT") if baseline_root is None else baseline_root + ) + + all_match = True + baseline_dir = os.path.join(baseline_root, compare_name, test) baseline_casedocs = os.path.join(baseline_dir, "CaseDocs") # Start off by comparing everything in CaseDocs except a few arbitrary files (ugh!) 
# TODO: Namelist files should have consistent suffix - all_items_to_compare = [item for item in glob.glob("{}/*".format(casedoc_dir))\ - if "README" not in os.path.basename(item)\ - and not item.endswith("doc")\ - and not item.endswith("prescribed")\ - and not os.path.basename(item).startswith(".")] + all_items_to_compare = [ + item + for item in glob.glob("{}/*".format(casedoc_dir)) + if "README" not in os.path.basename(item) + and not item.endswith("doc") + and not item.endswith("prescribed") + and not os.path.basename(item).startswith(".") + ] comments = "NLCOMP\n" for item in all_items_to_compare: - baseline_counterpart = os.path.join(baseline_casedocs \ - if os.path.dirname(item).endswith("CaseDocs") \ - else baseline_dir,os.path.basename(item)) + baseline_counterpart = os.path.join( + baseline_casedocs + if os.path.dirname(item).endswith("CaseDocs") + else baseline_dir, + os.path.basename(item), + ) if not os.path.exists(baseline_counterpart): comments += "Missing baseline namelist '{}'\n".format(baseline_counterpart) all_match = False else: if item.endswith("runconfig") or item.endswith("runseq"): - success, current_comments = compare_runconfigfiles(baseline_counterpart, item, test) + success, current_comments = compare_runconfigfiles( + baseline_counterpart, item, test + ) elif is_namelist_file(item): - success, current_comments = compare_namelist_files(baseline_counterpart, item, test) + success, current_comments = compare_namelist_files( + baseline_counterpart, item, test + ) else: - success, current_comments = compare_files(baseline_counterpart, item, test) + success, current_comments = compare_files( + baseline_counterpart, item, test + ) all_match &= success if not success: - comments += "Comparison failed between '{}' with '{}'\n".format(item, baseline_counterpart) + comments += "Comparison failed between '{}' with '{}'\n".format( + item, baseline_counterpart + ) comments += current_comments logging.info(comments) return all_match, comments + def _do_full_nl_gen_impl(case, test, generate_name, baseline_root=None): - test_dir = case.get_value("CASEROOT") - casedoc_dir = os.path.join(test_dir, "CaseDocs") - baseline_root = case.get_value("BASELINE_ROOT") if baseline_root is None else baseline_root + test_dir = case.get_value("CASEROOT") + casedoc_dir = os.path.join(test_dir, "CaseDocs") + baseline_root = ( + case.get_value("BASELINE_ROOT") if baseline_root is None else baseline_root + ) - baseline_dir = os.path.join(baseline_root, generate_name, test) + baseline_dir = os.path.join(baseline_root, generate_name, test) baseline_casedocs = os.path.join(baseline_dir, "CaseDocs") if not os.path.isdir(baseline_dir): - os.makedirs(baseline_dir, stat.S_IRWXU | stat.S_IRWXG | stat.S_IXOTH | stat.S_IROTH) + os.makedirs( + baseline_dir, stat.S_IRWXU | stat.S_IRWXG | stat.S_IXOTH | stat.S_IROTH + ) if os.path.isdir(baseline_casedocs): shutil.rmtree(baseline_casedocs) @@ -75,16 +97,26 @@ def _do_full_nl_gen_impl(case, test, generate_name, baseline_root=None): for item in glob.glob(os.path.join(test_dir, "user_nl*")): preexisting_baseline = os.path.join(baseline_dir, os.path.basename(item)) - if (os.path.exists(preexisting_baseline)): + if os.path.exists(preexisting_baseline): os.remove(preexisting_baseline) safe_copy(item, baseline_dir, preserve_meta=False) + def _do_full_nl_gen(case, test, generate_name, baseline_root=None): with SharedArea(): _do_full_nl_gen_impl(case, test, generate_name, baseline_root=baseline_root) -def case_cmpgen_namelists(self, compare=False, generate=False, 
compare_name=None, generate_name=None, baseline_root=None, logfile_name="TestStatus.log"): + +def case_cmpgen_namelists( + self, + compare=False, + generate=False, + compare_name=None, + generate_name=None, + baseline_root=None, + logfile_name="TestStatus.log", +): expect(self.get_value("TEST"), "Only makes sense to run this for a test case") caseroot, casebaseid = self.get_value("CASEROOT"), self.get_value("CASEBASEID") @@ -110,20 +142,31 @@ def case_cmpgen_namelists(self, compare=False, generate=False, compare_name=None # baseline operations which may not directly impact the functioning of the viability of this case if compare and not compare_name: compare_name = self.get_value("BASELINE_NAME_CMP") - expect(compare_name, "Was asked to do baseline compare but unable to determine baseline name") - logging.info("Comparing namelists with baselines '{}'".format(compare_name)) + expect( + compare_name, + "Was asked to do baseline compare but unable to determine baseline name", + ) + logging.info( + "Comparing namelists with baselines '{}'".format(compare_name) + ) if generate and not generate_name: generate_name = self.get_value("BASELINE_NAME_GEN") - expect(generate_name, "Was asked to do baseline generation but unable to determine baseline name") - logging.info("Generating namelists to baselines '{}'".format(generate_name)) + expect( + generate_name, + "Was asked to do baseline generation but unable to determine baseline name", + ) + logging.info( + "Generating namelists to baselines '{}'".format(generate_name) + ) success = True output = "" if compare: - success, output = _do_full_nl_comp(self, test_name, compare_name, baseline_root) + success, output = _do_full_nl_comp( + self, test_name, compare_name, baseline_root + ) if not success and ts.get_status(RUN_PHASE) is not None: - run_warn = \ -"""NOTE: It is not necessarily safe to compare namelists after RUN + run_warn = """NOTE: It is not necessarily safe to compare namelists after RUN phase has completed. Running a case can pollute namelists. 
The namelists kept in the baselines are pre-RUN namelists.""" output += run_warn @@ -133,11 +176,15 @@ def case_cmpgen_namelists(self, compare=False, generate=False, compare_name=None except Exception: success = False ts.set_status(NAMELIST_PHASE, TEST_FAIL_STATUS) - warn = "Exception during namelist operations:\n{}\n{}".format(sys.exc_info()[1], traceback.format_exc()) + warn = "Exception during namelist operations:\n{}\n{}".format( + sys.exc_info()[1], traceback.format_exc() + ) output += warn logging.warning(warn) finally: - ts.set_status(NAMELIST_PHASE, TEST_PASS_STATUS if success else TEST_FAIL_STATUS) + ts.set_status( + NAMELIST_PHASE, TEST_PASS_STATUS if success else TEST_FAIL_STATUS + ) try: append_status(output, logfile_name, caseroot=caseroot) except IOError: diff --git a/CIME/case/case_run.py b/CIME/case/case_run.py index 1ecd1715c92..17ca1b02624 100644 --- a/CIME/case/case_run.py +++ b/CIME/case/case_run.py @@ -2,11 +2,11 @@ case_run is a member of Class Case '""" from CIME.XML.standard_module_setup import * -from CIME.utils import gzip_existing_file, new_lid, run_and_log_case_status -from CIME.utils import run_sub_or_cmd, append_status, safe_copy, model_log, CIMEError -from CIME.utils import get_model, batch_jobid -from CIME.get_timing import get_timing -from CIME.provenance import save_prerun_provenance, save_postrun_provenance +from CIME.utils import gzip_existing_file, new_lid, run_and_log_case_status +from CIME.utils import run_sub_or_cmd, append_status, safe_copy, model_log, CIMEError +from CIME.utils import get_model, batch_jobid +from CIME.get_timing import get_timing +from CIME.provenance import save_prerun_provenance, save_postrun_provenance import shutil, time, sys, os, glob @@ -14,11 +14,11 @@ ############################################################################### def _pre_run_check(case, lid, skip_pnl=False, da_cycle=0): -############################################################################### + ############################################################################### # Pre run initialization code.. if da_cycle > 0: - case.create_namelists(component='cpl') + case.create_namelists(component="cpl") return caseroot = case.get_value("CASEROOT") @@ -26,8 +26,8 @@ def _pre_run_check(case, lid, skip_pnl=False, da_cycle=0): rundir = case.get_value("RUNDIR") if case.get_value("TESTCASE") == "PFS": - env_mach_pes = os.path.join(caseroot,"env_mach_pes.xml") - safe_copy(env_mach_pes,"{}.{}".format(env_mach_pes, lid)) + env_mach_pes = os.path.join(caseroot, "env_mach_pes.xml") + safe_copy(env_mach_pes, "{}.{}".format(env_mach_pes, lid)) # check for locked files, may impact BUILD_COMPLETE skip = None @@ -38,8 +38,10 @@ def _pre_run_check(case, lid, skip_pnl=False, da_cycle=0): build_complete = case.get_value("BUILD_COMPLETE") # check that build is done - expect(build_complete, - "BUILD_COMPLETE is not true\nPlease rebuild the model interactively") + expect( + build_complete, + "BUILD_COMPLETE is not true\nPlease rebuild the model interactively", + ) logger.debug("build complete is {} ".format(build_complete)) # load the module environment... 
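The hunks that follow in case_run.py repeat one transformation dozens of times: a long single-line `model_log(...)` call becomes a multi-line call with one argument per line and a trailing comma. That is Black's standard output at its default 88-column limit, so any of these hunks can be reproduced locally; a sketch using Black's Python API (assumes `pip install black`; `format_str` and `Mode` are public in recent Black releases):

    import black

    src = (
        'model_log("e3sm", logger, "{} NAMELIST CREATION BEGINS HERE"'
        '.format(time.strftime("%Y-%m-%d %H:%M:%S")))\n'
    )
    # Prints the call reflowed one argument per line, matching the hunks below.
    print(black.format_str(src, mode=black.Mode()))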
@@ -54,28 +56,51 @@ def _pre_run_check(case, lid, skip_pnl=False, da_cycle=0): # This needs to be done everytime the LID changes in order for log files to be set up correctly # The following also needs to be called in case a user changes a user_nl_xxx file OR an env_run.xml # variable while the job is in the queue - model_log("e3sm", logger, "{} NAMELIST CREATION BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S"))) + model_log( + "e3sm", + logger, + "{} NAMELIST CREATION BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) if skip_pnl: - case.create_namelists(component='cpl') + case.create_namelists(component="cpl") else: logger.info("Generating namelists for {}".format(caseroot)) case.create_namelists() - model_log("e3sm", logger, "{} NAMELIST CREATION HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S"))) + model_log( + "e3sm", + logger, + "{} NAMELIST CREATION HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) - logger.info("-------------------------------------------------------------------------") + logger.info( + "-------------------------------------------------------------------------" + ) logger.info(" - Prestage required restarts into {}".format(rundir)) - logger.info(" - Case input data directory (DIN_LOC_ROOT) is {} ".format(din_loc_root)) + logger.info( + " - Case input data directory (DIN_LOC_ROOT) is {} ".format(din_loc_root) + ) logger.info(" - Checking for required input datasets in DIN_LOC_ROOT") - logger.info("-------------------------------------------------------------------------") + logger.info( + "-------------------------------------------------------------------------" + ) + ############################################################################### def _run_model_impl(case, lid, skip_pnl=False, da_cycle=0): -############################################################################### + ############################################################################### - model_log("e3sm", logger, "{} PRE_RUN_CHECK BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S"))) + model_log( + "e3sm", + logger, + "{} PRE_RUN_CHECK BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) _pre_run_check(case, lid, skip_pnl=skip_pnl, da_cycle=da_cycle) - model_log("e3sm", logger, "{} PRE_RUN_CHECK HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S"))) + model_log( + "e3sm", + logger, + "{} PRE_RUN_CHECK HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) model = case.get_value("MODEL") @@ -112,20 +137,39 @@ def _run_model_impl(case, lid, skip_pnl=False, da_cycle=0): while loop: loop = False - model_log("e3sm", logger, "{} SAVE_PRERUN_PROVENANCE BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S"))) + model_log( + "e3sm", + logger, + "{} SAVE_PRERUN_PROVENANCE BEGINS HERE".format( + time.strftime("%Y-%m-%d %H:%M:%S") + ), + ) save_prerun_provenance(case) - model_log("e3sm", logger, "{} SAVE_PRERUN_PROVENANCE HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S"))) - - model_log("e3sm", logger, "{} MODEL EXECUTION BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S"))) + model_log( + "e3sm", + logger, + "{} SAVE_PRERUN_PROVENANCE HAS FINISHED".format( + time.strftime("%Y-%m-%d %H:%M:%S") + ), + ) + + model_log( + "e3sm", + logger, + "{} MODEL EXECUTION BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) run_func = lambda: run_cmd_no_fail(cmd, from_dir=rundir) case.flush() try: - run_and_log_case_status(run_func, "model execution", - custom_starting_msg_functor=msg_func, - custom_success_msg_functor=msg_func, - 
caseroot=case.get_value("CASEROOT"), - is_batch=is_batch) + run_and_log_case_status( + run_func, + "model execution", + custom_starting_msg_functor=msg_func, + custom_success_msg_functor=msg_func, + caseroot=case.get_value("CASEROOT"), + is_batch=is_batch, + ) cmd_success = True except CIMEError: cmd_success = False @@ -141,28 +185,44 @@ def _run_model_impl(case, lid, skip_pnl=False, da_cycle=0): # should generally be reloaded (via case.get_value(XXX)) if they are still needed. case.read_xml() - model_log("e3sm", logger, "{} MODEL EXECUTION HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S"))) + model_log( + "e3sm", + logger, + "{} MODEL EXECUTION HAS FINISHED".format( + time.strftime("%Y-%m-%d %H:%M:%S") + ), + ) model_logfile = os.path.join(rundir, model + ".log." + lid) # Determine if failure was due to a failed node, if so, try to restart if retry_run_re or node_fail_re: model_logfile = os.path.join(rundir, model + ".log." + lid) if os.path.exists(model_logfile): - num_node_fails=0 - num_retry_fails=0 + num_node_fails = 0 + num_retry_fails = 0 if node_fail_re: - num_node_fails = len(node_fail_regex.findall(open(model_logfile, 'r').read())) + num_node_fails = len( + node_fail_regex.findall(open(model_logfile, "r").read()) + ) if retry_run_re: - num_retry_fails = len(retry_run_regex.findall(open(model_logfile, 'r').read())) - logger.debug ("RETRY: num_retry_fails {} spare_nodes {} retry_count {}". - format(num_retry_fails, case.spare_nodes, retry_count)) + num_retry_fails = len( + retry_run_regex.findall(open(model_logfile, "r").read()) + ) + logger.debug( + "RETRY: num_retry_fails {} spare_nodes {} retry_count {}".format( + num_retry_fails, case.spare_nodes, retry_count + ) + ) if num_node_fails > 0 and case.spare_nodes >= num_node_fails: - # We failed due to node failure! - logger.warning("Detected model run failed due to node failure, restarting") + # We failed due to node failure! 
+ logger.warning( + "Detected model run failed due to node failure, restarting" + ) case.spare_nodes -= num_node_fails loop = True - case.set_value("CONTINUE_RUN", - case.get_value("RESUBMIT_SETS_CONTINUE_RUN")) + case.set_value( + "CONTINUE_RUN", case.get_value("RESUBMIT_SETS_CONTINUE_RUN") + ) elif num_retry_fails > 0 and retry_count >= num_retry_fails: logger.warning("Detected model run failed, restarting") retry_count -= 1 @@ -179,35 +239,53 @@ def _run_model_impl(case, lid, skip_pnl=False, da_cycle=0): if not cmd_success and not loop: # We failed and we're not restarting - expect(False, "RUN FAIL: Command '{}' failed\nSee log file for details: {}".format(cmd, model_logfile)) - - model_log("e3sm", logger, "{} POST_RUN_CHECK BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S"))) + expect( + False, + "RUN FAIL: Command '{}' failed\nSee log file for details: {}".format( + cmd, model_logfile + ), + ) + + model_log( + "e3sm", + logger, + "{} POST_RUN_CHECK BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) _post_run_check(case, lid) - model_log("e3sm", logger, "{} POST_RUN_CHECK HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S"))) + model_log( + "e3sm", + logger, + "{} POST_RUN_CHECK HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) return lid + ############################################################################### def _run_model(case, lid, skip_pnl=False, da_cycle=0): -############################################################################### + ############################################################################### functor = lambda: _run_model_impl(case, lid, skip_pnl=skip_pnl, da_cycle=da_cycle) is_batch = case.get_value("BATCH_SYSTEM") is not None msg_func = None - + if is_batch: jobid = batch_jobid() msg_func = lambda *args: jobid if jobid is not None else "" - return run_and_log_case_status(functor, "case.run", - custom_starting_msg_functor=msg_func, - custom_success_msg_functor=msg_func, - caseroot=case.get_value("CASEROOT"), - is_batch=is_batch) + return run_and_log_case_status( + functor, + "case.run", + custom_starting_msg_functor=msg_func, + custom_success_msg_functor=msg_func, + caseroot=case.get_value("CASEROOT"), + is_batch=is_batch, + ) + ############################################################################### def _post_run_check(case, lid): -############################################################################### + ############################################################################### rundir = case.get_value("RUNDIR") model = case.get_value("MODEL") @@ -218,13 +296,13 @@ def _post_run_check(case, lid): if "CPL" not in case.get_values("COMP_CLASSES"): fv3_standalone = True - if driver == 'nuopc': + if driver == "nuopc": if fv3_standalone: file_prefix = model else: - file_prefix = 'drv' + file_prefix = "drv" else: - file_prefix = 'cpl' + file_prefix = "cpl" cpl_ninst = 1 if case.get_value("MULTI_DRIVER"): @@ -233,7 +311,9 @@ def _post_run_check(case, lid): if cpl_ninst > 1: for inst in range(cpl_ninst): - cpl_logs.append(os.path.join(rundir, file_prefix + "_%04d.log." % (inst+1) + lid)) + cpl_logs.append( + os.path.join(rundir, file_prefix + "_%04d.log." % (inst + 1) + lid) + ) else: cpl_logs = [os.path.join(rundir, file_prefix + ".log." 
+ lid)] @@ -251,26 +331,28 @@ def _post_run_check(case, lid): for cpl_logfile in cpl_logs: if not os.path.isfile(cpl_logfile): break - with open(cpl_logfile, 'r') as fd: - if fv3_standalone and 'HAS ENDED' in fd.read(): + with open(cpl_logfile, "r") as fd: + if fv3_standalone and "HAS ENDED" in fd.read(): count_ok += 1 - elif not fv3_standalone and 'SUCCESSFUL TERMINATION' in fd.read(): + elif not fv3_standalone and "SUCCESSFUL TERMINATION" in fd.read(): count_ok += 1 if count_ok != cpl_ninst: - expect(False, "Model did not complete - see {} \n " .format(cpl_logfile)) + expect(False, "Model did not complete - see {} \n ".format(cpl_logfile)) + ############################################################################### def _save_logs(case, lid): -############################################################################### + ############################################################################### rundir = case.get_value("RUNDIR") logfiles = glob.glob(os.path.join(rundir, "*.log.{}".format(lid))) for logfile in logfiles: if os.path.isfile(logfile): gzip_existing_file(logfile) + ###################################################################################### def _resubmit_check(case): -############################################################################### + ############################################################################### """ check to see if we need to do resubmission from this particular job, Note that Mira requires special logic @@ -286,10 +368,12 @@ def _resubmit_check(case): resubmit = False if not dout_s and resubmit_num > 0: resubmit = True - elif dout_s and mach == 'mira': + elif dout_s and mach == "mira": caseroot = case.get_value("CASEROOT") cimeroot = case.get_value("CIMEROOT") - cmd = "ssh cooleylogin1 'cd {case}; CIMEROOT={root} ./case.submit {case} --job case.st_archive'".format(case=caseroot, root=cimeroot) + cmd = "ssh cooleylogin1 'cd {case}; CIMEROOT={root} ./case.submit {case} --job case.st_archive'".format( + case=caseroot, root=cimeroot + ) run_cmd(cmd, verbose=True) if resubmit: @@ -299,28 +383,52 @@ def _resubmit_check(case): logger.debug("resubmit after check is {}".format(resubmit)) + ############################################################################### def _do_external(script_name, caseroot, rundir, lid, prefix): -############################################################################### - expect(os.path.isfile(script_name), "External script {} not found".format(script_name)) + ############################################################################### + expect( + os.path.isfile(script_name), "External script {} not found".format(script_name) + ) filename = "{}.external.log.{}".format(prefix, lid) outfile = os.path.join(rundir, filename) append_status("Starting script {}".format(script_name), "CaseStatus") - run_sub_or_cmd(script_name, [caseroot], (os.path.basename(script_name).split('.',1))[0], [caseroot], logfile=outfile) # For sub, use case? + run_sub_or_cmd( + script_name, + [caseroot], + (os.path.basename(script_name).split(".", 1))[0], + [caseroot], + logfile=outfile, + ) # For sub, use case? 
append_status("Completed script {}".format(script_name), "CaseStatus") + ############################################################################### def _do_data_assimilation(da_script, caseroot, cycle, lid, rundir): -############################################################################### - expect(os.path.isfile(da_script), "Data Assimilation script {} not found".format(da_script)) + ############################################################################### + expect( + os.path.isfile(da_script), + "Data Assimilation script {} not found".format(da_script), + ) filename = "da.log.{}".format(lid) outfile = os.path.join(rundir, filename) - run_sub_or_cmd(da_script, [caseroot, cycle], os.path.basename(da_script), [caseroot, cycle], logfile=outfile) # For sub, use case? + run_sub_or_cmd( + da_script, + [caseroot, cycle], + os.path.basename(da_script), + [caseroot, cycle], + logfile=outfile, + ) # For sub, use case? + ############################################################################### def case_run(self, skip_pnl=False, set_continue_run=False, submit_resubmits=False): -############################################################################### - model_log("e3sm", logger, "{} CASE.RUN BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S"))) + ############################################################################### + model_log( + "e3sm", + logger, + "{} CASE.RUN BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) # Set up the run, run the model, do the postrun steps # set up the LID @@ -328,26 +436,40 @@ def case_run(self, skip_pnl=False, set_continue_run=False, submit_resubmits=Fals prerun_script = self.get_value("PRERUN_SCRIPT") if prerun_script: - model_log("e3sm", logger, "{} PRERUN_SCRIPT BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S"))) + model_log( + "e3sm", + logger, + "{} PRERUN_SCRIPT BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) self.flush() - _do_external(prerun_script, self.get_value("CASEROOT"), self.get_value("RUNDIR"), - lid, prefix="prerun") + _do_external( + prerun_script, + self.get_value("CASEROOT"), + self.get_value("RUNDIR"), + lid, + prefix="prerun", + ) self.read_xml() - model_log("e3sm", logger, "{} PRERUN_SCRIPT HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S"))) + model_log( + "e3sm", + logger, + "{} PRERUN_SCRIPT HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) # We might need to tweak these if we want to allow the user to change them data_assimilation_cycles = self.get_value("DATA_ASSIMILATION_CYCLES") data_assimilation_script = self.get_value("DATA_ASSIMILATION_SCRIPT") - data_assimilation = (data_assimilation_cycles > 0 and - len(data_assimilation_script) > 0 and - os.path.isfile(data_assimilation_script)) + data_assimilation = ( + data_assimilation_cycles > 0 + and len(data_assimilation_script) > 0 + and os.path.isfile(data_assimilation_script) + ) for cycle in range(data_assimilation_cycles): # After the first DA cycle, runs are restart runs if cycle > 0: lid = new_lid() - self.set_value("CONTINUE_RUN", - self.get_value("RESUBMIT_SETS_CONTINUE_RUN")) + self.set_value("CONTINUE_RUN", self.get_value("RESUBMIT_SETS_CONTINUE_RUN")) # WARNING: All case variables are reloaded during run_model to get # new values of any variables that may have been changed by @@ -355,43 +477,99 @@ def case_run(self, skip_pnl=False, set_continue_run=False, submit_resubmits=Fals # set from case variables before this point may be # inconsistent with their latest values in the xml files, so # should 
generally be reloaded (via case.get_value(XXX)) if they are still needed. - model_log("e3sm", logger, "{} RUN_MODEL BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S"))) + model_log( + "e3sm", + logger, + "{} RUN_MODEL BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) lid = _run_model(self, lid, skip_pnl, da_cycle=cycle) - model_log("e3sm", logger, "{} RUN_MODEL HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S"))) + model_log( + "e3sm", + logger, + "{} RUN_MODEL HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) if self.get_value("CHECK_TIMING") or self.get_value("SAVE_TIMING"): - model_log("e3sm", logger, "{} GET_TIMING BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S"))) - get_timing(self, lid) # Run the getTiming script - model_log("e3sm", logger, "{} GET_TIMING HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S"))) - + model_log( + "e3sm", + logger, + "{} GET_TIMING BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) + get_timing(self, lid) # Run the getTiming script + model_log( + "e3sm", + logger, + "{} GET_TIMING HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) if data_assimilation: - model_log("e3sm", logger, "{} DO_DATA_ASSIMILATION BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S"))) + model_log( + "e3sm", + logger, + "{} DO_DATA_ASSIMILATION BEGINS HERE".format( + time.strftime("%Y-%m-%d %H:%M:%S") + ), + ) self.flush() - _do_data_assimilation(data_assimilation_script, self.get_value("CASEROOT"), cycle, lid, - self.get_value("RUNDIR")) + _do_data_assimilation( + data_assimilation_script, + self.get_value("CASEROOT"), + cycle, + lid, + self.get_value("RUNDIR"), + ) self.read_xml() - model_log("e3sm", logger, "{} DO_DATA_ASSIMILATION HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S"))) - - _save_logs(self, lid) # Copy log files back to caseroot - - model_log("e3sm", logger, "{} SAVE_POSTRUN_PROVENANCE BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S"))) + model_log( + "e3sm", + logger, + "{} DO_DATA_ASSIMILATION HAS FINISHED".format( + time.strftime("%Y-%m-%d %H:%M:%S") + ), + ) + + _save_logs(self, lid) # Copy log files back to caseroot + + model_log( + "e3sm", + logger, + "{} SAVE_POSTRUN_PROVENANCE BEGINS HERE".format( + time.strftime("%Y-%m-%d %H:%M:%S") + ), + ) save_postrun_provenance(self) - model_log("e3sm", logger, "{} SAVE_POSTRUN_PROVENANCE HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S"))) + model_log( + "e3sm", + logger, + "{} SAVE_POSTRUN_PROVENANCE HAS FINISHED".format( + time.strftime("%Y-%m-%d %H:%M:%S") + ), + ) postrun_script = self.get_value("POSTRUN_SCRIPT") if postrun_script: - model_log("e3sm", logger, "{} POSTRUN_SCRIPT BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S"))) + model_log( + "e3sm", + logger, + "{} POSTRUN_SCRIPT BEGINS HERE".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) self.flush() - _do_external(postrun_script, self.get_value("CASEROOT"), self.get_value("RUNDIR"), - lid, prefix="postrun") + _do_external( + postrun_script, + self.get_value("CASEROOT"), + self.get_value("RUNDIR"), + lid, + prefix="postrun", + ) self.read_xml() _save_logs(self, lid) - model_log("e3sm", logger, "{} POSTRUN_SCRIPT HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S"))) + model_log( + "e3sm", + logger, + "{} POSTRUN_SCRIPT HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) if set_continue_run: - self.set_value("CONTINUE_RUN", - self.get_value("RESUBMIT_SETS_CONTINUE_RUN")) + self.set_value("CONTINUE_RUN", self.get_value("RESUBMIT_SETS_CONTINUE_RUN")) external_workflow 
= self.get_value("EXTERNAL_WORKFLOW") if not external_workflow: @@ -401,6 +579,9 @@ def case_run(self, skip_pnl=False, set_continue_run=False, submit_resubmits=Fals if submit_resubmits: _resubmit_check(self) - - model_log("e3sm", logger, "{} CASE.RUN HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S"))) + model_log( + "e3sm", + logger, + "{} CASE.RUN HAS FINISHED".format(time.strftime("%Y-%m-%d %H:%M:%S")), + ) return True diff --git a/CIME/case/case_setup.py b/CIME/case/case_setup.py index 62f502bad8e..d1d8f5a807e 100644 --- a/CIME/case/case_setup.py +++ b/CIME/case/case_setup.py @@ -7,14 +7,24 @@ from CIME.XML.standard_module_setup import * -from CIME.XML.machines import Machines -from CIME.BuildTools.configure import configure, generate_env_mach_specific, copy_depends_files -from CIME.utils import run_and_log_case_status, get_model, \ - get_batch_script_for_job, safe_copy, file_contains_python_function, import_from_file -from CIME.utils import batch_jobid -from CIME.utils import transform_vars -from CIME.test_status import * -from CIME.locked_files import unlock_file, lock_file +from CIME.XML.machines import Machines +from CIME.BuildTools.configure import ( + configure, + generate_env_mach_specific, + copy_depends_files, +) +from CIME.utils import ( + run_and_log_case_status, + get_model, + get_batch_script_for_job, + safe_copy, + file_contains_python_function, + import_from_file, +) +from CIME.utils import batch_jobid +from CIME.utils import transform_vars +from CIME.test_status import * +from CIME.locked_files import unlock_file, lock_file import errno, shutil, glob @@ -22,7 +32,7 @@ ############################################################################### def _build_usernl_files(case, model, comp): -############################################################################### + ############################################################################### """ Create user_nl_xxx files, expects cwd is caseroot """ @@ -31,21 +41,30 @@ def _build_usernl_files(case, model, comp): model_file = case.get_value("CONFIG_CPL_FILE") else: model_file = case.get_value("CONFIG_{}_FILE".format(model)) - expect(model_file is not None, - "Could not locate CONFIG_{}_FILE in config_files.xml".format(model)) + expect( + model_file is not None, + "Could not locate CONFIG_{}_FILE in config_files.xml".format(model), + ) model_dir = os.path.dirname(model_file) - expect(os.path.isdir(model_dir), - "cannot find cime_config directory {} for component {}".format(model_dir, comp)) + expect( + os.path.isdir(model_dir), + "cannot find cime_config directory {} for component {}".format(model_dir, comp), + ) comp_interface = case.get_value("COMP_INTERFACE") multi_driver = case.get_value("MULTI_DRIVER") ninst = 1 if multi_driver: ninst_max = case.get_value("NINST_MAX") - if comp_interface != "nuopc" and model not in ("DRV","CPL","ESP"): + if comp_interface != "nuopc" and model not in ("DRV", "CPL", "ESP"): ninst_model = case.get_value("NINST_{}".format(model)) - expect(ninst_model==ninst_max,"MULTI_DRIVER mode, all components must have same NINST value. NINST_{} != {}".format(model,ninst_max)) + expect( + ninst_model == ninst_max, + "MULTI_DRIVER mode, all components must have same NINST value. 
NINST_{} != {}".format( + model, ninst_max + ), + ) if comp == "cpl": if not os.path.exists("user_nl_cpl"): safe_copy(os.path.join(model_dir, "user_nl_cpl"), ".") @@ -61,10 +80,10 @@ def _build_usernl_files(case, model, comp): # Note that, even if there are multiple elements of user_nl_list (i.e., we are # creating multiple user_nl files for this component with different names), all of # them will start out as copies of the single user_nl_comp file in the model's - # source tree - unless the file has _stream in its name + # source tree - unless the file has _stream in its name for nlfile in user_nl_list: if ninst > 1: - for inst_counter in range(1, ninst+1): + for inst_counter in range(1, ninst + 1): inst_nlfile = "{}_{:04d}".format(nlfile, inst_counter) if not os.path.exists(inst_nlfile): # If there is a user_nl_foo in the case directory, copy it @@ -84,6 +103,7 @@ def _build_usernl_files(case, model, comp): elif os.path.exists(model_nl): safe_copy(model_nl, nlfile) + ############################################################################### def _get_user_nl_list(case, default_nlfile, model_dir): """Get a list of user_nl files needed by this component @@ -104,8 +124,9 @@ def _get_user_nl_list(case, default_nlfile, model_dir): # that function; if not, we'll fall back on the default value. buildnml_path = os.path.join(model_dir, "buildnml") has_function = False - if (os.path.isfile(buildnml_path) and - file_contains_python_function(buildnml_path, "get_user_nl_list")): + if os.path.isfile(buildnml_path) and file_contains_python_function( + buildnml_path, "get_user_nl_list" + ): has_function = True if has_function: @@ -114,19 +135,27 @@ def _get_user_nl_list(case, default_nlfile, model_dir): else: return [default_nlfile] + ############################################################################### -def _create_macros_cmake(caseroot, cmake_macros_dir, mach_obj, compiler, case_cmake_path): -############################################################################### - if not os.path.isfile("Macros.cmake"): +def _create_macros_cmake( + caseroot, cmake_macros_dir, mach_obj, compiler, case_cmake_path +): + ############################################################################### + if not os.path.isfile(os.path.join(caseroot, "Macros.cmake")): safe_copy(os.path.join(cmake_macros_dir, "Macros.cmake"), caseroot) - if not os.path.exists("cmake_macros"): + if not os.path.exists(os.path.join(caseroot, "cmake_macros")): shutil.copytree(cmake_macros_dir, case_cmake_path) - copy_depends_files(mach_obj.get_machine_name(), mach_obj.machines_dir, caseroot, compiler) + copy_depends_files( + mach_obj.get_machine_name(), mach_obj.machines_dir, caseroot, compiler + ) + ############################################################################### -def _create_macros(case, mach_obj, caseroot, compiler, mpilib, debug, comp_interface, sysos): -############################################################################### +def _create_macros( + case, mach_obj, caseroot, compiler, mpilib, debug, comp_interface, sysos +): + ############################################################################### """ creates the Macros.make, Depends.compiler, Depends.machine, Depends.machine.compiler and env_mach_specific.xml if they don't already exist. 
@@ -134,56 +163,103 @@ def _create_macros(case, mach_obj, caseroot, compiler, mpilib, debug, comp_inter reread = not os.path.isfile("env_mach_specific.xml") new_cmake_macros_dir = case.get_value("CMAKE_MACROS_DIR") - if new_cmake_macros_dir: - new_cmake_macro = os.path.join(new_cmake_macros_dir,"Macros.cmake") if reread: case.flush() generate_env_mach_specific( - caseroot, mach_obj, compiler, mpilib, debug, comp_interface, - sysos, False, threaded=case.get_build_threaded(), noenv=True,) + caseroot, + mach_obj, + compiler, + mpilib, + debug, + comp_interface, + sysos, + False, + threaded=case.get_build_threaded(), + noenv=True, + ) case.read_xml() # export CIME_NO_CMAKE_MACRO=1 to disable new macros - if os.path.exists(new_cmake_macro) and not "CIME_NO_CMAKE_MACRO" in os.environ: + if ( + new_cmake_macros_dir is not None + and os.path.exists(new_cmake_macros_dir) + and not "CIME_NO_CMAKE_MACRO" in os.environ + ): case_cmake_path = os.path.join(caseroot, "cmake_macros") - _create_macros_cmake(caseroot, new_cmake_macros_dir, mach_obj, compiler, case_cmake_path) + _create_macros_cmake( + caseroot, new_cmake_macros_dir, mach_obj, compiler, case_cmake_path + ) # check for macros in extra_machines_dir and in .cime local_macros = [] extra_machdir = case.get_value("EXTRA_MACHDIR") - if extra_machdir: - if os.path.isdir(os.path.join(extra_machdir,"cmake_macros")): - local_macros.extend(glob.glob(os.path.join(extra_machdir,"cmake_macros/*.cmake"))) - elif os.path.isfile(os.path.join(extra_machdir,"config_compilers.xml")): - logger.warning("WARNING: Found directory {} but no cmake macros within, set env variable CIME_NO_CMAKE_MACRO to use deprecated config_compilers method".format(extra_machdir)) + if extra_machdir: + if os.path.isdir(os.path.join(extra_machdir, "cmake_macros")): + local_macros.extend( + glob.glob(os.path.join(extra_machdir, "cmake_macros/*.cmake")) + ) + elif os.path.isfile(os.path.join(extra_machdir, "config_compilers.xml")): + logger.warning( + "WARNING: Found directory {} but no cmake macros within, set env variable CIME_NO_CMAKE_MACRO to use deprecated config_compilers method".format( + extra_machdir + ) + ) dotcime = None home = os.environ.get("HOME") if home: - dotcime = os.path.join(home,".cime") + dotcime = os.path.join(home, ".cime") if dotcime and os.path.isdir(dotcime): - local_macros.extend(glob.glob(dotcime+"/*.cmake")) + local_macros.extend(glob.glob(dotcime + "/*.cmake")) for macro in local_macros: safe_copy(macro, case_cmake_path) - if dotcime and os.path.isfile(os.path.join(dotcime,"config_compilers.xml")) and not local_macros: - logger.warning("WARNING: Found directory {} but no cmake macros within, set env variable CIME_NO_CMAKE_MACRO to use deprecated config_compilers method".format(dotcime)) + if ( + dotcime + and os.path.isfile(os.path.join(dotcime, "config_compilers.xml")) + and not local_macros + ): + logger.warning( + "WARNING: Found directory {} but no cmake macros within, set env variable CIME_NO_CMAKE_MACRO to use deprecated config_compilers method".format( + dotcime + ) + ) else: if not os.path.isfile("Macros.make"): - configure(mach_obj, - caseroot, ["Makefile"], compiler, mpilib, debug, comp_interface, sysos, noenv=True, - extra_machines_dir=mach_obj.get_extra_machines_dir()) + configure( + mach_obj, + caseroot, + ["Makefile"], + compiler, + mpilib, + debug, + comp_interface, + sysos, + noenv=True, + extra_machines_dir=mach_obj.get_extra_machines_dir(), + ) # Also write out Cmake macro file if not os.path.isfile("Macros.cmake"): - configure(mach_obj, - 
caseroot, ["CMake"], compiler, mpilib, debug, comp_interface, sysos, noenv=True, - extra_machines_dir=mach_obj.get_extra_machines_dir()) + configure( + mach_obj, + caseroot, + ["CMake"], + compiler, + mpilib, + debug, + comp_interface, + sysos, + noenv=True, + extra_machines_dir=mach_obj.get_extra_machines_dir(), + ) ############################################################################### -def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False, keep=None): -############################################################################### +def _case_setup_impl( + case, caseroot, clean=False, test_mode=False, reset=False, keep=None +): + ############################################################################### os.chdir(caseroot) non_local = case.get_value("NONLOCAL") @@ -204,7 +280,7 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False, # Check that $DIN_LOC_ROOT exists or can be created: if not non_local: din_loc_root = case.get_value("DIN_LOC_ROOT") - testcase = case.get_value("TESTCASE") + testcase = case.get_value("TESTCASE") if not os.path.isdir(din_loc_root): try: @@ -213,14 +289,24 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False, if e.errno == errno.EACCES: logger.info("Invalid permissions to create {}".format(din_loc_root)) - expect(not (not os.path.isdir(din_loc_root) and testcase != "SBN"), - "inputdata root is not a directory or is not readable: {}".format(din_loc_root)) + expect( + not (not os.path.isdir(din_loc_root) and testcase != "SBN"), + "inputdata root is not a directory or is not readable: {}".format( + din_loc_root + ), + ) # Remove batch scripts if reset or clean: # clean setup-generated files batch_script = get_batch_script_for_job(case.get_primary_job()) - files_to_clean = [batch_script, "env_mach_specific.xml", "Macros.make", "Macros.cmake", "cmake_macros"] + files_to_clean = [ + batch_script, + "env_mach_specific.xml", + "Macros.make", + "Macros.cmake", + "cmake_macros", + ] for file_to_clean in files_to_clean: if os.path.exists(file_to_clean) and not (keep and file_to_clean in keep): if os.path.isdir(file_to_clean): @@ -237,15 +323,26 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False, if clean and not os.path.isfile("env_mach_specific.xml"): case.flush() generate_env_mach_specific( - caseroot, mach_obj, compiler, mpilib, debug, comp_interface, - sysos, False, threaded=case.get_build_threaded(), noenv=True,) + caseroot, + mach_obj, + compiler, + mpilib, + debug, + comp_interface, + sysos, + False, + threaded=case.get_build_threaded(), + noenv=True, + ) case.read_xml() if not clean: if not non_local: case.load_env() - _create_macros(case, mach_obj, caseroot, compiler, mpilib, debug, comp_interface, sysos) + _create_macros( + case, mach_obj, caseroot, compiler, mpilib, debug, comp_interface, sysos + ) # Set tasks to 1 if mpi-serial library if mpilib == "mpi-serial": @@ -255,7 +352,7 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False, # In CIME there can be multiple instances of each component model (an ensemble) NINST is the instance of that component. 
comp_interface = case.get_value("COMP_INTERFACE") if comp_interface == "nuopc": - ninst = case.get_value("NINST") + ninst = case.get_value("NINST") multi_driver = case.get_value("MULTI_DRIVER") @@ -264,11 +361,15 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False, if comp == "CPL": continue if comp_interface != "nuopc": - ninst = case.get_value("NINST_{}".format(comp)) + ninst = case.get_value("NINST_{}".format(comp)) if multi_driver: if comp_interface != "nuopc": - expect(case.get_value("NINST_LAYOUT_{}".format(comp)) == "concurrent", - "If multi_driver is TRUE, NINST_LAYOUT_{} must be concurrent".format(comp)) + expect( + case.get_value("NINST_LAYOUT_{}".format(comp)) == "concurrent", + "If multi_driver is TRUE, NINST_LAYOUT_{} must be concurrent".format( + comp + ), + ) case.set_value("NTASKS_PER_INST_{}".format(comp), ntasks) else: if ninst > ntasks: @@ -276,12 +377,21 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False, case.set_value("NTASKS_{}".format(comp), ninst) ntasks = ninst else: - expect(False, "NINST_{comp} value {ninst} greater than NTASKS_{comp} {ntasks}".format(comp=comp, ninst=ninst, ntasks=ntasks)) + expect( + False, + "NINST_{comp} value {ninst} greater than NTASKS_{comp} {ntasks}".format( + comp=comp, ninst=ninst, ntasks=ntasks + ), + ) - case.set_value("NTASKS_PER_INST_{}".format(comp), max(1,int(ntasks / ninst))) + case.set_value( + "NTASKS_PER_INST_{}".format(comp), max(1, int(ntasks / ninst)) + ) if os.path.exists(get_batch_script_for_job(case.get_primary_job())): - logger.info("Machine/Decomp/Pes configuration has already been done ...skipping") + logger.info( + "Machine/Decomp/Pes configuration has already been done ...skipping" + ) case.initialize_derived_attributes() @@ -303,10 +413,23 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False, threaded = case.get_build_threaded() case.set_value("SMP_PRESENT", threaded) if threaded and case.total_tasks * case.thread_count > cost_per_node: - smt_factor = max(1.0,int(case.get_value("MAX_TASKS_PER_NODE") / cost_per_node)) - case.set_value("TOTALPES", case.iotasks + int((case.total_tasks - case.iotasks) * max(1.0,float(case.thread_count) / smt_factor))) + smt_factor = max( + 1.0, int(case.get_value("MAX_TASKS_PER_NODE") / cost_per_node) + ) + case.set_value( + "TOTALPES", + case.iotasks + + int( + (case.total_tasks - case.iotasks) + * max(1.0, float(case.thread_count) / smt_factor) + ), + ) else: - case.set_value("TOTALPES", (case.total_tasks - case.iotasks)*case.thread_count + case.iotasks) + case.set_value( + "TOTALPES", + (case.total_tasks - case.iotasks) * case.thread_count + + case.iotasks, + ) # May need to select new batch settings if pelayout changed (e.g. 
problem is now too big for prev-selected queue) env_batch = case.get_env("batch") @@ -315,8 +438,15 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False, # create batch files env_batch.make_all_batch_files(case) if get_model() == "e3sm" and not case.get_value("TEST"): - input_batch_script = os.path.join(case.get_value("MACHDIR"), "template.case.run.sh") - env_batch.make_batch_script(input_batch_script, "case.run", case, outfile=get_batch_script_for_job("case.run.sh")) + input_batch_script = os.path.join( + case.get_value("MACHDIR"), "template.case.run.sh" + ) + env_batch.make_batch_script( + input_batch_script, + "case.run", + case, + outfile=get_batch_script_for_job("case.run.sh"), + ) # Make a copy of env_mach_pes.xml in order to be able # to check that it does not change once case.setup is invoked @@ -336,14 +466,18 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False, _build_usernl_files(case, model, comp) if comp == "cism": glcroot = case.get_value("COMP_ROOT_DIR_GLC") - run_cmd_no_fail("{}/cime_config/cism.template {}".format(glcroot, caseroot)) + run_cmd_no_fail( + "{}/cime_config/cism.template {}".format(glcroot, caseroot) + ) _build_usernl_files(case, "drv", "cpl") # Create needed directories for case case.create_dirs() - logger.info("If an old case build already exists, might want to run \'case.build --clean\' before building") + logger.info( + "If an old case build already exists, might want to run 'case.build --clean' before building" + ) # Some tests need namelists created here (ERP) - so do this if we are in test mode if (test_mode or get_model() == "e3sm") and not non_local: @@ -360,14 +494,19 @@ def _case_setup_impl(case, caseroot, clean=False, test_mode=False, reset=False, if not non_local: env_module.save_all_env_info("software_environment.txt") - logger.info("You can now run './preview_run' to get more info on how your case will be run") + logger.info( + "You can now run './preview_run' to get more info on how your case will be run" + ) + ############################################################################### def case_setup(self, clean=False, test_mode=False, reset=False, keep=None): -############################################################################### + ############################################################################### caseroot, casebaseid = self.get_value("CASEROOT"), self.get_value("CASEBASEID") phase = "setup.clean" if clean else "case.setup" - functor = lambda: _case_setup_impl(self, caseroot, clean=clean, test_mode=test_mode, reset=reset, keep=keep) + functor = lambda: _case_setup_impl( + self, caseroot, clean=clean, test_mode=test_mode, reset=reset, keep=keep + ) is_batch = self.get_value("BATCH_SYSTEM") is not None msg_func = None @@ -380,12 +519,15 @@ def case_setup(self, clean=False, test_mode=False, reset=False, keep=None): test_name = casebaseid if casebaseid is not None else self.get_value("CASE") with TestStatus(test_dir=caseroot, test_name=test_name) as ts: try: - run_and_log_case_status(functor, phase, - custom_starting_msg_functor=msg_func, - custom_success_msg_functor=msg_func, - caseroot=caseroot, - is_batch=is_batch) - except BaseException: # Want to catch KeyboardInterrupt too + run_and_log_case_status( + functor, + phase, + custom_starting_msg_functor=msg_func, + custom_success_msg_functor=msg_func, + caseroot=caseroot, + is_batch=is_batch, + ) + except BaseException: # Want to catch KeyboardInterrupt too ts.set_status(SETUP_PHASE, TEST_FAIL_STATUS) raise else: 
@@ -394,11 +536,14 @@ def case_setup(self, clean=False, test_mode=False, reset=False, keep=None): else: ts.set_status(SETUP_PHASE, TEST_PASS_STATUS) else: - run_and_log_case_status(functor, phase, - custom_starting_msg_functor=msg_func, - custom_success_msg_functor=msg_func, - caseroot=caseroot, - is_batch=is_batch) + run_and_log_case_status( + functor, + phase, + custom_starting_msg_functor=msg_func, + custom_success_msg_functor=msg_func, + caseroot=caseroot, + is_batch=is_batch, + ) # put the following section here to make sure the rundir is generated first machdir = self.get_value("MACHDIR") @@ -406,20 +551,24 @@ def case_setup(self, clean=False, test_mode=False, reset=False, keep=None): ngpus_per_node = self.get_value("NGPUS_PER_NODE") overrides = {} overrides["ngpus_per_node"] = ngpus_per_node - input_template = os.path.join(machdir,"mpi_run_gpu.{}".format(mach)) + input_template = os.path.join(machdir, "mpi_run_gpu.{}".format(mach)) if os.path.isfile(input_template): # update the wrapper script that sets the device id for each MPI rank - output_text = transform_vars(open(input_template,"r").read(), case=self, overrides=overrides) + output_text = transform_vars( + open(input_template, "r").read(), case=self, overrides=overrides + ) # write it out to the run dir rundir = self.get_value("RUNDIR") - output_name = os.path.join(rundir,"set_device_rank.sh") + output_name = os.path.join(rundir, "set_device_rank.sh") logger.info("Creating file {}".format(output_name)) with open(output_name, "w") as f: f.write(output_text) # make the wrapper script executable if os.path.isfile(output_name): - os.system("chmod +x "+output_name) + os.system("chmod +x " + output_name) else: - expect(False, "The file {} is not written out correctly.".format(output_name)) + expect( + False, "The file {} is not written out correctly.".format(output_name) + ) diff --git a/CIME/case/case_st_archive.py b/CIME/case/case_st_archive.py index a12faf2dea7..ff3b9b64d93 100644 --- a/CIME/case/case_st_archive.py +++ b/CIME/case/case_st_archive.py @@ -7,46 +7,58 @@ import shutil, glob, re, os from CIME.XML.standard_module_setup import * -from CIME.utils import run_and_log_case_status, ls_sorted_by_mtime, symlink_force, safe_copy, find_files -from CIME.utils import batch_jobid -from CIME.date import get_file_date -from CIME.XML.archive import Archive -from CIME.XML.files import Files -from os.path import isdir, join +from CIME.utils import ( + run_and_log_case_status, + ls_sorted_by_mtime, + symlink_force, + safe_copy, + find_files, +) +from CIME.utils import batch_jobid +from CIME.date import get_file_date +from CIME.XML.archive import Archive +from CIME.XML.files import Files +from os.path import isdir, join logger = logging.getLogger(__name__) ############################################################################### def _get_archive_fn_desc(archive_fn): -############################################################################### + ############################################################################### return "moving" if archive_fn is shutil.move else "copying" + ############################################################################### def _get_archive_file_fn(copy_only): -############################################################################### + ############################################################################### """ Returns the function to use for archiving some files """ return safe_copy if copy_only else shutil.move + 
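A standalone sketch of the pattern just introduced: the archiver selects a file-handling callable once and threads it through every helper, which lets `_get_archive_fn_desc` describe the action later by identity comparison. CIME's real copy function is its own `safe_copy`; `shutil.copy2` stands in here:

    import shutil

    def get_archive_file_fn(copy_only):
        # Mirrors _get_archive_file_fn, with shutil.copy2 in place of safe_copy.
        return shutil.copy2 if copy_only else shutil.move

    for copy_only in (True, False):
        fn = get_archive_file_fn(copy_only)
        print("moving" if fn is shutil.move else "copying")
    # copying
    # moving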
############################################################################### def _get_datenames(casename, rundir): -############################################################################### + ############################################################################### """ Returns the date objects specifying the times of each file Note we are assuming that the coupler restart files exist and are consistent with other component datenames Not doc-testable due to filesystem dependence """ - expect(isdir(rundir), 'Cannot open directory {} '.format(rundir)) + expect(isdir(rundir), "Cannot open directory {} ".format(rundir)) - files = sorted(glob.glob(os.path.join(rundir, casename + '.cpl.r.*.nc'))) + files = sorted(glob.glob(os.path.join(rundir, casename + ".cpl.r.*.nc"))) if not files: - files = sorted(glob.glob(os.path.join(rundir, casename + '.cpl_0001.r.*.nc'))) + files = sorted(glob.glob(os.path.join(rundir, casename + ".cpl_0001.r.*.nc"))) logger.debug(" cpl files : {} ".format(files)) if not files: - logger.warning('Cannot find a {}.cpl*.r.*.nc file in directory {} '.format(casename, rundir)) + logger.warning( + "Cannot find a {}.cpl*.r.*.nc file in directory {} ".format( + casename, rundir + ) + ) datenames = [] for filename in files: @@ -54,6 +66,7 @@ def _get_datenames(casename, rundir): datenames.append(file_date) return datenames + def _datetime_str(_date): """ Returns the standard format associated with filenames. @@ -66,10 +79,13 @@ def _datetime_str(_date): """ format_string = "{year:04d}-{month:02d}-{day:02d}-{seconds:05d}" - return format_string.format(year = _date.year(), - month = _date.month(), - day = _date.day(), - seconds = _date.second_of_day()) + return format_string.format( + year=_date.year(), + month=_date.month(), + day=_date.day(), + seconds=_date.second_of_day(), + ) + def _datetime_str_mpas(_date): """ @@ -82,36 +98,46 @@ def _datetime_str_mpas(_date): '0011-12-09_00:07:15' """ - format_string = "{year:04d}-{month:02d}-{day:02d}_{hours:02d}:{minutes:02d}:{seconds:02d}" - return format_string.format(year = _date.year(), - month = _date.month(), - day = _date.day(), - hours = _date.hour(), - minutes = _date.minute(), - seconds = _date.second()) + format_string = ( + "{year:04d}-{month:02d}-{day:02d}_{hours:02d}:{minutes:02d}:{seconds:02d}" + ) + return format_string.format( + year=_date.year(), + month=_date.month(), + day=_date.day(), + hours=_date.hour(), + minutes=_date.minute(), + seconds=_date.second(), + ) + ############################################################################### def _get_ninst_info(case, compclass): -############################################################################### + ############################################################################### """ Returns the number of instances used by a component and suffix strings for filenames Not doc-testable due to case dependence """ - ninst = case.get_value('NINST_' + compclass.upper()) + ninst = case.get_value("NINST_" + compclass.upper()) ninst_strings = [] if ninst is None: ninst = 1 - for i in range(1,ninst+1): + for i in range(1, ninst + 1): if ninst > 1: - ninst_strings.append('_' + '{:04d}'.format(i)) + ninst_strings.append("_" + "{:04d}".format(i)) - logger.debug("ninst and ninst_strings are: {} and {} for {}".format(ninst, ninst_strings, compclass)) + logger.debug( + "ninst and ninst_strings are: {} and {} for {}".format( + ninst, ninst_strings, compclass + ) + ) return ninst, ninst_strings + 
############################################################################### def _get_component_archive_entries(components, archive): -############################################################################### + ############################################################################### """ Each time this generator function is called, it yields a tuple (archive_entry, compname, compclass) for one component in this @@ -125,19 +151,30 @@ def _get_component_archive_entries(components, archive): compclass = None else: compclass = archive.get(archive_entry, "compclass") - yield(archive_entry, compname, compclass) + yield (archive_entry, compname, compclass) ############################################################################### -def _archive_rpointer_files(casename, ninst_strings, rundir, save_interim_restart_files, archive, - archive_entry, archive_restdir, datename, datename_is_last): -############################################################################### +def _archive_rpointer_files( + casename, + ninst_strings, + rundir, + save_interim_restart_files, + archive, + archive_entry, + archive_restdir, + datename, + datename_is_last, +): + ############################################################################### if datename_is_last: # Copy of all rpointer files for latest restart date - rpointers = glob.glob(os.path.join(rundir, 'rpointer.*')) + rpointers = glob.glob(os.path.join(rundir, "rpointer.*")) for rpointer in rpointers: - safe_copy(rpointer, os.path.join(archive_restdir, os.path.basename(rpointer))) + safe_copy( + rpointer, os.path.join(archive_restdir, os.path.basename(rpointer)) + ) else: # Generate rpointer file(s) for interim restarts for the one datename and each # possible value of ninst_strings @@ -154,61 +191,82 @@ def _archive_rpointer_files(casename, ninst_strings, rundir, save_interim_restar # put in a temporary setting for ninst_strings if they are empty # in order to have just one loop over ninst_strings below - if rpointer_content != 'unset': + if rpointer_content != "unset": if not ninst_strings: ninst_strings = ["empty"] for ninst_string in ninst_strings: rpointer_file = temp_rpointer_file rpointer_content = temp_rpointer_content - if ninst_string == 'empty': + if ninst_string == "empty": ninst_string = "" - for key, value in [('$CASE', casename), - ('$DATENAME', _datetime_str(datename)), - ('$MPAS_DATENAME', _datetime_str_mpas(datename)), - ('$NINST_STRING', ninst_string)]: + for key, value in [ + ("$CASE", casename), + ("$DATENAME", _datetime_str(datename)), + ("$MPAS_DATENAME", _datetime_str_mpas(datename)), + ("$NINST_STRING", ninst_string), + ]: rpointer_file = rpointer_file.replace(key, value) rpointer_content = rpointer_content.replace(key, value) # write out the respective files with the correct contents rpointer_file = os.path.join(archive_restdir, rpointer_file) logger.info("writing rpointer_file {}".format(rpointer_file)) - f = open(rpointer_file, 'w') - for output in rpointer_content.split(','): + f = open(rpointer_file, "w") + for output in rpointer_content.split(","): f.write("{} \n".format(output)) f.close() else: - logger.info("rpointer_content unset, not creating rpointer file {}".format(rpointer_file)) + logger.info( + "rpointer_content unset, not creating rpointer file {}".format( + rpointer_file + ) + ) + ############################################################################### def _archive_log_files(dout_s_root, rundir, archive_incomplete, archive_file_fn): 
-############################################################################### + ############################################################################### """ Find all completed log files, or all log files if archive_incomplete is True, and archive them. Each log file is required to have ".log." in its name, and completed ones will end with ".gz" Not doc-testable due to file system dependence """ - archive_logdir = os.path.join(dout_s_root, 'logs') + archive_logdir = os.path.join(dout_s_root, "logs") if not os.path.exists(archive_logdir): os.makedirs(archive_logdir) logger.debug("created directory {} ".format(archive_logdir)) if archive_incomplete == False: - log_search = '*.log.*.gz' + log_search = "*.log.*.gz" else: - log_search = '*.log.*' + log_search = "*.log.*" logfiles = glob.glob(os.path.join(rundir, log_search)) for logfile in logfiles: srcfile = join(rundir, os.path.basename(logfile)) destfile = join(archive_logdir, os.path.basename(logfile)) - logger.info("{} {} to {}".format(_get_archive_fn_desc(archive_file_fn), srcfile, destfile)) + logger.info( + "{} {} to {}".format( + _get_archive_fn_desc(archive_file_fn), srcfile, destfile + ) + ) archive_file_fn(srcfile, destfile) + ############################################################################### -def _archive_history_files(archive, compclass, compname, histfiles_savein_rundir, - last_date, archive_file_fn, dout_s_root, casename, rundir): -############################################################################### +def _archive_history_files( + archive, + compclass, + compname, + histfiles_savein_rundir, + last_date, + archive_file_fn, + dout_s_root, + casename, + rundir, +): + ############################################################################### """ perform short term archiving on history files in rundir @@ -217,21 +275,21 @@ def _archive_history_files(archive, compclass, compname, histfiles_savein_rundir # determine history archive directory (create if it does not exist) - archive_histdir = os.path.join(dout_s_root, compclass, 'hist') + archive_histdir = os.path.join(dout_s_root, compclass, "hist") if not os.path.exists(archive_histdir): os.makedirs(archive_histdir) logger.debug("created directory {}".format(archive_histdir)) # the compname is drv but the files are named cpl - if compname == 'drv': - compname = 'cpl' + if compname == "drv": + compname = "cpl" - if compname == 'nemo': - archive_rblddir = os.path.join(dout_s_root, compclass, 'rebuild') + if compname == "nemo": + archive_rblddir = os.path.join(dout_s_root, compclass, "rebuild") if not os.path.exists(archive_rblddir): os.makedirs(archive_rblddir) logger.debug("created directory {}".format(archive_rblddir)) - sfxrbld = r'mesh_mask_' + r'[0-9]*' + sfxrbld = r"mesh_mask_" + r"[0-9]*" pfile = re.compile(sfxrbld) rbldfiles = [f for f in os.listdir(rundir) if pfile.search(f)] logger.debug("rbldfiles = {} ".format(rbldfiles)) @@ -240,10 +298,14 @@ def _archive_history_files(archive, compclass, compname, histfiles_savein_rundir for rbldfile in rbldfiles: srcfile = join(rundir, rbldfile) destfile = join(archive_rblddir, rbldfile) - logger.info("{} {} to {} ".format(_get_archive_fn_desc(archive_file_fn), srcfile, destfile)) + logger.info( + "{} {} to {} ".format( + _get_archive_fn_desc(archive_file_fn), srcfile, destfile + ) + ) archive_file_fn(srcfile, destfile) - sfxhst = casename + r'_[0-9][mdy]_' + r'[0-9]*' + sfxhst = casename + r"_[0-9][mdy]_" + r"[0-9]*" pfile = re.compile(sfxhst) hstfiles = [f for f in os.listdir(rundir) if 
pfile.search(f)] logger.debug("hstfiles = {} ".format(hstfiles)) @@ -252,7 +314,11 @@ def _archive_history_files(archive, compclass, compname, histfiles_savein_rundir for hstfile in hstfiles: srcfile = join(rundir, hstfile) destfile = join(archive_histdir, hstfile) - logger.info("{} {} to {} ".format(_get_archive_fn_desc(archive_file_fn), srcfile, destfile)) + logger.info( + "{} {} to {} ".format( + _get_archive_fn_desc(archive_file_fn), srcfile, destfile + ) + ) archive_file_fn(srcfile, destfile) # determine ninst and ninst_string @@ -266,19 +332,28 @@ def _archive_history_files(archive, compclass, compname, histfiles_savein_rundir file_date = get_file_date(os.path.basename(histfile)) if last_date is None or file_date is None or file_date <= last_date: srcfile = join(rundir, histfile) - expect(os.path.isfile(srcfile), - "history file {} does not exist ".format(srcfile)) + expect( + os.path.isfile(srcfile), + "history file {} does not exist ".format(srcfile), + ) destfile = join(archive_histdir, histfile) if histfile in histfiles_savein_rundir: logger.info("copying {} to {} ".format(srcfile, destfile)) safe_copy(srcfile, destfile) else: - logger.info("{} {} to {} ".format(_get_archive_fn_desc(archive_file_fn), srcfile, destfile)) + logger.info( + "{} {} to {} ".format( + _get_archive_fn_desc(archive_file_fn), srcfile, destfile + ) + ) archive_file_fn(srcfile, destfile) + ############################################################################### -def get_histfiles_for_restarts(rundir, archive, archive_entry, restfile, testonly=False): -############################################################################### +def get_histfiles_for_restarts( + rundir, archive, archive_entry, restfile, testonly=False +): + ############################################################################### """ query restart files to determine history files that are needed for restarts @@ -287,15 +362,21 @@ def get_histfiles_for_restarts(rundir, archive, archive_entry, restfile, testonl # Make certain histfiles is a set so we don't repeat histfiles = set() - rest_hist_varname = archive.get_entry_value('rest_history_varname', archive_entry) - if rest_hist_varname != 'unset': - cmd = "ncdump -v {} {} ".format(rest_hist_varname, os.path.join(rundir, restfile)) + rest_hist_varname = archive.get_entry_value("rest_history_varname", archive_entry) + if rest_hist_varname != "unset": + cmd = "ncdump -v {} {} ".format( + rest_hist_varname, os.path.join(rundir, restfile) + ) if testonly: out = "{} =".format(rest_hist_varname) else: rc, out, error = run_cmd(cmd) if rc != 0: - logger.info(" WARNING: {} failed rc={:d}\n out={}\n err={}".format(cmd, rc, out, error)) + logger.info( + " WARNING: {} failed rc={:d}\n out={}\n err={}".format( + cmd, rc, out, error + ) + ) logger.debug(" get_histfiles_for_restarts: \n out={}".format(out)) searchname = "{} =".format(rest_hist_varname) @@ -311,64 +392,103 @@ def get_histfiles_for_restarts(rundir, archive, archive_entry, restfile, testonl histfile = os.path.basename(histfile) # append histfile to the list ONLY if it exists in rundir before the archiving if histfile in histfiles: - logger.warning("WARNING, tried to add a duplicate file to histfiles") - if os.path.isfile(os.path.join(rundir,histfile)): + logger.warning( + "WARNING, tried to add a duplicate file to histfiles" + ) + if os.path.isfile(os.path.join(rundir, histfile)): histfiles.add(histfile) else: - logger.debug(" get_histfiles_for_restarts: histfile {} does not exist ".format(histfile)) + logger.debug( + " 
get_histfiles_for_restarts: histfile {} does not exist ".format( + histfile + ) + ) return histfiles + ############################################################################### -def _archive_restarts_date(case, casename, rundir, archive, - datename, datename_is_last, last_date, - archive_restdir, archive_file_fn, components=None, - link_to_last_restart_files=False, testonly=False): -############################################################################### +def _archive_restarts_date( + case, + casename, + rundir, + archive, + datename, + datename_is_last, + last_date, + archive_restdir, + archive_file_fn, + components=None, + link_to_last_restart_files=False, + testonly=False, +): + ############################################################################### """ Archive restart files for a single date Returns a dictionary of histfiles that need saving in the run directory, indexed by compname """ - logger.info('-------------------------------------------') - logger.info('Archiving restarts for date {}'.format(datename)) - logger.debug('last date {}'.format(last_date)) - logger.debug('date is last? {}'.format(datename_is_last)) - logger.debug('components are {}'.format(components)) - logger.info('-------------------------------------------') + logger.info("-------------------------------------------") + logger.info("Archiving restarts for date {}".format(datename)) + logger.debug("last date {}".format(last_date)) + logger.debug("date is last? {}".format(datename_is_last)) + logger.debug("components are {}".format(components)) + logger.info("-------------------------------------------") logger.debug("last date: {}".format(last_date)) if components is None: components = case.get_compset_components() - components.append('drv') - components.append('dart') + components.append("drv") + components.append("dart") histfiles_savein_rundir_by_compname = {} - for (archive_entry, compname, compclass) in _get_component_archive_entries(components, archive): + for (archive_entry, compname, compclass) in _get_component_archive_entries( + components, archive + ): if compclass: - logger.info('Archiving restarts for {} ({})'.format(compname, compclass)) + logger.info("Archiving restarts for {} ({})".format(compname, compclass)) # archive restarts - histfiles_savein_rundir = _archive_restarts_date_comp(case, casename, rundir, - archive, archive_entry, - compclass, compname, - datename, datename_is_last, - last_date, archive_restdir, - archive_file_fn, - link_to_last_restart_files= - link_to_last_restart_files, - testonly=testonly) + histfiles_savein_rundir = _archive_restarts_date_comp( + case, + casename, + rundir, + archive, + archive_entry, + compclass, + compname, + datename, + datename_is_last, + last_date, + archive_restdir, + archive_file_fn, + link_to_last_restart_files=link_to_last_restart_files, + testonly=testonly, + ) histfiles_savein_rundir_by_compname[compname] = histfiles_savein_rundir return histfiles_savein_rundir_by_compname + ############################################################################### -def _archive_restarts_date_comp(case, casename, rundir, archive, archive_entry, - compclass, compname, datename, datename_is_last, - last_date, archive_restdir, archive_file_fn, - link_to_last_restart_files=False, testonly=False): -############################################################################### +def _archive_restarts_date_comp( + case, + casename, + rundir, + archive, + archive_entry, + compclass, + compname, + datename, + datename_is_last, + last_date, + 
archive_restdir, + archive_file_fn, + link_to_last_restart_files=False, + testonly=False, +): + ############################################################################### """ Archive restart files for a single date and single component @@ -379,14 +499,22 @@ def _archive_restarts_date_comp(case, casename, rundir, archive, archive_entry, """ datename_str = _datetime_str(datename) - if datename_is_last or case.get_value('DOUT_S_SAVE_INTERIM_RESTART_FILES'): + if datename_is_last or case.get_value("DOUT_S_SAVE_INTERIM_RESTART_FILES"): if not os.path.exists(archive_restdir): os.makedirs(archive_restdir) # archive the rpointer file(s) for this datename and all possible ninst_strings - _archive_rpointer_files(casename, _get_ninst_info(case, compclass)[1], rundir, - case.get_value('DOUT_S_SAVE_INTERIM_RESTART_FILES'), - archive, archive_entry, archive_restdir, datename, datename_is_last) + _archive_rpointer_files( + casename, + _get_ninst_info(case, compclass)[1], + rundir, + case.get_value("DOUT_S_SAVE_INTERIM_RESTART_FILES"), + archive, + archive_entry, + archive_restdir, + datename, + datename_is_last, + ) # move all but latest restart files into the archive restart directory # copy latest restart files to archive restart directory @@ -401,30 +529,47 @@ def _archive_restarts_date_comp(case, casename, rundir, archive, archive_entry, last_restart_file_fn_msg = "copying" # the compname is drv but the files are named cpl - if compname == 'drv': - compname = 'cpl' + if compname == "drv": + compname = "cpl" if compname == "cice6": - compname = 'cice' + compname = "cice" if compname == "ww3dev": - compname = 'ww3' + compname = "ww3" # get file_extension suffixes for suffix in archive.get_rest_file_extensions(archive_entry): -# logger.debug("suffix is {} ninst {}".format(suffix, ninst)) + # logger.debug("suffix is {} ninst {}".format(suffix, ninst)) restfiles = "" - if compname.find('mpas') == 0 or compname == 'mali': - pattern = casename + r'\.' + compname + r'\.' + suffix + r'\.' + '_'.join(datename_str.rsplit('-', 1)) + if compname.find("mpas") == 0 or compname == "mali": + pattern = ( + casename + + r"\." + + compname + + r"\." + + suffix + + r"\." + + "_".join(datename_str.rsplit("-", 1)) + ) pfile = re.compile(pattern) restfiles = [f for f in os.listdir(rundir) if pfile.search(f)] - elif compname == 'nemo': - pattern = r'_*_' + suffix + r'[0-9]*' + elif compname == "nemo": + pattern = r"_*_" + suffix + r"[0-9]*" pfile = re.compile(pattern) restfiles = [f for f in os.listdir(rundir) if pfile.search(f)] else: pattern = r"^{}\.{}[\d_]*\.".format(casename, compname) pfile = re.compile(pattern) files = [f for f in os.listdir(rundir) if pfile.search(f)] - pattern = r'_?' + r'\d*' + r'\.' + suffix + r'\.' + r'[^\.]*' + r'\.?' + datename_str + pattern = ( + r"_?" + + r"\d*" + + r"\." + + suffix + + r"\." + + r"[^\.]*" + + r"\.?" 
+ + datename_str + ) pfile = re.compile(pattern) restfiles = [f for f in files if pfile.search(f)] logger.debug("pattern is {} restfiles {}".format(pattern, restfiles)) @@ -441,9 +586,9 @@ def _archive_restarts_date_comp(case, casename, rundir, archive, archive_entry, # obtain array of history files for restarts # need to do this before archiving restart files - histfiles_for_restart = get_histfiles_for_restarts(rundir, archive, - archive_entry, rfile, - testonly=testonly) + histfiles_for_restart = get_histfiles_for_restarts( + rundir, archive, archive_entry, rfile, testonly=testonly + ) if datename_is_last and histfiles_for_restart: for histfile in histfiles_for_restart: @@ -456,23 +601,41 @@ def _archive_restarts_date_comp(case, casename, rundir, archive, archive_entry, srcfile = os.path.join(rundir, rfile) destfile = os.path.join(archive_restdir, rfile) last_restart_file_fn(srcfile, destfile) - logger.info("{} file {} to {}".format(last_restart_file_fn_msg, srcfile, destfile)) + logger.info( + "{} file {} to {}".format( + last_restart_file_fn_msg, srcfile, destfile + ) + ) for histfile in histfiles_for_restart: srcfile = os.path.join(rundir, histfile) destfile = os.path.join(archive_restdir, histfile) - expect(os.path.isfile(srcfile), - "history restart file {} for last date does not exist ".format(srcfile)) + expect( + os.path.isfile(srcfile), + "history restart file {} for last date does not exist ".format( + srcfile + ), + ) logger.info("Copying {} to {}".format(srcfile, destfile)) safe_copy(srcfile, destfile) - logger.debug("datename_is_last + histfiles_for_restart copying \n {} to \n {}".format(srcfile, destfile)) + logger.debug( + "datename_is_last + histfiles_for_restart copying \n {} to \n {}".format( + srcfile, destfile + ) + ) else: # Only archive intermediate restarts if requested - otherwise remove them - if case.get_value('DOUT_S_SAVE_INTERIM_RESTART_FILES'): + if case.get_value("DOUT_S_SAVE_INTERIM_RESTART_FILES"): srcfile = os.path.join(rundir, rfile) destfile = os.path.join(archive_restdir, rfile) - expect(os.path.isfile(srcfile), - "restart file {} does not exist ".format(srcfile)) - logger.info("{} file {} to {}".format(_get_archive_fn_desc(archive_file_fn), srcfile, destfile)) + expect( + os.path.isfile(srcfile), + "restart file {} does not exist ".format(srcfile), + ) + logger.info( + "{} file {} to {}".format( + _get_archive_fn_desc(archive_file_fn), srcfile, destfile + ) + ) archive_file_fn(srcfile, destfile) # need to copy the history files needed for interim restarts - since @@ -480,23 +643,29 @@ def _archive_restarts_date_comp(case, casename, rundir, archive, archive_entry, for histfile in histfiles_for_restart: srcfile = os.path.join(rundir, histfile) destfile = os.path.join(archive_restdir, histfile) - expect(os.path.isfile(srcfile), - "hist file {} does not exist ".format(srcfile)) + expect( + os.path.isfile(srcfile), + "hist file {} does not exist ".format(srcfile), + ) logger.info("copying {} to {}".format(srcfile, destfile)) safe_copy(srcfile, destfile) else: - if compname == 'nemo': + if compname == "nemo": flist = glob.glob(rundir + "/" + casename + "_*_restart*.nc") logger.debug("nemo restart file {}".format(flist)) if len(flist) > 2: - flist0 = glob.glob(rundir + "/" + casename + "_*_restart_0000.nc") + flist0 = glob.glob( + rundir + "/" + casename + "_*_restart_0000.nc" + ) if len(flist0) > 1: rstfl01 = flist0[0] rstfl01spl = rstfl01.split("/") logger.debug("splitted name {}".format(rstfl01spl)) rstfl01nm = rstfl01spl[-1] rstfl01nmspl = 
rstfl01nm.split("_") - logger.debug("splitted name step2 {}".format(rstfl01nmspl)) + logger.debug( + "splitted name step2 {}".format(rstfl01nmspl) + ) rsttm01 = rstfl01nmspl[-3] rstfl02 = flist0[1] @@ -504,34 +673,66 @@ def _archive_restarts_date_comp(case, casename, rundir, archive, archive_entry, logger.debug("splitted name {}".format(rstfl02spl)) rstfl02nm = rstfl02spl[-1] rstfl02nmspl = rstfl02nm.split("_") - logger.debug("splitted name step2 {}".format(rstfl02nmspl)) + logger.debug( + "splitted name step2 {}".format(rstfl02nmspl) + ) rsttm02 = rstfl02nmspl[-3] if int(rsttm01) > int(rsttm02): - restlist = glob.glob(rundir + "/" + casename + "_" + rsttm02 + "_restart_*.nc") + restlist = glob.glob( + rundir + + "/" + + casename + + "_" + + rsttm02 + + "_restart_*.nc" + ) else: - restlist = glob.glob(rundir + "/" + casename + "_" + rsttm01 + "_restart_*.nc") + restlist = glob.glob( + rundir + + "/" + + casename + + "_" + + rsttm01 + + "_restart_*.nc" + ) logger.debug("nemo restart list {}".format(restlist)) if restlist: for _restfile in restlist: srcfile = os.path.join(rundir, _restfile) - logger.info("removing interim restart file {}".format(srcfile)) - if (os.path.isfile(srcfile)): + logger.info( + "removing interim restart file {}".format( + srcfile + ) + ) + if os.path.isfile(srcfile): try: os.remove(srcfile) except OSError: - logger.warning("unable to remove interim restart file {}".format(srcfile)) + logger.warning( + "unable to remove interim restart file {}".format( + srcfile + ) + ) else: - logger.warning("interim restart file {} does not exist".format(srcfile)) + logger.warning( + "interim restart file {} does not exist".format( + srcfile + ) + ) elif len(flist) == 2: - flist0 = glob.glob(rundir + "/" + casename + "_*_restart.nc") + flist0 = glob.glob( + rundir + "/" + casename + "_*_restart.nc" + ) if len(flist0) > 1: rstfl01 = flist0[0] rstfl01spl = rstfl01.split("/") logger.debug("splitted name {}".format(rstfl01spl)) rstfl01nm = rstfl01spl[-1] rstfl01nmspl = rstfl01nm.split("_") - logger.debug("splitted name step2 {}".format(rstfl01nmspl)) + logger.debug( + "splitted name step2 {}".format(rstfl01nmspl) + ) rsttm01 = rstfl01nmspl[-2] rstfl02 = flist0[1] @@ -539,51 +740,97 @@ def _archive_restarts_date_comp(case, casename, rundir, archive, archive_entry, logger.debug("splitted name {}".format(rstfl02spl)) rstfl02nm = rstfl02spl[-1] rstfl02nmspl = rstfl02nm.split("_") - logger.debug("splitted name step2 {}".format(rstfl02nmspl)) + logger.debug( + "splitted name step2 {}".format(rstfl02nmspl) + ) rsttm02 = rstfl02nmspl[-2] if int(rsttm01) > int(rsttm02): - restlist = glob.glob(rundir + "/" + casename + "_" + rsttm02 + "_restart_*.nc") + restlist = glob.glob( + rundir + + "/" + + casename + + "_" + + rsttm02 + + "_restart_*.nc" + ) else: - restlist = glob.glob(rundir + "/" + casename + "_" + rsttm01 + "_restart_*.nc") + restlist = glob.glob( + rundir + + "/" + + casename + + "_" + + rsttm01 + + "_restart_*.nc" + ) logger.debug("nemo restart list {}".format(restlist)) if restlist: for _rfile in restlist: srcfile = os.path.join(rundir, _rfile) - logger.info("removing interim restart file {}".format(srcfile)) - if (os.path.isfile(srcfile)): + logger.info( + "removing interim restart file {}".format( + srcfile + ) + ) + if os.path.isfile(srcfile): try: os.remove(srcfile) except OSError: - logger.warning("unable to remove interim restart file {}".format(srcfile)) + logger.warning( + "unable to remove interim restart file {}".format( + srcfile + ) + ) else: - logger.warning("interim 
restart file {} does not exist".format(srcfile)) + logger.warning( + "interim restart file {} does not exist".format( + srcfile + ) + ) else: - logger.warning("unable to find NEMO restart file in {}".format(rundir)) - + logger.warning( + "unable to find NEMO restart file in {}".format(rundir) + ) else: srcfile = os.path.join(rundir, rfile) logger.info("removing interim restart file {}".format(srcfile)) - if (os.path.isfile(srcfile)): + if os.path.isfile(srcfile): try: os.remove(srcfile) except OSError: - logger.warning("unable to remove interim restart file {}".format(srcfile)) + logger.warning( + "unable to remove interim restart file {}".format( + srcfile + ) + ) else: - logger.warning("interim restart file {} does not exist".format(srcfile)) + logger.warning( + "interim restart file {} does not exist".format(srcfile) + ) return histfiles_savein_rundir + ############################################################################### -def _archive_process(case, archive, last_date, archive_incomplete_logs, copy_only, - components=None,dout_s_root=None, casename=None, rundir=None, testonly=False): -############################################################################### +def _archive_process( + case, + archive, + last_date, + archive_incomplete_logs, + copy_only, + components=None, + dout_s_root=None, + casename=None, + rundir=None, + testonly=False, +): + ############################################################################### """ Parse config_archive.xml and perform short term archiving """ - logger.debug('In archive_process...') + logger.debug("In archive_process...") if dout_s_root is None: dout_s_root = case.get_value("DOUT_S_ROOT") @@ -593,14 +840,13 @@ def _archive_process(case, archive, last_date, archive_incomplete_logs, copy_onl casename = case.get_value("CASE") if components is None: components = case.get_compset_components() - components.append('drv') - components.append('dart') + components.append("drv") + components.append("dart") archive_file_fn = _get_archive_file_fn(copy_only) # archive log files - _archive_log_files(dout_s_root, rundir, - archive_incomplete_logs, archive_file_fn) + _archive_log_files(dout_s_root, rundir, archive_incomplete_logs, archive_file_fn) # archive restarts and all necessary associated files (e.g. 
rpointer files) datenames = _get_datenames(casename, rundir) @@ -611,31 +857,61 @@ def _archive_process(case, archive, last_date, archive_incomplete_logs, copy_onl if datename == datenames[-1]: datename_is_last = True - logger.debug("datename {} last_date {}".format(datename,last_date)) + logger.debug("datename {} last_date {}".format(datename, last_date)) if last_date is None or datename <= last_date: - archive_restdir = join(dout_s_root, 'rest', _datetime_str(datename)) + archive_restdir = join(dout_s_root, "rest", _datetime_str(datename)) histfiles_savein_rundir_by_compname_this_date = _archive_restarts_date( - case, casename, rundir, archive, datename, datename_is_last, - last_date, archive_restdir, archive_file_fn, components, testonly=testonly) + case, + casename, + rundir, + archive, + datename, + datename_is_last, + last_date, + archive_restdir, + archive_file_fn, + components, + testonly=testonly, + ) if datename_is_last: - histfiles_savein_rundir_by_compname = histfiles_savein_rundir_by_compname_this_date + histfiles_savein_rundir_by_compname = ( + histfiles_savein_rundir_by_compname_this_date + ) # archive history files for (_, compname, compclass) in _get_component_archive_entries(components, archive): if compclass: - logger.info('Archiving history files for {} ({})'.format(compname, compclass)) - histfiles_savein_rundir = histfiles_savein_rundir_by_compname.get(compname, []) - logger.debug("_archive_process: histfiles_savein_rundir {} ".format(histfiles_savein_rundir)) - _archive_history_files(archive, - compclass, compname, histfiles_savein_rundir, - last_date, archive_file_fn, - dout_s_root, casename, rundir) + logger.info( + "Archiving history files for {} ({})".format(compname, compclass) + ) + histfiles_savein_rundir = histfiles_savein_rundir_by_compname.get( + compname, [] + ) + logger.debug( + "_archive_process: histfiles_savein_rundir {} ".format( + histfiles_savein_rundir + ) + ) + _archive_history_files( + archive, + compclass, + compname, + histfiles_savein_rundir, + last_date, + archive_file_fn, + dout_s_root, + casename, + rundir, + ) + ############################################################################### -def restore_from_archive(self, rest_dir=None, dout_s_root=None, rundir=None, test=False): -############################################################################### +def restore_from_archive( + self, rest_dir=None, dout_s_root=None, rundir=None, test=False +): + ############################################################################### """ Take archived restart files and load them into current case. Use rest_dir if provided otherwise use most recent restore_from_archive is a member of Class Case @@ -651,12 +927,18 @@ def restore_from_archive(self, rest_dir=None, dout_s_root=None, rundir=None, tes rest_root = os.path.join(dout_s_root, "rest") if os.path.exists(rest_root): - rest_dir = os.path.join(rest_root, ls_sorted_by_mtime(os.path.join(dout_s_root, "rest"))[-1]) - - if rest_dir is None and test: - logger.warning("No rest_dir found for test - is this expected? DOUT_S_ROOT={}".format(dout_s_root)) + rest_dir = os.path.join( + rest_root, ls_sorted_by_mtime(os.path.join(dout_s_root, "rest"))[-1] + ) + + if rest_dir is None and test: + logger.warning( + "No rest_dir found for test - is this expected? 
DOUT_S_ROOT={}".format( + dout_s_root + ) + ) return - expect(os.path.exists(rest_dir),"ERROR: No directory {} found".format(rest_dir)) + expect(os.path.exists(rest_dir), "ERROR: No directory {} found".format(rest_dir)) logger.info("Restoring restart from {}".format(rest_dir)) for item in glob.glob("{}/*".format(rest_dir)): @@ -668,9 +950,12 @@ def restore_from_archive(self, rest_dir=None, dout_s_root=None, rundir=None, tes safe_copy(item, rundir) + ############################################################################### -def archive_last_restarts(self, archive_restdir, rundir, last_date=None, link_to_restart_files=False): -############################################################################### +def archive_last_restarts( + self, archive_restdir, rundir, last_date=None, link_to_restart_files=False +): + ############################################################################### """ Convenience function for archiving just the last set of restart files to a given directory. This also saves files attached to the @@ -684,7 +969,7 @@ def archive_last_restarts(self, archive_restdir, rundir, last_date=None, link_to are done for the restart files. (This has no effect on the history files that are associated with these restart files.) """ - archive = self.get_env('archive') + archive = self.get_env("archive") casename = self.get_value("CASE") datenames = _get_datenames(casename, rundir) expect(len(datenames) >= 1, "No restart dates found") @@ -694,20 +979,29 @@ def archive_last_restarts(self, archive_restdir, rundir, last_date=None, link_to # set of restart files, but needed to satisfy the following interface archive_file_fn = _get_archive_file_fn(copy_only=False) - _ = _archive_restarts_date(case=self, - casename=casename, - rundir=rundir, - archive=archive, - datename=last_datename, - datename_is_last=True, - last_date=last_date, - archive_restdir=archive_restdir, - archive_file_fn=archive_file_fn, - link_to_last_restart_files=link_to_restart_files) + _ = _archive_restarts_date( + case=self, + casename=casename, + rundir=rundir, + archive=archive, + datename=last_datename, + datename_is_last=True, + last_date=last_date, + archive_restdir=archive_restdir, + archive_file_fn=archive_file_fn, + link_to_last_restart_files=link_to_restart_files, + ) + ############################################################################### -def case_st_archive(self, last_date_str=None, archive_incomplete_logs=True, copy_only=False, resubmit=True): -############################################################################### +def case_st_archive( + self, + last_date_str=None, + archive_incomplete_logs=True, + copy_only=False, + resubmit=True, +): + ############################################################################### """ Create archive object and perform short term archiving """ @@ -718,24 +1012,25 @@ def case_st_archive(self, last_date_str=None, archive_incomplete_logs=True, copy try: last_date = get_file_date(last_date_str) except ValueError: - expect(False, 'Could not parse the last date to archive') + expect(False, "Could not parse the last date to archive") else: last_date = None - dout_s_root = self.get_value('DOUT_S_ROOT') - if dout_s_root is None or dout_s_root == 'UNSET': - expect(False, - 'XML variable DOUT_S_ROOT is required for short-term achiver') + dout_s_root = self.get_value("DOUT_S_ROOT") + if dout_s_root is None or dout_s_root == "UNSET": + expect(False, "XML variable DOUT_S_ROOT is required for short-term achiver") if not isdir(dout_s_root): 
os.makedirs(dout_s_root) - dout_s_save_interim = self.get_value('DOUT_S_SAVE_INTERIM_RESTART_FILES') - if dout_s_save_interim == 'FALSE' or dout_s_save_interim == 'UNSET': - rest_n = self.get_value('REST_N') - stop_n = self.get_value('STOP_N') + dout_s_save_interim = self.get_value("DOUT_S_SAVE_INTERIM_RESTART_FILES") + if dout_s_save_interim == "FALSE" or dout_s_save_interim == "UNSET": + rest_n = self.get_value("REST_N") + stop_n = self.get_value("STOP_N") if rest_n < stop_n: - logger.warning('Restart files from end of run will be saved' - 'interim restart files will be deleted') + logger.warning( + "Restart files from end of run will be saved, " + "interim restart files will be deleted" + ) logger.info("st_archive starting") @@ -746,13 +1041,18 @@ def case_st_archive(self, last_date_str=None, archive_incomplete_logs=True, copy jobid = batch_jobid() msg_func = lambda *args: jobid if jobid is not None else "" - archive = self.get_env('archive') - functor = lambda: _archive_process(self, archive, last_date, archive_incomplete_logs, copy_only) - run_and_log_case_status(functor, "st_archive", - custom_starting_msg_functor=msg_func, - custom_success_msg_functor=msg_func, - caseroot=caseroot, - is_batch=is_batch) + archive = self.get_env("archive") + functor = lambda: _archive_process( + self, archive, last_date, archive_incomplete_logs, copy_only + ) + run_and_log_case_status( + functor, + "st_archive", + custom_starting_msg_functor=msg_func, + custom_success_msg_functor=msg_func, + caseroot=caseroot, + is_batch=is_batch, + ) logger.info("st_archive completed") @@ -761,23 +1061,32 @@ def case_st_archive(self, last_date_str=None, archive_incomplete_logs=True, copy resubmit_cnt = self.get_value("RESUBMIT") logger.debug("resubmit_cnt {} resubmit {}".format(resubmit_cnt, resubmit)) if resubmit_cnt > 0: - logger.info("resubmitting from st_archive, resubmit={:d}".format(resubmit_cnt)) + logger.info( + "resubmitting from st_archive, resubmit={:d}".format(resubmit_cnt) + ) if self.get_value("MACH") == "mira": - expect(os.path.isfile(".original_host"), "ERROR alcf host file not found") + expect( + os.path.isfile(".original_host"), "ERROR alcf host file not found" + ) with open(".original_host", "r") as fd: sshhost = fd.read() - run_cmd("ssh cooleylogin1 ssh {} '{case}/case.submit {case} --resubmit' "\ .format(sshhost, case=caseroot), verbose=True) + run_cmd( + "ssh cooleylogin1 ssh {} '{case}/case.submit {case} --resubmit' ".format( + sshhost, case=caseroot + ), + verbose=True, + ) else: self.submit(resubmit=True) return True + def test_st_archive(self, testdir="st_archive_test"): files = Files() archive = Archive(files=files) components = [] -# expect(not self.get_value("MULTI_DRIVER"),"Test not configured for multi-driver cases") + # expect(not self.get_value("MULTI_DRIVER"),"Test not configured for multi-driver cases") config_archive_files = archive.get_all_config_archive_files(files) # create the run directory testdir and populate it with rest_file and hist_file from
comp_archive_specs: - components.append(archive.get(comp_archive_spec, 'compname')) - test_file_names = archive.get_optional_child("test_file_names", root=comp_archive_spec) + components.append(archive.get(comp_archive_spec, "compname")) + test_file_names = archive.get_optional_child( + "test_file_names", root=comp_archive_spec + ) if test_file_names is not None: if not os.path.exists(testdir): - os.makedirs(os.path.join(testdir,"archive")) + os.makedirs(os.path.join(testdir, "archive")) for file_node in archive.get_children("tfile", root=test_file_names): - fname = os.path.join(testdir,archive.text(file_node)) + fname = os.path.join(testdir, archive.text(file_node)) disposition = archive.get(file_node, "disposition") - logger.info("Create file {} with disposition {}". - format(fname, disposition)) - with open(fname, 'w') as fd: - fd.write(disposition+"\n") + logger.info( + "Create file {} with disposition {}".format(fname, disposition) + ) + with open(fname, "w") as fd: + fd.write(disposition + "\n") logger.info("testing components: {} ".format(list(set(components)))) - _archive_process(self, archive, None, False, False,components=list(set(components)), - dout_s_root=dout_s_root, - casename="casename", rundir=testdir, testonly=True) + _archive_process( + self, + archive, + None, + False, + False, + components=list(set(components)), + dout_s_root=dout_s_root, + casename="casename", + rundir=testdir, + testonly=True, + ) _check_disposition(testdir) # Now test the restore capability - testdir2 = os.path.join(testdir,"run2") + testdir2 = os.path.join(testdir, "run2") os.makedirs(testdir2) restore_from_archive(self, rundir=testdir2, dout_s_root=dout_s_root, test=True) - restfiles = [f for f in os.listdir(os.path.join(testdir,"archive","rest","1976-01-01-00000"))] + restfiles = [ + f + for f in os.listdir( + os.path.join(testdir, "archive", "rest", "1976-01-01-00000") + ) + ] for _file in restfiles: - expect(os.path.isfile(os.path.join(testdir2,_file)), "Expected file {} to be restored from rest dir".format(_file)) + expect( + os.path.isfile(os.path.join(testdir2, _file)), + "Expected file {} to be restored from rest dir".format(_file), + ) return True + def test_env_archive(self, testdir="env_archive_test"): components = self.get_values("COMP_CLASSES") comps_in_case = [] @@ -833,94 +1163,142 @@ def test_env_archive(self, testdir="env_archive_test"): if os.path.exists(testdir): logger.info("Removing existing test directory {}".format(testdir)) shutil.rmtree(testdir) - dout_s_root=os.path.join(testdir,"archive") - archive = self.get_env('archive') + dout_s_root = os.path.join(testdir, "archive") + archive = self.get_env("archive") comp_archive_specs = archive.scan_children("comp_archive_spec") # ignore stub and dead components for comp in list(components): compname = self.get_value("COMP_{}".format(comp)) - if (compname == 's'+comp.lower() or compname == 'x'+comp.lower()) and comp != 'ESP': + if ( + compname == "s" + comp.lower() or compname == "x" + comp.lower() + ) and comp != "ESP": logger.info("Not testing component {}".format(comp)) components.remove(comp) - elif comp == 'ESP' and self.get_value('MODEL') == 'e3sm': + elif comp == "ESP" and self.get_value("MODEL") == "e3sm": components.remove(comp) else: - if compname == 'cpl': - compname = 'drv' + if compname == "cpl": + compname = "drv" comps_in_case.append(compname) for comp_archive_spec in comp_archive_specs: - comp_expected = archive.get(comp_archive_spec, 'compname') + comp_expected = archive.get(comp_archive_spec, "compname") if 
comp_expected == "ww3": comp_expected = "ww" - comp_class = archive.get(comp_archive_spec, 'compclass').upper() + comp_class = archive.get(comp_archive_spec, "compclass").upper() if comp_class in components: components.remove(comp_class) else: - expect(False,"Error finding comp_class {} in components".format(comp_class)) - if comp_expected == 'cpl': - comp_expected = 'drv' - if comp_expected != 'dart': - expect(comp_expected in comps_in_case, "env_archive defines component {} not defined in case".format(comp_expected)) - - test_file_names = archive.get_optional_child("test_file_names", root=comp_archive_spec) + expect( + False, "Error finding comp_class {} in components".format(comp_class) + ) + if comp_expected == "cpl": + comp_expected = "drv" + if comp_expected != "dart": + expect( + comp_expected in comps_in_case, + "env_archive defines component {} not defined in case".format( + comp_expected + ), + ) + + test_file_names = archive.get_optional_child( + "test_file_names", root=comp_archive_spec + ) if test_file_names is not None: if not os.path.exists(testdir): - os.makedirs(os.path.join(testdir,"archive")) + os.makedirs(os.path.join(testdir, "archive")) for file_node in archive.get_children("tfile", root=test_file_names): - fname = os.path.join(testdir,archive.text(file_node)) + fname = os.path.join(testdir, archive.text(file_node)) disposition = archive.get(file_node, "disposition") - logger.info("Create file {} with disposition {}". - format(fname, disposition)) - with open(fname, 'w') as fd: - fd.write(disposition+"\n") - - expect(not components, "No archive entry found for components: {}".format(components)) - if 'dart' not in comps_in_case: - comps_in_case.append('dart') + logger.info( + "Create file {} with disposition {}".format(fname, disposition) + ) + with open(fname, "w") as fd: + fd.write(disposition + "\n") + + expect( + not components, "No archive entry found for components: {}".format(components) + ) + if "dart" not in comps_in_case: + comps_in_case.append("dart") logger.info("testing components: {} ".format(comps_in_case)) - _archive_process(self, archive, None, False, False,components=comps_in_case, - dout_s_root=dout_s_root, - casename="casename", rundir=testdir, testonly=True) + _archive_process( + self, + archive, + None, + False, + False, + components=comps_in_case, + dout_s_root=dout_s_root, + casename="casename", + rundir=testdir, + testonly=True, + ) _check_disposition(testdir) # Now test the restore capability - testdir2 = os.path.join(testdir,"run2") + testdir2 = os.path.join(testdir, "run2") os.makedirs(testdir2) restfiles = [] restore_from_archive(self, rundir=testdir2, dout_s_root=dout_s_root, test=True) - if os.path.exists(os.path.join(testdir,"archive","rest")): - restfiles = [f for f in os.listdir(os.path.join(testdir,"archive","rest","1976-01-01-00000"))] + if os.path.exists(os.path.join(testdir, "archive", "rest")): + restfiles = [ + f + for f in os.listdir( + os.path.join(testdir, "archive", "rest", "1976-01-01-00000") + ) + ] for _file in restfiles: - expect(os.path.isfile(os.path.join(testdir2,_file)), "Expected file {} to be restored from rest dir".format(_file)) + expect( + os.path.isfile(os.path.join(testdir2, _file)), + "Expected file {} to be restored from rest dir".format(_file), + ) return True + def _check_disposition(testdir): copyfilelist = [] for root, _, files in os.walk(testdir): for _file in files: with open(os.path.join(root, _file), "r") as fd: disposition = fd.readline() - logger.info("Checking testfile {} with disposition 
{}".format(_file, disposition)) + logger.info( + "Checking testfile {} with disposition {}".format(_file, disposition) + ) if root == testdir: if "move" in disposition: if find_files(os.path.join(testdir, "archive"), _file): - expect(False, - "Copied file {} to archive with disposition move".format(_file)) + expect( + False, + "Copied file {} to archive with disposition move".format( + _file + ), + ) else: - expect(False, - "Failed to move file {} to archive".format(_file)) + expect(False, "Failed to move file {} to archive".format(_file)) if "copy" in disposition: copyfilelist.append(_file) elif "ignore" in disposition: - expect(False, "Moved file {} with dispostion ignore to directory {}".format(_file, root)) + expect( + False, + "Moved file {} with dispostion ignore to directory {}".format( + _file, root + ), + ) elif "copy" in disposition: - expect(_file in copyfilelist, "File {} with disposition copy was moved to directory {}" - .format(_file, root)) + expect( + _file in copyfilelist, + "File {} with disposition copy was moved to directory {}".format( + _file, root + ), + ) for _file in copyfilelist: - expect(find_files(os.path.join(testdir,"archive"), _file) != [], - "File {} was not copied to archive.".format(_file)) + expect( + find_files(os.path.join(testdir, "archive"), _file) != [], + "File {} was not copied to archive.".format(_file), + ) diff --git a/CIME/case/case_submit.py b/CIME/case/case_submit.py index b90f611720e..3c826947913 100644 --- a/CIME/case/case_submit.py +++ b/CIME/case/case_submit.py @@ -8,14 +8,15 @@ """ import configparser from CIME.XML.standard_module_setup import * -from CIME.utils import expect, run_and_log_case_status, CIMEError -from CIME.locked_files import unlock_file, lock_file -from CIME.test_status import * +from CIME.utils import expect, run_and_log_case_status, CIMEError +from CIME.locked_files import unlock_file, lock_file +from CIME.test_status import * import socket logger = logging.getLogger(__name__) + def _build_prereq_str(case, prev_job_ids): delimiter = case.get_value("depend_separator") prereq_str = "" @@ -23,23 +24,38 @@ def _build_prereq_str(case, prev_job_ids): prereq_str += str(job_id) + delimiter return prereq_str[:-1] -def _submit(case, job=None, no_batch=False, prereq=None, allow_fail=False, resubmit=False, - resubmit_immediate=False, skip_pnl=False, mail_user=None, mail_type=None, - batch_args=None, workflow=True, chksum=False): + +def _submit( + case, + job=None, + no_batch=False, + prereq=None, + allow_fail=False, + resubmit=False, + resubmit_immediate=False, + skip_pnl=False, + mail_user=None, + mail_type=None, + batch_args=None, + workflow=True, + chksum=False, +): if job is None: job = case.get_first_job() caseroot = case.get_value("CASEROOT") # Check mediator hasMediator = True comp_classes = case.get_values("COMP_CLASSES") - if 'CPL' not in comp_classes: + if "CPL" not in comp_classes: hasMediator = False # Check if CONTINUE_RUN value makes sense if job != "case.test" and case.get_value("CONTINUE_RUN") and hasMediator: rundir = case.get_value("RUNDIR") - expect(os.path.isdir(rundir), - "CONTINUE_RUN is true but RUNDIR {} does not exist".format(rundir)) + expect( + os.path.isdir(rundir), + "CONTINUE_RUN is true but RUNDIR {} does not exist".format(rundir), + ) # only checks for the first instance in a multidriver case if case.get_value("COMP_INTERFACE") == "nuopc": rpointer = "rpointer.cpl" @@ -47,16 +63,23 @@ def _submit(case, job=None, no_batch=False, prereq=None, allow_fail=False, resub rpointer = "rpointer.drv_0001" 
else: rpointer = "rpointer.drv" - expect(os.path.exists(os.path.join(rundir,rpointer)), - "CONTINUE_RUN is true but this case does not appear to have restart files staged in {} {}".format(rundir,rpointer)) + expect( + os.path.exists(os.path.join(rundir, rpointer)), + "CONTINUE_RUN is true but this case does not appear to have restart files staged in {} {}".format( + rundir, rpointer + ), + ) # Finally we open the rpointer file and check that it's correct casename = case.get_value("CASE") - with open(os.path.join(rundir,rpointer), "r") as fd: + with open(os.path.join(rundir, rpointer), "r") as fd: ncfile = fd.readline().strip() - expect(ncfile.startswith(casename) and - os.path.exists(os.path.join(rundir,ncfile)), - "File {ncfile} not present or does not match case {casename}". - format(ncfile=os.path.join(rundir,ncfile),casename=casename)) + expect( + ncfile.startswith(casename) + and os.path.exists(os.path.join(rundir, ncfile)), + "File {ncfile} not present or does not match case {casename}".format( + ncfile=os.path.join(rundir, ncfile), casename=casename + ), + ) # if case.submit is called with the no_batch flag then we assume that this # flag will stay in effect for the duration of the RESUBMITs @@ -75,17 +98,20 @@ def _submit(case, job=None, no_batch=False, prereq=None, allow_fail=False, resub env_batch_has_changed = False if not external_workflow: try: - case.check_lockedfile(os.path.basename(env_batch.filename), caseroot=caseroot) + case.check_lockedfile( + os.path.basename(env_batch.filename), caseroot=caseroot + ) except: env_batch_has_changed = True if batch_system != "none" and env_batch_has_changed and not external_workflow: # May need to regen batch files if user made batch setting changes (e.g. walltime, queue, etc) - logger.warning(\ -""" + logger.warning( + """ env_batch.xml appears to have changed, regenerating batch scripts manual edits to these file will be lost! -""") +""" + ) env_batch.make_all_batch_files(case) case.flush() lock_file(os.path.basename(env_batch.filename), caseroot=caseroot) @@ -97,7 +123,7 @@ def _submit(case, job=None, no_batch=False, prereq=None, allow_fail=False, resub resub = case.get_value("RESUBMIT") logger.info("Submitting job '{}', resubmit={:d}".format(job, resub)) - case.set_value("RESUBMIT", resub-1) + case.set_value("RESUBMIT", resub - 1) if case.get_value("RESUBMIT_SETS_CONTINUE_RUN"): case.set_value("CONTINUE_RUN", True) @@ -120,11 +146,12 @@ def _submit(case, job=None, no_batch=False, prereq=None, allow_fail=False, resub if env_batch.get_batch_system_type() != "none" and env_batch_has_changed: # May need to regen batch files if user made batch setting changes (e.g. walltime, queue, etc) - logger.warning(\ -""" + logger.warning( + """ env_batch.xml appears to have changed, regenerating batch scripts manual edits to these file will be lost! 
-""") +""" + ) env_batch.make_all_batch_files(case) unlock_file(os.path.basename(env_batch.filename), caseroot=caseroot) @@ -135,18 +162,26 @@ def _submit(case, job=None, no_batch=False, prereq=None, allow_fail=False, resub case.check_DA_settings() if case.get_value("MACH") == "mira": with open(".original_host", "w") as fd: - fd.write( socket.gethostname()) + fd.write(socket.gethostname()) - #Load Modules + # Load Modules case.load_env() case.flush() logger.warning("submit_jobs {}".format(job)) - job_ids = case.submit_jobs(no_batch=no_batch, job=job, prereq=prereq, - skip_pnl=skip_pnl, resubmit_immediate=resubmit_immediate, - allow_fail=allow_fail, mail_user=mail_user, - mail_type=mail_type, batch_args=batch_args, workflow=workflow) + job_ids = case.submit_jobs( + no_batch=no_batch, + job=job, + prereq=prereq, + skip_pnl=skip_pnl, + resubmit_immediate=resubmit_immediate, + allow_fail=allow_fail, + mail_user=mail_user, + mail_type=mail_type, + batch_args=batch_args, + workflow=workflow, + ) xml_jobids = [] for jobname, jobid in job_ids.items(): @@ -160,11 +195,26 @@ def _submit(case, job=None, no_batch=False, prereq=None, allow_fail=False, resub return xml_jobid_text -def submit(self, job=None, no_batch=False, prereq=None, allow_fail=False, resubmit=False, - resubmit_immediate=False, skip_pnl=False, mail_user=None, mail_type=None, - batch_args=None, workflow=True, chksum=False): - if resubmit_immediate and self.get_value("MACH") in ['mira', 'cetus']: - logger.warning("resubmit_immediate does not work on Mira/Cetus, submitting normally") + +def submit( + self, + job=None, + no_batch=False, + prereq=None, + allow_fail=False, + resubmit=False, + resubmit_immediate=False, + skip_pnl=False, + mail_user=None, + mail_type=None, + batch_args=None, + workflow=True, + chksum=False, +): + if resubmit_immediate and self.get_value("MACH") in ["mira", "cetus"]: + logger.warning( + "resubmit_immediate does not work on Mira/Cetus, submitting normally" + ) resubmit_immediate = False caseroot = self.get_value("CASEROOT") @@ -186,28 +236,41 @@ def submit(self, job=None, no_batch=False, prereq=None, allow_fail=False, resubm if resubmit and os.path.exists(submit_options): config = configparser.RawConfigParser() config.read(submit_options) - if not skip_pnl and config.has_option('SubmitOptions','skip_pnl'): - skip_pnl = config.getboolean('SubmitOptions', 'skip_pnl') - if mail_user is None and config.has_option('SubmitOptions', 'mail_user'): - mail_user = config.get('SubmitOptions', 'mail_user') - if mail_type is None and config.has_option('SubmitOptions', 'mail_type'): - mail_type = str(config.get('SubmitOptions', 'mail_type')).split(',') - if batch_args is None and config.has_option('SubmitOptions', 'batch_args'): - batch_args = config.get('SubmitOptions', 'batch_args') + if not skip_pnl and config.has_option("SubmitOptions", "skip_pnl"): + skip_pnl = config.getboolean("SubmitOptions", "skip_pnl") + if mail_user is None and config.has_option("SubmitOptions", "mail_user"): + mail_user = config.get("SubmitOptions", "mail_user") + if mail_type is None and config.has_option("SubmitOptions", "mail_type"): + mail_type = str(config.get("SubmitOptions", "mail_type")).split(",") + if batch_args is None and config.has_option("SubmitOptions", "batch_args"): + batch_args = config.get("SubmitOptions", "batch_args") is_batch = self.get_value("BATCH_SYSTEM") is not None try: - functor = lambda: _submit(self, job=job, no_batch=no_batch, prereq=prereq, - allow_fail=allow_fail, resubmit=resubmit, - 
resubmit_immediate=resubmit_immediate, skip_pnl=skip_pnl, - mail_user=mail_user, mail_type=mail_type, - batch_args=batch_args, workflow=workflow, - chksum=chksum) - run_and_log_case_status(functor, "case.submit", caseroot=caseroot, - custom_success_msg_functor=lambda x: x.split(":")[-1], - is_batch=is_batch) - except BaseException: # Want to catch KeyboardInterrupt too + functor = lambda: _submit( + self, + job=job, + no_batch=no_batch, + prereq=prereq, + allow_fail=allow_fail, + resubmit=resubmit, + resubmit_immediate=resubmit_immediate, + skip_pnl=skip_pnl, + mail_user=mail_user, + mail_type=mail_type, + batch_args=batch_args, + workflow=workflow, + chksum=chksum, + ) + run_and_log_case_status( + functor, + "case.submit", + caseroot=caseroot, + custom_success_msg_functor=lambda x: x.split(":")[-1], + is_batch=is_batch, + ) + except BaseException: # Want to catch KeyboardInterrupt too # If something failed in the batch system, make sure to mark # the test as failed if we are running a test. if self.get_value("TEST"): @@ -216,24 +279,31 @@ def submit(self, job=None, no_batch=False, prereq=None, allow_fail=False, resubm raise + def check_case(self, skip_pnl=False, chksum=False): self.check_lockedfiles() if not skip_pnl: - self.create_namelists() # Must be called before check_all_input_data + self.create_namelists() # Must be called before check_all_input_data logger.info("Checking that inputdata is available as part of case submission") self.check_all_input_data(chksum=chksum) - if self.get_value('COMP_WAV') == 'ww': + if self.get_value("COMP_WAV") == "ww": # the ww3 buildnml has dependencies on inputdata so we must run it again - self.create_namelists(component='WAV') + self.create_namelists(component="WAV") - expect(self.get_value("BUILD_COMPLETE"), "Build complete is " - "not True please rebuild the model by calling case.build") + expect( + self.get_value("BUILD_COMPLETE"), + "Build complete is " "not True please rebuild the model by calling case.build", + ) logger.info("Check case OK") + def check_DA_settings(self): script = self.get_value("DATA_ASSIMILATION_SCRIPT") cycles = self.get_value("DATA_ASSIMILATION_CYCLES") if len(script) > 0 and os.path.isfile(script) and cycles > 0: - logger.info("Data Assimilation enabled using script {} with {:d} cycles".format(script, - cycles)) + logger.info( + "Data Assimilation enabled using script {} with {:d} cycles".format( + script, cycles + ) + ) diff --git a/CIME/case/case_test.py b/CIME/case/case_test.py index 6ed144437f8..030c5f6618b 100644 --- a/CIME/case/case_test.py +++ b/CIME/case/case_test.py @@ -9,10 +9,16 @@ import sys, signal + def _iter_signal_names(): - for signame in [item for item in dir(signal) if item.startswith("SIG") and not item.startswith("SIG_")]: + for signame in [ + item + for item in dir(signal) + if item.startswith("SIG") and not item.startswith("SIG_") + ]: yield signame + def _signal_handler(signum, _): name = "Unknown" for signame in _iter_signal_names(): @@ -32,6 +38,7 @@ def _signal_handler(signum, _): # Throw an exception so SystemTest infrastructure can handle this error expect(False, "Job killed due to receiving signal {:d} ({})".format(signum, name)) + def _set_up_signal_handlers(): """ Add handles for all signals that might be used to abort a test @@ -43,9 +50,10 @@ def _set_up_signal_handlers(): signum = getattr(signal, signame) signal.signal(signum, _signal_handler) + def case_test(self, testname=None, reset=False, skip_pnl=False): if testname is None: - testname = self.get_value('TESTCASE') + testname = 
self.get_value("TESTCASE") expect(testname is not None, "testname argument not resolved") logging.warning("Running test for {}".format(testname)) diff --git a/CIME/case/check_input_data.py b/CIME/case/check_input_data.py index 6169531f236..a7ff5d95dad 100644 --- a/CIME/case/check_input_data.py +++ b/CIME/case/check_input_data.py @@ -11,7 +11,8 @@ logger = logging.getLogger(__name__) # The inputdata_checksum.dat file will be read into this hash if it's available chksum_hash = dict() -local_chksum_file = 'inputdata_checksum.dat' +local_chksum_file = "inputdata_checksum.dat" + def _download_checksum_file(rundir): """ @@ -22,11 +23,15 @@ def _download_checksum_file(rundir): chksum_found = False # download and merge all available chksum files. while protocol is not None: - protocol, address, user, passwd, chksum_file,_,_ = inputdata.get_next_server() + protocol, address, user, passwd, chksum_file, _, _ = inputdata.get_next_server() if protocol not in vars(CIME.Servers): logger.info("Client protocol {} not enabled".format(protocol)) continue - logger.info("Using protocol {} with user {} and passwd {}".format(protocol, user, passwd)) + logger.info( + "Using protocol {} with user {} and passwd {}".format( + protocol, user, passwd + ) + ) if protocol == "svn": server = CIME.Servers.SVN(address, user, passwd) elif protocol == "gftp": @@ -48,12 +53,16 @@ def _download_checksum_file(rundir): success = False rel_path = chksum_file full_path = os.path.join(rundir, local_chksum_file) - new_file = full_path + '.raw' + new_file = full_path + ".raw" protocol = type(server).__name__ - logger.info("Trying to download file: '{}' to path '{}' using {} protocol.".format(rel_path, new_file, protocol)) + logger.info( + "Trying to download file: '{}' to path '{}' using {} protocol.".format( + rel_path, new_file, protocol + ) + ) tmpfile = None if os.path.isfile(full_path): - tmpfile = full_path+".tmp" + tmpfile = full_path + ".tmp" os.rename(full_path, tmpfile) # Use umask to make sure files are group read/writable. As long as parent directories # have +s, then everything should work. @@ -67,31 +76,37 @@ def _download_checksum_file(rundir): else: if tmpfile and os.path.isfile(tmpfile): os.rename(tmpfile, full_path) - logger.warning("Could not automatically download file "+full_path+ - " Restoring existing version.") + logger.warning( + "Could not automatically download file " + + full_path + + " Restoring existing version." + ) else: - logger.warning("Could not automatically download file {}". - format(full_path)) + logger.warning( + "Could not automatically download file {}".format(full_path) + ) return chksum_found + def _reformat_chksum_file(chksum_file, server_file): """ The checksum file on the server has 8 space seperated columns, I need the first and last ones. 
This function gets the first and last column of server_file and saves it to chksum_file """ - with open(server_file) as fd, open(chksum_file,"w") as fout: + with open(server_file) as fd, open(chksum_file, "w") as fout: lines = fd.readlines() for line in lines: lsplit = line.split() - if len(lsplit) < 8 or ' DIR ' in line: + if len(lsplit) < 8 or " DIR " in line: continue # remove the first directory ('inputdata/') from the filename chksum = lsplit[0] - fname = (lsplit[7]).split('/',1)[1] - fout.write(" ".join((chksum, fname))+"\n") + fname = (lsplit[7]).split("/", 1)[1] + fout.write(" ".join((chksum, fname)) + "\n") os.remove(server_file) + def _merge_chksum_files(new_file, old_file): """ If more than one server checksum file is available, this merges the files and removes @@ -107,8 +122,9 @@ def _merge_chksum_files(new_file, old_file): os.remove(old_file) - -def _download_if_in_repo(server, input_data_root, rel_path, isdirectory=False, ic_filepath=None): +def _download_if_in_repo( + server, input_data_root, rel_path, isdirectory=False, ic_filepath=None +): """ Return True if successfully downloaded server is an object handle of type CIME.Servers @@ -122,12 +138,16 @@ def _download_if_in_repo(server, input_data_root, rel_path, isdirectory=False, i full_path = os.path.join(input_data_root, rel_path) if ic_filepath: full_path = full_path.replace(ic_filepath, "/") - logger.info("Trying to download file: '{}' to path '{}' using {} protocol.".format(rel_path, full_path, type(server).__name__)) + logger.info( + "Trying to download file: '{}' to path '{}' using {} protocol.".format( + rel_path, full_path, type(server).__name__ + ) + ) # Make sure local path exists, create if it does not if isdirectory or full_path.endswith(os.sep): if not os.path.exists(full_path): logger.info("Creating directory {}".format(full_path)) - os.makedirs(full_path+".tmp") + os.makedirs(full_path + ".tmp") isdirectory = True elif not os.path.exists(os.path.dirname(full_path)): os.makedirs(os.path.dirname(full_path)) @@ -136,20 +156,28 @@ def _download_if_in_repo(server, input_data_root, rel_path, isdirectory=False, i # have +s, then everything should work. with SharedArea(): if isdirectory: - success = server.getdirectory(rel_path, full_path+".tmp") + success = server.getdirectory(rel_path, full_path + ".tmp") # this is intended to prevent a race condition in which # one case attempts to use a refdir before another one has # completed the download if success: - os.rename(full_path+".tmp",full_path) + os.rename(full_path + ".tmp", full_path) else: - shutil.rmtree(full_path+".tmp") + shutil.rmtree(full_path + ".tmp") else: success = server.getfile(rel_path, full_path) return success -def check_all_input_data(self, protocol=None, address=None, input_data_root=None, data_list_dir="Buildconf", - download=True, chksum=False): + +def check_all_input_data( + self, + protocol=None, + address=None, + input_data_root=None, + data_list_dir="Buildconf", + download=True, + chksum=False, +): """ Read through all files of the form *.input_data_list in the data_list_dir directory. These files contain a list of input and boundary files needed by each model component. 
For each file in the @@ -158,8 +186,14 @@ def check_all_input_data(self, protocol=None, address=None, input_data_root=None """ success = False if protocol is not None and address is not None: - success = self.check_input_data(protocol=protocol, address=address, download=download, - input_data_root=input_data_root, data_list_dir=data_list_dir, chksum=chksum) + success = self.check_input_data( + protocol=protocol, + address=address, + download=download, + input_data_root=input_data_root, + data_list_dir=data_list_dir, + chksum=chksum, + ) else: if chksum: chksum_found = _download_checksum_file(self.get_value("RUNDIR")) @@ -169,47 +203,70 @@ def check_all_input_data(self, protocol=None, address=None, input_data_root=None clm_usrdat_name = None if download and clm_usrdat_name: - success = _downloadfromserver(self, input_data_root, data_list_dir, - attributes={"CLM_USRDAT_NAME":clm_usrdat_name}) + success = _downloadfromserver( + self, + input_data_root, + data_list_dir, + attributes={"CLM_USRDAT_NAME": clm_usrdat_name}, + ) if not success: - success = self.check_input_data(protocol=protocol, address=address, download=False, - input_data_root=input_data_root, data_list_dir=data_list_dir, chksum=chksum and chksum_found) + success = self.check_input_data( + protocol=protocol, + address=address, + download=False, + input_data_root=input_data_root, + data_list_dir=data_list_dir, + chksum=chksum and chksum_found, + ) if download and not success: if not chksum: chksum_found = _download_checksum_file(self.get_value("RUNDIR")) success = _downloadfromserver(self, input_data_root, data_list_dir) - expect(not download or (download and success), "Could not find all inputdata on any server") + expect( + not download or (download and success), + "Could not find all inputdata on any server", + ) self.stage_refcase(input_data_root=input_data_root, data_list_dir=data_list_dir) return success + def _downloadfromserver(case, input_data_root, data_list_dir, attributes=None): """ Download files """ success = False - protocol = 'svn' + protocol = "svn" inputdata = Inputdata() if not input_data_root: - input_data_root = case.get_value('DIN_LOC_ROOT') + input_data_root = case.get_value("DIN_LOC_ROOT") while not success and protocol is not None: - protocol, address, user, passwd, _, ic_filepath, _ = inputdata.get_next_server(attributes=attributes) + protocol, address, user, passwd, _, ic_filepath, _ = inputdata.get_next_server( + attributes=attributes + ) logger.info("Checking server {} with protocol {}".format(address, protocol)) - success = case.check_input_data(protocol=protocol, address=address, download=True, - input_data_root=input_data_root, - data_list_dir=data_list_dir, - user=user, passwd=passwd, ic_filepath=ic_filepath) + success = case.check_input_data( + protocol=protocol, + address=address, + download=True, + input_data_root=input_data_root, + data_list_dir=data_list_dir, + user=user, + passwd=passwd, + ic_filepath=ic_filepath, + ) return success + def stage_refcase(self, input_data_root=None, data_list_dir=None): """ Get a REFCASE for a hybrid or branch run This is the only case in which we are downloading an entire directory instead of a single file at a time. 
""" - get_refcase = self.get_value("GET_REFCASE") - run_type = self.get_value("RUN_TYPE") + get_refcase = self.get_value("GET_REFCASE") + run_type = self.get_value("RUN_TYPE") continue_run = self.get_value("CONTINUE_RUN") # We do not fully populate the inputdata directory on every @@ -220,62 +277,93 @@ def stage_refcase(self, input_data_root=None, data_list_dir=None): # missing. if get_refcase and run_type != "startup" and not continue_run: din_loc_root = self.get_value("DIN_LOC_ROOT") - run_refdate = self.get_value("RUN_REFDATE") - run_refcase = self.get_value("RUN_REFCASE") - run_refdir = self.get_value("RUN_REFDIR") - rundir = self.get_value("RUNDIR") + run_refdate = self.get_value("RUN_REFDATE") + run_refcase = self.get_value("RUN_REFCASE") + run_refdir = self.get_value("RUN_REFDIR") + rundir = self.get_value("RUNDIR") if os.path.isabs(run_refdir): refdir = run_refdir - expect(os.path.isdir(refdir), "Reference case directory {} does not exist or is not readable".format(refdir)) + expect( + os.path.isdir(refdir), + "Reference case directory {} does not exist or is not readable".format( + refdir + ), + ) else: refdir = os.path.join(din_loc_root, run_refdir, run_refcase, run_refdate) if not os.path.isdir(refdir): - logger.warning("Refcase not found in {}, will attempt to download from inputdata".format(refdir)) - with open(os.path.join("Buildconf","refcase.input_data_list"),"w") as fd: + logger.warning( + "Refcase not found in {}, will attempt to download from inputdata".format( + refdir + ) + ) + with open( + os.path.join("Buildconf", "refcase.input_data_list"), "w" + ) as fd: fd.write("refdir = {}{}".format(refdir, os.sep)) if input_data_root is None: input_data_root = din_loc_root if data_list_dir is None: data_list_dir = "Buildconf" - success = _downloadfromserver(self, input_data_root=input_data_root, data_list_dir=data_list_dir) + success = _downloadfromserver( + self, input_data_root=input_data_root, data_list_dir=data_list_dir + ) expect(success, "Could not download refcase from any server") logger.info(" - Prestaging REFCASE ({}) to {}".format(refdir, rundir)) # prestage the reference case's files. - if (not os.path.exists(rundir)): + if not os.path.exists(rundir): logger.debug("Creating run directory: {}".format(rundir)) os.makedirs(rundir) rpointerfile = None # copy the refcases' rpointer files to the run directory - for rpointerfile in glob.iglob(os.path.join("{}","*rpointer*").format(refdir)): + for rpointerfile in glob.iglob(os.path.join("{}", "*rpointer*").format(refdir)): logger.info("Copy rpointer {}".format(rpointerfile)) safe_copy(rpointerfile, rundir) os.chmod(os.path.join(rundir, os.path.basename(rpointerfile)), 0o644) - expect(rpointerfile,"Reference case directory {} does not contain any rpointer files".format(refdir)) + expect( + rpointerfile, + "Reference case directory {} does not contain any rpointer files".format( + refdir + ), + ) # link everything else - for rcfile in glob.iglob(os.path.join(refdir,"*")): + for rcfile in glob.iglob(os.path.join(refdir, "*")): rcbaseline = os.path.basename(rcfile) if not os.path.exists("{}/{}".format(rundir, rcbaseline)): logger.info("Staging file {}".format(rcfile)) os.symlink(rcfile, "{}/{}".format(rundir, rcbaseline)) # Backward compatibility, some old refcases have cam2 in the name # link to local cam file. 
- for cam2file in glob.iglob(os.path.join("{}","*.cam2.*").format(rundir)): + for cam2file in glob.iglob(os.path.join("{}", "*.cam2.*").format(rundir)): camfile = cam2file.replace("cam2", "cam") os.symlink(cam2file, camfile) elif not get_refcase and run_type != "startup": - logger.info("GET_REFCASE is false, the user is expected to stage the refcase to the run directory.") - if os.path.exists(os.path.join("Buildconf","refcase.input_data_list")): - os.remove(os.path.join("Buildconf","refcase.input_data_list")) + logger.info( + "GET_REFCASE is false, the user is expected to stage the refcase to the run directory." + ) + if os.path.exists(os.path.join("Buildconf", "refcase.input_data_list")): + os.remove(os.path.join("Buildconf", "refcase.input_data_list")) return True -def check_input_data(case, protocol="svn", address=None, input_data_root=None, data_list_dir="Buildconf", - download=False, user=None, passwd=None, chksum=False, ic_filepath=None): + +def check_input_data( + case, + protocol="svn", + address=None, + input_data_root=None, + data_list_dir="Buildconf", + download=False, + user=None, + passwd=None, + chksum=False, + ic_filepath=None, +): """ For a given case check for the relevant input data as specified in data_list_dir/*.input_data_list in the directory input_data_root, if not found optionally download it using the servers specified @@ -286,20 +374,31 @@ def check_input_data(case, protocol="svn", address=None, input_data_root=None, d case.load_env(reset=True) rundir = case.get_value("RUNDIR") # Fill in defaults as needed - input_data_root = case.get_value("DIN_LOC_ROOT") if input_data_root is None else input_data_root + input_data_root = ( + case.get_value("DIN_LOC_ROOT") if input_data_root is None else input_data_root + ) input_ic_root = case.get_value("DIN_LOC_IC", resolved=True) - expect(os.path.isdir(data_list_dir), "Invalid data_list_dir directory: '{}'".format(data_list_dir)) + expect( + os.path.isdir(data_list_dir), + "Invalid data_list_dir directory: '{}'".format(data_list_dir), + ) data_list_files = find_files(data_list_dir, "*.input_data_list") if not data_list_files: - logger.warning("WARNING: No .input_data_list files found in dir '{}'".format(data_list_dir)) + logger.warning( + "WARNING: No .input_data_list files found in dir '{}'".format(data_list_dir) + ) no_files_missing = True if download: if protocol not in vars(CIME.Servers): logger.info("Client protocol {} not enabled".format(protocol)) return False - logger.info("Using protocol {} with user {} and passwd {}".format(protocol, user, passwd)) + logger.info( + "Using protocol {} with user {} and passwd {}".format( + protocol, user, passwd + ) + ) if protocol == "svn": server = CIME.Servers.SVN(address, user, passwd) elif protocol == "gftp": @@ -321,42 +420,60 @@ def check_input_data(case, protocol="svn", address=None, input_data_root=None, d for line in lines: line = line.strip() use_ic_path = False - if (line and not line.startswith("#")): - tokens = line.split('=') + if line and not line.startswith("#"): + tokens = line.split("=") description, full_path = tokens[0].strip(), tokens[1].strip() - if description.endswith('datapath') or description.endswith('data_path') or full_path.endswith('/dev/null'): + if ( + description.endswith("datapath") + or description.endswith("data_path") + or full_path.endswith("/dev/null") + ): continue - if description.endswith('file') or description.endswith('filename'): + if description.endswith("file") or description.endswith("filename"): # There are required input data with key, 
or 'description' entries # that specify in their names whether they are files or filenames # rather than 'datapath's or 'data_path's so we check to make sure # the input data list has correct non-path values for input files. # This check happens whether or not a file already exists locally. - expect((not full_path.endswith(os.sep)), "Unsupported directory path in input_data_list named {}. Line entry is '{} = {}'.".format(data_list_file, description, full_path)) - if(full_path): + expect( + (not full_path.endswith(os.sep)), + "Unsupported directory path in input_data_list named {}. Line entry is '{} = {}'.".format( + data_list_file, description, full_path + ), + ) + if full_path: # expand xml variables full_path = case.get_resolved_value(full_path) rel_path = full_path - if input_ic_root and input_ic_root in full_path \ - and ic_filepath: + if input_ic_root and input_ic_root in full_path and ic_filepath: rel_path = full_path.replace(input_ic_root, ic_filepath) use_ic_path = True elif input_data_root in full_path: - rel_path = full_path.replace(input_data_root, "") - elif input_ic_root and \ - (input_ic_root not in input_data_root and input_ic_root in full_path): + rel_path = full_path.replace(input_data_root, "") + elif input_ic_root and ( + input_ic_root not in input_data_root + and input_ic_root in full_path + ): if ic_filepath: - rel_path = full_path.replace(input_ic_root, ic_filepath) + rel_path = full_path.replace(input_ic_root, ic_filepath) use_ic_path = True - model = os.path.basename(data_list_file).split('.')[0] - isdirectory=rel_path.endswith(os.sep) - - if ("/" in rel_path and rel_path == full_path and not full_path.startswith('unknown')): + model = os.path.basename(data_list_file).split(".")[0] + isdirectory = rel_path.endswith(os.sep) + + if ( + "/" in rel_path + and rel_path == full_path + and not full_path.startswith("unknown") + ): # User pointing to a file outside of input_data_root, we cannot determine # rel_path, and so cannot download the file. 
If it already exists, we can # proceed if not os.path.exists(full_path): - print("Model {} missing file {} = '{}'".format(model, description, full_path)) + print( + "Model {} missing file {} = '{}'".format( + model, description, full_path + ) + ) # Data download path must be DIN_LOC_ROOT, DIN_LOC_IC or RUNDIR rundir = case.get_value("RUNDIR") @@ -364,15 +481,25 @@ def check_input_data(case, protocol="svn", address=None, input_data_root=None, d if full_path.startswith(rundir): filepath = os.path.dirname(full_path) if not os.path.exists(filepath): - logger.info("Creating directory {}".format(filepath)) + logger.info( + "Creating directory {}".format(filepath) + ) os.makedirs(filepath) - tmppath = full_path[len(rundir)+1:] - success = _download_if_in_repo(server, os.path.join(rundir,"inputdata"), - tmppath[10:], - isdirectory=isdirectory, ic_filepath='/') + tmppath = full_path[len(rundir) + 1 :] + success = _download_if_in_repo( + server, + os.path.join(rundir, "inputdata"), + tmppath[10:], + isdirectory=isdirectory, + ic_filepath="/", + ) no_files_missing = success else: - logger.warning(" Cannot download file since it lives outside of the input_data_root '{}'".format(input_data_root)) + logger.warning( + " Cannot download file since it lives outside of the input_data_root '{}'".format( + input_data_root + ) + ) else: no_files_missing = False else: @@ -385,34 +512,69 @@ def check_input_data(case, protocol="svn", address=None, input_data_root=None, d # directory tree) you can assume it's a special # value and ignore it (perhaps with a warning) - if ("/" in rel_path and not os.path.exists(full_path) and not full_path.startswith('unknown')): - print("Model {} missing file {} = '{}'".format(model, description, full_path)) - if (download): + if ( + "/" in rel_path + and not os.path.exists(full_path) + and not full_path.startswith("unknown") + ): + print( + "Model {} missing file {} = '{}'".format( + model, description, full_path + ) + ) + if download: if use_ic_path: - success = _download_if_in_repo(server, - input_ic_root, rel_path.strip(os.sep), - isdirectory=isdirectory, ic_filepath=ic_filepath) + success = _download_if_in_repo( + server, + input_ic_root, + rel_path.strip(os.sep), + isdirectory=isdirectory, + ic_filepath=ic_filepath, + ) else: - success = _download_if_in_repo(server, - input_data_root, rel_path.strip(os.sep), - isdirectory=isdirectory, ic_filepath=ic_filepath) + success = _download_if_in_repo( + server, + input_data_root, + rel_path.strip(os.sep), + isdirectory=isdirectory, + ic_filepath=ic_filepath, + ) if not success: no_files_missing = False if success and chksum: - verify_chksum(input_data_root, rundir, rel_path.strip(os.sep), isdirectory) + verify_chksum( + input_data_root, + rundir, + rel_path.strip(os.sep), + isdirectory, + ) else: no_files_missing = False else: if chksum: - verify_chksum(input_data_root, rundir, rel_path.strip(os.sep), isdirectory) - logger.info("Chksum passed for file {}".format(os.path.join(input_data_root,rel_path))) - logger.debug(" Already had input file: '{}'".format(full_path)) + verify_chksum( + input_data_root, + rundir, + rel_path.strip(os.sep), + isdirectory, + ) + logger.info( + "Chksum passed for file {}".format( + os.path.join(input_data_root, rel_path) + ) + ) + logger.debug( + " Already had input file: '{}'".format(full_path) + ) else: - model = os.path.basename(data_list_file).split('.')[0] - logger.warning("Model {} no file specified for {}".format(model, description)) + model = os.path.basename(data_list_file).split(".")[0] + 
logger.warning( + "Model {} no file specified for {}".format(model, description) + ) return no_files_missing + def verify_chksum(input_data_root, rundir, filename, isdirectory): """ For file in filename perform a chksum and compare the result to that stored in @@ -429,12 +591,15 @@ def verify_chksum(input_data_root, rundir, filename, isdirectory): for line in lines: fchksum, fname = line.split() if fname in chksum_hash: - expect(chksum_hash[fname] == fchksum, " Inconsistent hashes in chksum for file {}".format(fname)) + expect( + chksum_hash[fname] == fchksum, + " Inconsistent hashes in chksum for file {}".format(fname), + ) else: chksum_hash[fname] = fchksum if isdirectory: - filenames = glob.glob(os.path.join(filename,"*.*")) + filenames = glob.glob(os.path.join(filename, "*.*")) else: filenames = [filename] for fname in filenames: @@ -443,11 +608,19 @@ def verify_chksum(input_data_root, rundir, filename, isdirectory): chksum = md5(os.path.join(input_data_root, fname)) if chksum_hash: if not fname in chksum_hash: - logger.warning("Did not find hash for file {} in chksum file {}".format(filename, hashfile)) + logger.warning( + "Did not find hash for file {} in chksum file {}".format( + filename, hashfile + ) + ) else: - expect(chksum == chksum_hash[fname], - "chksum mismatch for file {} expected {} found {}". - format(os.path.join(input_data_root,fname),chksum, chksum_hash[fname])) + expect( + chksum == chksum_hash[fname], + "chksum mismatch for file {} expected {} found {}".format( + os.path.join(input_data_root, fname), chksum, chksum_hash[fname] + ), + ) + def md5(fname): """ diff --git a/CIME/case/check_lockedfiles.py b/CIME/case/check_lockedfiles.py index e007b0e8caa..f2b1e8cd3f8 100644 --- a/CIME/case/check_lockedfiles.py +++ b/CIME/case/check_lockedfiles.py @@ -16,6 +16,7 @@ import glob, CIME.six + def check_pelayouts_require_rebuild(self, models): """ Create if we require a rebuild, expects cwd is caseroot @@ -24,26 +25,37 @@ def check_pelayouts_require_rebuild(self, models): if os.path.exists(locked_pes): # Look to see if $comp_PE_CHANGE_REQUIRES_REBUILD is defined # for any component - env_mach_pes_locked = EnvMachPes(infile=locked_pes, components=self.get_values("COMP_CLASSES")) + env_mach_pes_locked = EnvMachPes( + infile=locked_pes, components=self.get_values("COMP_CLASSES") + ) for comp in models: if self.get_value("{}_PE_CHANGE_REQUIRES_REBUILD".format(comp)): # Changing these values in env_mach_pes.xml will force # you to clean the corresponding component - old_tasks = env_mach_pes_locked.get_value("NTASKS_{}".format(comp)) + old_tasks = env_mach_pes_locked.get_value("NTASKS_{}".format(comp)) old_threads = env_mach_pes_locked.get_value("NTHRDS_{}".format(comp)) - old_inst = env_mach_pes_locked.get_value("NINST_{}".format(comp)) + old_inst = env_mach_pes_locked.get_value("NINST_{}".format(comp)) - new_tasks = self.get_value("NTASKS_{}".format(comp)) + new_tasks = self.get_value("NTASKS_{}".format(comp)) new_threads = self.get_value("NTHRDS_{}".format(comp)) - new_inst = self.get_value("NINST_{}".format(comp)) - - if old_tasks != new_tasks or old_threads != new_threads or old_inst != new_inst: - logging.warning("{} pe change requires clean build {} {}".format(comp, old_tasks, new_tasks)) + new_inst = self.get_value("NINST_{}".format(comp)) + + if ( + old_tasks != new_tasks + or old_threads != new_threads + or old_inst != new_inst + ): + logging.warning( + "{} pe change requires clean build {} {}".format( + comp, old_tasks, new_tasks + ) + ) cleanflag = comp.lower() 
clean(self, cleanlist=[cleanflag]) unlock_file("env_mach_pes.xml", self.get_value("CASEROOT")) + def check_lockedfile(self, filebase): caseroot = self.get_value("CASEROOT") @@ -51,21 +63,23 @@ def check_lockedfile(self, filebase): lfile = os.path.join(caseroot, "LockedFiles", filebase) components = self.get_values("COMP_CLASSES") if os.path.isfile(cfile): - objname = filebase.split('.')[0] + objname = filebase.split(".")[0] if objname == "env_build": - f1obj = self.get_env('build') + f1obj = self.get_env("build") f2obj = EnvBuild(caseroot, lfile, read_only=True) elif objname == "env_mach_pes": - f1obj = self.get_env('mach_pes') + f1obj = self.get_env("mach_pes") f2obj = EnvMachPes(caseroot, lfile, components=components, read_only=True) elif objname == "env_case": - f1obj = self.get_env('case') + f1obj = self.get_env("case") f2obj = EnvCase(caseroot, lfile, read_only=True) elif objname == "env_batch": - f1obj = self.get_env('batch') + f1obj = self.get_env("batch") f2obj = EnvBatch(caseroot, lfile, read_only=True) else: - logging.warning("Locked XML file '{}' is not currently being handled".format(filebase)) + logging.warning( + "Locked XML file '{}' is not currently being handled".format(filebase) + ) return diffs = f1obj.compare_xml(f2obj) @@ -75,30 +89,42 @@ def check_lockedfile(self, filebase): toggle_build_status = False for key in diffs.keys(): if key != "BUILD_COMPLETE": - logging.warning(" found difference in {} : case {} locked {}" - .format(key, repr(diffs[key][0]), repr(diffs[key][1]))) + logging.warning( + " found difference in {} : case {} locked {}".format( + key, repr(diffs[key][0]), repr(diffs[key][1]) + ) + ) toggle_build_status = True if objname == "env_mach_pes": expect(False, "Invoke case.setup --reset ") elif objname == "env_case": - expect(False, "Cannot change file env_case.xml, please" - " recover the original copy from LockedFiles") + expect( + False, + "Cannot change file env_case.xml, please" + " recover the original copy from LockedFiles", + ) elif objname == "env_build": if toggle_build_status: logging.warning("Setting build complete to False") self.set_value("BUILD_COMPLETE", False) if "PIO_VERSION" in diffs: self.set_value("BUILD_STATUS", 2) - logging.critical("Changing PIO_VERSION requires running " - "case.build --clean-all and rebuilding") + logging.critical( + "Changing PIO_VERSION requires running " + "case.build --clean-all and rebuilding" + ) else: self.set_value("BUILD_STATUS", 1) elif objname == "env_batch": - expect(False, "Batch configuration has changed, please run case.setup --reset") + expect( + False, + "Batch configuration has changed, please run case.setup --reset", + ) else: expect(False, "'{}' diff was not handled".format(objname)) + def check_lockedfiles(self, skip=None): """ Check that all lockedfiles match what's in case @@ -112,7 +138,7 @@ def check_lockedfiles(self, skip=None): for lfile in lockedfiles: fpart = os.path.basename(lfile) # ignore files used for tests such as env_mach_pes.ERP1.xml by looking for extra dots in the name - if fpart.count('.') > 1: + if fpart.count(".") > 1: continue do_skip = False diff --git a/CIME/case/preview_namelists.py b/CIME/case/preview_namelists.py index 2c588293ff6..707d59a53c0 100644 --- a/CIME/case/preview_namelists.py +++ b/CIME/case/preview_namelists.py @@ -4,19 +4,21 @@ """ from CIME.XML.standard_module_setup import * -from CIME.utils import run_sub_or_cmd, safe_copy +from CIME.utils import import_and_run_sub_or_cmd, safe_copy import time, glob + logger = logging.getLogger(__name__) + def 
create_dirs(self): """ Make necessary directories for case """ # Get data from XML - exeroot = self.get_value("EXEROOT") - libroot = self.get_value("LIBROOT") - incroot = self.get_value("INCROOT") - rundir = self.get_value("RUNDIR") + exeroot = self.get_value("EXEROOT") + libroot = self.get_value("LIBROOT") + incroot = self.get_value("INCROOT") + rundir = self.get_value("RUNDIR") caseroot = self.get_value("CASEROOT") docdir = os.path.join(caseroot, "CaseDocs") dirs_to_make = [] @@ -28,7 +30,7 @@ def create_dirs(self): dirs_to_make.extend([exeroot, libroot, incroot, rundir, docdir]) for dir_to_make in dirs_to_make: - if (not os.path.isdir(dir_to_make) and not os.path.islink(dir_to_make)): + if not os.path.isdir(dir_to_make) and not os.path.islink(dir_to_make): try: logger.debug("Making dir '{}'".format(dir_to_make)) os.makedirs(dir_to_make) @@ -36,12 +38,18 @@ def create_dirs(self): # In a multithreaded situation, we may have lost a race to create this dir. # We do not want to crash if that's the case. if not os.path.isdir(dir_to_make): - expect(False, "Could not make directory '{}', error: {}".format(dir_to_make, e)) + expect( + False, + "Could not make directory '{}', error: {}".format( + dir_to_make, e + ), + ) # As a convenience write the location of the case directory in the bld and run directories for dir_ in (exeroot, rundir): - with open(os.path.join(dir_,"CASEROOT"),"w+") as fd: - fd.write(caseroot+"\n") + with open(os.path.join(dir_, "CASEROOT"), "w+") as fd: + fd.write(caseroot + "\n") + def create_namelists(self, component=None): """ @@ -69,45 +77,61 @@ def create_namelists(self, component=None): models += [models.pop(0)] for model in models: model_str = model.lower() - logger.info(" {} {} ".format(time.strftime("%Y-%m-%d %H:%M:%S"),model_str)) + logger.info(" {} {} ".format(time.strftime("%Y-%m-%d %H:%M:%S"), model_str)) config_file = self.get_value("CONFIG_{}_FILE".format(model_str.upper())) config_dir = os.path.dirname(config_file) if model_str == "cpl": compname = "drv" else: compname = self.get_value("COMP_{}".format(model_str.upper())) - if component is None or component == model_str or compname=="ufsatm": - # first look in the case SourceMods directory - cmd = os.path.join(caseroot, "SourceMods", "src."+compname, "buildnml") - if os.path.isfile(cmd): - logger.warning("\nWARNING: Using local buildnml file {}\n".format(cmd)) - else: - # otherwise look in the component config_dir - cmd = os.path.join(config_dir, "buildnml") - expect(os.path.isfile(cmd), "Could not find buildnml file for component {}".format(compname)) + if component is None or component == model_str or compname == "ufsatm": + cmd = os.path.join(config_dir, "buildnml") logger.info("Create namelist for component {}".format(compname)) - run_sub_or_cmd(cmd, (caseroot), "buildnml", - (self, caseroot, compname), case=self) - - logger.debug("Finished creating component namelists, component {} models = {}".format(component, models)) + import_and_run_sub_or_cmd( + cmd, + (caseroot), + "buildnml", + (self, caseroot, compname), + config_dir, + compname, + case=self, + ) + + logger.debug( + "Finished creating component namelists, component {} models = {}".format( + component, models + ) + ) # Save namelists to docdir - if (not os.path.isdir(docdir)): + if not os.path.isdir(docdir): os.makedirs(docdir) try: with open(os.path.join(docdir, "README"), "w") as fd: - fd.write(" CESM Resolved Namelist Files\n For documentation only DO NOT MODIFY\n") + fd.write( + " CESM Resolved Namelist Files\n For documentation only DO NOT 
MODIFY\n" + ) except (OSError, IOError) as e: expect(False, "Failed to write {}/README: {}".format(docdir, e)) - for cpglob in ["*_in_[0-9]*", "*modelio*", "*_in", "nuopc.runconfig", - "*streams*txt*", "*streams.xml", "*stxt", "*maps.rc", "*cism*.config*", "nuopc.runseq"]: + for cpglob in [ + "*_in_[0-9]*", + "*modelio*", + "*_in", + "nuopc.runconfig", + "*streams*txt*", + "*streams.xml", + "*stxt", + "*maps.rc", + "*cism*.config*", + "nuopc.runseq", + ]: for file_to_copy in glob.glob(os.path.join(rundir, cpglob)): logger.debug("Copy file from '{}' to '{}'".format(file_to_copy, docdir)) safe_copy(file_to_copy, docdir) # Copy over chemistry mechanism docs if they exist atmconf = self.get_value("COMP_ATM") + "conf" - if (os.path.isdir(os.path.join(casebuild, atmconf))): + if os.path.isdir(os.path.join(casebuild, atmconf)): for file_to_copy in glob.glob(os.path.join(casebuild, atmconf, "*chem_mech*")): safe_copy(file_to_copy, docdir) diff --git a/CIME/code_checker.py b/CIME/code_checker.py index b17d236c299..e92ee716efa 100644 --- a/CIME/code_checker.py +++ b/CIME/code_checker.py @@ -7,10 +7,19 @@ from CIME.XML.standard_module_setup import * -from CIME.utils import run_cmd, run_cmd_no_fail, expect, get_cime_root, get_src_root, is_python_executable, get_cime_default_driver +from CIME.utils import ( + run_cmd, + run_cmd_no_fail, + expect, + get_cime_root, + get_src_root, + is_python_executable, + get_cime_default_driver, +) from multiprocessing.dummy import Pool as ThreadPool -#pylint: disable=import-error + +# pylint: disable=import-error from distutils.spawn import find_executable logger = logging.getLogger(__name__) @@ -20,8 +29,12 @@ def _run_pylint(all_files, interactive): ############################################################################### pylint = find_executable("pylint") - cmd_options = " --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import" - cmd_options += ",fixme,broad-except,bare-except,eval-used,exec-used,global-statement" + cmd_options = ( + " --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import" + ) + cmd_options += ( + ",fixme,broad-except,bare-except,eval-used,exec-used,global-statement" + ) cmd_options += ",logging-format-interpolation,no-name-in-module,arguments-renamed" cmd_options += " -j 0 -f json" cimeroot = get_cime_root() @@ -74,20 +87,30 @@ def _run_pylint(all_files, interactive): # logger.info("File %s has no pylint problems" % on_file) # return (on_file, "") + ############################################################################### def _matches(file_path, file_ends): -############################################################################### + ############################################################################### for file_end in file_ends: if file_path.endswith(file_end): return True return False + ############################################################################### def _should_pylint_skip(filepath): -############################################################################### + ############################################################################### # TODO - get rid of this - list_of_directories_to_ignore = ("xmlconvertors", "pointclm", "point_clm", "tools", "machines", "apidocs", "doc") + list_of_directories_to_ignore = ( + "xmlconvertors", + "pointclm", + "point_clm", + "tools", + "machines", + "apidocs", + "doc", + ) for dir_to_skip in list_of_directories_to_ignore: if dir_to_skip + "/" in filepath: return True @@ -99,27 +122,46 @@ def _should_pylint_skip(filepath): return 
False + ############################################################################### def get_all_checkable_files(): -############################################################################### + ############################################################################### cimeroot = get_cime_root() - all_git_files = run_cmd_no_fail("git ls-files", from_dir=cimeroot, verbose=False).splitlines() + all_git_files = run_cmd_no_fail( + "git ls-files", from_dir=cimeroot, verbose=False + ).splitlines() if get_cime_default_driver() == "nuopc": srcroot = get_src_root() nuopc_git_files = [] try: - nuopc_git_files = run_cmd_no_fail("git ls-files", from_dir=os.path.join(srcroot,"components","cmeps"), verbose=False).splitlines() + nuopc_git_files = run_cmd_no_fail( + "git ls-files", + from_dir=os.path.join(srcroot, "components", "cmeps"), + verbose=False, + ).splitlines() except: logger.warning("No nuopc driver found in source") - all_git_files.extend([os.path.join(srcroot,"components","cmeps",_file) for _file in nuopc_git_files]) - files_to_test = [item for item in all_git_files - if ((item.endswith(".py") or is_python_executable(os.path.join(cimeroot, item))) and not _should_pylint_skip(item))] + all_git_files.extend( + [ + os.path.join(srcroot, "components", "cmeps", _file) + for _file in nuopc_git_files + ] + ) + files_to_test = [ + item + for item in all_git_files + if ( + (item.endswith(".py") or is_python_executable(os.path.join(cimeroot, item))) + and not _should_pylint_skip(item) + ) + ] return files_to_test + ############################################################################### def check_code(files, num_procs=10, interactive=False): -############################################################################### + ############################################################################### """ Check all python files in the given directory @@ -138,10 +180,12 @@ def check_code(files, num_procs=10, interactive=False): for repo_file in repo_files: if repo_file.endswith(filearg): found = True - files_to_check.append(repo_file) # could have multiple matches + files_to_check.append(repo_file) # could have multiple matches if not found: - logger.warning("Could not find file matching argument '%s'" % filearg) + logger.warning( + "Could not find file matching argument '%s'" % filearg + ) else: # Check every python file files_to_check = get_all_checkable_files() diff --git a/CIME/compare_namelists.py b/CIME/compare_namelists.py index 00bde0ecf64..42241692ed0 100644 --- a/CIME/compare_namelists.py +++ b/CIME/compare_namelists.py @@ -1,14 +1,15 @@ import os, re, logging, CIME.six from collections import OrderedDict -from CIME.utils import expect, CIMEError -logger=logging.getLogger(__name__) +from CIME.utils import expect, CIMEError + +logger = logging.getLogger(__name__) # pragma pylint: disable=unsubscriptable-object ############################################################################### def _normalize_lists(value_str): -############################################################################### + ############################################################################### """ >>> _normalize_lists("'one two' 'three four'") "'one two','three four'" @@ -60,9 +61,10 @@ def _normalize_lists(value_str): return result + ############################################################################### def _interpret_value(value_str, filename): -############################################################################### + 
############################################################################### """ >>> _interpret_value("one", "foo") 'one' @@ -77,18 +79,23 @@ def _interpret_value(value_str, filename): >>> _interpret_value("'DMS -> 1.0* value.nc'", "foo") OrderedDict([('DMS', '1.0*value.nc')]) """ - comma_re = re.compile(r'\s*,\s*') + comma_re = re.compile(r"\s*,\s*") dict_re = re.compile(r"^'(\S+)\s*->\s*(\S+|(?:\S+\s*\*\s*\S+))\s*'") value_str = _normalize_lists(value_str) tokens = [item.strip() for item in comma_re.split(value_str) if item.strip() != ""] - if ("->" in value_str): + if "->" in value_str: # dict rv = OrderedDict() for token in tokens: m = dict_re.match(token) - expect(m is not None, "In file '{}', Dict entry '{}' does not match expected format".format(filename, token)) + expect( + m is not None, + "In file '{}', Dict entry '{}' does not match expected format".format( + filename, token + ), + ) k, v = m.groups() rv[k] = _interpret_value(v, filename) @@ -101,7 +108,10 @@ def _interpret_value(value_str, filename): # the following ensure that the following to namelist settings trigger a match # nmlvalue = 1,1,1 versus nmlvalue = 3*1 sub_tokens = [item.strip() for item in token.split("*")] - expect(len(sub_tokens) == 2, "Incorrect usage of multiplication in token '{}'".format(token)) + expect( + len(sub_tokens) == 2, + "Incorrect usage of multiplication in token '{}'".format(token), + ) new_tokens.extend([sub_tokens[1]] * int(sub_tokens[0])) except Exception: # User probably did not intend to use the * operator as a namelist multiplier @@ -114,9 +124,10 @@ def _interpret_value(value_str, filename): else: return new_tokens[0] + ############################################################################### def _parse_namelists(namelist_lines, filename): -############################################################################### + ############################################################################### """ Return data in form: {namelist -> {key -> value} }. 
value can be an int, string, list, or dict @@ -200,143 +211,199 @@ def _parse_namelists(namelist_lines, filename): OrderedDict([('nml', OrderedDict([('val', ["'a brown cow'", "'a red hen'"])]))]) """ - comment_re = re.compile(r'^[#!]') - namelist_re = re.compile(r'^&(\S+)$') + comment_re = re.compile(r"^[#!]") + namelist_re = re.compile(r"^&(\S+)$") name_re = re.compile(r"^([^\s=']+)\s*=\s*(.+)$") rcline_re = re.compile(r"^([^&\s':]+)\s*:\s*(.+)$") rv = OrderedDict() current_namelist = None - multiline_variable = None # (name, value) + multiline_variable = None # (name, value) for line in namelist_lines: line = line.strip() - line = line.replace('"',"'") + line = line.replace('"', "'") logger.debug("Parsing line: '{}'".format(line)) - if (line == "" or comment_re.match(line) is not None): + if line == "" or comment_re.match(line) is not None: logger.debug(" Line was whitespace or comment, skipping.") continue rcline = rcline_re.match(line) - if (rcline is not None): + if rcline is not None: # Defining a variable (AKA name) name, value = rcline.groups() - logger.debug(" Parsing variable '{}' with data '{}'".format(name, value)) - if 'seq_maps.rc' not in rv: - rv['seq_maps.rc'] = OrderedDict() + if "seq_maps.rc" not in rv: + rv["seq_maps.rc"] = OrderedDict() - expect(name not in rv['seq_maps.rc'], "In file '{}', Duplicate name: '{}'".format(filename, name)) - rv['seq_maps.rc'][name] = value + expect( + name not in rv["seq_maps.rc"], + "In file '{}', Duplicate name: '{}'".format(filename, name), + ) + rv["seq_maps.rc"][name] = value - elif (current_namelist is None): + elif current_namelist is None: # Must start a namelist - expect(multiline_variable is None, - "In file '{}', Incomplete multiline variable: '{}'".format(filename, multiline_variable[0] if multiline_variable is not None else "")) + expect( + multiline_variable is None, + "In file '{}', Incomplete multiline variable: '{}'".format( + filename, + multiline_variable[0] if multiline_variable is not None else "", + ), + ) # Unfortunately, other tools were using the old compare_namelists.pl script # to compare files that are not namelist files. 
We need a special error # to signify this event - if (namelist_re.match(line) is None): - expect(rv != OrderedDict(), - "File '{}' does not appear to be a namelist file, skipping".format(filename)) - expect(False, - "In file '{}', Line '{}' did not begin a namelist as expected".format(filename, line)) + if namelist_re.match(line) is None: + expect( + rv != OrderedDict(), + "File '{}' does not appear to be a namelist file, skipping".format( + filename + ), + ) + expect( + False, + "In file '{}', Line '{}' did not begin a namelist as expected".format( + filename, line + ), + ) current_namelist = namelist_re.match(line).groups()[0] - expect(current_namelist not in rv, - "In file '{}', Duplicate namelist '{}'".format(filename, current_namelist)) + expect( + current_namelist not in rv, + "In file '{}', Duplicate namelist '{}'".format( + filename, current_namelist + ), + ) rv[current_namelist] = OrderedDict() logger.debug(" Starting namelist '{}'".format(current_namelist)) - elif (line == "/"): + elif line == "/": # Ends a namelist logger.debug(" Ending namelist '{}'".format(current_namelist)) - expect(multiline_variable is None, - "In file '{}', Incomplete multiline variable: '{}'".format(filename, multiline_variable[0] if multiline_variable is not None else "")) + expect( + multiline_variable is None, + "In file '{}', Incomplete multiline variable: '{}'".format( + filename, + multiline_variable[0] if multiline_variable is not None else "", + ), + ) current_namelist = None - elif (name_re.match(line)): + elif name_re.match(line): # Defining a variable (AKA name) name, value_str = name_re.match(line).groups() - logger.debug(" Parsing variable '{}' with data '{}'".format(name, value_str)) - - expect(multiline_variable is None, - "In file '{}', Incomplete multiline variable: '{}'".format(filename, multiline_variable[0] if multiline_variable is not None else "")) - expect(name not in rv[current_namelist], "In file '{}', Duplicate name: '{}'".format(filename, name)) + logger.debug( + " Parsing variable '{}' with data '{}'".format(name, value_str) + ) + + expect( + multiline_variable is None, + "In file '{}', Incomplete multiline variable: '{}'".format( + filename, + multiline_variable[0] if multiline_variable is not None else "", + ), + ) + expect( + name not in rv[current_namelist], + "In file '{}', Duplicate name: '{}'".format(filename, name), + ) real_value = _interpret_value(value_str, filename) rv[current_namelist][name] = real_value logger.debug(" Adding value: {}".format(real_value)) - if (line.endswith(",")): + if line.endswith(","): # Value will continue on in subsequent lines multiline_variable = (name, real_value) logger.debug(" Var is multiline...") - elif (multiline_variable is not None): + elif multiline_variable is not None: # Continuation of list or dict variable current_value = multiline_variable[1] - logger.debug(" Continuing multiline variable '{}' with data '{}'".format(multiline_variable[0], line)) + logger.debug( + " Continuing multiline variable '{}' with data '{}'".format( + multiline_variable[0], line + ) + ) real_value = _interpret_value(line, filename) - if (type(current_value) is list): - expect(type(real_value) is not OrderedDict, "In file '{}', multiline list variable '{}' had dict entries".format(filename, multiline_variable[0])) + if type(current_value) is list: + expect( + type(real_value) is not OrderedDict, + "In file '{}', multiline list variable '{}' had dict entries".format( + filename, multiline_variable[0] + ), + ) real_value = real_value if type(real_value) 
is list else [real_value] current_value.extend(real_value) - elif (type(current_value) is OrderedDict): - expect(type(real_value) is OrderedDict, "In file '{}', multiline dict variable '{}' had non-dict entries".format(filename, multiline_variable[0])) + elif type(current_value) is OrderedDict: + expect( + type(real_value) is OrderedDict, + "In file '{}', multiline dict variable '{}' had non-dict entries".format( + filename, multiline_variable[0] + ), + ) current_value.update(real_value) else: - expect(False, "In file '{}', Continuation should have been for list or dict, instead it was: '{}'".format(filename, type(current_value))) + expect( + False, + "In file '{}', Continuation should have been for list or dict, instead it was: '{}'".format( + filename, type(current_value) + ), + ) logger.debug(" Adding value: {}".format(real_value)) - if (not line.endswith(",")): + if not line.endswith(","): # Completed multiline_variable = None logger.debug(" Terminating multiline variable") else: - expect(False, "In file '{}', Unrecognized line: '{}'".format(filename, line)) + expect( + False, "In file '{}', Unrecognized line: '{}'".format(filename, line) + ) return rv + ############################################################################### def _normalize_string_value(name, value, case): -############################################################################### + ############################################################################### """ Some of the strings in namelists will contain data that's inherently prone to diffs, like file paths, etc. This function attempts to normalize that data so that it will not cause diffs. """ # Any occurrence of case must be normalized because test-ids might not match - if (case is not None): - case_re = re.compile(r'{}[.]([GC]+)[.]([^./\s]+)'.format(case)) + if case is not None: + case_re = re.compile(r"{}[.]([GC]+)[.]([^./\s]+)".format(case)) value = case_re.sub("{}.ACTION.TESTID".format(case), value) - if (name in ["runid", "model_version", "username", "logfile"]): + if name in ["runid", "model_version", "username", "logfile"]: # Don't even attempt to diff these, we don't care return name.upper() - elif (":" in value): + elif ":" in value: items = value.split(":") items = [_normalize_string_value(name, item, case) for item in items] return ":".join(items) - elif ("/" in value): + elif "/" in value: # Handle special format scale*path, normalize the path and reconstruct parsed = re.match(r"^([^*]+\*)(/[^/]+)*", value) if parsed is not None and len(parsed.groups()) == 2: @@ -353,9 +420,10 @@ def _normalize_string_value(name, value, case): else: return value + ############################################################################### def _compare_values(name, gold_value, comp_value, case): -############################################################################### + ############################################################################### """ Compare values for a specific variable in a namelist. 
@@ -364,49 +432,75 @@ def _compare_values(name, gold_value, comp_value, case): Note there will only be comments if values did not match """ comments = "" - if (type(gold_value) != type(comp_value)): - comments += " variable '{}' did not have expected type '{}', instead is type '{}'\n".format(name, type(gold_value), type(comp_value)) + if type(gold_value) != type(comp_value): + comments += " variable '{}' did not have expected type '{}', instead is type '{}'\n".format( + name, type(gold_value), type(comp_value) + ) return comments - if (type(gold_value) is list): + if type(gold_value) is list: # Note, list values remain order sensitive for idx, gold_value_list_item in enumerate(gold_value): - if (idx < len(comp_value)): - comments += _compare_values("{} list item {:d}".format(name, idx), - gold_value_list_item, comp_value[idx], case) + if idx < len(comp_value): + comments += _compare_values( + "{} list item {:d}".format(name, idx), + gold_value_list_item, + comp_value[idx], + case, + ) else: - comments += " list variable '{}' missing value {}\n".format(name, gold_value_list_item) + comments += " list variable '{}' missing value {}\n".format( + name, gold_value_list_item + ) - if (len(comp_value) > len(gold_value)): - for comp_value_list_item in comp_value[len(gold_value):]: - comments += " list variable '{}' has extra value {}\n".format(name, comp_value_list_item) + if len(comp_value) > len(gold_value): + for comp_value_list_item in comp_value[len(gold_value) :]: + comments += " list variable '{}' has extra value {}\n".format( + name, comp_value_list_item + ) - elif (type(gold_value) is OrderedDict): + elif type(gold_value) is OrderedDict: for key, gold_value_dict_item in gold_value.items(): - if (key in comp_value): - comments += _compare_values("{} dict item {}".format(name, key), - gold_value_dict_item, comp_value[key], case) + if key in comp_value: + comments += _compare_values( + "{} dict item {}".format(name, key), + gold_value_dict_item, + comp_value[key], + case, + ) else: - comments += " dict variable '{}' missing key {} with value {}\n".format(name, key, gold_value_dict_item) + comments += ( + " dict variable '{}' missing key {} with value {}\n".format( + name, key, gold_value_dict_item + ) + ) for key in comp_value: - if (key not in gold_value): - comments += " dict variable '{}' has extra key {} with value {}\n".format(name, key, comp_value[key]) + if key not in gold_value: + comments += ( + " dict variable '{}' has extra key {} with value {}\n".format( + name, key, comp_value[key] + ) + ) else: - expect(isinstance(gold_value, CIME.six.string_types), "Unexpected type found: '{}'".format(type(gold_value))) + expect( + isinstance(gold_value, CIME.six.string_types), + "Unexpected type found: '{}'".format(type(gold_value)), + ) norm_gold_value = _normalize_string_value(name, gold_value, case) norm_comp_value = _normalize_string_value(name, comp_value, case) - if (norm_gold_value != norm_comp_value): + if norm_gold_value != norm_comp_value: comments += " BASE: {} = {}\n".format(name, norm_gold_value) comments += " COMP: {} = {}\n".format(name, norm_comp_value) return comments + ############################################################################### def _compare_namelists(gold_namelists, comp_namelists, case): -############################################################################### + ############################################################################### """ Compare two namelists. Print diff information if any. 
Returns comments @@ -547,13 +641,15 @@ def _compare_namelists(gold_namelists, comp_namelists, case): """ different_namelists = OrderedDict() for namelist, gold_names in gold_namelists.items(): - if (namelist not in comp_namelists): + if namelist not in comp_namelists: different_namelists[namelist] = ["Missing namelist: {}\n".format(namelist)] else: comp_names = comp_namelists[namelist] for name, gold_value in gold_names.items(): - if (name not in comp_names): - different_namelists.setdefault(namelist, []).append(" missing variable: '{}'\n".format(name)) + if name not in comp_names: + different_namelists.setdefault(namelist, []).append( + " missing variable: '{}'\n".format(name) + ) else: comp_value = comp_names[name] comments = _compare_values(name, gold_value, comp_value, case) @@ -561,12 +657,16 @@ def _compare_namelists(gold_namelists, comp_namelists, case): different_namelists.setdefault(namelist, []).append(comments) for name in comp_names: - if (name not in gold_names): - different_namelists.setdefault(namelist, []).append(" found extra variable: '{}'\n".format(name)) + if name not in gold_names: + different_namelists.setdefault(namelist, []).append( + " found extra variable: '{}'\n".format(name) + ) for namelist in comp_namelists: - if (namelist not in gold_namelists): - different_namelists[namelist] = ["Found extra namelist: {}\n".format(namelist)] + if namelist not in gold_namelists: + different_namelists[namelist] = [ + "Found extra namelist: {}\n".format(namelist) + ] comments = "" for namelist, nlcomment in different_namelists.items(): @@ -578,9 +678,10 @@ def _compare_namelists(gold_namelists, comp_namelists, case): return comments + ############################################################################### def compare_namelist_files(gold_file, compare_file, case=None): -############################################################################### + ############################################################################### """ Returns (is_match, comments) """ @@ -592,9 +693,10 @@ def compare_namelist_files(gold_file, compare_file, case=None): comments = _compare_namelists(gold_namelists, comp_namelists, case) return comments == "", comments + ############################################################################### def is_namelist_file(file_path): -############################################################################### + ############################################################################### try: compare_namelist_files(file_path, file_path) except CIMEError as e: diff --git a/CIME/compare_test_results.py b/CIME/compare_test_results.py index a29aa604797..e19e13b804d 100644 --- a/CIME/compare_test_results.py +++ b/CIME/compare_test_results.py @@ -9,41 +9,62 @@ ############################################################################### def append_status_cprnc_log(msg, logfile_name, test_dir): -############################################################################### + ############################################################################### try: append_status(msg, logfile_name, caseroot=test_dir) except IOError: pass + ############################################################################### def compare_namelists(case, baseline_name, baseline_root, logfile_name): -############################################################################### + ############################################################################### log_lvl = logging.getLogger().getEffectiveLevel() logging.disable(logging.CRITICAL) - success = 
case.case_cmpgen_namelists(compare=True, compare_name=baseline_name, baseline_root=baseline_root, logfile_name=logfile_name) + success = case.case_cmpgen_namelists( + compare=True, + compare_name=baseline_name, + baseline_root=baseline_root, + logfile_name=logfile_name, + ) logging.getLogger().setLevel(log_lvl) return success + ############################################################################### def compare_history(case, baseline_name, baseline_root, log_id): -############################################################################### + ############################################################################### real_user = case.get_value("REALUSER") with EnvironmentContext(USER=real_user): - baseline_full_dir = os.path.join(baseline_root, baseline_name, case.get_value("CASEBASEID")) + baseline_full_dir = os.path.join( + baseline_root, baseline_name, case.get_value("CASEBASEID") + ) outfile_suffix = "{}.{}".format(baseline_name, log_id) try: - result, comments = compare_baseline(case, baseline_dir=baseline_full_dir, - outfile_suffix=outfile_suffix) + result, comments = compare_baseline( + case, baseline_dir=baseline_full_dir, outfile_suffix=outfile_suffix + ) except IOError: - result, comments = compare_baseline(case, baseline_dir=baseline_full_dir, - outfile_suffix=None) + result, comments = compare_baseline( + case, baseline_dir=baseline_full_dir, outfile_suffix=None + ) return result, comments + ############################################################################### -def compare_test_results(baseline_name, baseline_root, test_root, compiler, test_id=None, compare_tests=None, namelists_only=False, hist_only=False): -############################################################################### +def compare_test_results( + baseline_name, + baseline_root, + test_root, + compiler, + test_id=None, + compare_tests=None, + namelists_only=False, + hist_only=False, +): + ############################################################################### """ Compares with baselines for all matching tests @@ -75,13 +96,15 @@ def compare_test_results(baseline_name, baseline_root, test_root, compiler, test testopts = [] if testopts is None else testopts build_only = "B" in testopts - if (compare_tests in [[], None] or CIME.utils.match_any(test_name, compare_tests)): + if compare_tests in [[], None] or CIME.utils.match_any( + test_name, compare_tests + ): - if (not hist_only): + if not hist_only: nl_compare_result = None nl_compare_comment = "" nl_result = ts.get_status(SETUP_PHASE) - if (nl_result is None): + if nl_result is None: nl_compare_result = "SKIP" nl_compare_comment = "Test did not make it to setup phase" nl_do_compare = False @@ -91,15 +114,15 @@ def compare_test_results(baseline_name, baseline_root, test_root, compiler, test nl_do_compare = False detailed_comments = "" - if (not namelists_only and not build_only): + if not namelists_only and not build_only: compare_result = None compare_comment = "" run_result = ts.get_status(RUN_PHASE) - if (run_result is None): + if run_result is None: compare_result = "SKIP" compare_comment = "Test did not make it to run phase" do_compare = False - elif (run_result != TEST_PASS_STATUS): + elif run_result != TEST_PASS_STATUS: compare_result = "SKIP" compare_comment = "Run phase did not pass" do_compare = False @@ -112,22 +135,31 @@ def compare_test_results(baseline_name, baseline_root, test_root, compiler, test if baseline_name is None: baseline_name = case.get_value("BASELINE_NAME_CMP") if not baseline_name: - baseline_name = 
CIME.utils.get_current_branch(repo=CIME.utils.get_cime_root()) + baseline_name = CIME.utils.get_current_branch( + repo=CIME.utils.get_cime_root() + ) if baseline_root is None: baseline_root = case.get_value("BASELINE_ROOT") - logfile_name = "compare.log.{}.{}".format(baseline_name.replace("/", "_"), log_id) + logfile_name = "compare.log.{}.{}".format( + baseline_name.replace("/", "_"), log_id + ) append_status_cprnc_log( "Comparing against baseline with compare_test_results:\n" - "Baseline: {}\n In baseline_root: {}".format(baseline_name, baseline_root), + "Baseline: {}\n In baseline_root: {}".format( + baseline_name, baseline_root + ), logfile_name, - test_dir) + test_dir, + ) if nl_do_compare or do_compare: if nl_do_compare: - nl_success = compare_namelists(case, baseline_name, baseline_root, logfile_name) + nl_success = compare_namelists( + case, baseline_name, baseline_root, logfile_name + ) if nl_success: nl_compare_result = TEST_PASS_STATUS nl_compare_comment = "" @@ -137,7 +169,9 @@ def compare_test_results(baseline_name, baseline_root, test_root, compiler, test all_pass_or_skip = False if do_compare: - success, detailed_comments = compare_history(case, baseline_name, baseline_root, log_id) + success, detailed_comments = compare_history( + case, baseline_name, baseline_root, log_id + ) if success: compare_result = TEST_PASS_STATUS else: @@ -148,10 +182,14 @@ def compare_test_results(baseline_name, baseline_root, test_root, compiler, test brief_result = "" if not hist_only: - brief_result += "{} {} {} {}\n".format(nl_compare_result, test_name, NAMELIST_PHASE, nl_compare_comment) + brief_result += "{} {} {} {}\n".format( + nl_compare_result, test_name, NAMELIST_PHASE, nl_compare_comment + ) if not namelists_only: - brief_result += "{} {} {}".format(compare_result, test_name, BASELINE_PHASE) + brief_result += "{} {} {}".format( + compare_result, test_name, BASELINE_PHASE + ) if compare_comment: brief_result += " {}".format(compare_comment) brief_result += "\n" @@ -161,6 +199,8 @@ def compare_test_results(baseline_name, baseline_root, test_root, compiler, test append_status_cprnc_log(brief_result, logfile_name, test_dir) if detailed_comments: - append_status_cprnc_log("Detailed comments:\n" + detailed_comments, logfile_name, test_dir) + append_status_cprnc_log( + "Detailed comments:\n" + detailed_comments, logfile_name, test_dir + ) return all_pass_or_skip diff --git a/CIME/cs_status.py b/CIME/cs_status.py index 5dddede453b..8b4c479b93d 100644 --- a/CIME/cs_status.py +++ b/CIME/cs_status.py @@ -11,10 +11,17 @@ import sys from collections import defaultdict -def cs_status(test_paths, summary=False, fails_only=False, - count_fails_phase_list=None, - expected_fails_filepath=None, - out=sys.stdout): + +def cs_status( + test_paths, + summary=False, + fails_only=False, + count_fails_phase_list=None, + check_throughput=False, + check_memory=False, + expected_fails_filepath=None, + out=sys.stdout, +): """Print the test statuses of all tests in test_paths. The default is to print to stdout, but this can be overridden with the 'out' argument. @@ -35,10 +42,11 @@ def cs_status(test_paths, summary=False, fails_only=False, the full path to a file listing expected failures for this test suite. Expected failures are then labeled as such in the output. 
""" - expect(not (summary and fails_only), - "Cannot have both summary and fails_only") - expect(not (summary and count_fails_phase_list), - "Cannot have both summary and count_fails_phase_list") + expect(not (summary and fails_only), "Cannot have both summary and fails_only") + expect( + not (summary and count_fails_phase_list), + "Cannot have both summary and count_fails_phase_list", + ) if count_fails_phase_list is None: count_fails_phase_list = [] non_pass_counts = dict.fromkeys(count_fails_phase_list, 0) @@ -46,20 +54,29 @@ def cs_status(test_paths, summary=False, fails_only=False, test_id_output = defaultdict(str) test_id_counts = defaultdict(int) for test_path in test_paths: - test_dir=os.path.dirname(test_path) + test_dir = os.path.dirname(test_path) ts = TestStatus(test_dir=test_dir) test_id = os.path.basename(test_dir).split(".")[-1] if summary: - output = _overall_output(ts, " {status} {test_name}\n") + output = _overall_output( + ts, " {status} {test_name}\n", check_throughput, check_memory + ) else: if fails_only: - output = '' + output = "" else: - output = _overall_output(ts, " {test_name} (Overall: {status}) details:\n") - output += ts.phase_statuses_dump(prefix=" ", - skip_passes=fails_only, - skip_phase_list=count_fails_phase_list, - xfails=xfails.get(ts.get_name())) + output = _overall_output( + ts, + " {test_name} (Overall: {status}) details:\n", + check_throughput, + check_memory, + ) + output += ts.phase_statuses_dump( + prefix=" ", + skip_passes=fails_only, + skip_phase_list=count_fails_phase_list, + xfails=xfails.get(ts.get_name()), + ) if count_fails_phase_list: ts.increment_non_pass_counts(non_pass_counts) @@ -68,15 +85,18 @@ def cs_status(test_paths, summary=False, fails_only=False, for test_id in sorted(test_id_output): count = test_id_counts[test_id] - print("{}: {} test{}".format(test_id, count, 's' if count > 1 else ''), file=out) + print( + "{}: {} test{}".format(test_id, count, "s" if count > 1 else ""), file=out + ) print(test_id_output[test_id], file=out) - print(' ', file=out) + print(" ", file=out) if count_fails_phase_list: - print(72*'=', file=out) - print('Non-PASS results for select phases:', file=out) + print(72 * "=", file=out) + print("Non-PASS results for select phases:", file=out) for phase in count_fails_phase_list: - print('{} non-passes: {}'.format(phase, non_pass_counts[phase]), file=out) + print("{} non-passes: {}".format(phase, non_pass_counts[phase]), file=out) + def _get_xfails(expected_fails_filepath): """Returns a dictionary of ExpectedFails objects, where the keys are test names @@ -93,7 +113,8 @@ def _get_xfails(expected_fails_filepath): xfails = {} return xfails -def _overall_output(ts, format_str): + +def _overall_output(ts, format_str, check_throughput, check_memory): """Returns a string giving the overall test status Args: @@ -102,5 +123,8 @@ def _overall_output(ts, format_str): contain place-holders for status and test_name """ test_name = ts.get_name() - status = ts.get_overall_test_status()[0] + status = ts.get_overall_test_status( + check_throughput=check_throughput, + check_memory=check_memory, + )[0] return format_str.format(status=status, test_name=test_name) diff --git a/CIME/cs_status_creator.py b/CIME/cs_status_creator.py index e7fc06a99c9..787ff5d32a3 100644 --- a/CIME/cs_status_creator.py +++ b/CIME/cs_status_creator.py @@ -7,7 +7,8 @@ import os import stat -def create_cs_status(test_root, test_id, extra_args='', filename=None): + +def create_cs_status(test_root, test_id, extra_args="", filename=None): """Create a 
test suite-specific cs.status file from the template Arguments: @@ -29,10 +30,12 @@ def create_cs_status(test_root, test_id, extra_args='', filename=None): template_path = CIME.utils.get_template_path() template_file = os.path.join(template_path, "cs.status.template") template = open(template_file, "r").read() - template = template.replace("<PATH_TO_TOOLS>", tools_path).replace\ ("<EXTRA_ARGS>", extra_args).replace\ ("<TEST_ID>", test_id).replace\ ("<TEST_ROOT>", test_root) + template = ( + template.replace("<PATH_TO_TOOLS>", tools_path) + .replace("<EXTRA_ARGS>", extra_args) + .replace("<TEST_ID>", test_id) + .replace("<TEST_ROOT>", test_root) + ) if not os.path.exists(test_root): os.makedirs(test_root) if filename is None: @@ -40,4 +43,6 @@ def create_cs_status(test_root, test_id, extra_args='', filename=None): cs_status_file = os.path.join(test_root, filename) with open(cs_status_file, "w") as fd: fd.write(template) - os.chmod(cs_status_file, os.stat(cs_status_file).st_mode | stat.S_IXUSR | stat.S_IXGRP) + os.chmod( + cs_status_file, os.stat(cs_status_file).st_mode | stat.S_IXUSR | stat.S_IXGRP + ) diff --git a/CIME/data/CMake/CESM_utils.cmake b/CIME/data/CMake/CESM_utils.cmake index 6b7a433cbad..1fa512a81f4 100644 --- a/CIME/data/CMake/CESM_utils.cmake +++ b/CIME/data/CMake/CESM_utils.cmake @@ -1,2 +1,2 @@ message("CESM_utils.cmake is deprecated, please replace references with CIME_utils.cmake") -include(CIME_utils) \ No newline at end of file +include(CIME_utils) diff --git a/CIME/data/CMake/mpiexec.cmake b/CIME/data/CMake/mpiexec.cmake index 2ca9f736a86..7378715eccc 100644 --- a/CIME/data/CMake/mpiexec.cmake +++ b/CIME/data/CMake/mpiexec.cmake @@ -27,7 +27,3 @@ function( add_mpi_test _testName _testExe _testArgs _numProc _timeout) set_tests_properties(${_testName} PROPERTIES TIMEOUT ${_timeout}) endfunction(add_mpi_test) - - - - diff --git a/CIME/data/components/stub_comps_nuopc/satm/cime_config/buildnml b/CIME/data/components/stub_comps_nuopc/satm/cime_config/buildnml index e428f57fb4c..8a21a4ae9e6 100755 --- a/CIME/data/components/stub_comps_nuopc/satm/cime_config/buildnml +++ b/CIME/data/components/stub_comps_nuopc/satm/cime_config/buildnml @@ -4,6 +4,6 @@ build stub model namelist """ # DO NOTHING -#pylint: disable=unused-argument +# pylint: disable=unused-argument def buildnml(case, caseroot, compname): pass diff --git a/CIME/data/components/stub_comps_nuopc/sesp/cime_config/buildnml b/CIME/data/components/stub_comps_nuopc/sesp/cime_config/buildnml index e428f57fb4c..8a21a4ae9e6 100755 --- a/CIME/data/components/stub_comps_nuopc/sesp/cime_config/buildnml +++ b/CIME/data/components/stub_comps_nuopc/sesp/cime_config/buildnml @@ -4,6 +4,6 @@ build stub model namelist """ # DO NOTHING -#pylint: disable=unused-argument +# pylint: disable=unused-argument def buildnml(case, caseroot, compname): pass diff --git a/CIME/data/components/stub_comps_nuopc/sglc/cime_config/buildnml b/CIME/data/components/stub_comps_nuopc/sglc/cime_config/buildnml index e428f57fb4c..8a21a4ae9e6 100755 --- a/CIME/data/components/stub_comps_nuopc/sglc/cime_config/buildnml +++ b/CIME/data/components/stub_comps_nuopc/sglc/cime_config/buildnml @@ -4,6 +4,6 @@ build stub model namelist """ # DO NOTHING -#pylint: disable=unused-argument +# pylint: disable=unused-argument def buildnml(case, caseroot, compname): pass diff --git a/CIME/data/components/stub_comps_nuopc/sice/cime_config/buildnml b/CIME/data/components/stub_comps_nuopc/sice/cime_config/buildnml index e428f57fb4c..8a21a4ae9e6 100755 --- a/CIME/data/components/stub_comps_nuopc/sice/cime_config/buildnml +++
b/CIME/data/components/stub_comps_nuopc/sice/cime_config/buildnml @@ -4,6 +4,6 @@ build stub model namelist """ # DO NOTHING -#pylint: disable=unused-argument +# pylint: disable=unused-argument def buildnml(case, caseroot, compname): pass diff --git a/CIME/data/components/stub_comps_nuopc/slnd/cime_config/buildnml b/CIME/data/components/stub_comps_nuopc/slnd/cime_config/buildnml index e428f57fb4c..8a21a4ae9e6 100755 --- a/CIME/data/components/stub_comps_nuopc/slnd/cime_config/buildnml +++ b/CIME/data/components/stub_comps_nuopc/slnd/cime_config/buildnml @@ -4,6 +4,6 @@ build stub model namelist """ # DO NOTHING -#pylint: disable=unused-argument +# pylint: disable=unused-argument def buildnml(case, caseroot, compname): pass diff --git a/CIME/data/components/stub_comps_nuopc/socn/cime_config/buildnml b/CIME/data/components/stub_comps_nuopc/socn/cime_config/buildnml index e428f57fb4c..8a21a4ae9e6 100755 --- a/CIME/data/components/stub_comps_nuopc/socn/cime_config/buildnml +++ b/CIME/data/components/stub_comps_nuopc/socn/cime_config/buildnml @@ -4,6 +4,6 @@ build stub model namelist """ # DO NOTHING -#pylint: disable=unused-argument +# pylint: disable=unused-argument def buildnml(case, caseroot, compname): pass diff --git a/CIME/data/components/stub_comps_nuopc/srof/cime_config/buildnml b/CIME/data/components/stub_comps_nuopc/srof/cime_config/buildnml index e428f57fb4c..8a21a4ae9e6 100755 --- a/CIME/data/components/stub_comps_nuopc/srof/cime_config/buildnml +++ b/CIME/data/components/stub_comps_nuopc/srof/cime_config/buildnml @@ -4,6 +4,6 @@ build stub model namelist """ # DO NOTHING -#pylint: disable=unused-argument +# pylint: disable=unused-argument def buildnml(case, caseroot, compname): pass diff --git a/CIME/data/components/stub_comps_nuopc/swav/cime_config/buildnml b/CIME/data/components/stub_comps_nuopc/swav/cime_config/buildnml index e428f57fb4c..8a21a4ae9e6 100755 --- a/CIME/data/components/stub_comps_nuopc/swav/cime_config/buildnml +++ b/CIME/data/components/stub_comps_nuopc/swav/cime_config/buildnml @@ -4,6 +4,6 @@ build stub model namelist """ # DO NOTHING -#pylint: disable=unused-argument +# pylint: disable=unused-argument def buildnml(case, caseroot, compname): pass diff --git a/CIME/data/components/xcpl_comps_nuopc/xatm/cime_config/buildnml b/CIME/data/components/xcpl_comps_nuopc/xatm/cime_config/buildnml index f8308e8a76d..1115818efda 100755 --- a/CIME/data/components/xcpl_comps_nuopc/xatm/cime_config/buildnml +++ b/CIME/data/components/xcpl_comps_nuopc/xatm/cime_config/buildnml @@ -6,22 +6,27 @@ build data model library import sys, os -_CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..","..","..","..") +_CIMEROOT = os.path.join( + os.path.dirname(os.path.abspath(__file__)), "..", "..", "..", "..", ".." 
+) sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools")) from standard_script_setup import * from CIME.buildnml import build_xcpl_nml, parse_input from CIME.case import Case + def buildnml(case, caseroot, compname): if compname != "xatm": raise AttributeError build_xcpl_nml(case, caseroot, compname) + def _main_func(): caseroot = parse_input(sys.argv) with Case(caseroot) as case: buildnml(case, caseroot, "xatm") + if __name__ == "__main__": _main_func() diff --git a/CIME/data/components/xcpl_comps_nuopc/xglc/cime_config/buildnml b/CIME/data/components/xcpl_comps_nuopc/xglc/cime_config/buildnml index 2394c6c8714..93b5a7028d2 100755 --- a/CIME/data/components/xcpl_comps_nuopc/xglc/cime_config/buildnml +++ b/CIME/data/components/xcpl_comps_nuopc/xglc/cime_config/buildnml @@ -6,22 +6,27 @@ build data model library import sys, os -_CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..","..","..","..") +_CIMEROOT = os.path.join( + os.path.dirname(os.path.abspath(__file__)), "..", "..", "..", "..", ".." +) sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools")) from standard_script_setup import * from CIME.buildnml import build_xcpl_nml, parse_input from CIME.case import Case + def buildnml(case, caseroot, compname): if compname != "xglc": raise AttributeError build_xcpl_nml(case, caseroot, compname) + def _main_func(): caseroot = parse_input(sys.argv) with Case(caseroot) as case: buildnml(case, caseroot, "xglc") + if __name__ == "__main__": _main_func() diff --git a/CIME/data/components/xcpl_comps_nuopc/xglc/src/glc_comp_nuopc.F90 b/CIME/data/components/xcpl_comps_nuopc/xglc/src/glc_comp_nuopc.F90 index 1ad724b9700..07d8ecf1492 100644 --- a/CIME/data/components/xcpl_comps_nuopc/xglc/src/glc_comp_nuopc.F90 +++ b/CIME/data/components/xcpl_comps_nuopc/xglc/src/glc_comp_nuopc.F90 @@ -351,7 +351,7 @@ subroutine state_setexport(rc) lat(n) = ownedElemCoords(2*n) end do - ! Start from index 2 in order to skip the scalar field + ! Start from index 2 in order to skip the scalar field do ns = 1,num_icesheets do nf = 2,fldsFrGlc_num if (fldsFrGlc(nf)%ungridded_ubound == 0) then diff --git a/CIME/data/components/xcpl_comps_nuopc/xice/cime_config/buildnml b/CIME/data/components/xcpl_comps_nuopc/xice/cime_config/buildnml index 21dd29be064..e54d7ae5075 100755 --- a/CIME/data/components/xcpl_comps_nuopc/xice/cime_config/buildnml +++ b/CIME/data/components/xcpl_comps_nuopc/xice/cime_config/buildnml @@ -6,22 +6,27 @@ build data model library import sys, os -_CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..","..","..","..") +_CIMEROOT = os.path.join( + os.path.dirname(os.path.abspath(__file__)), "..", "..", "..", "..", ".." 
+) sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools")) from standard_script_setup import * from CIME.buildnml import build_xcpl_nml, parse_input from CIME.case import Case + def buildnml(case, caseroot, compname): if compname != "xice": raise AttributeError build_xcpl_nml(case, caseroot, compname) + def _main_func(): caseroot = parse_input(sys.argv) with Case(caseroot) as case: buildnml(case, caseroot, "xice") + if __name__ == "__main__": _main_func() diff --git a/CIME/data/components/xcpl_comps_nuopc/xlnd/cime_config/buildnml b/CIME/data/components/xcpl_comps_nuopc/xlnd/cime_config/buildnml index 655cc315d95..7d85bc7e131 100755 --- a/CIME/data/components/xcpl_comps_nuopc/xlnd/cime_config/buildnml +++ b/CIME/data/components/xcpl_comps_nuopc/xlnd/cime_config/buildnml @@ -6,22 +6,27 @@ build data model library import sys, os -_CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..","..","..","..") +_CIMEROOT = os.path.join( + os.path.dirname(os.path.abspath(__file__)), "..", "..", "..", "..", ".." +) sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools")) from standard_script_setup import * from CIME.buildnml import build_xcpl_nml, parse_input from CIME.case import Case + def buildnml(case, caseroot, compname): if compname != "xlnd": raise AttributeError build_xcpl_nml(case, caseroot, compname) + def _main_func(): caseroot = parse_input(sys.argv) with Case(caseroot) as case: buildnml(case, caseroot, "xlnd") + if __name__ == "__main__": _main_func() diff --git a/CIME/data/components/xcpl_comps_nuopc/xocn/cime_config/buildnml b/CIME/data/components/xcpl_comps_nuopc/xocn/cime_config/buildnml index 16abbabe55c..8e751125a4e 100755 --- a/CIME/data/components/xcpl_comps_nuopc/xocn/cime_config/buildnml +++ b/CIME/data/components/xcpl_comps_nuopc/xocn/cime_config/buildnml @@ -6,22 +6,27 @@ build data model library import sys, os -_CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..","..","..","..") +_CIMEROOT = os.path.join( + os.path.dirname(os.path.abspath(__file__)), "..", "..", "..", "..", ".." +) sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools")) from standard_script_setup import * from CIME.buildnml import build_xcpl_nml, parse_input from CIME.case import Case + def buildnml(case, caseroot, compname): if compname != "xocn": raise AttributeError build_xcpl_nml(case, caseroot, compname) + def _main_func(): caseroot = parse_input(sys.argv) with Case(caseroot) as case: buildnml(case, caseroot, "xocn") + if __name__ == "__main__": _main_func() diff --git a/CIME/data/components/xcpl_comps_nuopc/xrof/cime_config/buildnml b/CIME/data/components/xcpl_comps_nuopc/xrof/cime_config/buildnml index 7889ed07fbf..323b7f3470c 100755 --- a/CIME/data/components/xcpl_comps_nuopc/xrof/cime_config/buildnml +++ b/CIME/data/components/xcpl_comps_nuopc/xrof/cime_config/buildnml @@ -6,22 +6,27 @@ build data model library import sys, os -_CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..","..","..","..") +_CIMEROOT = os.path.join( + os.path.dirname(os.path.abspath(__file__)), "..", "..", "..", "..", ".." 
+) sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools")) from standard_script_setup import * from CIME.buildnml import build_xcpl_nml, parse_input from CIME.case import Case + def buildnml(case, caseroot, compname): if compname != "xrof": raise AttributeError build_xcpl_nml(case, caseroot, compname) + def _main_func(): caseroot = parse_input(sys.argv) with Case(caseroot) as case: buildnml(case, caseroot, "xrof") + if __name__ == "__main__": _main_func() diff --git a/CIME/data/components/xcpl_comps_nuopc/xshare/dead_methods_mod.F90 b/CIME/data/components/xcpl_comps_nuopc/xshare/dead_methods_mod.F90 index d8b55abd8c0..7d88a7fce89 100644 --- a/CIME/data/components/xcpl_comps_nuopc/xshare/dead_methods_mod.F90 +++ b/CIME/data/components/xcpl_comps_nuopc/xshare/dead_methods_mod.F90 @@ -11,8 +11,8 @@ module dead_methods_mod use ESMF , only : ESMF_GeomType_Flag, ESMF_FieldStatus_Flag use ESMF , only : ESMF_Mesh, ESMF_MeshGet use ESMF , only : ESMF_GEOMTYPE_MESH, ESMF_GEOMTYPE_GRID, ESMF_FIELDSTATUS_COMPLETE - use ESMF , only : ESMF_Clock, ESMF_ClockCreate, ESMF_ClockGet, ESMF_ClockSet - use ESMF , only : ESMF_ClockPrint, ESMF_ClockAdvance + use ESMF , only : ESMF_Clock, ESMF_ClockCreate, ESMF_ClockGet, ESMF_ClockSet + use ESMF , only : ESMF_ClockPrint, ESMF_ClockAdvance use ESMF , only : ESMF_Alarm, ESMF_AlarmCreate, ESMF_AlarmGet, ESMF_AlarmSet use ESMF , only : ESMF_Calendar, ESMF_CALKIND_NOLEAP, ESMF_CALKIND_GREGORIAN use ESMF , only : ESMF_Time, ESMF_TimeGet, ESMF_TimeSet @@ -34,7 +34,7 @@ module dead_methods_mod public :: state_getscalar public :: state_setscalar public :: state_diagnose - public :: alarmInit + public :: alarmInit public :: chkerr private :: timeInit @@ -61,7 +61,7 @@ module dead_methods_mod optMonthly = "monthly" , & optYearly = "yearly" , & optDate = "date" , & - optIfdays0 = "ifdays0" + optIfdays0 = "ifdays0" ! Module data integer, parameter :: SecPerDay = 86400 ! Seconds per day @@ -422,7 +422,7 @@ subroutine field_getfldptr(field, fldptr1, fldptr2, rank, abort, rc) call ESMF_MeshGet(lmesh, numOwnedNodes=nnodes, numOwnedElements=nelements, rc=rc) if (chkerr(rc,__LINE__,u_FILE_u)) return if (nnodes == 0 .and. nelements == 0) lrank = 0 - else + else call ESMF_LogWrite(trim(subname)//": ERROR geomtype not supported ", & ESMF_LOGMSG_INFO, rc=rc) rc = ESMF_FAILURE @@ -783,7 +783,7 @@ end subroutine alarmInit subroutine timeInit( Time, ymd, cal, tod, rc) - ! Create the ESMF_Time object corresponding to the given input time, + ! Create the ESMF_Time object corresponding to the given input time, ! given in YMD (Year Month Day) and TOD (Time-of-day) format. ! Set the time by an integer as YYYYMMDD and integer seconds in the day diff --git a/CIME/data/components/xcpl_comps_nuopc/xwav/cime_config/buildnml b/CIME/data/components/xcpl_comps_nuopc/xwav/cime_config/buildnml index 6838bc01041..100f0243be8 100755 --- a/CIME/data/components/xcpl_comps_nuopc/xwav/cime_config/buildnml +++ b/CIME/data/components/xcpl_comps_nuopc/xwav/cime_config/buildnml @@ -6,22 +6,27 @@ build data model library import sys, os -_CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..","..","..","..") +_CIMEROOT = os.path.join( + os.path.dirname(os.path.abspath(__file__)), "..", "..", "..", "..", ".." 
+) sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools")) from standard_script_setup import * from CIME.buildnml import build_xcpl_nml, parse_input from CIME.case import Case + def buildnml(case, caseroot, compname): if compname != "xwav": raise AttributeError build_xcpl_nml(case, caseroot, compname) + def _main_func(): caseroot = parse_input(sys.argv) with Case(caseroot) as case: buildnml(case, caseroot, "xwav") + if __name__ == "__main__": _main_func()
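The seven xcpl buildnml scripts above (xatm through xwav) are the same boilerplate with only the component name changed. A condensed sketch of the shared pattern, with the name parameterized (this generic form is illustrative, not part of the change):

    # Sketch of the pattern the xcpl buildnml scripts share; assumes CIME is
    # importable (the real scripts prepend scripts/Tools to sys.path before
    # importing standard_script_setup).
    import sys

    from CIME.buildnml import build_xcpl_nml, parse_input
    from CIME.case import Case

    COMPNAME = "xatm"  # each real script hard-codes its own component here


    def buildnml(case, caseroot, compname):
        # Refuse to build a namelist for any other component.
        if compname != COMPNAME:
            raise AttributeError
        build_xcpl_nml(case, caseroot, compname)


    def _main_func():
        caseroot = parse_input(sys.argv)
        with Case(caseroot) as case:
            buildnml(case, caseroot, COMPNAME)


    if __name__ == "__main__":
        _main_func()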
file containing inputdata server descriptions (for documentation only - DO NOT EDIT) @@ -71,7 +75,7 @@ char - $CIMEROOT/CIME/data/config/$MODEL/machines/config_compilers.xml + $SRCROOT/ccs_config/machines/config_compilers.xml case_last env_case.xml file containing compiler specifications for target model primary component (for documentation only - DO NOT EDIT) @@ -80,15 +84,15 @@ char - $CIMEROOT/config/$MODEL/machines/cmake_macros + $SRCROOT/ccs_config/machines/cmake_macros case_last env_case.xml Directory containing cmake macros (for documentation only - DO NOT EDIT) - + char - $CIMEROOT/CIME/data/config/$MODEL/machines/config_pio.xml + $SRCROOT/ccs_config/machines/config_pio.xml case_last env_case.xml file containing specification of pio settings for target model possible machine, compiler, mpilib, compset and/or grid attributes (for documentation only - DO NOT EDIT) @@ -134,12 +138,12 @@ char - $SRCROOT/components/cmeps - $SRCROOT/components/cmeps - $SRCROOT/components/cmeps - $SRCROOT/components/cpl7/driver - $SRCROOT/components/cpl7/driver - $SRCROOT/components/cpl7/driver + $SRCROOT/components/cmeps + $SRCROOT/components/cmeps + $SRCROOT/components/cmeps + $SRCROOT/components/cpl7/driver + $SRCROOT/components/cpl7/driver + $SRCROOT/components/cpl7/driver case_comps env_case.xml @@ -171,8 +175,8 @@ char unset - $SRCROOT/components/ww3dev $SRCROOT/components/ww3/ + $SRCROOT/components/ww3dev $SRCROOT/components/cpl7/components/data_comps_$COMP_INTERFACE/dwav $SRCROOT/components/cdeps/dwav $SRCROOT/components/cpl7/components/stub_comps_$COMP_INTERFACE/swav @@ -337,7 +341,7 @@ char - $CIMEROOT/CIME/data/config/cesm/config_archive.xml + $SRCROOT/ccs_config/config_archive.xml $COMP_ROOT_DIR_CPL/cime_config/config_archive.xml $COMP_ROOT_DIR_ROF/cime_config/config_archive.xml @@ -519,8 +523,8 @@ char - $SRCROOT/components/cmeps/cime_config/config_component_$MODEL.xml --> - $SRCROOT/components/cpl7/driver/cime_config/config_component_$MODEL.xml --> + $SRCROOT/components/cmeps/cime_config/config_component_$MODEL.xml --> + $SRCROOT/components/cpl7/driver/cime_config/config_component_$MODEL.xml --> case_last env_case.xml diff --git a/CIME/data/config/cesm/config_grids.xml b/CIME/data/config/cesm/config_grids.xml deleted file mode 100644 index 0c694c45b5e..00000000000 --- a/CIME/data/config/cesm/config_grids.xml +++ /dev/null @@ -1,2091 +0,0 @@ - - - - - - ========================================= - GRID naming convention - ========================================= - The notation for the grid longname is - a%name_l%name_oi%name_r%name_m%mask_g%name_w%name - where - a% => atm, l% => lnd, oi% => ocn/ice, r% => river, m% => mask, g% => glc, w% => wav - - Supported out of the box grid configurations are given via alias specification in - the file "config_grids.xml". 
Each grid alias can also be associated with the - following optional attributes - - compset (Regular expression for compset matches that are required for this grid) - not_compset (Regular expression for compset matches that are not permitted this grid) - - Using the alias and the optional "compset" and "not_compset" attributes a grid longname is created - Note that the mask is for information only - and is not an attribute of the grid - By default, if the mask is not specified below, it will be set to the ocnice grid - And if there is no ocnice grid (such as for single column, the mask is null since it does not mean anything) - - - - - null - null - null - null - rx1 - r05 - r05 - HDMA - rx1 - r05 - r05 - null - gris4 - gris4 - null - ww3a - ww3a - ww3a - null - - - - gx1v6 - gx1v6 - gx1v6 - Non-standard grid for testing of the interpolation in DATM rather than coupler - - - - gx1v7 - gx1v7 - gx1v7 - Non-standard grid for testing of the interpolation in DATM rather than coupler - - - - 01col - 01col - Non-standard grid for running POP in true 1D mode - - - - CLM_USRDAT - CLM_USRDAT - null - - - - 1x1_numaIA - 1x1_numaIA - null - - - - 1x1_brazil - 1x1_brazil - null - - - - 1x1_smallvilleIA - 1x1_smallvilleIA - null - - - - 1x1_camdenNJ - 1x1_camdenNJ - null - - - - 1x1_mexicocityMEX - 1x1_mexicocityMEX - null - - - - 1x1_vancouverCAN - 1x1_vancouverCAN - null - - - - 1x1_urbanc_alpha - 1x1_urbanc_alpha - null - - - - 5x5_amazon - 5x5_amazon - null - 5x5_amazon - - - - 5x5_amazon - 5x5_amazon - r05_amazon - 5x5_amazon - - - - - 0.125nldas2 - 0.125nldas2 - 0.125nldas2 - 0.125nldas2 - 0.125nldas2 - - - - 360x720cru - 360x720cru - - - - - - T31 - T31 - gx3v7 - gx3v7 - - - - T31 - T31 - gx3v7 - gris4 - gx3v7 - - - - T31 - T31 - gx3v7 - gris4 - gx3v7 - - - - T31 - T31 - gx3v7 - gris20 - gx3v7 - - - - T31 - T31 - gx3v7 - gris20 - gx3v7 - - - - T42 - T42 - T42 - gx1v7 - - - - T42 - T42 - T42 - gx1v6 - - - - T42 - T42 - T42 - gx1v7 - - - - T5 - T5 - T5 - gx3v7 - - - - T85 - T85 - T85 - gx1v6 - - - - T85 - T85 - T85 - gx1v7 - - - - T85 - 0.9x1.25 - tx0.1v2 - tx0.1v2 - - - - T341 - 0.23x0.31 - tx0.1v2 - tx0.1v2 - - - - T62 - T62 - gx3v7 - gx3v7 - - - - T62 - T62 - tx1v1 - tx1v1 - - - - T62 - T62 - tn1v3 - tn1v3 - - - - T62 - T62 - tn0.25v3 - tn0.25v3 - - - - T62 - T62 - tx0.1v2 - tx0.1v2 - - - - T62 - T62 - tx0.1v3 - tx0.1v3 - - - - TL319 - TL319 - gx1v7 - JRA025v2 - gx1v7 - - - - TL319 - TL319 - gx1v7 - JRA025v2 - gx1v7 - gx1v7 - - - - TL319 - TL319 - gx1v7 - JRA025 - gx1v7 - - - - TL319 - TL319 - tx0.66v1 - JRA025 - - - - TL319 - TL319 - tx0.66v1 - JRA025 - tx0.66v1 - - - - TL319 - TL319 - tx0.1v2 - JRA025 - - - - TL319 - TL319 - tx0.1v3 - JRA025v2 - - - - TL319 - TL319 - tx0.1v3 - JRA025 - - - - TL639 - TL639 - gx1v7 - - gx1v7 - - - - TL639 - TL639 - tx0.66v1 - - - - - T62 - T62 - tx0.66v1 - - - - T62 - T62 - tx0.66v1 - tx0.66v1 - - - - T62 - T62 - tx0.25v1 - - - 0.9x1.25 - 0.9x1.25 - tx0.66v1 - - - - T62 - T62 - gx1v6 - gx1v6 - - - - T62 - T62 - gx1v7 - gx1v7 - - - - T62 - T62 - gx1v7 - gx1v7 - gx1v7 - - - - T62 - T62 - oQU120 - oQU120 - - - - - - 0.23x0.31 - 0.23x0.31 - gx1v6 - gx1v6 - - - - 0.23x0.31 - 0.23x0.31 - gx1v7 - gx1v7 - - - - 0.23x0.31 - 0.23x0.31 - tn1v3 - tn1v3 - - - - 0.23x0.31 - 0.23x0.31 - tn0.25v3 - tn0.25v3 - - - - 0.23x0.31 - 0.23x0.31 - tx0.1v2 - tx0.1v2 - - - - 0.47x0.63 - 0.47x0.63 - gx1v6 - gx1v6 - - - - 0.47x0.63 - 0.47x0.63 - gx1v7 - gx1v7 - - - - 0.47x0.63 - 0.47x0.63 - tx0.1v2 - tx0.1v2 - - - - 0.9x1.25 - 0.9x1.25 - gx1v6 - gx1v6 - - - - 0.9x1.25 - 0.9x1.25 - gx1v7 - 
gx1v7 - - - - 0.9x1.25 - 0.9x1.25 - tn1v3 - tn1v3 - - - - 0.9x1.25 - 0.9x1.25 - tn0.25v3 - tn0.25v3 - - - - 0.9x1.25 - 0.9x1.25 - gx1v7 - gris4 - gx1v7 - - - - 0.9x1.25 - 0.9x1.25 - gx1v7 - gris4 - gx1v7 - - - - 0.9x1.25 - 0.9x1.25 - gx1v7 - ais8 - gx1v7 - - - - 0.9x1.25 - 0.9x1.25 - gx1v7 - ais8:gris4 - gx1v7 - - - - 0.9x1.25 - 0.9x1.25 - gx1v7 - gris20 - gx1v7 - - - - 0.9x1.25 - 0.9x1.25 - gx1v7 - gris20 - gx1v7 - - - - 0.9x1.25 - 0.9x1.25 - 0.9x1.25 - null - - - - 0.9x1.25 - 0.9x1.25 - 0.9x1.25 - gx1v6 - - - - 0.9x1.25 - 0.9x1.25 - 0.9x1.25 - gx1v7 - - - - 0.47x0.63 - 0.47x0.63 - 0.47x0.63 - gx1v7 - - - - 1.9x2.5 - 1.9x2.5 - gx1v6 - gx1v6 - - - - 1.9x2.5 - 1.9x2.5 - gx1v7 - gx1v7 - - - - 1.9x2.5 - 1.9x2.5 - gx1v6 - r01 - gx1v6 - - - - 1.9x2.5 - 1.9x2.5 - gx1v7 - r01 - gx1v7 - - - - 1.9x2.5 - 1.9x2.5 - gx1v7 - r05 - gx1v7 - - - - 1.9x2.5 - 1.9x2.5 - gx1v7 - gris4 - gx1v7 - - - - 1.9x2.5 - 1.9x2.5 - gx1v7 - gris4 - gx1v7 - - - - 1.9x2.5 - 1.9x2.5 - 1.9x2.5 - gx1v6 - - - - 1.9x2.5 - 1.9x2.5 - 1.9x2.5 - gx1v6 - - - - 1.9x2.5 - 1.9x2.5 - 1.9x2.5 - null - - - - 1.9x2.5 - 1.9x2.5 - 1.9x2.5 - gx1v7 - - - - 4x5 - 4x5 - gx3v7 - gx3v7 - - - - 0.23x0.31 - 0.23x0.31 - 0.23x0.31 - gx1v6 - - - - 0.23x0.31 - 0.23x0.31 - 0.23x0.31 - gx1v7 - - - - 2.5x3.33 - 2.5x3.33 - 2.5x3.33 - gx1v6 - - - - 2.5x3.33 - 2.5x3.33 - 2.5x3.33 - gx1v7 - - - - 4x5 - 4x5 - 4x5 - gx3v7 - - - - 10x15 - 10x15 - 10x15 - gx3v7 - - - - 10x15 - 10x15 - 10x15 - ais8 - gx3v7 - - - - 10x15 - 10x15 - 10x15 - ais8:gris4 - gx3v7 - - - - 10x15 - 10x15 - 10x15 - usgs - - - - 10x15 - 10x15 - gx3v7 - gx3v7 - - - - - - ne5np4 - ne5np4 - ne5np4 - gx3v7 - - - - ne16np4 - ne16np4 - gx1v7 - gx1v7 - - - - ne16np4 - ne16np4 - ne16np4 - gx1v7 - - - - ne30np4 - ne30np4 - gx1v6 - gx1v6 - - - - ne30np4 - ne30np4 - gx1v7 - gx1v7 - - - - ne30np4.pg3 - ne30np4.pg3 - gx1v7 - gx1v7 - - - - ne30np4 - 1.9x2.5 - gx1v6 - For testing tri-grid - gx1v6 - - - - ne30np4 - 1.9x2.5 - gx1v7 - For testing tri-grid - gx1v7 - - - - ne30np4 - 0.9x1.25 - gx1v6 - For testing tri-grid - gx1v6 - - - - ne30np4 - 0.9x1.25 - gx1v7 - For testing tri-grid - gx1v7 - - - - ne30np4 - ne30np4 - ne30np4 - gx1v6 - - - - ne30np4 - ne30np4 - ne30np4 - gx1v7 - - - - ne60np4 - ne60np4 - gx1v6 - gx1v6 - - - - ne60np4 - ne60np4 - gx1v7 - gx1v7 - - - - ne60np4 - ne60np4 - ne60np4 - gx1v6 - - - - ne120np4 - ne120np4 - gx1v6 - gx1v6 - - - - ne120np4 - ne120np4 - gx1v7 - gx1v7 - - - - ne120np4 - ne120np4 - tx0.1v2 - tx0.1v2 - - - - ne120np4 - ne120np4 - ne120np4 - gx1v6 - - - - ne120np4 - ne120np4 - ne120np4 - gx1v7 - - - - ne240np4 - 0.23x0.31 - gx1v6 - For testing high resolution tri-grid - gx1v6 - - - - ne240np4 - 0.23x0.31 - gx1v7 - For testing high resolution tri-grid - gx1v7 - - - - ne240np4 - ne240np4 - tx0.1v2 - tx0.1v2 - - - - ne240np4 - ne240np4 - ne240np4 - gx1v6 - - - - ne240np4 - ne240np4 - ne240np4 - gx1v7 - - - - - - ne5np4.pg2 - ne5np4.pg2 - ne5np4.pg2 - gx3v7 - - - - ne30np4.pg2 - ne30np4.pg2 - ne30np4.pg2 - gx1v7 - - - - ne60np4.pg2 - ne60np4.pg2 - ne60np4.pg2 - gx1v7 - - - - ne120np4.pg2 - ne120np4.pg2 - ne120np4.pg2 - gx1v7 - - - - ne120np4.pg2 - ne120np4.pg2 - ne120np4.pg2 - tx0.1v2 - - - - ne240np4.pg2 - ne240np4.pg2 - ne240np4.pg2 - gx1v7 - - - - - - ne5np4.pg3 - ne5np4.pg3 - ne5np4.pg3 - gx3v7 - - - - ne16np4.pg3 - ne16np4.pg3 - ne16np4.pg3 - gx1v7 - - - - ne30np4.pg3 - ne30np4.pg3 - ne30np4.pg3 - gx1v7 - - - - ne60np4.pg3 - ne60np4.pg3 - ne60np4.pg3 - gx1v7 - - - - ne120np4.pg3 - ne120np4.pg3 - ne120np4.pg3 - gx1v7 - - - - ne120np4.pg3 - ne120np4.pg3 - ne120np4.pg3 - 
tx0.1v3 - - - - ne240np4.pg3 - ne240np4.pg3 - ne240np4.pg3 - gx1v7 - - - - ne120np4.pg3 - ne120np4.pg3 - gx1v7 - gx1v7 - - - - ne120np4.pg3 - ne120np4.pg3 - gx1v7 - tx0.1v3 - - - - - - ne5np4.pg4 - ne5np4.pg4 - ne5np4.pg4 - gx3v7 - - - - ne30np4.pg4 - ne30np4.pg4 - ne30np4.pg4 - gx1v7 - - - - ne60np4.pg4 - ne60np4.pg4 - ne60np4.pg4 - gx1v7 - - - - ne120np4.pg4 - ne120np4.pg4 - ne120np4.pg4 - gx1v7 - - - - - - ne0np4CONUS.ne30x8 - ne0np4CONUS.ne30x8 - gx1v7 - gx1v7 - - - - ne0np4CONUS.ne30x8 - ne0np4CONUS.ne30x8 - ne0np4CONUS.ne30x8 - gx1v7 - - - - ne0np4TESTONLY.ne5x4 - ne0np4TESTONLY.ne5x4 - ne0np4TESTONLY.ne5x4 - gx3v7 - - - - ne0np4CONUS.ne30x8 - ne0np4CONUS.ne30x8 - ne0np4CONUS.ne30x8 - tx0.1v2 - - - - ne0np4.ARCTIC.ne30x4 - ne0np4.ARCTIC.ne30x4 - ne0np4.ARCTIC.ne30x4 - tx0.1v2 - - - - ne0np4.ARCTICGRIS.ne30x8 - ne0np4.ARCTICGRIS.ne30x8 - ne0np4.ARCTICGRIS.ne30x8 - tx0.1v2 - - - - - - mpasa480 - mpasa480 - mpasa480 - gx1v7 - - - - mpasa120 - mpasa120 - mpasa120 - gx1v7 - - - - mpasa60 - mpasa60 - mpasa60 - gx1v7 - - - - mpasa30 - mpasa30 - mpasa30 - gx1v7 - - - - mpasa15 - mpasa15 - mpasa15 - gx1v7 - - - - mpasa12 - mpasa12 - mpasa12 - gx1v7 - - - - mpasa15-3 - mpasa15-3 - mpasa15-3 - gx1v7 - - - - - - T31 - T31 - gx3v7 - gx3v7 - - - - 4x5 - 4x5 - gx3v7 - gx3v7 - - - - 1.9x2.5 - 1.9x2.5 - gx1v6 - gx1v6 - - - - 1.9x2.5 - 1.9x2.5 - gx1v7 - gx1v7 - - - - ne30np4 - ne30np4 - gx1v6 - gx1v6 - - - - ne30np4 - ne30np4 - gx1v7 - gx1v7 - - - - C24 - C24 - C24 - gx1v7 - - - - C48 - C48 - C48 - gx1v7 - - - - C96 - C96 - C96 - gris4 - gx1v7 - - - - C96 - C96 - C96 - tx0.66v1 - - - - C96 - C96 - tx0.66v1 - tx0.66v1 - - - - C96 - C96 - tx0.25v1 - tx0.25v1 - - - - C192 - C192 - C192 - gx1v7 - - - - - C384 - C384 - C384 - gx1v7 - - - - C384 - C384 - tx0.25v1 - tx0.25v1 - - - - - ww3a - - - - - - - - - - - - - 0 0 - unset - null is no grid: - - - - - - - - - - 720 360 - $DIN_LOC_ROOT/share/domains/domain.clm/domain.lnd.360x720_cruncep.100429.nc - Exact half-degree CRUNCEP datm forcing grid with CRUNCEP land-mask -- only valid for DATM/CLM compset - - - - 5 5 - $DIN_LOC_ROOT/share/domains/domain.clm/domain.lnd.5x5pt-amazon_navy.090715.nc - $DIN_LOC_ROOT/share/meshes/5x5pt-amazon_navy_ESMFmesh_cd5_c20210107.nc - 5x5 Amazon regional case -- only valid for DATM/CLM compset - - - - 464 224 - - $DIN_LOC_ROOT/share/domains/domain.clm/domain.lnd.0.125nldas2_0.125nldas2.190410.nc - $DIN_LOC_ROOT/share/domains/domain.clm/domain.ocn.0.125nldas2.190410.nc - $DIN_LOC_ROOT/share/meshes/0.125nldas2_ESMFmesh_cd5_241220.nc - Regional NLDAS-2 grid over the U.S. 
(0.125 degree resolution; 25-53N, 235-293E) - - - - 1152 768 - domain.lnd.fv0.23x0.31_gx1v6.100517.nc - domain.ocn.0.23x0.31_gx1v6_101108.nc - domain.lnd.fv0.23x0.31_tn1v3.160414.nc - domain.ocn.fv0.23x0.31_tn1v3.160414.nc - domain.lnd.fv0.23x0.31_tn0.25v3.160721.nc - domain.ocn.fv0.23x0.31_tn0.25v3.160721.nc - 0.23x0.31 is FV 1/4-deg grid: - - - - 576 384 - domain.lnd.fv0.47x0.63_gx1v6.090407.nc - domain.ocn.0.47x0.63_gx1v6_090408.nc - domain.lnd.fv0.47x0.63_gx1v7.180521.nc - domain.ocn.fv0.47x0.63_gx1v7.180521.nc - $DIN_LOC_ROOT/share/meshes/fv0.47x0.63_141008_ESMFmesh.nc - 0.47x0.63 is FV 1/2-deg grid: - - - - 288 192 - domain.lnd.fv0.9x1.25_gx1v6.090309.nc - domain.ocn.0.9x1.25_gx1v6_090403.nc - domain.lnd.fv0.9x1.25_gx1v7.151020.nc - domain.ocn.fv0.9x1.25_gx1v7.151020.nc - domain.lnd.fv0.9x1.25_tx0.66v1.190314.nc - domain.ocn.fv0.9x1.25_tx0.66v1.190314.nc - domain.lnd.fv0.9x1.25_tn1v3.160414.nc - domain.ocn.fv0.9x1.25_tn1v3.160414.nc - domain.lnd.fv0.9x1.25_tn0.25v3.160721.nc - domain.ocn.fv0.9x1.25_tn0.25v3.160721.nc - /glade/u/home/benedict/ys/datain/domain.aqua.fv0.9x1.25.nc - /glade/u/home/benedict/ys/datain/domain.aqua.fv0.9x1.25.nc - $DIN_LOC_ROOT/share/meshes/fv0.9x1.25_141008_polemod_ESMFmesh.nc - 0.9x1.25 is FV 1-deg grid: - - - - - 144 96 - domain.lnd.fv1.9x2.5_gx1v6.090206.nc - domain.ocn.1.9x2.5_gx1v6_090403.nc - domain.lnd.fv1.9x2.5_gx1v7.181205.nc - domain.ocn.fv1.9x2.5_gx1v7.181205.nc - domain.aqua.fv1.9x2.5.nc - $DIN_LOC_ROOT/share/meshes/fv1.9x2.5_141008_ESMFmesh.nc - 1.9x2.5 is FV 2-deg grid: - - - - 72 46 - domain.lnd.fv4x5_gx3v7.091218.nc - domain.ocn.4x5_gx3v7_100120.nc - $DIN_LOC_ROOT/share/meshes/fv4x5_050615_polemod_ESMFmesh.nc - 4x5 is FV 4-deg grid: - - - - 108 72 - domain.lnd.fv2.5x3.33_gx3v7.110223.nc - domain.ocn.fv2.5x3.33_gx3v7_110223.nc - 2.5x3.33 is FV 3-deg grid: - - - - 24 19 - $DIN_LOC_ROOT/share/domains/domain.clm/domain.lnd.fv10x15_USGS.110713.nc - $DIN_LOC_ROOT/share/domains/domain.clm/domain.ocn.fv10x15_USGS_070807.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.fv10x15_gx3v7.180321.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.fv10x15_gx3v7.180321.nc - $DIN_LOC_ROOT/share/meshes/10x15_nomask_c110308_ESMFmesh.nc - 10x15 is FV 10-deg grid: - For low resolution testing - - - - 1024 512 - - - domain.lnd.T341_gx1v6.111226.nc - T341 is Gaussian grid: - Backward compatible for very high resolution Spectral-dycore experiments - - - - 16 8 - $DIN_LOC_ROOT/share/domains/domain.lnd.T5_gx3v7.181009.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.T5_gx3v7.181009.nc - T5 is Gaussian grid: - - - - - 256 128 - domain.lnd.T85_gx1v4.060403.nc - domain.lnd.T85_gx1v4.060403.nc - T85 is Gaussian grid: - Backward compatible for high resolution Spectral-dycore experiments - - - - 192 94 - $DIN_LOC_ROOT/share/domains/domain.lnd.T62_gx1v7.151008.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.T62_gx1v6.090320.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.T62_gx3v7.090911.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.T62_tx0.66v1.190425.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.T62_tx1v1.090122.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.T62_tx0.1v2_090623.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.T62_tx0.1v3.170929.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.T62_oQU120.160325.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.T62_gx1v6.130409.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.T62_gx1v7.151008.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.T62_gx3v7.130409.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.T62_tx0.66v1.190425.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.T62_tn1v3.160414.nc - 
$DIN_LOC_ROOT/share/domains/domain.lnd.T62_tn0.25v3.160721.nc - $DIN_LOC_ROOT/share/meshes/T62_040121_ESMFmesh.nc - T62 is Gaussian grid: - - - - 96 48 - $DIN_LOC_ROOT/share/domains/domain.lnd.T31_gx3v7.130409.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.T31_gx3v7.130409.nc - $DIN_LOC_ROOT/share/meshes/T31_040122_ESMFmesh.nc - T31 is Gaussian grid: - - - - 128 64 - $DIN_LOC_ROOT/share/domains/domain.lnd.T42_gx1v7.180727.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.T42_gx1v7.180727.nc - $DIN_LOC_ROOT/share/meshes/T42_ESMFmesh_c20200629.nc - T42 is Gaussian grid: - - - - 1352 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne5np4_gx3v7.140810.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne5np4_gx3v7.140810.nc - ne5np4 is Spectral Elem 6-deg grid: - For ultra-low resolution spectral element grid testing - - - - 1350 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne5np4.pg2_gx3v7.200311.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne5np4.pg2_gx3v7.200311.nc - ne5np4 is Spectral Elem 6-deg grid with a 2x2 FVM physics grid: - EXPERIMENTAL FVM physics grid - - - - 1350 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne5np4.pg3_gx3v7.170605.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne5np4.pg3_gx3v7.170605.nc - ne5np4 is Spectral Elem 6-deg grid with a 3x3 FVM physics grid: - EXPERIMENTAL FVM physics grid - - - - 1350 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne5np4.pg4_gx3v7.200319.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne5np4.pg4_gx3v7.200319.nc - ne5np4 is Spectral Elem 6-deg grid with a 4x4 FVM physics grid: - EXPERIMENTAL FVM physics grid - - - - 13826 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne16np4_gx1v7.171018.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne16np4_gx1v7.171018.nc - $DIN_LOC_ROOT/share/meshes/ne16np4_scrip_171002_ESMFmesh.nc - ne16np4 is Spectral Elem 2-deg grid: - For low resolution spectral element grid testing - - - - 13824 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne16pg3_gx1v7.171003.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne16pg3_gx1v7.171003.nc - $DIN_LOC_ROOT/share/meshes/ne16pg3_ESMFmesh_cdf5_c20211018.nc - ne16np4.pg3 is a Spectral Elem 2-deg grid with a 3x3 FVM physics grid: - EXPERIMENTAL FVM physics grid - - - - 48602 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne30np4_gx1v6.110905.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne30np4_gx1v6_110217.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.ne30_gx1v7.171003.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne30_gx1v7.171003.nc - $DIN_LOC_ROOT/share/meshes/ne30np4_ESMFmesh_cdf5_c20211018.nc - ne30np4 is Spectral Elem 1-deg grid: - - - - 21600 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne30np4.pg2_gx1v7.200626.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne30np4.pg2_gx1v7.200626.nc - $DIN_LOC_ROOT/share/meshes/ne30pg2_ESMFmesh_cdf5_c20211018.nc - ne30np4.pg2 is a Spectral Elem 1-deg grid with a 2x2 FVM physics grid: - EXPERIMENTAL FVM physics grid - - - - 48600 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne30np4.pg3_gx1v7.170605.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne30np4.pg3_gx1v7_170605.nc - $DIN_LOC_ROOT/share/meshes/ne30pg3_ESMFmesh_cdf5_c20211018.nc - ne30np4.pg3 is a Spectral Elem ne30 grid with a 3x3 FVM physics grid: - EXPERIMENTAL FVM physics grid - - - - 86400 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne30np4.pg4_gx1v7.170628.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne30np4.pg4_gx1v7.170628.nc - $DIN_LOC_ROOT/share/meshes/ne30pg4_ESMFmesh_cdf5_c20211018.nc - ne30np4.pg4 is a Spectral Elem 1-deg grid with a 4x4 FVM physics grid: - EXPERIMENTAL FVM physics grid - - - - 194402 1 - 
$DIN_LOC_ROOT/share/domains/domain.lnd.ne60np4_gx1v6.120406.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne60np4_gx1v6.121113.nc - $DIN_LOC_ROOT/share/meshes/ne60np4_ESMFmesh_cdf5_c20211018.nc - ne60np4 is Spectral Elem 1/2-deg grid: - - - - 86400 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne60np4.pg2_gx1v7.170628.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne60np4.pg2_gx1v7.170628.nc - $DIN_LOC_ROOT/share/meshes/ne60pg2_ESMFmesh_cdf5_c20211018.nc - ne60np4.pg2 is a Spectral Elem 0.5-deg grid with a 2x2 FVM physics grid: - EXPERIMENTAL FVM physics grid - - - - 194400 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne60np4.pg3_gx1v7.170628.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne60np4.pg3_gx1v7.170628.nc - $DIN_LOC_ROOT/share/meshes/ne60pg3_ESMFmesh_cdf5_c20211018.nc - ne60np4.pg3 is a Spectral Elem 0.5-deg grid with a 3x3 FVM physics grid: - EXPERIMENTAL FVM physics grid - - - - 345600 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne60np4.pg4_gx1v7.170628.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne60np4.pg4_gx1v7.170628.nc - $DIN_LOC_ROOT/share/meshes/ne60pg4_ESMFmesh_cdf5_c20211018.nc - ne60np4.pg4 is a Spectral Elem 0.5-deg grid with a 4x4 FVM physics grid: - EXPERIMENTAL FVM physics grid - - - - 777602 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne120np4_gx1v6.110502.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne120np4_gx1v6.121113.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.ne120np4_gx1v7.190718.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne120np4_gx1v7.190718.nc - $DIN_LOC_ROOT/share/meshes/ne120np4_ESMFmesh_cdf5_c20211018.nc - ne120np4 is Spectral Elem 1/4-deg grid: - - - - 345600 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne120np4.pg2_gx1v7.170629.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne120np4.pg2_gx1v7.170629.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.ne120np4.pg2_tx0.1v2.200626.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne120np4.pg2_tx0.1v2.200626.nc - $DIN_LOC_ROOT/share/meshes/ne120pg2_ESMFmesh_cdf5_c20211018.nc - ne120np4.pg2 is a Spectral Elem 0.25-deg grid with a 2x2 FVM physics grid: - EXPERIMENTAL FVM physics grid - - - - 777600 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne120np4.pg3_gx1v7.190718.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne120np4.pg3_gx1v7.190718.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.ne120np4.pg3_tx0.1v3.190820.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne120np4.pg3_tx0.1v3.190820.nc - $DIN_LOC_ROOT/share/meshes/ne120pg3_ESMFmesh_cdf5_c20211018.nc - ne120np4.pg3 is a Spectral Elem 0.25-deg grid with a 3x3 FVM physics grid: - EXPERIMENTAL FVM physics grid - - - - 1382400 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne120np4.pg4_gx1v7.170629.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne120np4.pg4_gx1v7.170629.nc - $DIN_LOC_ROOT/share/meshes/ne120pg4_ESMFmesh_cdf5_c20211018.nc - ne120np4.pg4 is a Spectral Elem 0.25-deg grid with a 4x4 FVM physics grid: - EXPERIMENTAL FVM physics grid - - - - 3110402 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne240np4_gx1v6.111226.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne240np4_gx1v6.111226.nc - $DIN_LOC_ROOT/share/meshes/ne240np4_ESMFmesh_cdf5_c20211018.nc - ne240np4 is Spectral Elem 1/8-deg grid: - Experimental for very high resolution experiments - - - 1382400 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne240np4.pg2_gx1v7.170629.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne240np4.pg2_gx1v7.170629.nc - $DIN_LOC_ROOT/share/meshes/ne240pg2_ESMFmesh_cdf5_c20211018.nc - ne240np4.pg2 is a Spectral Elem 0.125-deg grid with a 2x2 FVM physics grid: - EXPERIMENTAL FVM physics grid - - - 3110400 1 - 
$DIN_LOC_ROOT/share/domains/domain.lnd.ne240np4.pg3_gx1v7.170629.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne240np4.pg3_gx1v7.170629.nc - ne240np4.pg3 is a Spectral Elem 0.125-deg grid with a 3x3 FVM physics grid: - EXPERIMENTAL FVM physics grid - - - - 3863 1 - ne0np4TESTONLY.ne5x4 is a low-resolution refined SE grid for testing: - Test support only - - - - 174098 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne0CONUSne30x8_gx1v7.190322.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne0CONUSne30x8_gx1v7.190322.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.ne0CONUSne30x8_tx0.1v2.171010.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne0CONUSne30x8_tx0.1v2.171010.nc - $DIN_LOC_ROOT/share/meshes/ne0CONUSne30x8_ESMFmesh_c20200727.nc - ne0np4CONUS.ne30x8 is a Spectral Elem 1-deg grid with a 1/8 deg refined region over the continental United States: - Test support only - - - - 117398 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne0np4.ARCTIC.ne30x4_tx0.1v2.200626.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne0np4.ARCTIC.ne30x4_tx0.1v2.200626.nc - $DIN_LOC_ROOT/share/meshes/ne0ARCTICne30x4_ESMFmesh_c20200727.nc - ne0np4.ARCTIC.ne30x4 is a Spectral Elem 1-deg grid with a 1/4 deg refined region over Arctic: - Test support only - - - - 152390 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.ne0np4.ARCTICGRIS.ne30x8_tx0.1v2.200626.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.ne0np4.ARCTICGRIS.ne30x8_tx0.1v2.200626.nc - $DIN_LOC_ROOT/share/meshes/ne0ARCTICGRISne30x8_ESMFmesh_c20200730.nc - ne0np4.ARCTICGRIS.ne30x8 is a Spectral Elem 1-deg grid with a 1/8 deg refined region over Greenland: - Test support only - - - - 2562 1 - MPAS-A 480-km quasi-uniform mesh: - - - 40962 1 - $DIN_LOC_ROOT/share/meshes/mpasa120z32_ESMFmesh_cdf5_c20210120.nc - MPAS-A 120-km quasi-uniform mesh: - - - 163842 1 - MPAS-A 60-km quasi-uniform mesh: - - - 655362 1 - MPAS-A 30-km quasi-uniform mesh: - - - 2621442 1 - MPAS-A 15-km quasi-uniform mesh: - - - 4096002 1 - MPAS-A 12-km quasi-uniform mesh: - - - 6488066 1 - MPAS-A 15-3-km variable-uniform mesh: - - - - 640 320 - $DIN_LOC_ROOT/share/domains/domain.lnd.TL319_gx1v7.170705.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.TL319_gx1v7.170705.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.TL319_tx0.66v1.190425.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.TL319_tx0.66v1.190425.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.TL319_tx0.1v2.161014.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.tx0.1v2.161014.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.TL319_tx0.1v3.170730.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.tx0.1v3.170730.nc - $DIN_LOC_ROOT/share/meshes/TL319_151007_ESMFmesh.nc - TL319 grid for JRA55 - - - - 1440 721 - $DIN_LOC_ROOT/share/domains/domain.lnd.TL639_gx1v7.200619.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.TL639_gx1v7.200619.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.TL639_tx0.66v1.200619.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.TL639_tx0.66v1.200619.nc - $DIN_LOC_ROOT/share/meshes/TL639_200618_ESMFmesh.nc - TL639 grid for ERA5 - - - - - - 3456 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.C24_gx1v6.181018.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.C24_gx1v6.181018.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.C24_gx1v7.181018.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.C24_gx1v7.181018.nc - $DIN_LOC_ROOT/share/meshes/C24_181018_ESMFmesh.nc - C24 is a fvcubed xx-deg grid: - Experimental for fv3 dycore - - - - 13824 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.C48_gx1v6.181018.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.C48_gx1v6.181018.nc - 
[deleted grid-definition lines continue; the XML markup was lost in extraction. The remainder of the file defined <domain> entries for the experimental fv3 cubed-sphere grids (C48, C96, C192, C384), the displaced-Greenland-pole ocean grids (gx1v6, gx1v7, gx3v7), the tripole ocean grids (tx0.66v1, tx0.25v1, tx0.1v2, tx0.1v3, tx1v1), the MPAS oQU120 grid, the NEMO ORCA grids (tn1v3, tn0.25v3), the river-routing grids (rx1, r05, r05_amazon, HDMA, r01, and the JRA 0.25-deg runoff grid), the land-ice grids (gland 20-km, Greenland 4-km, Antarctica 8-km), the ww3a wave grid, and the single-point DATM/CLM grids (01col, CLM_USRDAT, and the 1x1 numaIA, brazil, smallvilleIA, camdenNJ, mexicocityMEX, vancouverCAN, and urbanc_alpha test points with their lat/lon values), followed by the <gridmap> name declarations (ATM2OCN_FMAPNAME through WAV2OCN_SMAPNAME).]
diff --git a/CIME/data/config/cesm/config_grids_common.xml b/CIME/data/config/cesm/config_grids_common.xml
deleted file mode 100644
index bc4ed0412d9..00000000000
--- a/CIME/data/config/cesm/config_grids_common.xml
+++ /dev/null
@@ -1,168 +0,0 @@
[deleted; markup lost. The file held the driver-independent <gridmap> entries: runoff-to-ocean maps for rx1, r05, r01, and JRA025 onto gx3v7, gx1v6, gx1v7, tx1v1, tx0.66v1, tx0.1v2, tx0.1v3, and oQU120; land-ice-to-ocean maps for gland4km, gland20km, and ais8 (the ais8 maps marked TOBECREATED); and ww3a wave splice maps for gx3v7, gx1v6, and gx1v7.]
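For reference, a representative pair of the deleted entries, rebuilt from values visible above. The tag and attribute names follow CIME's grids schema, but this block is a sketch, not a verbatim excerpt, and the pairing of map names to files is inferred:

    <domain name="gx1v7">
      <nx>320</nx> <ny>384</ny>
      <file grid="ocnice">$DIN_LOC_ROOT/share/domains/domain.ocn.gx1v7.151008.nc</file>
      <mesh driver="nuopc">$DIN_LOC_ROOT/share/meshes/gx1v7_151008_ESMFmesh.nc</mesh>
      <desc>gx1v7 is displaced Greenland pole 1-deg grid with Caspian as a land feature:</desc>
    </domain>

    <gridmap rof_grid="r05" ocn_grid="gx1v7">
      <map name="ROF2OCN_LIQ_RMAPNAME">cpl/gridmaps/r05/map_r05_to_gx1v7_nn_open_ocean_nnsm_e1000r300_marginal_sea_170413.nc</map>
      <map name="ROF2OCN_ICE_RMAPNAME">cpl/gridmaps/r05/map_r05_to_gx1v7_nnsm_e1000r300_170413.nc</map>
    </gridmap>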
diff --git a/CIME/data/config/cesm/config_grids_mct.xml b/CIME/data/config/cesm/config_grids_mct.xml
deleted file mode 100644
index 6fcce8d1974..00000000000
--- a/CIME/data/config/cesm/config_grids_mct.xml
+++ /dev/null
@@ -1,964 +0,0 @@
[deleted; markup lost. The file held the MCT-only <gridmap> entries: atm/lnd-to-ocn maps (aave/blin/patc) pairing C24 through C384, fv0.23x0.31 through fv4x5, ne16np4 through ne240np4 (including the pg2/pg3 variants and the CONUS, ARCTIC, and ARCTICGRIS refined meshes), T31/T62/T85/T341, and TL319 with gx1v6, gx1v7, gx3v7, tx0.66v1, tx0.1v2, tx0.1v3, tx1v1, oQU120, and mp120v1; atm-to-wav maps onto ww3a; lnd-to-rof maps onto 0.1x0.1, 0.5x0.5, and r05; rof-to-ocn maps (r05 onto gx3v7, g16, gx1v7); and atm/lnd-to-glc maps onto gland4km and gland20km.]
diff --git a/CIME/data/config/cesm/config_grids_nuopc.xml b/CIME/data/config/cesm/config_grids_nuopc.xml
deleted file mode 100644
index db430933e2a..00000000000
--- a/CIME/data/config/cesm/config_grids_nuopc.xml
+++ /dev/null
@@ -1,4 +0,0 @@
[deleted: four markup-only lines; the file defined no grid maps.]
diff --git a/CIME/data/config/cesm/config_inputdata.xml b/CIME/data/config/cesm/config_inputdata.xml
deleted file mode 100644
index 2cb2d6781ba..00000000000
--- a/CIME/data/config/cesm/config_inputdata.xml
+++ /dev/null
@@ -1,49 +0,0 @@
[deleted; markup lost. The file held one <server> entry per inputdata download method:
  gftp   ftp://gridanon.cgd.ucar.edu:2811/cesm/inputdata/   checksum ../inputdata_checksum.dat   (grid ftp requires the globus-url-copy tool on the client side)
  wget   ftp://ftp.cgd.ucar.edu/cesm/inputdata/   user anonymous, password user@example.edu, checksum ../inputdata_checksum.dat
  ftp    ftp.cgd.ucar.edu/cesm/inputdata   user anonymous, password user@example.edu, checksum ../inputdata_checksum.dat   (ftp requires the python package ftplib)
  svn    https://svn-ccsm-inputdata.cgd.ucar.edu/trunk/inputdata
  wget   https://s3.data.neonscience.org/neon-ncar/NEON/   inventory ../listing.csv   (NEON Tower data for datm)]
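A representative reconstruction of the ftp entry, with tag names following CIME's config_inputdata schema; this is an indicative sketch, not the verbatim deleted lines:

    <server>
      <comment>ftp requires the python package ftplib</comment>
      <protocol>ftp</protocol>
      <address>ftp.cgd.ucar.edu/cesm/inputdata</address>
      <user>anonymous</user>
      <password>user@example.edu</password>
      <checksum>../inputdata_checksum.dat</checksum>
    </server>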
diff --git a/CIME/data/config/cesm/machines/Depends.babbageKnc b/CIME/data/config/cesm/machines/Depends.babbageKnc
deleted file mode 100644
index 130ade09928..00000000000
--- a/CIME/data/config/cesm/machines/Depends.babbageKnc
+++ /dev/null
@@ -1,6 +0,0 @@
-
-shr_ncread_mod.o: shr_ncread_mod.F90
-	$(FC) -c $(FPPFLAGS) $(INCLDIR) $(INCS) $(FREEFLAGS) $(FFLAGS) $(FFLAGS_NOOPT) $<
-
-quadrature_mod.o: quadrature_mod.F90
-	$(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -DIS_ACCELERATOR $<
diff --git a/CIME/data/config/cesm/machines/Depends.bluewaters b/CIME/data/config/cesm/machines/Depends.bluewaters
deleted file mode 100755
index e113298e08a..00000000000
--- a/CIME/data/config/cesm/machines/Depends.bluewaters
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-ifeq ($(strip $(COMPILER)),pgi)
-progseasalts_intr.o: progseasalts_intr.F90
-	$(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -Mnovect $<
-endif
diff --git a/CIME/data/config/cesm/machines/Depends.corip1 b/CIME/data/config/cesm/machines/Depends.corip1
deleted file mode 100644
index 81be8f3f16e..00000000000
--- a/CIME/data/config/cesm/machines/Depends.corip1
+++ /dev/null
@@ -1,5 +0,0 @@
-# Workaround for ICE in intel/2016.0.109
-ifeq (CPRINTEL,$(findstring CPRINTEL, $(FFLAGS)))
-RtmMod.o: RtmMod.F90
-	$(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) $(CONTIGUOUS_FLAG) -O1 $<
-endif
diff --git a/CIME/data/config/cesm/machines/Depends.cray b/CIME/data/config/cesm/machines/Depends.cray
deleted file mode 100644
index bbe5a712d97..00000000000
--- a/CIME/data/config/cesm/machines/Depends.cray
+++ /dev/null
@@ -1,6 +0,0 @@
-NOOPTOBJS= ice_boundary.o dyn_comp.o unicon.o
-
-$(NOOPTOBJS): %.o: %.F90
-	$(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FFLAGS_NOOPT) $(FREEFLAGS) $<
-
-
diff --git a/CIME/data/config/cesm/machines/Depends.gnu b/CIME/data/config/cesm/machines/Depends.gnu
deleted file mode 100644
index 2d53247217e..00000000000
--- a/CIME/data/config/cesm/machines/Depends.gnu
+++ /dev/null
@@ -1,2 +0,0 @@
-geopk.o:geopk.F90
-	$(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -fcray-pointer $<
diff --git a/CIME/data/config/cesm/machines/Depends.intel b/CIME/data/config/cesm/machines/Depends.intel
deleted file mode 100644
index d412aad0bc6..00000000000
--- a/CIME/data/config/cesm/machines/Depends.intel
+++ /dev/null
@@ -1,51 +0,0 @@
-#
-PERFOBJS=\
-prim_advection_mod.o \
-edge_mod.o \
-derivative_mod.o \
-bndry_mod.o \
-prim_advance_mod.o
-
-# CLM's SatellitePhenologyMod is compiled incorrectly with intel 15.0.0 at -O2
-REDUCED_OPT_OBJS=\
-SatellitePhenologyMod.o
-
-# shr_wv_sat_mod does not need to have better than ~0.1% precision, and benefits
-# enormously from a lower precision in the vector functions.
-REDUCED_PRECISION_OBJS=\
-shr_wv_sat_mod.o
-
-SHR_RANDNUM_FORT_OBJS=\
-kissvec_mod.o \
-mersennetwister_mod.o \
-dSFMT_interface.o \
-shr_RandNum_mod.o
-
-SHR_RANDNUM_C_OBJS=\
-dSFMT.o \
-dSFMT_utils.o \
-kissvec.o
-
-PUMAS_MG_OBJS=\
-micro_mg1_0.o \
-micro_mg3_0.o \
-micro_pumas_data.o \
-micro_pumas_utils.o \
-wv_sat_methods.o
-
-
-ifeq ($(DEBUG),FALSE)
-  $(PERFOBJS): %.o: %.F90
-	$(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O3 -no-prec-div $<
-  $(REDUCED_OPT_OBJS): %.o: %.F90
-	$(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O1 $<
-  $(REDUCED_PRECISION_OBJS): %.o: %.F90
-	$(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -fimf-precision=low -fp-model fast $<
-  $(SHR_RANDNUM_FORT_OBJS): %.o: %.F90
-	$(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O3 -fp-model fast -no-prec-div -no-prec-sqrt -qoverride-limits $<
-  $(SHR_RANDNUM_C_OBJS): %.o: %.c
-	$(CC) -c $(INCLDIR) $(INCS) $(CFLAGS) -O3 -fp-model fast $<
-  $(PUMAS_MG_OBJS): %.o: %.F90
-	$(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O3 -xCORE-AVX2 -no-fma -ftz -no-prec-sqrt -qoverride-limits -no-inline-max-total-size -inline-factor=200 -qopt-report=5 $<
-
-endif
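Each of these Depends.<compiler> fragments uses the same mechanism: list the objects that need special handling, then override the generic compile rule for just those objects with a static pattern rule, which as an explicit rule takes precedence over the generic %.o: %.F90 pattern rule in CIME's Makefile. A minimal sketch of the mechanism, with hypothetical object names not taken from any real Depends file:

    # Recompile only the objects in PERFOBJS with -O3; everything else
    # keeps the default $(FFLAGS) from the generic rule.
    PERFOBJS = foo_mod.o bar_mod.o

    ifeq ($(DEBUG),FALSE)
      $(PERFOBJS): %.o: %.F90
    	$(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O3 $<
    endif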
diff --git a/CIME/data/config/cesm/machines/Depends.intel14 b/CIME/data/config/cesm/machines/Depends.intel14
deleted file mode 100644
index 32e4747d7a3..00000000000
--- a/CIME/data/config/cesm/machines/Depends.intel14
+++ /dev/null
@@ -1,28 +0,0 @@
-#
-# 12/03/2012 the intel compiler on yellowstone 12.1.5 20120612
-# does not converge the pH computation without the -CU flag
-# root cause has not been determined. JPE
-# this problem is resolved in intel 13.0.1
-#ecosys_mod.o: ecosys_mod.F90
-#	$(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -CU $<
-
-
-
-PERFOBJS=\
-prim_advection_mod_base.o \
-vertremap_mod_base.o \
-edge_mod_base.o \
-derivative_mod_base.o \
-bndry_mod_base.o \
-prim_advance_mod.o \
-uwshcu.o \
-wetdep.o
-
-ifeq ($(DEBUG),FALSE)
-  $(PERFOBJS): %.o: %.F90
-	$(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O3 -no-prec-div $<
-  $(REDUCED_OPT_OBJS): %.o: %.F90
-	$(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O1 $<
-  $(REDUCED_PRECISION_OBJS): %.o: %.F90
-	$(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -fimf-precision=low -fp-model fast $<
-endif
diff --git a/CIME/data/config/cesm/machines/Depends.intelmic b/CIME/data/config/cesm/machines/Depends.intelmic
deleted file mode 100644
index 5b90a1de374..00000000000
--- a/CIME/data/config/cesm/machines/Depends.intelmic
+++ /dev/null
@@ -1,6 +0,0 @@
-
-#derivative_mod_base.o: derivative_mod_base.F90
-#	$(FC) -c $(FPPFLAGS) $(INCLDIR) $(INCS) $(FREEFLAGS) $(FFLAGS_NOOPT) -O1 $<
-
-shr_ncread_mod.o: shr_ncread_mod.F90
-	$(FC) -c $(FPPFLAGS) $(INCLDIR) $(INCS) $(FREEFLAGS) $(FFLAGS) $(FFLAGS_NOOPT) $<
diff --git a/CIME/data/config/cesm/machines/Depends.intelmic14 b/CIME/data/config/cesm/machines/Depends.intelmic14
deleted file mode 100644
index 5b90a1de374..00000000000
--- a/CIME/data/config/cesm/machines/Depends.intelmic14
+++ /dev/null
@@ -1,6 +0,0 @@
-
-#derivative_mod_base.o: derivative_mod_base.F90
-#	$(FC) -c $(FPPFLAGS) $(INCLDIR) $(INCS) $(FREEFLAGS) $(FFLAGS_NOOPT) -O1 $<
-
-shr_ncread_mod.o: shr_ncread_mod.F90
-	$(FC) -c $(FPPFLAGS) $(INCLDIR) $(INCS) $(FREEFLAGS) $(FFLAGS) $(FFLAGS_NOOPT) $<
diff --git a/CIME/data/config/cesm/machines/Depends.mira b/CIME/data/config/cesm/machines/Depends.mira
deleted file mode 100644
index c786f6248a4..00000000000
--- a/CIME/data/config/cesm/machines/Depends.mira
+++ /dev/null
@@ -1,22 +0,0 @@
-# These routines have problems with stacksize when omp is invoked add -qsmallstack to resolve
-SSOBJS = shr_reprosum_mod.o mo_sethet.o mo_drydep.o
-
-$(SSOBJS): %.o: %.F90
-	$(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -qsmallstack $<
-
-time_management.o: time_management.F90
-	$(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -qsmp=noauto:noomp $<
-
-# These routines benefit from -qnostrict without violating the bfb test
-PERFOBJS=\
-prim_advection_mod.o \
-edge_mod.o \
-derivative_mod.o \
-bndry_mod.o \
-prim_advance_mod.o \
-uwshcu.o \
-wetdep.o
-ifeq ($(DEBUG),FALSE)
-  $(PERFOBJS): %.o: %.F90
-	$(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O3 -qnostrict $<
-endif
diff --git a/CIME/data/config/cesm/machines/Depends.nag b/CIME/data/config/cesm/machines/Depends.nag
deleted file mode 100644
index ea20b4f9341..00000000000
--- a/CIME/data/config/cesm/machines/Depends.nag
+++ /dev/null
@@ -1,4 +0,0 @@
-wrap_mpi.o: wrap_mpi.F90
-	$(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -mismatch_all $(FFLAGS_NOOPT) $(FREEFLAGS) $<
-fft99.o: fft99.F90
-	$(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) -mismatch_all $(FFLAGS_NOOPT) $(FREEFLAGS) $<
diff --git a/CIME/data/config/cesm/machines/Depends.nvhpc-gpu b/CIME/data/config/cesm/machines/Depends.nvhpc-gpu
deleted file mode 100644
index ddf1a8ee266..00000000000
--- a/CIME/data/config/cesm/machines/Depends.nvhpc-gpu
+++ /dev/null
@@ -1,16 +0,0 @@
-#
-PUMAS_MG_OBJS=\
-micro_mg1_0.o \
-micro_mg3_0.o \
-micro_pumas_data.o \
-micro_pumas_utils.o \
-micro_mg_cam.o \
-wv_sat_methods.o \
-wv_saturation.o \
-macrop_driver.o \
-shr_spfn_mod.o
-
-ifeq ($(DEBUG),FALSE)
-  $(PUMAS_MG_OBJS): %.o: %.F90
-	$(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -acc -ta=tesla:cc70,lineinfo,nofma -Minfo=accel $<
-endif
diff --git a/CIME/data/config/cesm/machines/Depends.pgi-gpu b/CIME/data/config/cesm/machines/Depends.pgi-gpu
deleted file mode 100644
index ddf1a8ee266..00000000000
--- a/CIME/data/config/cesm/machines/Depends.pgi-gpu
+++ /dev/null
@@ -1,16 +0,0 @@
-#
-PUMAS_MG_OBJS=\
-micro_mg1_0.o \
-micro_mg3_0.o \
-micro_pumas_data.o \
-micro_pumas_utils.o \
-micro_mg_cam.o \
-wv_sat_methods.o \
-wv_saturation.o \
-macrop_driver.o \
-shr_spfn_mod.o
-
-ifeq ($(DEBUG),FALSE)
-  $(PUMAS_MG_OBJS): %.o: %.F90
-	$(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -acc -ta=tesla:cc70,lineinfo,nofma -Minfo=accel $<
-endif
diff --git a/CIME/data/config/cesm/machines/README b/CIME/data/config/cesm/machines/README
deleted file mode 100644
index 3bafbe274ee..00000000000
--- a/CIME/data/config/cesm/machines/README
+++ /dev/null
@@ -1,15 +0,0 @@
-config_pes_pop.xml
-current assumptions:
-  prognostic: pop, cice
-  data: datm, drof
-  stub: slnd, sglc
-DATM.+XLND.+CICE.+POP.+DROF.+SGLC
-The current attributes that are supported are
-  lcompset_matchN= (where N can be any number)
-  pecount=[S,M,L,XL]
-
-Please refer to the documentation in the config_machines.xml and config_compilers.xml files.
-
-
-
-
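A schematic of an entry using the attributes the deleted README names. The element layout here is an assumption for illustration; only the attribute names and the compset regular expression come from the README:

    <pes pecount="M" lcompset_match1="DATM.+XLND.+CICE.+POP.+DROF.+SGLC">
      ...
    </pes>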
diff --git a/CIME/data/config/cesm/machines/cmake_macros/CMakeLists.txt b/CIME/data/config/cesm/machines/cmake_macros/CMakeLists.txt
deleted file mode 100644
index c2ebcb51be4..00000000000
--- a/CIME/data/config/cesm/machines/cmake_macros/CMakeLists.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-cmake_policy(SET CMP0057 NEW)
-cmake_minimum_required(VERSION 3.5)
-project(cime LANGUAGES C Fortran)
-include(../Macros.cmake)
diff --git a/CIME/data/config/cesm/machines/cmake_macros/CNL.cmake b/CIME/data/config/cesm/machines/cmake_macros/CNL.cmake
deleted file mode 100644
index 601924ebaad..00000000000
--- a/CIME/data/config/cesm/machines/cmake_macros/CNL.cmake
+++ /dev/null
@@ -1,14 +0,0 @@
-set(CMAKE_OPTS "-DCMAKE_SYSTEM_NAME=Catamount")
-string(APPEND CPPDEFS " -DLINUX")
-if (MODEL STREQUAL gptl)
-  string(APPEND CPPDEFS " -DHAVE_NANOTIME -DBIT64 -DHAVE_VPRINTF -DHAVE_BACKTRACE -DHAVE_SLASHPROC -DHAVE_COMM_F2C -DHAVE_TIMES -DHAVE_GETTIMEOFDAY")
-endif()
-set(MPICC "cc")
-set(MPICXX "CC")
-set(MPIFC "ftn")
-set(NETCDF_PATH "$ENV{NETCDF_DIR}")
-set(PIO_FILESYSTEM_HINTS "lustre")
-set(PNETCDF_PATH "$ENV{PARALLEL_NETCDF_DIR}")
-set(SCC "cc")
-set(SCXX "CC")
-set(SFC "ftn")
diff --git a/CIME/data/config/cesm/machines/cmake_macros/Darwin.cmake b/CIME/data/config/cesm/machines/cmake_macros/Darwin.cmake
deleted file mode 100644
index 89fd4bdc078..00000000000
--- a/CIME/data/config/cesm/machines/cmake_macros/Darwin.cmake
+++ /dev/null
@@ -1 +0,0 @@
-string(APPEND CPPDEFS " -DSYSDARWIN")
diff --git a/CIME/data/config/cesm/machines/cmake_macros/Macros.cmake b/CIME/data/config/cesm/machines/cmake_macros/Macros.cmake
deleted file mode 100644
index b089f58058a..00000000000
--- a/CIME/data/config/cesm/machines/cmake_macros/Macros.cmake
+++ /dev/null
@@ -1,37 +0,0 @@
-#
-# Use this file to include the relevant macros based on
-# machine/compiler settings. This file gets copied to CASEROOT
-# and that's the one that gets included by the build system. Feel free
-# to modify this file in the CASEROOT.
-#
-set(MACROS_DIR ${CASEROOT}/cmake_macros)
-
-set(UNIVERSAL_MACRO ${MACROS_DIR}/universal.cmake)
-set(COMPILER_MACRO ${MACROS_DIR}/${COMPILER}.cmake)
-set(OS_MACRO ${MACROS_DIR}/${OS}.cmake)
-set(MACHINE_MACRO ${MACROS_DIR}/${MACH}.cmake)
-set(COMPILER_OS_MACRO ${MACROS_DIR}/${COMPILER}_${OS}.cmake)
-set(COMPILER_MACHINE_MACRO ${MACROS_DIR}/${COMPILER}_${MACH}.cmake)
-
-if (CONVERT_TO_MAKE)
-  get_cmake_property(VARS_BEFORE_BUILD_INTERNAL_IGNORE VARIABLES)
-endif()
-
-# Include order defines precedence
-foreach (MACRO_FILE ${UNIVERSAL_MACRO} ${COMPILER_MACRO} ${OS_MACRO} ${MACHINE_MACRO} ${COMPILER_OS_MACRO} ${COMPILER_MACHINE_MACRO})
-  if (EXISTS ${MACRO_FILE})
-    include(${MACRO_FILE})
-  else()
-    message("No macro file found: ${MACRO_FILE}")
-  endif()
-endforeach()
-
-if (CONVERT_TO_MAKE)
-  get_cmake_property(VARS_AFTER VARIABLES)
-
-  foreach (VAR_AFTER IN LISTS VARS_AFTER)
-    if (NOT VAR_AFTER IN_LIST VARS_BEFORE_BUILD_INTERNAL_IGNORE)
-      message("CIME_SET_MAKEFILE_VAR ${VAR_AFTER} := ${${VAR_AFTER}}")
-    endif()
-  endforeach()
-endif()
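Because Macros.cmake includes the macro files in the fixed order universal, compiler, OS, machine, compiler_OS, compiler_machine, a later (more specific) file can extend or override anything an earlier one set. An illustrative pairing built from values in the deleted files shown in this diff:

    # gnu.cmake (compiler level) sets the generic GNU flags first ...
    string(APPEND FFLAGS " -fconvert=big-endian -ffree-line-length-none")
    # ... cheyenne.cmake (machine level) points at the machine's libraries ...
    set(NETCDF_PATH "$ENV{NETCDF}")
    # ... and gnu_cheyenne.cmake (compiler_machine level), included last,
    # layers compiler-on-machine specifics on top:
    string(APPEND SLIBS " -ldl")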
diff --git a/CIME/data/config/cesm/machines/cmake_macros/arm.cmake b/CIME/data/config/cesm/machines/cmake_macros/arm.cmake
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/CIME/data/config/cesm/machines/cmake_macros/armgcc.cmake b/CIME/data/config/cesm/machines/cmake_macros/armgcc.cmake
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/CIME/data/config/cesm/machines/cmake_macros/athena.cmake b/CIME/data/config/cesm/machines/cmake_macros/athena.cmake
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/CIME/data/config/cesm/machines/cmake_macros/bluewaters.cmake b/CIME/data/config/cesm/machines/cmake_macros/bluewaters.cmake
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/CIME/data/config/cesm/machines/cmake_macros/casper.cmake b/CIME/data/config/cesm/machines/cmake_macros/casper.cmake
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/CIME/data/config/cesm/machines/cmake_macros/centos7-linux.cmake b/CIME/data/config/cesm/machines/cmake_macros/centos7-linux.cmake
deleted file mode 100644
index 4dc3995df54..00000000000
--- a/CIME/data/config/cesm/machines/cmake_macros/centos7-linux.cmake
+++ /dev/null
@@ -1 +0,0 @@
-string(APPEND SLIBS " -L$(NETCDF_PATH)/lib -Wl,-rpath,$(NETCDF_PATH)/lib -lnetcdff -lnetcdf")
diff --git a/CIME/data/config/cesm/machines/cmake_macros/cheyenne.cmake b/CIME/data/config/cesm/machines/cmake_macros/cheyenne.cmake
deleted file mode 100644
index 8cf9400fb2b..00000000000
--- a/CIME/data/config/cesm/machines/cmake_macros/cheyenne.cmake
+++ /dev/null
@@ -1,6 +0,0 @@
-if (MODEL STREQUAL gptl)
-  string(APPEND CPPDEFS " -DHAVE_NANOTIME -DBIT64 -DHAVE_VPRINTF -DHAVE_BACKTRACE -DHAVE_SLASHPROC -DHAVE_COMM_F2C -DHAVE_TIMES -DHAVE_GETTIMEOFDAY")
-endif()
-set(NETCDF_PATH "$ENV{NETCDF}")
-set(PIO_FILESYSTEM_HINTS "gpfs")
-set(PNETCDF_PATH "$ENV{PNETCDF}")
diff --git a/CIME/data/config/cesm/machines/cmake_macros/container.cmake b/CIME/data/config/cesm/machines/cmake_macros/container.cmake
deleted file mode 100644
index 8fc09bc4060..00000000000
--- a/CIME/data/config/cesm/machines/cmake_macros/container.cmake
+++ /dev/null
@@ -1,7 +0,0 @@
-if (MODEL STREQUAL gptl)
-  string(APPEND CPPDEFS " -DHAVE_NANOTIME -DBIT64 -DHAVE_VPRINTF -DHAVE_BACKTRACE -DHAVE_SLASHPROC -DHAVE_COMM_F2C -DHAVE_TIMES -DHAVE_GETTIMEOFDAY")
-endif()
-set(NETCDF_PATH "/usr/local")
-set(PNETCDF_PATH "/usr/local")
-set(LDFLAGS "")
-string(APPEND SLIBS " -lnetcdf -lnetcdff -llapack -lblas")
diff --git a/CIME/data/config/cesm/machines/cmake_macros/cray.cmake b/CIME/data/config/cesm/machines/cmake_macros/cray.cmake deleted file mode 100644 index 7f667df0847..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/cray.cmake +++ /dev/null @@ -1,38 +0,0 @@ -if (NOT compile_threaded) - string(APPEND CFLAGS " -h noomp") -endif() -if (compile_threaded) - string(APPEND CFLAGS " -fopenmp") -endif() -if (DEBUG) - string(APPEND CFLAGS " -g -O0") -endif() -if (NOT DEBUG) - string(APPEND CFLAGS " -O2") -endif() -string(APPEND CPPDEFS " -DFORTRANUNDERSCORE -DNO_R16 -DCPRCRAY") -if (MODEL STREQUAL pop) - string(APPEND CPPDEFS " -DDIR=NOOP") -endif() -if (MODEL STREQUAL moby) - string(APPEND CPPDEFS " -DDIR=NOOP") -endif() -set(FC_AUTO_R8 "-s real64") -string(APPEND FFLAGS " -f free -N 255 -h byteswapio -x dir -ef") -if (NOT compile_threaded) - string(APPEND FFLAGS " -h noomp") -endif() -if (compile_threaded) - string(APPEND FFLAGS " -h omp") -endif() -if (DEBUG) - string(APPEND FFLAGS " -g -O0 -K trap=fp -m1") -endif() -if (NOT DEBUG) - string(APPEND FFLAGS " -O2,ipa2 -em") -endif() -set(FFLAGS_NOOPT "-O1,fp2,ipa0,scalar0,vector0") -set(HAS_F2008_CONTIGUOUS "TRUE") -set(LDFLAGS "-Wl,--allow-multiple-definition -h byteswapio") -string(APPEND LDFLAGS " -h omp") - diff --git a/CIME/data/config/cesm/machines/cmake_macros/cray_daint.cmake b/CIME/data/config/cesm/machines/cmake_macros/cray_daint.cmake deleted file mode 100644 index 5929a1be65e..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/cray_daint.cmake +++ /dev/null @@ -1,2 +0,0 @@ -string(APPEND FFLAGS " -I/project/s824/edavin/OASIS3-MCT_2.0/build.cray/build/lib/mct -I/project/s824/edavin/OASIS3-MCT_2.0/build.cray/build/lib/psmile.MPI1") -string(APPEND SLIBS " -L/project/s824/edavin/OASIS3-MCT_2.0/build.cray/lib -lpsmile.MPI1 -lscrip -lmct_oasis -lmpeu_oasis") diff --git a/CIME/data/config/cesm/machines/cmake_macros/euler2.cmake b/CIME/data/config/cesm/machines/cmake_macros/euler2.cmake deleted file mode 100644 index 63ca8a325fc..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/euler2.cmake +++ /dev/null @@ -1,5 +0,0 @@ -string(APPEND CPPDEFS " -DLINUX") -set(NETCDF_PATH "$ENV{NETCDF}") -set(PIO_FILESYSTEM_HINTS "lustre") -set(PNETCDF_PATH "$ENV{PNETCDF}") -set(SLIBS "-L$ENV{NETCDF}/lib -lnetcdf -lnetcdff") diff --git a/CIME/data/config/cesm/machines/cmake_macros/euler3.cmake b/CIME/data/config/cesm/machines/cmake_macros/euler3.cmake deleted file mode 100644 index 63ca8a325fc..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/euler3.cmake +++ /dev/null @@ -1,5 +0,0 @@ -string(APPEND CPPDEFS " -DLINUX") -set(NETCDF_PATH "$ENV{NETCDF}") -set(PIO_FILESYSTEM_HINTS "lustre") -set(PNETCDF_PATH "$ENV{PNETCDF}") -set(SLIBS "-L$ENV{NETCDF}/lib -lnetcdf -lnetcdff") diff --git a/CIME/data/config/cesm/machines/cmake_macros/euler4.cmake b/CIME/data/config/cesm/machines/cmake_macros/euler4.cmake deleted file mode 100644 index 63ca8a325fc..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/euler4.cmake +++ /dev/null @@ -1,5 +0,0 @@ -string(APPEND CPPDEFS " -DLINUX") -set(NETCDF_PATH "$ENV{NETCDF}") -set(PIO_FILESYSTEM_HINTS "lustre") -set(PNETCDF_PATH "$ENV{PNETCDF}") -set(SLIBS "-L$ENV{NETCDF}/lib -lnetcdf -lnetcdff") diff --git a/CIME/data/config/cesm/machines/cmake_macros/frontera.cmake b/CIME/data/config/cesm/machines/cmake_macros/frontera.cmake deleted file mode 100644 index ba7a1dc95dd..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/frontera.cmake +++ /dev/null @@ -1,9 +0,0 @@ 
-set(HAS_F2008_CONTIGUOUS "TRUE") -if (MODEL STREQUAL gptl) - string(APPEND CPPDEFS " -DHAVE_NANOTIME -DBIT64 -DHAVE_VPRINTF -DHAVE_BACKTRACE -DHAVE_SLASHPROC -DHAVE_COMM_F2C -DHAVE_TIMES -DHAVE_GETTIMEOFDAY") -endif() -set(NETCDF_PATH "$ENV{TACC_NETCDF_DIR}") -set(PIO_FILESYSTEM_HINTS "lustre") -set(PNETCDF_PATH "$ENV{TACC_PNETCDF_DIR}") -string(APPEND LDFLAGS " -Wl,-rpath,${NETCDF_PATH}/lib") -string(APPEND SLIBS " -L${NETCDF_PATH}/lib -lnetcdff -lnetcdf") diff --git a/CIME/data/config/cesm/machines/cmake_macros/gnu.cmake b/CIME/data/config/cesm/machines/cmake_macros/gnu.cmake deleted file mode 100644 index 5ac48baf50c..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/gnu.cmake +++ /dev/null @@ -1,43 +0,0 @@ -string(APPEND CFLAGS " -std=gnu99") -if (compile_threaded) - string(APPEND CFLAGS " -fopenmp") -endif() -if (DEBUG) - string(APPEND CFLAGS " -g -Wall -Og -fbacktrace -ffpe-trap=invalid,zero,overflow -fcheck=bounds") -endif() -if (NOT DEBUG) - string(APPEND CFLAGS " -O") -endif() -string(APPEND CPPDEFS " -DFORTRANUNDERSCORE -DNO_R16 -DCPRGNU") -set(CXX_LINKER "FORTRAN") -set(FC_AUTO_R8 "-fdefault-real-8 -fdefault-double-8") -string(APPEND FFLAGS " -fconvert=big-endian -ffree-line-length-none -ffixed-line-length-none") -if (compile_threaded) - string(APPEND FFLAGS " -fopenmp") -endif() -if (DEBUG) - string(APPEND FFLAGS " -g -Wall -Og -fbacktrace -ffpe-trap=zero,overflow -fcheck=bounds") -endif() -if (NOT DEBUG) - string(APPEND FFLAGS " -O") -endif() -set(FFLAGS_NOOPT "-O0") -set(FIXEDFLAGS "-ffixed-form") -set(FREEFLAGS "-ffree-form") -set(HAS_F2008_CONTIGUOUS "FALSE") -if (compile_threaded) - string(APPEND LDFLAGS " -fopenmp") -endif() -set(MPICC "mpicc") -set(MPICXX "mpicxx") -set(MPIFC "mpif90") -set(SCC "gcc") -set(SCXX "g++") -set(SFC "gfortran") -set(SUPPORTS_CXX "TRUE") - -message("C compiler version is ${CMAKE_C_COMPILER_VERSION}") -message("Fortran compiler version is ${CMAKE_Fortran_COMPILER_VERSION}") -if (CMAKE_Fortran_COMPILER_VERSION VERSION_GREATER_EQUAL 10) - string(APPEND FFLAGS " -fallow-argument-mismatch -fallow-invalid-boz ") -endif() \ No newline at end of file diff --git a/CIME/data/config/cesm/machines/cmake_macros/gnu_cheyenne.cmake b/CIME/data/config/cesm/machines/cmake_macros/gnu_cheyenne.cmake deleted file mode 100644 index d6f24dd420e..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/gnu_cheyenne.cmake +++ /dev/null @@ -1,4 +0,0 @@ -if (MODEL STREQUAL pio1) - string(APPEND CPPDEFS " -DNO_MPIMOD") -endif() -string(APPEND SLIBS " -ldl") diff --git a/CIME/data/config/cesm/machines/cmake_macros/gnu_coeus.cmake b/CIME/data/config/cesm/machines/cmake_macros/gnu_coeus.cmake deleted file mode 100644 index f2c24ef9e88..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/gnu_coeus.cmake +++ /dev/null @@ -1,2 +0,0 @@ -set(NETCDF_PATH "/vol/apps/hpc/stow/netcdf/4.4.1.1/gcc-6.3.0") -set(SLIBS "-L${NETCDF_PATH}/lib -lnetcdf -lnetcdff -lpmi") diff --git a/CIME/data/config/cesm/machines/cmake_macros/gnu_hobart.cmake b/CIME/data/config/cesm/machines/cmake_macros/gnu_hobart.cmake deleted file mode 100644 index 35929dcc37e..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/gnu_hobart.cmake +++ /dev/null @@ -1 +0,0 @@ -string(APPEND SLIBS " -lm -ldl") diff --git a/CIME/data/config/cesm/machines/cmake_macros/gnu_homebrew.cmake b/CIME/data/config/cesm/machines/cmake_macros/gnu_homebrew.cmake deleted file mode 100644 index 9e468c1276c..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/gnu_homebrew.cmake +++ /dev/null @@ 
-1 +0,0 @@ -string(APPEND LDFLAGS " -framework Accelerate -Wl,-rpath $(NETCDF)/lib") diff --git a/CIME/data/config/cesm/machines/cmake_macros/gnu_melvin.cmake b/CIME/data/config/cesm/machines/cmake_macros/gnu_melvin.cmake deleted file mode 100644 index 668d542c960..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/gnu_melvin.cmake +++ /dev/null @@ -1,13 +0,0 @@ -set(ALBANY_PATH "/projects/install/rhel6-x86_64/ACME/AlbanyTrilinos/Albany/build/install") -if (NOT DEBUG) - string(APPEND CFLAGS " -O2") -endif() -set(CONFIG_ARGS "--host=cray") -set(CXX_LIBS "-lstdc++ -lmpi_cxx") -if (NOT DEBUG) - string(APPEND FFLAGS " -O2") -endif() -set(NETCDF_PATH "$ENV{NETCDFROOT}") -set(PNETCDF_PATH "$ENV{PNETCDFROOT}") -execute_process(COMMAND ${NETCDF_PATH}/bin/nf-config --flibs OUTPUT_VARIABLE SHELL_CMD_OUTPUT_BUILD_INTERNAL_IGNORE0 OUTPUT_STRIP_TRAILING_WHITESPACE) -string(APPEND SLIBS " ${SHELL_CMD_OUTPUT_BUILD_INTERNAL_IGNORE0} -lblas -llapack") diff --git a/CIME/data/config/cesm/machines/cmake_macros/gnu_modex.cmake b/CIME/data/config/cesm/machines/cmake_macros/gnu_modex.cmake deleted file mode 100644 index 62440372919..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/gnu_modex.cmake +++ /dev/null @@ -1,5 +0,0 @@ -string(APPEND SLIBS " -L$ENV{HDF5_HOME}/lib -lhdf5_fortran -lhdf5 -lhdf5_hl -lhdf5hl_fortran") -string(APPEND SLIBS " -L$ENV{NETCDF_PATH}/lib/ -lnetcdff -lnetcdf -lcurl -lblas -llapack") -if (MODEL STREQUAL gptl) - string(APPEND CPPDEFS " -DHAVE_VPRINTF -DHAVE_GETTIMEOFDAY -DHAVE_BACKTRACE") -endif() diff --git a/CIME/data/config/cesm/machines/cmake_macros/hobart.cmake b/CIME/data/config/cesm/machines/cmake_macros/hobart.cmake deleted file mode 100644 index 0446d1ae2d6..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/hobart.cmake +++ /dev/null @@ -1,9 +0,0 @@ -if (MODEL STREQUAL gptl) - string(APPEND CPPDEFS " -DHAVE_NANOTIME -DBIT64 -DHAVE_VPRINTF -DHAVE_BACKTRACE -DHAVE_SLASHPROC -DHAVE_COMM_F2C -DHAVE_TIMES -DHAVE_GETTIMEOFDAY") -endif() -set(LAPACK_LIBDIR "/usr/lib64") -if (MPILIB STREQUAL mvapich2) - set(MPI_LIB_NAME "mpich") -endif() -set(NETCDF_PATH "$ENV{NETCDF_PATH}") -string(APPEND SLIBS " -L${NETCDF_PATH}/lib -lnetcdff -lnetcdf") diff --git a/CIME/data/config/cesm/machines/cmake_macros/ibm.cmake b/CIME/data/config/cesm/machines/cmake_macros/ibm.cmake deleted file mode 100644 index 21d8747b027..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/ibm.cmake +++ /dev/null @@ -1,38 +0,0 @@ -string(APPEND CFLAGS " -g -qfullpath -qmaxmem=-1") -if (NOT DEBUG) - string(APPEND CFLAGS " -O3") -endif() -if (compile_threaded) - string(APPEND CFLAGS " -qsmp=omp") -endif() -if (DEBUG AND compile_threaded) - string(APPEND CFLAGS " -qsmp=omp:noopt") -endif() -string(APPEND CPPDEFS " -DFORTRAN_SAME -DCPRIBM") -set(CPRE "-WF,-D") -set(FC_AUTO_R8 "-qrealsize=8") -string(APPEND FFLAGS " -g -qfullpath -qmaxmem=-1") -if (NOT DEBUG) - string(APPEND FFLAGS " -O2 -qstrict -qinline=auto") -endif() -if (compile_threaded) - string(APPEND FFLAGS " -qsmp=omp") -endif() -if (DEBUG) - string(APPEND FFLAGS " -qinitauto=7FF7FFFF -qflttrap=ov:zero:inv:en") -endif() -if (DEBUG AND compile_threaded) - string(APPEND FFLAGS " -qsmp=omp:noopt") -endif() -if (DEBUG AND MODEL STREQUAL pop) - string(APPEND FFLAGS " -C") -endif() -set(FIXEDFLAGS "-qsuffix=f=f -qfixed=132") -set(FREEFLAGS "-qsuffix=f=f90:cpp=F90") -set(HAS_F2008_CONTIGUOUS "TRUE") -if (compile_threaded) - string(APPEND LDFLAGS " -qsmp=omp") -endif() -if (DEBUG AND compile_threaded) - string(APPEND LDFLAGS " 
-qsmp=omp:noopt") -endif() diff --git a/CIME/data/config/cesm/machines/cmake_macros/ibm_AIX.cmake b/CIME/data/config/cesm/machines/cmake_macros/ibm_AIX.cmake deleted file mode 100644 index a8b10f6cd7c..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/ibm_AIX.cmake +++ /dev/null @@ -1,18 +0,0 @@ -string(APPEND CFLAGS " -qarch=auto -qtune=auto -qcache=auto") -set(CONFIG_SHELL "/usr/bin/bash") -string(APPEND FFLAGS " -qarch=auto -qtune=auto -qcache=auto -qsclk=micro") -if (MODEL STREQUAL cam) - string(APPEND FFLAGS " -qspill=6000") -endif() -if (DEBUG) - string(APPEND LDFLAGS " -qsigtrap=xl__trcedump") -endif() -string(APPEND LDFLAGS " -bdatapsize:64K -bstackpsize:64K -btextpsize:32K") -set(MPICC "mpcc_r") -set(MPIFC "mpxlf2003_r") -set(SCC "cc_r") -set(SFC "xlf2003_r") -string(APPEND SLIBS " -lmassv -lessl") -if (NOT DEBUG) - string(APPEND SLIBS " -lmass") -endif() diff --git a/CIME/data/config/cesm/machines/cmake_macros/ibm_BGQ.cmake b/CIME/data/config/cesm/machines/cmake_macros/ibm_BGQ.cmake deleted file mode 100644 index 0f113a90feb..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/ibm_BGQ.cmake +++ /dev/null @@ -1,11 +0,0 @@ -set(CONFIG_ARGS "--build=powerpc-bgp-linux --host=powerpc64-suse-linux") -string(APPEND CPPDEFS " -DLINUX") -string(APPEND FFLAGS " -g -qfullpath -qmaxmem=-1 -qspillsize=2500 -qextname=flush") -if (NOT DEBUG) - string(APPEND FFLAGS " -O3 -qstrict -qinline=auto") -endif() -if (NOT DEBUG AND compile_threaded) -endif() -if (DEBUG AND compile_threaded) -endif() -set(LDFLAGS "-Wl,--relax -Wl,--allow-multiple-definition") diff --git a/CIME/data/config/cesm/machines/cmake_macros/ibm_mira.cmake b/CIME/data/config/cesm/machines/cmake_macros/ibm_mira.cmake deleted file mode 100644 index b9a3209671e..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/ibm_mira.cmake +++ /dev/null @@ -1,12 +0,0 @@ -string(APPEND CFLAGS " -qfloat=nomaf") -string(APPEND FFLAGS " -qfloat=nomaf") -set(HDF5_PATH "$ENV{HDF5}") -set(LD "/home/pkcoff/mpich-sandboxes/master/install-production/bin/mpixlf77_r") -set(MPICC "/home/pkcoff/mpich-sandboxes/master/install-production/bin/mpixlc_r") -set(MPIFC "/home/pkcoff/mpich-sandboxes/master/install-production/bin/mpixlf2003_r") -set(NETCDF_PATH "/soft/libraries/netcdf/4.3.3-f4.4.1/cnk-xl/current/") -set(PIO_FILESYSTEM_HINTS "gpfs") -set(PNETCDF_PATH "/soft/libraries/pnetcdf/1.6.1/cnk-xl/current/") -set(SCC "/home/pkcoff/mpich-sandboxes/master/install-production/bin/mpixlc_r") -set(SFC "/home/pkcoff/mpich-sandboxes/master/install-production/bin/mpixlf2003_r") -string(APPEND SLIBS " -L${NETCDF_PATH}/lib -lnetcdff -lnetcdf -L$ENV{HDF5}/lib -lhdf5_hl -lhdf5 -L/soft/libraries/alcf/current/xl/ZLIB/lib -lz -L/soft/libraries/alcf/current/xl/LAPACK/lib -llapack -L/soft/libraries/alcf/current/xl/BLAS/lib -lblas -L/bgsys/drivers/ppcfloor/comm/sys/lib") diff --git a/CIME/data/config/cesm/machines/cmake_macros/intel.cmake b/CIME/data/config/cesm/machines/cmake_macros/intel.cmake deleted file mode 100644 index 7328c85e305..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/intel.cmake +++ /dev/null @@ -1,61 +0,0 @@ -string(APPEND CFLAGS " -qno-opt-dynamic-align -fp-model precise -std=gnu99") -if (compile_threaded) - string(APPEND CFLAGS " -qopenmp") -endif() -if (NOT DEBUG) - string(APPEND CFLAGS " -O2 -debug minimal") -endif() -if (DEBUG) - string(APPEND CFLAGS " -O0 -g") -endif() -string(APPEND CPPDEFS " -DFORTRANUNDERSCORE -DCPRINTEL") -string(APPEND CXX_LDFLAGS " -cxxlib") -set(CXX_LINKER "FORTRAN") -set(FC_AUTO_R8 
"-r8") -string(APPEND FFLAGS " -qno-opt-dynamic-align -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -fp-model source") -if (compile_threaded) - string(APPEND FFLAGS " -qopenmp") -endif() -if (DEBUG) - string(APPEND FFLAGS " -O0 -g -check uninit -check bounds -check pointers -fpe0 -check noarg_temp_created") -endif() -if (NOT DEBUG) - string(APPEND FFLAGS " -O2 -debug minimal") -endif() -set(FFLAGS_NOOPT "-O0") -set(FIXEDFLAGS "-fixed") -set(FREEFLAGS "-free") -if (compile_threaded) - string(APPEND LDFLAGS " -qopenmp") -endif() -set(MPICC "mpicc") -set(MPICXX "mpicxx") -set(MPIFC "mpif90") -set(SCC "icc") -set(SCXX "icpc") -set(SFC "ifort") -if (MPILIB STREQUAL mpich) - string(APPEND SLIBS " -mkl=cluster") -endif() -if (MPILIB STREQUAL mpich2) - string(APPEND SLIBS " -mkl=cluster") -endif() -if (MPILIB STREQUAL mvapich) - string(APPEND SLIBS " -mkl=cluster") -endif() -if (MPILIB STREQUAL mvapich2) - string(APPEND SLIBS " -mkl=cluster") -endif() -if (MPILIB STREQUAL mpt) - string(APPEND SLIBS " -mkl=cluster") -endif() -if (MPILIB STREQUAL openmpi) - string(APPEND SLIBS " -mkl=cluster") -endif() -if (MPILIB STREQUAL impi) - string(APPEND SLIBS " -mkl=cluster") -endif() -if (MPILIB STREQUAL mpi-serial) - string(APPEND SLIBS " -mkl") -endif() -set(SUPPORTS_CXX "TRUE") diff --git a/CIME/data/config/cesm/machines/cmake_macros/intel_Darwin.cmake b/CIME/data/config/cesm/machines/cmake_macros/intel_Darwin.cmake deleted file mode 100644 index 7a2450e00f8..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/intel_Darwin.cmake +++ /dev/null @@ -1,3 +0,0 @@ -if (NOT compile_threaded) - string(APPEND FFLAGS " -heap-arrays") -endif() diff --git a/CIME/data/config/cesm/machines/cmake_macros/intel_aleph.cmake b/CIME/data/config/cesm/machines/cmake_macros/intel_aleph.cmake deleted file mode 100644 index d2edb323201..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/intel_aleph.cmake +++ /dev/null @@ -1,8 +0,0 @@ -set(CONFIG_ARGS "--host=cray") -string(APPEND CFLAGS " -xCORE-AVX2") -string(APPEND FFLAGS " -xCORE-AVX2") -string(APPEND SLIBS " -L$(NETCDF_DIR) -lnetcdff -Wl,--as-needed,-L$(NETCDF_DIR)/lib -lnetcdff -lnetcdf") -if (MODEL STREQUAL gptl) - string(APPEND CPPDEFS " -DHAVE_PAPI -DHAVE_SLASHPROC") -endif() -string(APPEND LDFLAGS " -mkl") diff --git a/CIME/data/config/cesm/machines/cmake_macros/intel_athena.cmake b/CIME/data/config/cesm/machines/cmake_macros/intel_athena.cmake deleted file mode 100644 index a9686803bee..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/intel_athena.cmake +++ /dev/null @@ -1,20 +0,0 @@ -string(APPEND CFLAGS " -xHost") -string(APPEND CPPDEFS " -DINTEL_MKL -DHAVE_SSE2") -string(APPEND FFLAGS " -xHost") -if (MODEL STREQUAL nemo) - string(APPEND FFLAGS " $(FC_AUTO_R8) -O3 -assume norealloc_lhs") -endif() -execute_process(COMMAND ${NETCDF_PATH}/bin/nc-config --flibs OUTPUT_VARIABLE SHELL_CMD_OUTPUT_BUILD_INTERNAL_IGNORE0 OUTPUT_STRIP_TRAILING_WHITESPACE) -string(APPEND SLIBS " ${SHELL_CMD_OUTPUT_BUILD_INTERNAL_IGNORE0}") -if (MPILIB STREQUAL mpich2) - set(MPICXX "mpiicpc") -endif() -if (MPILIB STREQUAL mpich2) - set(MPICC "mpiicc") -endif() -if (MPILIB STREQUAL mpich2) - set(MPIFC "mpiifort") -endif() -if (MPILIB STREQUAL mpich2) - set(TRILINOS_PATH "$ENV{TRILINOS_PATH}") -endif() diff --git a/CIME/data/config/cesm/machines/cmake_macros/intel_bluewaters.cmake b/CIME/data/config/cesm/machines/cmake_macros/intel_bluewaters.cmake deleted file mode 100644 index 29b1da06dae..00000000000 --- 
a/CIME/data/config/cesm/machines/cmake_macros/intel_bluewaters.cmake +++ /dev/null @@ -1,3 +0,0 @@ -set(HAS_F2008_CONTIGUOUS "FALSE") -string(APPEND FFLAGS " -dynamic -mkl=sequential -no-fma") -string(APPEND CFLAGS " -dynamic -mkl=sequential -no-fma") diff --git a/CIME/data/config/cesm/machines/cmake_macros/intel_casper.cmake b/CIME/data/config/cesm/machines/cmake_macros/intel_casper.cmake deleted file mode 100644 index 7ee1c8b71dc..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/intel_casper.cmake +++ /dev/null @@ -1,9 +0,0 @@ -string(APPEND CFLAGS " -qopt-report -xCORE_AVX2 -no-fma") -string(APPEND FFLAGS " -qopt-report -xCORE_AVX2 -no-fma") -if (DEBUG) - string(APPEND CMAKE_OPTS " -DPIO_ENABLE_LOGGING=ON") -endif() -if (MPILIB STREQUAL mpi-serial AND NOT compile_threaded) - set(PFUNIT_PATH "$ENV{CESMDATAROOT}/tools/pFUnit/pFUnit3.2.8_cheyenne_Intel17.0.1_noMPI_noOpenMP") -endif() -set(HAS_F2008_CONTIGUOUS "TRUE") diff --git a/CIME/data/config/cesm/machines/cmake_macros/intel_cheyenne.cmake b/CIME/data/config/cesm/machines/cmake_macros/intel_cheyenne.cmake deleted file mode 100644 index f83ba0ebd29..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/intel_cheyenne.cmake +++ /dev/null @@ -1,12 +0,0 @@ -string(APPEND CFLAGS " -qopt-report -xCORE_AVX2 -no-fma") -string(APPEND FFLAGS " -qopt-report -xCORE_AVX2 -no-fma") -if (DEBUG) - string(APPEND CMAKE_OPTS " -DPIO_ENABLE_LOGGING=ON") -endif() -if (MPILIB STREQUAL mpi-serial AND NOT compile_threaded) - set(PFUNIT_PATH "$ENV{CESMDATAROOT}/tools/pFUnit/pFUnit3.2.8_cheyenne_Intel17.0.1_noMPI_noOpenMP") -endif() -if (MPILIB STREQUAL mpt AND compile_threaded) - set(PFUNIT_PATH "$ENV{CESMDATAROOT}/tools/pFUnit/pFUnit3.2.8_cheyenne_Intel17.0.1_MPI_openMP") -endif() -set(HAS_F2008_CONTIGUOUS "TRUE") diff --git a/CIME/data/config/cesm/machines/cmake_macros/intel_constance.cmake b/CIME/data/config/cesm/machines/cmake_macros/intel_constance.cmake deleted file mode 100644 index 5733786855e..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/intel_constance.cmake +++ /dev/null @@ -1,11 +0,0 @@ -if (NOT DEBUG) - string(APPEND CFLAGS " -O2") -endif() -set(CONFIG_ARGS "--host=cray") -string(APPEND CPPDEFS " -DLINUX") -if (NOT DEBUG) - string(APPEND FFLAGS " -O2") -endif() -set(NETCDF_PATH "$ENV{NETCDF_HOME}") -set(PIO_FILESYSTEM_HINTS "lustre") -set(SLIBS "-L${NETCDF_PATH}/lib -lnetcdf -lnetcdff -lpmi -L$ENV{MKL_PATH} -lmkl_rt") diff --git a/CIME/data/config/cesm/machines/cmake_macros/intel_cori-haswell.cmake b/CIME/data/config/cesm/machines/cmake_macros/intel_cori-haswell.cmake deleted file mode 100644 index 5a7a781ef78..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/intel_cori-haswell.cmake +++ /dev/null @@ -1,9 +0,0 @@ -set(CONFIG_ARGS "--host=cray") -string(APPEND CFLAGS " -xCORE-AVX2") -string(APPEND FFLAGS " -xCORE-AVX2") -string(APPEND SLIBS " -L$(NETCDF_DIR) -lnetcdff -Wl,--as-needed,-L$(NETCDF_DIR)/lib -lnetcdff -lnetcdf") -if (MODEL STREQUAL gptl) - string(APPEND CPPDEFS " -DHAVE_SLASHPROC") -endif() -string(APPEND LDFLAGS " -mkl") -set(HAS_F2008_CONTIGUOUS "FALSE") diff --git a/CIME/data/config/cesm/machines/cmake_macros/intel_cori-knl.cmake b/CIME/data/config/cesm/machines/cmake_macros/intel_cori-knl.cmake deleted file mode 100644 index 936a8a66a55..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/intel_cori-knl.cmake +++ /dev/null @@ -1,9 +0,0 @@ -set(CONFIG_ARGS "--host=cray") -string(APPEND CFLAGS " -xMIC-AVX512") -string(APPEND FFLAGS " -xMIC-AVX512") -string(APPEND SLIBS " 
-L$(NETCDF_DIR) -lnetcdff -Wl,--as-needed,-L$(NETCDF_DIR)/lib -lnetcdff -lnetcdf") -if (MODEL STREQUAL gptl) - string(APPEND CPPDEFS " -DHAVE_SLASHPROC") -endif() -string(APPEND LDFLAGS " -mkl -lmemkind -zmuldefs") -set(HAS_F2008_CONTIGUOUS "FALSE") diff --git a/CIME/data/config/cesm/machines/cmake_macros/intel_eastwind.cmake b/CIME/data/config/cesm/machines/cmake_macros/intel_eastwind.cmake deleted file mode 100644 index 1a1dcf69b3c..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/intel_eastwind.cmake +++ /dev/null @@ -1,11 +0,0 @@ -if (NOT DEBUG) - string(APPEND CFLAGS " -O2") -endif() -set(CONFIG_ARGS "--host=cray") -string(APPEND CPPDEFS " -DLINUX") -if (NOT DEBUG) - string(APPEND FFLAGS " -O2") -endif() -set(NETCDF_PATH "$ENV{NETCDF_HOME}") -set(PIO_FILESYSTEM_HINTS "lustre") -set(SLIBS "-L${NETCDF_PATH}/lib -lnetcdf -lnetcdff -lpmi") diff --git a/CIME/data/config/cesm/machines/cmake_macros/intel_edison.cmake b/CIME/data/config/cesm/machines/cmake_macros/intel_edison.cmake deleted file mode 100644 index e9f657905b0..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/intel_edison.cmake +++ /dev/null @@ -1,11 +0,0 @@ -if (NOT DEBUG) - string(APPEND CFLAGS " -O2") -endif() -set(CONFIG_ARGS "--host=cray") -if (MODEL STREQUAL gptl) - string(APPEND CPPDEFS " -DHAVE_PAPI") -endif() -if (NOT DEBUG) - string(APPEND FFLAGS " -O2") -endif() -string(APPEND SLIBS " -L$ENV{NETCDF_DIR} -lnetcdff -Wl,--as-needed,-L$ENV{NETCDF_DIR}/lib -lnetcdff -lnetcdf") diff --git a/CIME/data/config/cesm/machines/cmake_macros/intel_euler2.cmake b/CIME/data/config/cesm/machines/cmake_macros/intel_euler2.cmake deleted file mode 100644 index 42580b2a64c..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/intel_euler2.cmake +++ /dev/null @@ -1,7 +0,0 @@ -if (NOT DEBUG) - string(APPEND CFLAGS " -xCORE-AVX2") -endif() -if (NOT DEBUG) - string(APPEND FFLAGS " -xCORE-AVX2") -endif() -string(APPEND LDFLAGS " -mkl") diff --git a/CIME/data/config/cesm/machines/cmake_macros/intel_euler3.cmake b/CIME/data/config/cesm/machines/cmake_macros/intel_euler3.cmake deleted file mode 100644 index 42580b2a64c..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/intel_euler3.cmake +++ /dev/null @@ -1,7 +0,0 @@ -if (NOT DEBUG) - string(APPEND CFLAGS " -xCORE-AVX2") -endif() -if (NOT DEBUG) - string(APPEND FFLAGS " -xCORE-AVX2") -endif() -string(APPEND LDFLAGS " -mkl") diff --git a/CIME/data/config/cesm/machines/cmake_macros/intel_euler4.cmake b/CIME/data/config/cesm/machines/cmake_macros/intel_euler4.cmake deleted file mode 100644 index 42580b2a64c..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/intel_euler4.cmake +++ /dev/null @@ -1,7 +0,0 @@ -if (NOT DEBUG) - string(APPEND CFLAGS " -xCORE-AVX2") -endif() -if (NOT DEBUG) - string(APPEND FFLAGS " -xCORE-AVX2") -endif() -string(APPEND LDFLAGS " -mkl") diff --git a/CIME/data/config/cesm/machines/cmake_macros/intel_greenplanet-sib29.cmake b/CIME/data/config/cesm/machines/cmake_macros/intel_greenplanet-sib29.cmake deleted file mode 100644 index e445abd253f..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/intel_greenplanet-sib29.cmake +++ /dev/null @@ -1,3 +0,0 @@ -set(NETCDF_PATH "$ENV{NCDIR}") -set(PNETCDF_PATH "$ENV{PNDIR}") -string(APPEND SLIBS " -L${NETCDF_PATH}/lib -lnetcdf -lnetcdff") diff --git a/CIME/data/config/cesm/machines/cmake_macros/intel_greenplanet-sky24.cmake b/CIME/data/config/cesm/machines/cmake_macros/intel_greenplanet-sky24.cmake deleted file mode 100644 index e445abd253f..00000000000 --- 
a/CIME/data/config/cesm/machines/cmake_macros/intel_greenplanet-sky24.cmake +++ /dev/null @@ -1,3 +0,0 @@ -set(NETCDF_PATH "$ENV{NCDIR}") -set(PNETCDF_PATH "$ENV{PNDIR}") -string(APPEND SLIBS " -L${NETCDF_PATH}/lib -lnetcdf -lnetcdff") diff --git a/CIME/data/config/cesm/machines/cmake_macros/intel_hobart.cmake b/CIME/data/config/cesm/machines/cmake_macros/intel_hobart.cmake deleted file mode 100644 index e9ac234fd17..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/intel_hobart.cmake +++ /dev/null @@ -1,16 +0,0 @@ -string(APPEND CFLAGS " -lifcore") -string(APPEND FFLAGS " -lifcore") -if (MPILIB STREQUAL mpi-serial) - string(APPEND FFLAGS " -mcmodel medium") -endif() -string(APPEND LDFLAGS " -lquadmath") -string(APPEND LDFLAGS " -Wl,-rpath,${NETCDF_PATH}/lib") -string(APPEND LDFLAGS " -Wl,-rpath,$ENV{COMPILER_PATH}/lib/intel64") -string(APPEND LDFLAGS " -Wl,-rpath,$ENV{COMPILER_PATH}/mkl/lib/intel64") -string(APPEND LDFLAGS " -Wl,-rpath,$ENV{MPI_PATH}/lib") -string(APPEND LDFLAGS " -lifcore") -if (MPILIB STREQUAL mvapich2) -endif() -if (MPILIB STREQUAL mpi-serial AND NOT compile_threaded) - set(PFUNIT_PATH "/fs/cgd/csm/tools/pFUnit/pFUnit3.2.8_hobart_Intel15.0.2_noMPI_noOpenMP") -endif() diff --git a/CIME/data/config/cesm/machines/cmake_macros/intel_izumi.cmake b/CIME/data/config/cesm/machines/cmake_macros/intel_izumi.cmake deleted file mode 100644 index 13f2e3be289..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/intel_izumi.cmake +++ /dev/null @@ -1,3 +0,0 @@ -if (MPILIB STREQUAL mpi-serial AND NOT compile_threaded) - set(PFUNIT_PATH "/fs/cgd/csm/tools/pFUnit/pFUnit3.3.3_izumi_Intel19.0.1_noMPI_noOpenMP") -endif() diff --git a/CIME/data/config/cesm/machines/cmake_macros/intel_laramie.cmake b/CIME/data/config/cesm/machines/cmake_macros/intel_laramie.cmake deleted file mode 100644 index ba6a09daa27..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/intel_laramie.cmake +++ /dev/null @@ -1,5 +0,0 @@ -string(APPEND CFLAGS " -vec-report") -string(APPEND FFLAGS " -vec-report") -if (DEBUG) - string(APPEND CMAKE_OPTS " -DPIO_ENABLE_LOGGING=ON") -endif() diff --git a/CIME/data/config/cesm/machines/cmake_macros/intel_lawrencium-lr2.cmake b/CIME/data/config/cesm/machines/cmake_macros/intel_lawrencium-lr2.cmake deleted file mode 100644 index 1b0e9e4cca0..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/intel_lawrencium-lr2.cmake +++ /dev/null @@ -1,12 +0,0 @@ -if (MODEL STREQUAL gptl) - string(APPEND CPPDEFS " -DHAVE_VPRINTF -DHAVE_TIMES -DHAVE_GETTIMEOFDAY") -endif() -string(APPEND SLIBS " -lnetcdff -lnetcdf -mkl") -if (DEBUG) - string(APPEND FFLAGS " -ftrapuv") -endif() -if (DEBUG) - string(APPEND CFLAGS " -ftrapuv") -endif() -set(NETCDF_PATH "$ENV{NETCDF_DIR}") -set(LAPACK_LIBDIR "/global/software/sl-6.x86_64/modules/intel/2016.1.150/lapack/3.6.0-intel/lib") diff --git a/CIME/data/config/cesm/machines/cmake_macros/intel_lawrencium-lr3.cmake b/CIME/data/config/cesm/machines/cmake_macros/intel_lawrencium-lr3.cmake deleted file mode 100644 index 1b0e9e4cca0..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/intel_lawrencium-lr3.cmake +++ /dev/null @@ -1,12 +0,0 @@ -if (MODEL STREQUAL gptl) - string(APPEND CPPDEFS " -DHAVE_VPRINTF -DHAVE_TIMES -DHAVE_GETTIMEOFDAY") -endif() -string(APPEND SLIBS " -lnetcdff -lnetcdf -mkl") -if (DEBUG) - string(APPEND FFLAGS " -ftrapuv") -endif() -if (DEBUG) - string(APPEND CFLAGS " -ftrapuv") -endif() -set(NETCDF_PATH "$ENV{NETCDF_DIR}") -set(LAPACK_LIBDIR 
"/global/software/sl-6.x86_64/modules/intel/2016.1.150/lapack/3.6.0-intel/lib") diff --git a/CIME/data/config/cesm/machines/cmake_macros/intel_sandiatoss3.cmake b/CIME/data/config/cesm/machines/cmake_macros/intel_sandiatoss3.cmake deleted file mode 100644 index 8356ac5e733..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/intel_sandiatoss3.cmake +++ /dev/null @@ -1,12 +0,0 @@ -if (NOT DEBUG) - string(APPEND CFLAGS " -O2") -endif() -set(CONFIG_ARGS "--host=cray") -set(ESMF_LIBDIR "/projects/ccsm/esmf-6.3.0rp1/lib/libO/Linux.intel.64.openmpi.default") -if (NOT DEBUG) - string(APPEND FFLAGS " -O2") -endif() -set(NETCDF_PATH "$ENV{NETCDFROOT}") -set(PIO_FILESYSTEM_HINTS "lustre") -set(PNETCDF_PATH "$ENV{PNETCDFROOT}") -string(APPEND SLIBS " -L${NETCDF_PATH}/lib -lnetcdff -L/projects/ccsm/BLAS-intel -lblas_LINUX") diff --git a/CIME/data/config/cesm/machines/cmake_macros/intel_stampede2-knl.cmake b/CIME/data/config/cesm/machines/cmake_macros/intel_stampede2-knl.cmake deleted file mode 100644 index 6cdd9fe9c16..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/intel_stampede2-knl.cmake +++ /dev/null @@ -1,10 +0,0 @@ -string(APPEND CFLAGS " -xCOMMON-AVX512 -no-fma") -string(APPEND FFLAGS " -xCOMMON-AVX512 -no-fma") -if (MPILIB STREQUAL mpi-serial) - string(APPEND FFLAGS " -mcmodel medium") -endif() -string(APPEND LDFLAGS " -L$ENV{TACC_HDF5_LIB} -lhdf5 $(MKL) -zmuldefs -xCOMMON-AVX512") -execute_process(COMMAND ${NETCDF_PATH}/bin/nf-config --flibs OUTPUT_VARIABLE SHELL_CMD_OUTPUT_BUILD_INTERNAL_IGNORE0 OUTPUT_STRIP_TRAILING_WHITESPACE) -string(APPEND SLIBS " ${SHELL_CMD_OUTPUT_BUILD_INTERNAL_IGNORE0} -L$ENV{TACC_HDF5_LIB} -lhdf5") -set(TRILINOS_PATH "$ENV{TRILINOS_PATH}") -set(HAS_F2008_CONTIGUOUS "FALSE") diff --git a/CIME/data/config/cesm/machines/cmake_macros/intel_stampede2-skx.cmake b/CIME/data/config/cesm/machines/cmake_macros/intel_stampede2-skx.cmake deleted file mode 100644 index 6cdd9fe9c16..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/intel_stampede2-skx.cmake +++ /dev/null @@ -1,10 +0,0 @@ -string(APPEND CFLAGS " -xCOMMON-AVX512 -no-fma") -string(APPEND FFLAGS " -xCOMMON-AVX512 -no-fma") -if (MPILIB STREQUAL mpi-serial) - string(APPEND FFLAGS " -mcmodel medium") -endif() -string(APPEND LDFLAGS " -L$ENV{TACC_HDF5_LIB} -lhdf5 $(MKL) -zmuldefs -xCOMMON-AVX512") -execute_process(COMMAND ${NETCDF_PATH}/bin/nf-config --flibs OUTPUT_VARIABLE SHELL_CMD_OUTPUT_BUILD_INTERNAL_IGNORE0 OUTPUT_STRIP_TRAILING_WHITESPACE) -string(APPEND SLIBS " ${SHELL_CMD_OUTPUT_BUILD_INTERNAL_IGNORE0} -L$ENV{TACC_HDF5_LIB} -lhdf5") -set(TRILINOS_PATH "$ENV{TRILINOS_PATH}") -set(HAS_F2008_CONTIGUOUS "FALSE") diff --git a/CIME/data/config/cesm/machines/cmake_macros/intel_theia.cmake b/CIME/data/config/cesm/machines/cmake_macros/intel_theia.cmake deleted file mode 100644 index 6a09fe8c2cb..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/intel_theia.cmake +++ /dev/null @@ -1,4 +0,0 @@ -set(MPICC "mpiicc") -set(MPICXX "mpiicpc") -set(MPIFC "mpiifort") -set(NETCDF_PATH "/apps/netcdf/4.3.0-intel") diff --git a/CIME/data/config/cesm/machines/cmake_macros/intel_zeus.cmake b/CIME/data/config/cesm/machines/cmake_macros/intel_zeus.cmake deleted file mode 100644 index 20172b4a227..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/intel_zeus.cmake +++ /dev/null @@ -1,32 +0,0 @@ -set(AR "xiar") -set(ARFLAGS "cru") -if (MPILIB STREQUAL impi) - string(APPEND FFLAGS " -mkl=cluster -xSKYLAKE-AVX512 -qopt-zmm-usage=high -no-fma") -endif() -if (MPILIB STREQUAL 
mpi-serial) - string(APPEND FFLAGS " -mcmodel medium") -endif() -if (MPILIB STREQUAL mpi-serial) - string(APPEND FFLAGS " -mkl -xSKYLAKE-AVX512 -qopt-zmm-usage=high -no-fma") -endif() -if (MPILIB STREQUAL impi) - string(APPEND CFLAGS " -mkl=cluster -xSKYLAKE-AVX512 -qopt-zmm-usage=high -no-fma") -endif() -if (MPILIB STREQUAL mpi-serial) - string(APPEND CFLAGS " -mkl -xSKYLAKE-AVX512 -qopt-zmm-usage=high -no-fma") -endif() -if (MPILIB STREQUAL impi) - string(APPEND LDFLAGS " -mkl=cluster") -endif() -if (MPILIB STREQUAL mpi-serial) - string(APPEND LDFLAGS " -mkl -lstdc++") -endif() -if (MPILIB STREQUAL impi) - string(APPEND SLIBS " -lstdc++") -endif() -if (MPILIB STREQUAL mpi-serial) - string(APPEND SLIBS " -lstdc++") -endif() -set(MPICC "mpiicc") -set(MPICXX "mpiicpc") -set(MPIFC "mpiifort") diff --git a/CIME/data/config/cesm/machines/cmake_macros/izumi.cmake b/CIME/data/config/cesm/machines/cmake_macros/izumi.cmake deleted file mode 100644 index 0446d1ae2d6..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/izumi.cmake +++ /dev/null @@ -1,9 +0,0 @@ -if (MODEL STREQUAL gptl) - string(APPEND CPPDEFS " -DHAVE_NANOTIME -DBIT64 -DHAVE_VPRINTF -DHAVE_BACKTRACE -DHAVE_SLASHPROC -DHAVE_COMM_F2C -DHAVE_TIMES -DHAVE_GETTIMEOFDAY") -endif() -set(LAPACK_LIBDIR "/usr/lib64") -if (MPILIB STREQUAL mvapich2) - set(MPI_LIB_NAME "mpich") -endif() -set(NETCDF_PATH "$ENV{NETCDF_PATH}") -string(APPEND SLIBS " -L${NETCDF_PATH}/lib -lnetcdff -lnetcdf") diff --git a/CIME/data/config/cesm/machines/cmake_macros/laramie.cmake b/CIME/data/config/cesm/machines/cmake_macros/laramie.cmake deleted file mode 100644 index 8cf9400fb2b..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/laramie.cmake +++ /dev/null @@ -1,6 +0,0 @@ -if (MODEL STREQUAL gptl) - string(APPEND CPPDEFS " -DHAVE_NANOTIME -DBIT64 -DHAVE_VPRINTF -DHAVE_BACKTRACE -DHAVE_SLASHPROC -DHAVE_COMM_F2C -DHAVE_TIMES -DHAVE_GETTIMEOFDAY") -endif() -set(NETCDF_PATH "$ENV{NETCDF}") -set(PIO_FILESYSTEM_HINTS "gpfs") -set(PNETCDF_PATH "$ENV{PNETCDF}") diff --git a/CIME/data/config/cesm/machines/cmake_macros/lonestar5.cmake b/CIME/data/config/cesm/machines/cmake_macros/lonestar5.cmake deleted file mode 100644 index 0c5eec80545..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/lonestar5.cmake +++ /dev/null @@ -1,6 +0,0 @@ -string(APPEND CPPDEFS " -DHAVE_NANOTIME") -set(NETCDF_PATH "$ENV{TACC_NETCDF_DIR}") -set(PIO_FILESYSTEM_HINTS "lustre") -set(PNETCDF_PATH "$ENV{TACC_PNETCDF_DIR}") -string(APPEND LDFLAGS " -Wl,-rpath,${NETCDF_PATH}/lib") -string(APPEND SLIBS " -L${NETCDF_PATH}/lib -lnetcdff -lnetcdf") diff --git a/CIME/data/config/cesm/machines/cmake_macros/nag.cmake b/CIME/data/config/cesm/machines/cmake_macros/nag.cmake deleted file mode 100644 index b1c97f5ea9f..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/nag.cmake +++ /dev/null @@ -1,29 +0,0 @@ -string(APPEND CFLAGS " -std=gnu99") -if (DEBUG) - string(APPEND CFLAGS " -g") -endif() -string(APPEND CPPDEFS " -DFORTRANUNDERSCORE -DNO_CRAY_POINTERS -DNO_SHR_VMATH -DCPRNAG") -set(FC_AUTO_R8 "-r8") -string(APPEND FFLAGS " -Wp,-macro=no_com -convert=BIG_ENDIAN -indirect $ENV{CIMEROOT}/config/cesm/machines/nag_mpi_argument.txt") -if (NOT DEBUG) - string(APPEND FFLAGS " -ieee=full -O2") -endif() -if (DEBUG) - string(APPEND FFLAGS " -C=all -g -time -f2003 -ieee=stop") -endif() -if (DEBUG AND NOT compile_threaded) - string(APPEND FFLAGS " -gline") -endif() -if (MODEL STREQUAL cism) - string(APPEND FFLAGS " -mismatch_all") -endif() -set(FFLAGS_NOOPT "-O0") 
-set(FIXEDFLAGS "-fixed") -set(FREEFLAGS "-free") -set(HAS_F2008_CONTIGUOUS "FALSE") -set(MPICC "mpicc") -set(MPIFC "mpif90") -set(SCC "gcc") -set(SFC "nagfor") -string(APPEND LDFLAGS " -lpthread") -string(APPEND CONFIG_ARGS " FCLIBS='-Wl,--as-needed,--allow-shlib-undefined -L$(COMPILER_PATH)/lib/NAG_Fortran -lf62rts'") diff --git a/CIME/data/config/cesm/machines/cmake_macros/nvhpc-gpu.cmake b/CIME/data/config/cesm/machines/cmake_macros/nvhpc-gpu.cmake deleted file mode 100644 index 4e990adb27b..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/nvhpc-gpu.cmake +++ /dev/null @@ -1,46 +0,0 @@ -string(APPEND CFLAGS " -gopt -time") -if (compile_threaded) - string(APPEND CFLAGS " -mp") -endif() -string(APPEND CPPDEFS " -DFORTRANUNDERSCORE -DNO_SHR_VMATH -DNO_R16 -DCPRPGI") -set(CXX_LINKER "CXX") -set(FC_AUTO_R8 "-r8") -string(APPEND FFLAGS " -i4 -gopt -time -Mextend -byteswapio -Mflushz -Kieee") -if (compile_threaded) - string(APPEND FFLAGS " -mp") -endif() -if (DEBUG) - string(APPEND FFLAGS " -O0 -g -Ktrap=fp -Mbounds -Kieee") -endif() -if (MODEL STREQUAL datm) - string(APPEND FFLAGS " -Mnovect") -endif() -if (MODEL STREQUAL dlnd) - string(APPEND FFLAGS " -Mnovect") -endif() -if (MODEL STREQUAL drof) - string(APPEND FFLAGS " -Mnovect") -endif() -if (MODEL STREQUAL dwav) - string(APPEND FFLAGS " -Mnovect") -endif() -if (MODEL STREQUAL dice) - string(APPEND FFLAGS " -Mnovect") -endif() -if (MODEL STREQUAL docn) - string(APPEND FFLAGS " -Mnovect") -endif() -set(FFLAGS_NOOPT "-O0") -set(FIXEDFLAGS "-Mfixed") -set(FREEFLAGS "-Mfree") -set(HAS_F2008_CONTIGUOUS "FALSE") -set(LDFLAGS "-time -Wl,--allow-multiple-definition") -if (compile_threaded) - string(APPEND LDFLAGS " -mp") -endif() -set(MPICC "mpicc") -set(MPICXX "mpicxx") -set(MPIFC "mpif90") -set(SCC "nvc") -set(SCXX "nvc++") -set(SFC "nvfortran") diff --git a/CIME/data/config/cesm/machines/cmake_macros/nvhpc-gpu_casper.cmake b/CIME/data/config/cesm/machines/cmake_macros/nvhpc-gpu_casper.cmake deleted file mode 100644 index 600521a1bb5..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/nvhpc-gpu_casper.cmake +++ /dev/null @@ -1,15 +0,0 @@ -if (NOT DEBUG) - string(APPEND CFLAGS " -O -tp=skylake -Mnofma") -endif() -if (NOT DEBUG) - string(APPEND FFLAGS " -O -tp=skylake -Mnofma") -endif() -string(APPEND FFLAGS " -I$(EXEROOT)/ocn/obj/FMS") -if (NOT DEBUG) - string(APPEND LDFLAGS " -O -tp=skylake -Mnofma -acc -ta=tesla:cc70,lineinfo,nofma -Minfo=accel") -endif() -string(APPEND SLIBS " -llapack -lblas") -if (MPILIB STREQUAL mpi-serial) - string(APPEND SLIBS " -ldl") -endif() -string(APPEND SLIBS " -L${NETCDF_PATH}/lib -lnetcdf -lnetcdff") diff --git a/CIME/data/config/cesm/machines/cmake_macros/nvhpc.cmake b/CIME/data/config/cesm/machines/cmake_macros/nvhpc.cmake deleted file mode 100644 index 4e990adb27b..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/nvhpc.cmake +++ /dev/null @@ -1,46 +0,0 @@ -string(APPEND CFLAGS " -gopt -time") -if (compile_threaded) - string(APPEND CFLAGS " -mp") -endif() -string(APPEND CPPDEFS " -DFORTRANUNDERSCORE -DNO_SHR_VMATH -DNO_R16 -DCPRPGI") -set(CXX_LINKER "CXX") -set(FC_AUTO_R8 "-r8") -string(APPEND FFLAGS " -i4 -gopt -time -Mextend -byteswapio -Mflushz -Kieee") -if (compile_threaded) - string(APPEND FFLAGS " -mp") -endif() -if (DEBUG) - string(APPEND FFLAGS " -O0 -g -Ktrap=fp -Mbounds -Kieee") -endif() -if (MODEL STREQUAL datm) - string(APPEND FFLAGS " -Mnovect") -endif() -if (MODEL STREQUAL dlnd) - string(APPEND FFLAGS " -Mnovect") -endif() -if (MODEL STREQUAL drof) - 
string(APPEND FFLAGS " -Mnovect") -endif() -if (MODEL STREQUAL dwav) - string(APPEND FFLAGS " -Mnovect") -endif() -if (MODEL STREQUAL dice) - string(APPEND FFLAGS " -Mnovect") -endif() -if (MODEL STREQUAL docn) - string(APPEND FFLAGS " -Mnovect") -endif() -set(FFLAGS_NOOPT "-O0") -set(FIXEDFLAGS "-Mfixed") -set(FREEFLAGS "-Mfree") -set(HAS_F2008_CONTIGUOUS "FALSE") -set(LDFLAGS "-time -Wl,--allow-multiple-definition") -if (compile_threaded) - string(APPEND LDFLAGS " -mp") -endif() -set(MPICC "mpicc") -set(MPICXX "mpicxx") -set(MPIFC "mpif90") -set(SCC "nvc") -set(SCXX "nvc++") -set(SFC "nvfortran") diff --git a/CIME/data/config/cesm/machines/cmake_macros/nvhpc_casper.cmake b/CIME/data/config/cesm/machines/cmake_macros/nvhpc_casper.cmake deleted file mode 100644 index f3eb207d1e7..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/nvhpc_casper.cmake +++ /dev/null @@ -1,15 +0,0 @@ -if (NOT DEBUG) - string(APPEND CFLAGS " -O -tp=skylake -Mnofma") -endif() -if (NOT DEBUG) - string(APPEND FFLAGS " -O -tp=skylake -Mnofma") -endif() -string(APPEND FFLAGS " -I$(EXEROOT)/ocn/obj/FMS") -if (NOT DEBUG) - string(APPEND LDFLAGS " -O -tp=skylake -Mnofma") -endif() -string(APPEND SLIBS " -llapack -lblas") -if (MPILIB STREQUAL mpi-serial) - string(APPEND SLIBS " -ldl") -endif() -string(APPEND SLIBS " -L${NETCDF_PATH}/lib -lnetcdf -lnetcdff") diff --git a/CIME/data/config/cesm/machines/cmake_macros/pgi-gpu.cmake b/CIME/data/config/cesm/machines/cmake_macros/pgi-gpu.cmake deleted file mode 100644 index 6c750d2ff8e..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/pgi-gpu.cmake +++ /dev/null @@ -1,46 +0,0 @@ -string(APPEND CFLAGS " -gopt -time") -if (compile_threaded) - string(APPEND CFLAGS " -mp") -endif() -string(APPEND CPPDEFS " -DFORTRANUNDERSCORE -DNO_SHR_VMATH -DNO_R16 -DCPRPGI") -set(CXX_LINKER "CXX") -set(FC_AUTO_R8 "-r8") -string(APPEND FFLAGS " -i4 -gopt -time -Mextend -byteswapio -Mflushz -Kieee") -if (compile_threaded) - string(APPEND FFLAGS " -mp") -endif() -if (DEBUG) - string(APPEND FFLAGS " -O0 -g -Ktrap=fp -Mbounds -Kieee") -endif() -if (MODEL STREQUAL datm) - string(APPEND FFLAGS " -Mnovect") -endif() -if (MODEL STREQUAL dlnd) - string(APPEND FFLAGS " -Mnovect") -endif() -if (MODEL STREQUAL drof) - string(APPEND FFLAGS " -Mnovect") -endif() -if (MODEL STREQUAL dwav) - string(APPEND FFLAGS " -Mnovect") -endif() -if (MODEL STREQUAL dice) - string(APPEND FFLAGS " -Mnovect") -endif() -if (MODEL STREQUAL docn) - string(APPEND FFLAGS " -Mnovect") -endif() -set(FFLAGS_NOOPT "-O0") -set(FIXEDFLAGS "-Mfixed") -set(FREEFLAGS "-Mfree") -set(HAS_F2008_CONTIGUOUS "FALSE") -set(LDFLAGS "-time -Wl,--allow-multiple-definition") -if (compile_threaded) - string(APPEND LDFLAGS " -mp") -endif() -set(MPICC "mpicc") -set(MPICXX "mpicxx") -set(MPIFC "mpif90") -set(SCC "pgcc") -set(SCXX "pgc++") -set(SFC "pgf95") diff --git a/CIME/data/config/cesm/machines/cmake_macros/pgi-gpu_casper.cmake b/CIME/data/config/cesm/machines/cmake_macros/pgi-gpu_casper.cmake deleted file mode 100644 index 600521a1bb5..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/pgi-gpu_casper.cmake +++ /dev/null @@ -1,15 +0,0 @@ -if (NOT DEBUG) - string(APPEND CFLAGS " -O -tp=skylake -Mnofma") -endif() -if (NOT DEBUG) - string(APPEND FFLAGS " -O -tp=skylake -Mnofma") -endif() -string(APPEND FFLAGS " -I$(EXEROOT)/ocn/obj/FMS") -if (NOT DEBUG) - string(APPEND LDFLAGS " -O -tp=skylake -Mnofma -acc -ta=tesla:cc70,lineinfo,nofma -Minfo=accel") -endif() -string(APPEND SLIBS " -llapack -lblas") -if (MPILIB STREQUAL 
mpi-serial) - string(APPEND SLIBS " -ldl") -endif() -string(APPEND SLIBS " -L${NETCDF_PATH}/lib -lnetcdf -lnetcdff") diff --git a/CIME/data/config/cesm/machines/cmake_macros/pgi.cmake b/CIME/data/config/cesm/machines/cmake_macros/pgi.cmake deleted file mode 100644 index 6c750d2ff8e..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/pgi.cmake +++ /dev/null @@ -1,46 +0,0 @@ -string(APPEND CFLAGS " -gopt -time") -if (compile_threaded) - string(APPEND CFLAGS " -mp") -endif() -string(APPEND CPPDEFS " -DFORTRANUNDERSCORE -DNO_SHR_VMATH -DNO_R16 -DCPRPGI") -set(CXX_LINKER "CXX") -set(FC_AUTO_R8 "-r8") -string(APPEND FFLAGS " -i4 -gopt -time -Mextend -byteswapio -Mflushz -Kieee") -if (compile_threaded) - string(APPEND FFLAGS " -mp") -endif() -if (DEBUG) - string(APPEND FFLAGS " -O0 -g -Ktrap=fp -Mbounds -Kieee") -endif() -if (MODEL STREQUAL datm) - string(APPEND FFLAGS " -Mnovect") -endif() -if (MODEL STREQUAL dlnd) - string(APPEND FFLAGS " -Mnovect") -endif() -if (MODEL STREQUAL drof) - string(APPEND FFLAGS " -Mnovect") -endif() -if (MODEL STREQUAL dwav) - string(APPEND FFLAGS " -Mnovect") -endif() -if (MODEL STREQUAL dice) - string(APPEND FFLAGS " -Mnovect") -endif() -if (MODEL STREQUAL docn) - string(APPEND FFLAGS " -Mnovect") -endif() -set(FFLAGS_NOOPT "-O0") -set(FIXEDFLAGS "-Mfixed") -set(FREEFLAGS "-Mfree") -set(HAS_F2008_CONTIGUOUS "FALSE") -set(LDFLAGS "-time -Wl,--allow-multiple-definition") -if (compile_threaded) - string(APPEND LDFLAGS " -mp") -endif() -set(MPICC "mpicc") -set(MPICXX "mpicxx") -set(MPIFC "mpif90") -set(SCC "pgcc") -set(SCXX "pgc++") -set(SFC "pgf95") diff --git a/CIME/data/config/cesm/machines/cmake_macros/pgi_bluewaters.cmake b/CIME/data/config/cesm/machines/cmake_macros/pgi_bluewaters.cmake deleted file mode 100644 index ff5f160388a..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/pgi_bluewaters.cmake +++ /dev/null @@ -1,10 +0,0 @@ -if (NOT DEBUG) - string(APPEND CFLAGS " -O2") -endif() -string(APPEND CFLAGS " -nofma") -set(CXX_LIBS "-lmpichf90_pgi $ENV{PGI_PATH}/linux86-64/$ENV{PGI_VERSION}/lib/f90main.o") -if (NOT DEBUG) - string(APPEND FFLAGS " -O2") -endif() -string(APPEND FFLAGS " -nofma") -set(SUPPORTS_CXX "TRUE") diff --git a/CIME/data/config/cesm/machines/cmake_macros/pgi_casper.cmake b/CIME/data/config/cesm/machines/cmake_macros/pgi_casper.cmake deleted file mode 100644 index f3eb207d1e7..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/pgi_casper.cmake +++ /dev/null @@ -1,15 +0,0 @@ -if (NOT DEBUG) - string(APPEND CFLAGS " -O -tp=skylake -Mnofma") -endif() -if (NOT DEBUG) - string(APPEND FFLAGS " -O -tp=skylake -Mnofma") -endif() -string(APPEND FFLAGS " -I$(EXEROOT)/ocn/obj/FMS") -if (NOT DEBUG) - string(APPEND LDFLAGS " -O -tp=skylake -Mnofma") -endif() -string(APPEND SLIBS " -llapack -lblas") -if (MPILIB STREQUAL mpi-serial) - string(APPEND SLIBS " -ldl") -endif() -string(APPEND SLIBS " -L${NETCDF_PATH}/lib -lnetcdf -lnetcdff") diff --git a/CIME/data/config/cesm/machines/cmake_macros/pgi_cheyenne.cmake b/CIME/data/config/cesm/machines/cmake_macros/pgi_cheyenne.cmake deleted file mode 100644 index e3769dd9d76..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/pgi_cheyenne.cmake +++ /dev/null @@ -1,4 +0,0 @@ -string(APPEND SLIBS " -llapack -lblas") -if (MPILIB STREQUAL mpi-serial) - string(APPEND SLIBS " -ldl") -endif() diff --git a/CIME/data/config/cesm/machines/cmake_macros/pgi_constance.cmake b/CIME/data/config/cesm/machines/cmake_macros/pgi_constance.cmake deleted file mode 100644 index 
1a1dcf69b3c..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/pgi_constance.cmake +++ /dev/null @@ -1,11 +0,0 @@ -if (NOT DEBUG) - string(APPEND CFLAGS " -O2") -endif() -set(CONFIG_ARGS "--host=cray") -string(APPEND CPPDEFS " -DLINUX") -if (NOT DEBUG) - string(APPEND FFLAGS " -O2") -endif() -set(NETCDF_PATH "$ENV{NETCDF_HOME}") -set(PIO_FILESYSTEM_HINTS "lustre") -set(SLIBS "-L${NETCDF_PATH}/lib -lnetcdf -lnetcdff -lpmi") diff --git a/CIME/data/config/cesm/machines/cmake_macros/pgi_daint.cmake b/CIME/data/config/cesm/machines/cmake_macros/pgi_daint.cmake deleted file mode 100644 index 9c9661883e3..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/pgi_daint.cmake +++ /dev/null @@ -1,3 +0,0 @@ -string(APPEND FFLAGS " -I/project/s824/edavin/OASIS3-MCT_2.0/build.pgi/build/lib/mct -I/project/s824/edavin/OASIS3-MCT_2.0/build.pgi/build/lib/psmile.MPI1") -string(APPEND SLIBS " -llapack -lblas") -string(APPEND SLIBS " -L/project/s824/edavin/OASIS3-MCT_2.0/build.pgi/lib -lpsmile.MPI1 -lscrip -lmct_oasis -lmpeu_oasis") diff --git a/CIME/data/config/cesm/machines/cmake_macros/pgi_eastwind.cmake b/CIME/data/config/cesm/machines/cmake_macros/pgi_eastwind.cmake deleted file mode 100644 index 6384e42c218..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/pgi_eastwind.cmake +++ /dev/null @@ -1,20 +0,0 @@ -if (NOT DEBUG) - string(APPEND CFLAGS " -O2") -endif() -if (NOT compile_threaded) - string(APPEND CFLAGS " -nomp") -endif() -set(CONFIG_ARGS "--host=cray") -string(APPEND CPPDEFS " -DLINUX") -if (NOT DEBUG) - string(APPEND FFLAGS " -O2") -endif() -if (NOT compile_threaded) - string(APPEND FFLAGS " -nomp") -endif() -if (NOT compile_threaded) - string(APPEND LDFLAGS " -nomp") -endif() -set(NETCDF_PATH "$ENV{NETCDF_HOME}") -set(PIO_FILESYSTEM_HINTS "lustre") -set(SLIBS "-L${NETCDF_PATH}/lib -lnetcdf -lnetcdff -lpmi") diff --git a/CIME/data/config/cesm/machines/cmake_macros/pgi_euler2.cmake b/CIME/data/config/cesm/machines/cmake_macros/pgi_euler2.cmake deleted file mode 100644 index be865e50f99..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/pgi_euler2.cmake +++ /dev/null @@ -1,6 +0,0 @@ -if (NOT DEBUG) - string(APPEND CFLAGS " -O2") -endif() -if (NOT DEBUG) - string(APPEND FFLAGS " -O2") -endif() diff --git a/CIME/data/config/cesm/machines/cmake_macros/pgi_euler3.cmake b/CIME/data/config/cesm/machines/cmake_macros/pgi_euler3.cmake deleted file mode 100644 index be865e50f99..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/pgi_euler3.cmake +++ /dev/null @@ -1,6 +0,0 @@ -if (NOT DEBUG) - string(APPEND CFLAGS " -O2") -endif() -if (NOT DEBUG) - string(APPEND FFLAGS " -O2") -endif() diff --git a/CIME/data/config/cesm/machines/cmake_macros/pgi_euler4.cmake b/CIME/data/config/cesm/machines/cmake_macros/pgi_euler4.cmake deleted file mode 100644 index be865e50f99..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/pgi_euler4.cmake +++ /dev/null @@ -1,6 +0,0 @@ -if (NOT DEBUG) - string(APPEND CFLAGS " -O2") -endif() -if (NOT DEBUG) - string(APPEND FFLAGS " -O2") -endif() diff --git a/CIME/data/config/cesm/machines/cmake_macros/pgi_hobart.cmake b/CIME/data/config/cesm/machines/cmake_macros/pgi_hobart.cmake deleted file mode 100644 index d3daece90a6..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/pgi_hobart.cmake +++ /dev/null @@ -1,10 +0,0 @@ -if (NOT DEBUG) - string(APPEND CFLAGS " -O0") -endif() -if (NOT DEBUG) - string(APPEND FFLAGS " -O0") -endif() -string(APPEND LDFLAGS " -lgomp") -string(APPEND LDFLAGS " 
-Wl,-R${NETCDF_PATH}/lib") -string(APPEND LDFLAGS " -Wl,-R$ENV{COMPILER_PATH}/lib") -string(APPEND LDFLAGS " -Wl,-R$ENV{COMPILER_PATH}/libso") diff --git a/CIME/data/config/cesm/machines/cmake_macros/pgi_izumi.cmake b/CIME/data/config/cesm/machines/cmake_macros/pgi_izumi.cmake deleted file mode 100644 index 3cb6b34e868..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/pgi_izumi.cmake +++ /dev/null @@ -1,2 +0,0 @@ -set(CXX_LINKER "FORTRAN") -set(SUPPORTS_CXX "TRUE") diff --git a/CIME/data/config/cesm/machines/cmake_macros/pgi_olympus.cmake b/CIME/data/config/cesm/machines/cmake_macros/pgi_olympus.cmake deleted file mode 100644 index 1ee08168123..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/pgi_olympus.cmake +++ /dev/null @@ -1,11 +0,0 @@ -if (NOT DEBUG) - string(APPEND CFLAGS " -O2") -endif() -set(CONFIG_ARGS "--host=cray") -string(APPEND CPPDEFS " -DLINUX") -if (NOT DEBUG) - string(APPEND FFLAGS " -O2") -endif() -set(NETCDF_PATH "$ENV{NETCDF_LIB}/..") -set(PIO_FILESYSTEM_HINTS "lustre") -set(SLIBS "-L${NETCDF_PATH}/lib -lnetcdf -lnetcdff -lpmi") diff --git a/CIME/data/config/cesm/machines/cmake_macros/pleiades-bro.cmake b/CIME/data/config/cesm/machines/cmake_macros/pleiades-bro.cmake deleted file mode 100644 index 518c1d2e1e1..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/pleiades-bro.cmake +++ /dev/null @@ -1,10 +0,0 @@ -set(CONFIG_ARGS "--host=cray") -string(APPEND CFLAGS " -axCORE-AVX2 -xSSE4.2 -no-fma") -string(APPEND FFLAGS " -axCORE-AVX2 -xSSE4.2 -no-fma") -set(MPICC "icc") -set(MPI_LIB_NAME "mpi") -set(MPI_PATH "$ENV{MPI_ROOT}") -set(NETCDF_PATH "$ENV{NETCDF}") -string(APPEND SLIBS " -L$ENV{NETCDF}/lib -lnetcdff -lnetcdf") -set(PNETCDF_PATH "/home6/fvitt/pnetcdf-1.12.2") -set(ESMF_LIBDIR "/home6/fvitt/esmf-8_1_1/lib/libO/Linux.intel.64.mpt.default") diff --git a/CIME/data/config/cesm/machines/cmake_macros/pleiades-has.cmake b/CIME/data/config/cesm/machines/cmake_macros/pleiades-has.cmake deleted file mode 100644 index 518c1d2e1e1..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/pleiades-has.cmake +++ /dev/null @@ -1,10 +0,0 @@ -set(CONFIG_ARGS "--host=cray") -string(APPEND CFLAGS " -axCORE-AVX2 -xSSE4.2 -no-fma") -string(APPEND FFLAGS " -axCORE-AVX2 -xSSE4.2 -no-fma") -set(MPICC "icc") -set(MPI_LIB_NAME "mpi") -set(MPI_PATH "$ENV{MPI_ROOT}") -set(NETCDF_PATH "$ENV{NETCDF}") -string(APPEND SLIBS " -L$ENV{NETCDF}/lib -lnetcdff -lnetcdf") -set(PNETCDF_PATH "/home6/fvitt/pnetcdf-1.12.2") -set(ESMF_LIBDIR "/home6/fvitt/esmf-8_1_1/lib/libO/Linux.intel.64.mpt.default") diff --git a/CIME/data/config/cesm/machines/cmake_macros/pleiades-ivy.cmake b/CIME/data/config/cesm/machines/cmake_macros/pleiades-ivy.cmake deleted file mode 100644 index 518c1d2e1e1..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/pleiades-ivy.cmake +++ /dev/null @@ -1,10 +0,0 @@ -set(CONFIG_ARGS "--host=cray") -string(APPEND CFLAGS " -axCORE-AVX2 -xSSE4.2 -no-fma") -string(APPEND FFLAGS " -axCORE-AVX2 -xSSE4.2 -no-fma") -set(MPICC "icc") -set(MPI_LIB_NAME "mpi") -set(MPI_PATH "$ENV{MPI_ROOT}") -set(NETCDF_PATH "$ENV{NETCDF}") -string(APPEND SLIBS " -L$ENV{NETCDF}/lib -lnetcdff -lnetcdf") -set(PNETCDF_PATH "/home6/fvitt/pnetcdf-1.12.2") -set(ESMF_LIBDIR "/home6/fvitt/esmf-8_1_1/lib/libO/Linux.intel.64.mpt.default") diff --git a/CIME/data/config/cesm/machines/cmake_macros/pleiades-san.cmake b/CIME/data/config/cesm/machines/cmake_macros/pleiades-san.cmake deleted file mode 100644 index 518c1d2e1e1..00000000000 --- 
a/CIME/data/config/cesm/machines/cmake_macros/pleiades-san.cmake +++ /dev/null @@ -1,10 +0,0 @@ -set(CONFIG_ARGS "--host=cray") -string(APPEND CFLAGS " -axCORE-AVX2 -xSSE4.2 -no-fma") -string(APPEND FFLAGS " -axCORE-AVX2 -xSSE4.2 -no-fma") -set(MPICC "icc") -set(MPI_LIB_NAME "mpi") -set(MPI_PATH "$ENV{MPI_ROOT}") -set(NETCDF_PATH "$ENV{NETCDF}") -string(APPEND SLIBS " -L$ENV{NETCDF}/lib -lnetcdff -lnetcdf") -set(PNETCDF_PATH "/home6/fvitt/pnetcdf-1.12.2") -set(ESMF_LIBDIR "/home6/fvitt/esmf-8_1_1/lib/libO/Linux.intel.64.mpt.default") diff --git a/CIME/data/config/cesm/machines/cmake_macros/stampede2-knl.cmake b/CIME/data/config/cesm/machines/cmake_macros/stampede2-knl.cmake deleted file mode 100644 index ed1c0d299b6..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/stampede2-knl.cmake +++ /dev/null @@ -1,4 +0,0 @@ -string(APPEND CPPDEFS " -DHAVE_NANOTIME") -set(NETCDF_PATH "$ENV{TACC_NETCDF_DIR}") -set(PIO_FILESYSTEM_HINTS "lustre") -set(PNETCDF_PATH "$ENV{TACC_PNETCDF_DIR}") diff --git a/CIME/data/config/cesm/machines/cmake_macros/stampede2-skx.cmake b/CIME/data/config/cesm/machines/cmake_macros/stampede2-skx.cmake deleted file mode 100644 index ed1c0d299b6..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/stampede2-skx.cmake +++ /dev/null @@ -1,4 +0,0 @@ -string(APPEND CPPDEFS " -DHAVE_NANOTIME") -set(NETCDF_PATH "$ENV{TACC_NETCDF_DIR}") -set(PIO_FILESYSTEM_HINTS "lustre") -set(PNETCDF_PATH "$ENV{TACC_PNETCDF_DIR}") diff --git a/CIME/data/config/cesm/machines/cmake_macros/theta.cmake b/CIME/data/config/cesm/machines/cmake_macros/theta.cmake deleted file mode 100644 index 81db81ed2c0..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/theta.cmake +++ /dev/null @@ -1,4 +0,0 @@ -string(APPEND CFLAGS " -xMIC-AVX512") -string(APPEND FFLAGS " -xMIC-AVX512") -set(CONFIG_ARGS "--host=cray") -string(APPEND SLIBS " -L$(NETCDF_DIR)/lib -lnetcdff -L$(NETCDF_DIR)/lib -lnetcdf -Wl,-rpath -Wl,$(NETCDF_DIR)/lib") diff --git a/CIME/data/config/cesm/machines/cmake_macros/universal.cmake b/CIME/data/config/cesm/machines/cmake_macros/universal.cmake deleted file mode 100644 index 6e535957ed6..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/universal.cmake +++ /dev/null @@ -1,18 +0,0 @@ -string(APPEND CPPDEFS " -DCESMCOUPLED") -if (MODEL STREQUAL pop) - string(APPEND CPPDEFS " -D_USE_FLOW_CONTROL") -endif() -if (MODEL STREQUAL ufsatm) - string(APPEND CPPDEFS " -DSPMD") -endif() -if (MODEL STREQUAL ufsatm) - string(APPEND INCLDIR " -I$(EXEROOT)/atm/obj/FMS") -endif() -if (MODEL STREQUAL ufsatm) - string(APPEND FFLAGS " $(FC_AUTO_R8)") -endif() -if (MODEL STREQUAL mom) - string(APPEND FFLAGS " $(FC_AUTO_R8) ") - string(APPEND CPPDEFS " -Duse_LARGEFILE") -endif() -set(SUPPORTS_CXX "FALSE") diff --git a/CIME/data/config/cesm/machines/cmake_macros/userdefined.cmake b/CIME/data/config/cesm/machines/cmake_macros/userdefined.cmake deleted file mode 100644 index 798dcc97145..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/userdefined.cmake +++ /dev/null @@ -1,9 +0,0 @@ -set(CONFIG_ARGS "") -string(APPEND CPPDEFS " ") -set(ESMF_LIBDIR "") -set(MPI_LIB_NAME "") -set(MPI_PATH "") -set(NETCDF_PATH "USERDEFINED_MUST_EDIT_THIS") -set(PNETCDF_PATH "") -execute_process(COMMAND ${NETCDF_PATH}/bin/nc-config --flibs OUTPUT_VARIABLE SHELL_CMD_OUTPUT_BUILD_INTERNAL_IGNORE0 OUTPUT_STRIP_TRAILING_WHITESPACE) -string(APPEND SLIBS " # USERDEFINED ${SHELL_CMD_OUTPUT_BUILD_INTERNAL_IGNORE0}") diff --git a/CIME/data/config/cesm/machines/cmake_macros/zeus.cmake 
b/CIME/data/config/cesm/machines/cmake_macros/zeus.cmake deleted file mode 100644 index ca625e36e12..00000000000 --- a/CIME/data/config/cesm/machines/cmake_macros/zeus.cmake +++ /dev/null @@ -1,9 +0,0 @@ -set(PIO_FILESYSTEM_HINTS "gpfs") -set(NETCDF_PATH "$ENV{NETCDF}") -set(PNETCDF_PATH "$ENV{PNETCDF}") -string(APPEND CPPDEFS " -DNO_R16 -DHAVE_NANOTIME") -if (MODEL STREQUAL nemo) - string(APPEND FFLAGS " $(FC_AUTO_R8) -O3 -assume norealloc_lhs") -endif() -execute_process(COMMAND ${NETCDF_PATH}/bin/nc-config --flibs OUTPUT_VARIABLE SHELL_CMD_OUTPUT_BUILD_INTERNAL_IGNORE0 OUTPUT_STRIP_TRAILING_WHITESPACE) -string(APPEND SLIBS " ${SHELL_CMD_OUTPUT_BUILD_INTERNAL_IGNORE0}") diff --git a/CIME/data/config/cesm/machines/config_batch.xml b/CIME/data/config/cesm/machines/config_batch.xml deleted file mode 100644 index ebe7c60fb4b..00000000000 --- a/CIME/data/config/cesm/machines/config_batch.xml +++ /dev/null @@ -1,779 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - qstat - qsub - qdel - -v - - (\d+) - --dependencies - %H:%M:%s - -M - - - - - - - - - - - - - - - qstat - qsub - qdel - --env - #COBALT - (\d+) - --dependencies - -M - - - - - - - - - - - - - bjobs - bsub - bkill - < - - #BSUB - <(\d+)> - -w 'done(jobid)' - -w 'ended(jobid)' - && - %H:%M - -u - - - - -J {{ job_id }} - -n {{ total_tasks }} - -W $JOB_WALLCLOCK_TIME - -o {{ job_id }}.%J - -e {{ job_id }}.%J - - - - - qstat - qsub - qdel - -v - #PBS - ^(\S+)$ - -W depend=afterok:jobid - -W depend=afterany:jobid - : - %H:%M:%S - -M - -m - , bea, b, e, a - - - - - - - -N {{ job_id }} - -r {{ rerunnable }} - - -j oe - -V - - - - - squeue - scancel - #SBATCH - (\d+)$ - --dependency=afterok:jobid - --dependency=afterany:jobid - , - %H:%M:%S - --mail-user - --mail-type - none, all, begin, end, fail - - --job-name={{ job_id }} - --nodes={{ num_nodes }} - --ntasks-per-node={{ tasks_per_node }} - --output={{ job_id }} - --exclusive - - - - - - -l nodes={{ num_nodes }} - -q iccp - - - iccp - - - - - - - - - - - - -R "span[ptile={{ tasks_per_node }}]" - -N - -a {{ poe }} - - - poe_short - poe_medium - poe_long - - - - - - (\d+.bw)$ - - -l nodes={{ num_nodes }}:ppn={{ tasks_per_node }}:xe - -S {{ shell }} - - - normal - debug - - - - - - qsub - - - - - -S /glade/u/apps/dav/opt/nvidia-mps/mps_bash - -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}:mem=700GB:ngpus={{ ngpus_per_node }} - -l gpu_type=v100 - - - - - - -S /glade/u/apps/dav/opt/nvidia-mps/mps_bash - -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}:mem=700GB:ngpus={{ ngpus_per_node }} - -l gpu_type=v100 - - - - -S {{ shell }} - -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}:mem=700GB:ngpus={{ ngpus_per_node }} - - - - -S {{ shell }} - -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}:mem=700GB:ngpus={{ ngpus_per_node }} - - - - -S {{ shell }} - -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }}:mem=700GB:ngpus={{ ngpus_per_node }} - - - - casper - - - casper - - - - - - -S {{ shell }} - -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }} - - - - -S {{ shell }} - -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }} - - - - 
diff --git a/CIME/data/config/cesm/machines/config_compilers.xml b/CIME/data/config/cesm/machines/config_compilers.xml
deleted file mode 100644
index 72c68b02ebc..00000000000
--- a/CIME/data/config/cesm/machines/config_compilers.xml
+++ /dev/null
@@ -1,1705 +0,0 @@
[1705 deleted lines; the XML element markup of this hunk was lost in extraction. The surviving text shows per-compiler defaults — CPP defines such as -DCPRGNU, -DCPRCRAY, -DCPRIBM, -DCPRINTEL, -DCPRNAG, and -DCPRPGI, debug and optimization flag sets, and MPI/serial compiler names — for clang, gnu, cray, ibm, intel, nag, pgi, and nvhpc, followed by machine-specific overrides (cheyenne, cori, daint, izumi, mira, the pleiades nodes, stampede2, theta, zeus, and others) and the USERDEFINED template.]
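Similarly, the deleted config_compilers.xml entries paired flag variables with base and conditional values. A minimal sketch of the gnu block, reconstructed from the surviving flag text (the <base>/<append> child elements and the DEBUG/compile_threaded attributes follow the CIME v2 compiler schema as best I can reconstruct it, so treat them as assumptions):

<compiler COMPILER="gnu">
  <CPPDEFS>
    <!-- appended on every machine that uses this compiler -->
    <append>-DFORTRANUNDERSCORE -DNO_R16 -DCPRGNU</append>
  </CPPDEFS>
  <FFLAGS>
    <base>-fconvert=big-endian -ffree-line-length-none -ffixed-line-length-none</base>
    <append compile_threaded="TRUE">-fopenmp</append>
    <append DEBUG="TRUE">-g -Wall -Og -fbacktrace -ffpe-trap=zero,overflow -fcheck=bounds</append>
    <append DEBUG="FALSE">-O</append>
  </FFLAGS>
  <FC_AUTO_R8>
    <base>-fdefault-real-8</base>
  </FC_AUTO_R8>
  <MPICC>mpicc</MPICC>
  <MPIFC>mpif90</MPIFC>
  <SCC>gcc</SCC>
  <SFC>gfortran</SFC>
  <SUPPORTS_CXX>FALSE</SUPPORTS_CXX>
</compiler>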
diff --git a/CIME/data/config/cesm/machines/config_machines.xml b/CIME/data/config/cesm/machines/config_machines.xml
deleted file mode 100644
index 3d3a9fa2de7..00000000000
--- a/CIME/data/config/cesm/machines/config_machines.xml
+++ /dev/null
@@ -1,3597 +0,0 @@
[3597 deleted lines; the XML element markup of this hunk was lost in extraction. The surviving text shows per-machine entries — description, hostname regex, OS, supported compilers and MPI libraries, input/output/baseline roots, batch system, task counts, mpirun commands, module-system initialization, and environment variables — for machines including athena (CMCC), bluewaters, casper, cheyenne, cori-haswell, cori-knl, daint, edison, the euler clusters, frontera, gaea, hobart, homebrew, izumi, laramie, the lawrencium clusters, lonestar5, melvin, mira, perlmutter, the pleiades nodes, and several university and lab clusters, along with usage notes for porting the homebrew/centos7 templates to new machines.]
module - module - - - sems-env - sems-git - sems-python/2.7.9 - sems-gcc/5.1.0 - sems-openmpi/1.8.7 - sems-cmake/2.8.12 - sems-netcdf/4.3.2/parallel - - - - $ENV{SEMS_NETCDF_ROOT} - $ENV{SEMS_NETCDF_ROOT} - - - - - SNL clust - (skybridge|chama)-login - LINUX - wwwproxy.sandia.gov:80 - intel - openmpi - /projects/ccsm/timings - /gscratch/$USER/acme_scratch/$MACH - /projects/ccsm/inputdata - /projects/ccsm/inputdata/atm/datm7 - $CIME_OUTPUT_ROOT/archive/$CASE - /projects/ccsm/ccsm_baselines - /projects/ccsm/cprnc/build/cprnc_wrap - 8 - acme_integration - slurm - jgfouca at sandia dot gov - 16 - 16 - TRUE - - - mpirun - - -np {{ total_tasks }} - -npernode $MAX_MPITASKS_PER_NODE - - - - /usr/share/Modules/init/python.py - /usr/share/Modules/init/perl.pm - /usr/share/Modules/init/sh - /usr/share/Modules/init/csh - /usr/bin/modulecmd python - /usr/bin/modulecmd perl - module - module - - - sems-env - sems-git - sems-python/2.7.9 - gnu/4.9.2 - intel/intel-15.0.3.187 - libraries/intel-mkl-15.0.2.164 - libraries/intel-mkl-15.0.2.164 - - - openmpi-intel/1.8 - sems-hdf5/1.8.12/parallel - sems-netcdf/4.3.2/parallel - sems-hdf5/1.8.12/base - sems-netcdf/4.3.2/base - - - - $ENV{SEMS_NETCDF_ROOT} - 64M - - - $ENV{SEMS_NETCDF_ROOT} - - - - - Intel Xeon Platinum 8160 ("Skylake"),48 cores on two sockets (24 cores/socket) , batch system is SLURM - .*stampede2 - LINUX - intel - impi,mvapich2 - $ENV{SCRATCH} - /work/02503/edwardsj/CESM/inputdata - /work/02503/edwardsj/CESM/inputdata/lmwg - $ENV{WORK}/archive/$CASE - /work/02503/edwardsj/CESM/cesm_baselines - /work/02503/edwardsj/CESM/cime/tools/cprnc/cprnc - 4 - slurm - cseg - 96 - 48 - - ibrun - - -n {{ total_tasks }} - - - - ibrun - - -n {{ total_tasks }} - - - - /opt/apps/lmod/lmod/init/perl - /opt/apps/lmod/lmod/init/env_modules_python.py - /opt/apps/lmod/lmod/init/sh - /opt/apps/lmod/lmod/init/csh - /opt/apps/lmod/lmod/libexec/lmod perl - /opt/apps/lmod/lmod/libexec/lmod python - module - module - - - TACC - python/2.7.13 - intel/18.0.2 - cmake/3.16.1 - - - mvapich2/2.3.1 - pnetcdf/1.11 - parallel-netcdf/4.6.2 - - - mvapich2 - impi/18.0.2 - pnetcdf/1.11 - parallel-netcdf/4.6.2 - - - netcdf/4.3.3.1 - - - - 256M - - - /work/01118/tg803972/stampede2/ESMF-INSTALL/8.0.0bs38/lib/libO/Linux.intel.64.intelmpi.default/esmf.mk - - - ON - SUMMARY - /work/06242/tg855414/stampede2/FV3GFS/benchmark-inputs/2012010100/gfs/fcst - /work/06242/tg855414/stampede2/FV3GFS/fix_am - /work/06242/tg855414/stampede2/FV3GFS/addon - - - - - - Intel Xeon Phi 7250 ("Knights Landing") , batch system is SLURM - LINUX - intel - impi,mvapich2 - $ENV{SCRATCH} - /work/02503/edwardsj/CESM/inputdata - /work/02503/edwardsj/CESM/inputdata/lmwg - $ENV{WORK}/archive/$CASE - /work/02503/edwardsj/CESM/cesm_baselines - /work/02503/edwardsj/CESM/cime/tools/cprnc/cprnc - 4 - slurm - cseg - 256 - 64 - - ibrun - - - ibrun - - - /opt/apps/lmod/lmod/init/perl - /opt/apps/lmod/lmod/init/env_modules_python.py - /opt/apps/lmod/lmod/init/sh - /opt/apps/lmod/lmod/init/csh - /opt/apps/lmod/lmod/libexec/lmod perl - /opt/apps/lmod/lmod/libexec/lmod python - module - module - - - TACC - python/2.7.13 - intel/18.0.2 - cmake/3.16.1 - - - mvapich2/2.3.1 - pnetcdf/1.11 - parallel-netcdf/4.6.2 - - - mvapich2 - impi/18.0.2 - pnetcdf/1.11 - parallel-netcdf/4.6.2 - - - netcdf/4.3.3.1 - - - - 256M - - - - - Cray test platform - swan.* - CNL - cray, intel - mpt - - /lus/scratch/$USER - /lus/scratch/$USER/inputdata - /lus/scratch/$USER/inputdata/atm/datm7 - $CIME_OUTPUT_ROOT/archive/$CASE - /lus/scratch/$USER/cesm/baselines - 
/lus/scratch/$USER/cesm/tools/cprnc/cprnc - 8 - pbs - jedwards - 64 - 64 - FALSE - - aprun - - -n {{ total_tasks }} - -N {{ tasks_per_node }} - --cc depth -d $OMP_NUM_THREADS - -e OMP_STACKSIZE=64M - -e OMP_NUM_THREADS=$OMP_NUM_THREADS - - - - /opt/modules/default/init/perl.pm - /opt/modules/default/init/python.py - /opt/modules/default/init/sh - /opt/modules/default/init/csh - /opt/modules/default/bin/modulecmd perl - /opt/modules/default/bin/modulecmd python - module - module - - PrgEnv-cray/6.0.10 - cce cce/12.0.3 - - - perftools-base - craype craype/2.7.10 - - - cray-libsci/20.09.1 - - - cray-mpich/7.7.18 - - - cray-netcdf-hdf5parallel - cray-hdf5-parallel - cray-parallel-netcdf - - - - ON - SUMMARY - /home/users/p62939/esmf/lib/libg/Unicos.cce.64.mpi.default/esmf.mk - - - - - theia - tfe - LINUX - intel - impi - nems - - /scratch4/NCEPDEV/nems/noscrub/$USER/cimecases - /scratch4/NCEPDEV/nems/noscrub/Rocky.Dunlap/cesmdataroot/inputdata - /scratch4/NCEPDEV/nems/noscrub/Rocky.Dunlap/cesmdataroot/inputdata/atm/datm7 - $CIME_OUTPUT_ROOT/archive/$CASE - /scratch4/NCEPDEV/nems/noscrub/Rocky.Dunlap/BASELINES - /scratch4/NCEPDEV/nems/noscrub/Rocky.Dunlap/cesmdataroot/tools/cprnc - make - 8 - slurm - cseg - 24 - 24 - TRUE - - srun - - -n $TOTALPES - - - - - - - /apps/lmod/lmod/init/sh - /apps/lmod/lmod/init/csh - module - module - /apps/lmod/lmod/libexec/lmod python - - - intel/15.1.133 - impi/5.1.1.109 - netcdf/4.3.0 - pnetcdf - /scratch4/NCEPDEV/nems/noscrub/emc.nemspara/soft/modulefiles - yaml-cpp - esmf/8.0.0bs29g - - - - ON - SUMMARY - /scratch4/NCEPDEV/nems/noscrub/Rocky.Dunlap/INPUTDATA/benchmark-inputs/2012010100/gfs/fcst - /scratch4/NCEPDEV/nems/noscrub/Rocky.Dunlap/INPUTDATA/fix_am - /scratch4/NCEPDEV/nems/noscrub/Rocky.Dunlap/INPUTDATA/addon - - - - - ALCF Cray XC* KNL, os is CNL, 64 pes/node, batch system is cobalt - theta.* - CNL - intel,gnu,cray - mpt - CESM_Highres_Testing - /projects/CESM_Highres_Testing/cesm/scratch/$USER - /projects/CESM_Highres_Testing/cesm/inputdata - /projects/CESM_Highres_Testing/cesm/inputdata/atm/datm7 - $CIME_OUTPUT_ROOT/archive/$CASE - /projects/CESM_Highres_Testing/cesm/baselines - /projects/CESM_Highres_Testing/cesm/tools/cprnc/cprnc - 8 - cobalt_theta - cseg - 64 - 64 - TRUE - - aprun - - -n {{ total_tasks }} - -N {{ tasks_per_node }} - --cc depth -d $OMP_NUM_THREADS - -e OMP_STACKSIZE=64M - -e OMP_NUM_THREADS=$OMP_NUM_THREADS - - - - /opt/modules/default/init/perl.pm - /opt/modules/default/init/python.py - /opt/modules/default/init/sh - /opt/modules/default/init/csh - /opt/modules/default/bin/modulecmd perl - /opt/modules/default/bin/modulecmd python - module - module - - craype-mic-knl - PrgEnv-intel - PrgEnv-cray - PrgEnv-gnu - intel - cce - cray-parallel-netcdf - cray-hdf5-parallel - pmi - cray-libsci - cray-mpich - cray-netcdf - cray-hdf5 - cray-netcdf-hdf5parallel - craype - papi - - - - PrgEnv-intel/6.0.4 - intel intel/18.0.0.128 - cray-libsci - - - - PrgEnv-cray/6.0.4 - cce cce/8.7.0 - - - PrgEnv-gnu/6.0.4 - gcc gcc/7.3.0 - - - papi/5.6.0.1 - craype craype/2.5.14 - - - cray-libsci/18.04.1 - - - cray-mpich/7.7.0 - - - cray-netcdf-hdf5parallel/4.4.1.1.6 - cray-hdf5-parallel/1.10.1.1 - cray-parallel-netcdf/1.8.1.3 - - - - - - NCAR ARM platform, os is Linux, 64/128 pes/node, batch system is SLURM - .*.thunder.ucar.edu - LINUX - - armgcc,gnu,arm - openmpi - /glade/scratch/$USER - $ENV{CESMDATAROOT}/inputdata - $DIN_LOC_ROOT/CTSM_datm_forcing_data - $CIME_OUTPUT_ROOT/archive/$CASE - $ENV{CESMDATAROOT}/cesm_baselines - 
$ENV{CESMDATAROOT}/tools/cprnc/cprnc - 16 - slurm - cseg - 64 - 128 - - mpiexec - - --tag-output - -np {{ total_tasks }} - - - - /glade/u/apps/th/opt/lmod/8.1.7/lmod/lmod/init/perl - /glade/u/apps/th/opt/lmod/8.1.7/lmod/lmod/init/env_modules_python.py - /glade/u/apps/th/opt/lmod/8.1.7/lmod/lmod/init/csh - /glade/u/apps/th/opt/lmod/8.1.7/lmod/lmod/init/sh - /glade/u/apps/th/opt/lmod/8.1.7/lmod/lmod/libexec/lmod perl - /glade/u/apps/th/opt/lmod/8.1.7/lmod/lmod/libexec/lmod python - module - module - - - ncarenv/1.3 - cmake/3.14.4 - - - arm/19.3 - - - armgcc/8.2.0 - - - gnu/9.1.0 - openblas/0.3.6 - esmf_libs/8.0.0 - - - - ncarcompilers/0.5.0 - - - openmpi/4.0.3 - netcdf-mpi/4.7.1 - pnetcdf/1.12.1 - - - netcdf/4.7.1 - - - esmf-8.0.0-ncdfio-uni-g - - - esmf-8.0.0-ncdfio-uni-O - - - - 256M - $ENV{NETCDF} - - - ON - SUMMARY - - - - - - used for github testing - - - LINUX - - gnu - openmpi - none - - $ENV{HOME}/cesm/scratch - $ENV{HOME}/cesm/inputdata - $ENV{HOME}/cesm/inputdata/lmwg - $ENV{HOME}/cesm/archive/$CASE - $ENV{HOME}/cesm/cesm_baselines - - make - 8 - none - jedwards - 4 - 4 - FALSE - - mpiexec - - -n {{ total_tasks }} - - - - - - - CMCC Lenovo ThinkSystem SD530, os is Linux, 36 pes/node, batch system is LSF - (login[1,2]-ib|n[0-9][0-9][0-9]-ib) - LINUX - intel - impi,mpi-serial - R000 - /work/$ENV{DIVISION}/$ENV{USER}/CESM2 - $ENV{CESMDATAROOT}/inputdata - $DIN_LOC_ROOT/atm/datm7 - $CIME_OUTPUT_ROOT/archive/$CASE - $ENV{CESMDATAROOT}/ccsm_baselines - $ENV{CESMDATAROOT}/cesm2_tools/cprnc/cprnc - /usr/lib64/perl5:/usr/share/perl5 - 8 - lsf - cmcc - 72 - 36 - TRUE - - mpirun - - - /usr/share/Modules/init/perl.pm - /usr/share/Modules/init/python.py - /usr/share/Modules/init/csh - /usr/share/Modules/init/sh - /usr/bin/modulecmd perl - /usr/bin/modulecmd python - module - module - - - - - intel20.1/20.1.217 - intel20.1/szip/2.1.1 - cmake/3.17.3 - curl/7.70.0 - - - intel20.1/hdf5/1.12.0 - intel20.1/netcdf/C_4.7.4-F_4.5.3_CXX_4.3.1 - - - impi20.1/19.7.217 - impi20.1/hdf5/1.12.0 - impi20.1/netcdf/C_4.7.4-F_4.5.3_CXX_4.3.1 - impi20.1/parallel-netcdf/1.12.1 - - - impi20.1/esmf/8.0.1-intelmpi-64-g - - - impi20.1/esmf/8.0.1-intelmpi-64-O - - - intel20.1/esmf/8.0.1-mpiuni-64-g - - - intel20.1/esmf/8.0.1-mpiuni-64-O - - - - /work/csp/cmip01/csm/xios - - - 1 - gpfs - 0 - 60 - skx - skx_avx512 - lsf - 1 - {{ num_nodes }} - - - - - - ${EXEROOT}/cesm.exe - >> cesm.log.$LID 2>&1 - - - diff --git a/CIME/data/config/cesm/machines/config_pio.xml b/CIME/data/config/cesm/machines/config_pio.xml deleted file mode 100644 index d2f3ce7e23b..00000000000 --- a/CIME/data/config/cesm/machines/config_pio.xml +++ /dev/null @@ -1,349 +0,0 @@ - - - - - - - - - - - - - - - - - - 1 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - pnetcdf - netcdf - - - - - - coll - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/CIME/data/config/cesm/machines/config_workflow.xml b/CIME/data/config/cesm/machines/config_workflow.xml deleted file mode 100644 index 88336306b8e..00000000000 --- a/CIME/data/config/cesm/machines/config_workflow.xml +++ /dev/null @@ -1,153 +0,0 @@ - - - - - - - - $BUILD_COMPLETE and not $TEST - - - - $BUILD_COMPLETE and $TEST - - - - - case.run or case.test - $DOUT_S - - 1 - 1 - 0:20:00 - - - - - - - - case.st_archive - 1 - - 200 - 10 - 12:00:00 - - - 72 - 9 - 0:20:00 - - - - - - - - - - 1 - - 3 - 1 - 0:20:00 - - - - - - - - timeseries - 1 - - 1 - 1 - 1:00:00 - - - - - - - timeseriesL - $CASEROOT/postprocess/pp_config -value --get 
STANDARDIZE_TIMESERIES - - - - - timeseries or case.st_archive - $CASEROOT/postprocess/pp_config -value --get GENERATE_AVGS_ATM - - - - - :lnd_avg(args) -timeseries or case.st_archive - $CASEROOT/postprocess/pp_config -value --get GENERATE_AVGS_ATM - - - - - timeseries or case.st_archive - $CASEROOT/postprocess/pp_config -value --get GENERATE_AVGS_ATM - - - - - - timeseries or case.st_archive - $CASEROOT/postprocess/pp_config -value --get GENERATE_AVGS_ATM - - - - - atm_averages - $CASEROOT/postprocess/pp_config -value --get GENERATE_AVGS_ATM - - - - - lnd_averages - $CASEROOT/postprocess/pp_config -value --get GENERATE_AVGS_ATM - - - - ice_averages - $CASEROOT/postprocess/pp_config -value --get GENERATE_AVGS_ATM - - - - ocn_averages - $CASEROOT/postprocess/pp_config -value --get GENERATE_AVGS_ATM - - - - diff --git a/CIME/data/config/cesm/machines/cylc_suite.rc.template b/CIME/data/config/cesm/machines/cylc_suite.rc.template deleted file mode 100644 index a3a731c4a5b..00000000000 --- a/CIME/data/config/cesm/machines/cylc_suite.rc.template +++ /dev/null @@ -1,28 +0,0 @@ -[meta] - title = CESM CYLC workflow for {{ workflow_description }} -[cylc] - [[parameters]] - member = {{ members }} - -[scheduling] - cycling mode = integer - initial cycle point = 1 - final cycle point = {{ cycles }} - - [[dependencies]] - [[[R1]]] - graph = "set_external_workflow => run => st_archive " - [[[R/P1]]] # Integer Cycling - graph = """ - st_archive[-P1] => run - run => st_archive - """ -[runtime] - [[set_external_workflow]] - script = cd {{ case_path_string }}\ - ./xmlchange EXTERNAL_WORKFLOW=TRUE;\ - cp env_batch.xml LockedFiles; - [[st_archive]] - script = cd {{ case_path_string }} \ - ./case.submit --job case.st_archive;\ - ./xmlchange CONTINUE_RUN=TRUE diff --git a/CIME/data/config/cesm/machines/mpi_run_gpu.casper b/CIME/data/config/cesm/machines/mpi_run_gpu.casper deleted file mode 100755 index ade12d50ef4..00000000000 --- a/CIME/data/config/cesm/machines/mpi_run_gpu.casper +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -unset CUDA_VISIBLE_DEVICES -let dev_id=$OMPI_COMM_WORLD_LOCAL_RANK%{{ ngpus_per_node }} -export ACC_DEVICE_NUM=$dev_id -export CUDA_VISIBLE_DEVICES=$dev_id -exec $* diff --git a/CIME/data/config/cesm/machines/nag_mpi_argument.txt b/CIME/data/config/cesm/machines/nag_mpi_argument.txt deleted file mode 100644 index 95e1380aaed..00000000000 --- a/CIME/data/config/cesm/machines/nag_mpi_argument.txt +++ /dev/null @@ -1,4 +0,0 @@ --wmismatch=mpi_send,mpi_recv,mpi_bsend,mpi_ssend,mpi_rsend,mpi_buffer_attach,mpi_buffer_detach,mpi_isend,mpi_ibsend,mpi_issend,mpi_irsend,mpi_irecv,mpi_mrecv,mpi_imrecv,mpi_send_init,mpi_bsend_init,mpi_ssend_init,mpi_rsend_init,mpi_recv_init,mpi_sendrecv,mpi_sendrecv_replace,mpi_get_address,mpi_pack,mpi_unpack,mpi_pack_external,mpi_unpack_external,mpi_bcast,mpi_gather,mpi_gatherv,mpi_scatter,mpi_scatterv,mpi_allgather,mpi_allgatherv,mpi_alltoall,mpi_alltoallv,mpi_alltoallw,mpi_reduce,mpi_allreduce,mpi_reduce_local,mpi_reduce_scatter_block,mpi_reduce_scatter,mpi_scan,mpi_exscan,mpi_ibcast,mpi_igather,mpi_igatherv,mpi_iscatter,mpi_iscatterv,mpi_iallgather,mpi_iallgatherv,mpi_ialltoall,mpi_ialltoallv,mpi_ialltoallw,mpi_ireduce,mpi_iallreduce,mpi_ireduce_scatter_block,mpi_ireduce_scatter,mpi_iscan,mpi_iexscan,mpi_neighbor_allgather,mpi_neighbor_allgatherv,mpi_neighbor_alltoall,mpi_neighbor_alltoallv,mpi_neighbor_alltoallw,mpi_ineighbor_allgather,mpi_ineighbor_allgatherv 
--wmismatch=mpi_ineighbor_alltoall,mpi_ineighbor_alltoallv,mpi_ineighbor_alltoallw,mpi_free_mem,mpi_win_create,mpi_win_attach,mpi_win_detach,mpi_win_allocate,mpi_win_shared_query,mpi_put,mpi_get,mpi_accumulate,mpi_get_accumulate,mpi_fetch_and_op,mpi_compare_and_swap,mpi_rput,mpi_rget,mpi_raccumulate,mpi_rget_accumulate,mpi_file_read_at,mpi_file_read_at_all,mpi_file_write_at,mpi_file_write_at_all,mpi_file_iread_at,mpi_file_iread_at_all,mpi_file_iwrite_at,mpi_file_iwrite_at_all,mpi_file_read,mpi_file_read_all,mpi_file_write,mpi_file_write_all,mpi_file_iread,mpi_file_iread_all,mpi_file_iwrite,mpi_file_iwrite_all,mpi_file_read_shared,mpi_file_write_shared,mpi_file_iread_shared,mpi_file_iwrite_shared,mpi_file_read_ordered,mpi_file_write_ordered,mpi_file_read_at_all_begin,mpi_file_read_at_all_end,mpi_file_write_at_all_begin,mpi_file_write_at_all_end,mpi_file_read_all_begin,mpi_file_read_all_end,mpi_file_write_all_begin,mpi_file_write_all_end,mpi_read_ordered_begin,mpi_read_ordered_end --wmismatch=mpi_write_ordered_begin,mpi_write_ordered_end,mpi_f_sync_reg,mpi_sizeof,mpibcast,mpiscatterv --wmismatch=mpi_startall,mpi_waitall diff --git a/CIME/data/config/cesm/machines/template.case.run b/CIME/data/config/cesm/machines/template.case.run deleted file mode 100755 index b18a2988f29..00000000000 --- a/CIME/data/config/cesm/machines/template.case.run +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/env python3 -# Batch system directives -{{ batchdirectives }} - -""" -template to create a case run script. This should only ever be called -by case.submit when on batch system. This script only exists as a way of providing -batch directives. Use case.submit from the command line to run your case. - -DO NOT RUN THIS SCRIPT MANUALLY -""" - -import os, sys -os.chdir( '{{ caseroot }}') - -_LIBDIR = os.path.join("{{ cimeroot }}", "scripts", "Tools") -sys.path.append(_LIBDIR) - -from standard_script_setup import * - -from CIME.case import Case - -logger = logging.getLogger(__name__) - -import argparse - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser(description=description, - formatter_class=argparse.RawTextHelpFormatter) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("--caseroot", - help="Case directory to build") - - parser.add_argument("--skip-preview-namelist", action="store_true", - help="Skip calling preview-namelist during case.run") - - parser.add_argument("--completion-sets-continue-run", action="store_true", - help="This is used to ensure CONTINUE_RUN is cleared for an initial run, " - "but set for subsequent runs.") - - parser.add_argument("--resubmit", default=False, action="store_true", - help="If RESUBMIT is set, this performs the resubmissions.") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - if args.caseroot is not None: - os.chdir(args.caseroot) - - if args.skip_preview_namelist is None: - args.skip_preview_namelist = False - - return args.caseroot, args.skip_preview_namelist, args.completion_sets_continue_run, args.resubmit - -############################################################################### -def _main_func(description): -############################################################################### - sys.argv.extend([] if "ARGS_FOR_SCRIPT" not in os.environ else os.environ["ARGS_FOR_SCRIPT"].split()) - - caseroot, skip_pnl, 
set_continue_run, resubmit = parse_command_line(sys.argv, description) - with Case(caseroot, read_only=False) as case: - success = case.case_run(skip_pnl=skip_pnl, set_continue_run=set_continue_run, submit_resubmits=resubmit) - - sys.exit(0 if success else 1) - -if __name__ == "__main__": - _main_func(__doc__) diff --git a/CIME/data/config/cesm/machines/template.case.test b/CIME/data/config/cesm/machines/template.case.test deleted file mode 100755 index 6f44dc1baf2..00000000000 --- a/CIME/data/config/cesm/machines/template.case.test +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/env python3 -{{ batchdirectives }} -""" -This is the system test submit script for CIME. This should only ever be called -by case.submit when on batch system. This script only exists as a way of providing -batch directives. Use case.submit from the command line to run your case. - -DO NOT RUN THIS SCRIPT MANUALLY -""" -import os, sys -os.chdir( '{{ caseroot }}') - -_LIBDIR = os.path.join("{{ cimeroot }}", "scripts", "Tools") -sys.path.append(_LIBDIR) - -from standard_script_setup import * - -from CIME.case import Case - -import argparse - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser(description=description, - formatter_class=argparse.RawTextHelpFormatter) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("testname", nargs="?",default=None, - help="Name of the test to run, default is set in TESTCASE in env_test.xml") - - parser.add_argument("--caseroot", - help="Case directory to build") - - parser.add_argument("--reset", action="store_true", - help="Reset the case to its original state as defined by config_tests.xml") - - parser.add_argument("--resubmit", action="store_true", - help="Ignored in tests, but needed for all templates") - - parser.add_argument("--skip-preview-namelist", action="store_true", - help="Skip calling preview-namelist during case.run") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - if args.caseroot is not None: - os.chdir(args.caseroot) - - return args.caseroot, args.testname, args.reset, args.skip_preview_namelist - -############################################################################### -def _main_func(description): -############################################################################### - sys.argv.extend([] if "ARGS_FOR_SCRIPT" not in os.environ else os.environ["ARGS_FOR_SCRIPT"].split()) - - caseroot, testname, reset, skip_pnl = parse_command_line(sys.argv, description) - with Case(caseroot, read_only=False) as case: - success = case.case_test(testname=testname, reset=reset, skip_pnl=skip_pnl) - - sys.exit(0 if success else 1) - -if (__name__ == "__main__"): - _main_func(__doc__) diff --git a/CIME/data/config/cesm/machines/template.st_archive b/CIME/data/config/cesm/machines/template.st_archive deleted file mode 100755 index 8100e1ff5f6..00000000000 --- a/CIME/data/config/cesm/machines/template.st_archive +++ /dev/null @@ -1,97 +0,0 @@ -#!/usr/bin/env python3 -# Batch system directives -{{ batchdirectives }} - -""" -Performs short term archiving for restart files, history and rpointer -files in the $RUNDIR associated with $CASEROOT. Normally this script -is called by case.submit on batch systems. 
- - -import sys, os, time, argparse -os.chdir( '{{ caseroot }}') - -_LIBDIR = os.path.join("{{ cimeroot }}", "scripts", "Tools") -sys.path.append(_LIBDIR) - -from standard_script_setup import * -from CIME.case import Case - -logger = logging.getLogger(__name__) - - -############################################################################### -def parse_command_line(args, description): -############################################################################### - - parser = argparse.ArgumentParser(description=description, - formatter_class=argparse.RawTextHelpFormatter) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("--caseroot", default=os.getcwd(), - help="Case directory to build") - - parser.add_argument("--no-incomplete-logs", default=False, action="store_true", - help="If set, do not archive logs that are incomplete") - - parser.add_argument("--copy-only", default=False, action="store_true", - help="Copy instead of move the files to be archived") - - parser.add_argument("--last-date", default=None, - help="WARNING: This option with --force-move may corrupt your run directory! Use at your own risk! " - "Last simulation date to archive, specified as 'Year-Month-Day'. " - "Year must be specified with 4 digits, while month and day can be specified without zero padding. " - "'0003-11-4' would archive files up to and including simulated year 3, month 11, day 4. " - "This option implies --copy-only unless --force-move is specified ") - - parser.add_argument("--force-move", default=False, action="store_true", - help="Move the files even if it's unsafe to do so; dangerous if used with --copy-only.") - - parser.add_argument("--test-all", default=False, action="store_true", - help="Run tests of st_archiver functionality on config_archive.xml") - - parser.add_argument("--test-case", default=False, action="store_true", - help="Run tests of st_archiver functionality on env_archive.xml") - - parser.add_argument("--resubmit", default=False, action="store_true", - help="If RESUBMIT is set, this performs the resubmissions."
- "This is primarily meant for use by case.submit") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - if args.caseroot is not None: - os.chdir(args.caseroot) - - if args.last_date is not None and args.force_move is False: - args.copy_only = True - - if args.force_move is True: - args.copy_only = False - - return (args.caseroot, args.last_date, args.no_incomplete_logs, args.copy_only, - args.test_all, args.test_case, args.resubmit) - - -############################################################################### -def _main_func(description): -############################################################################### - sys.argv.extend([] if "ARGS_FOR_SCRIPT" not in os.environ else os.environ["ARGS_FOR_SCRIPT"].split()) - caseroot, last_date, no_incomplete_logs, copy_only, testall, testcase, resubmit = parse_command_line(sys.argv, description) - with Case(caseroot, read_only=False) as case: - if testall: - success = case.test_st_archive() - elif testcase: - success = case.test_env_archive() - else: - success = case.case_st_archive(last_date_str=last_date, - archive_incomplete_logs=not no_incomplete_logs, - copy_only=copy_only, resubmit=resubmit) - - sys.exit(0 if success else 1) - -############################################################################### - -if __name__ == "__main__": - _main_func(__doc__) diff --git a/CIME/data/config/cesm/machines/userdefined_laptop_template/README.md b/CIME/data/config/cesm/machines/userdefined_laptop_template/README.md deleted file mode 100644 index 1a0e03b663f..00000000000 --- a/CIME/data/config/cesm/machines/userdefined_laptop_template/README.md +++ /dev/null @@ -1,131 +0,0 @@ -Building CIME on an UNSUPPORTED local machine ---------------------------------------------- - -These directions are for a Mac OS X 10.9 or 10.10 laptop using -homebrew or macports to install the required software. The procedure -is similar for a linux workstation or cluster, you will just use -different package management tools to install the third party -libraries. - -Setup -===== - - - install xcode, including the command line tools. Failure to - install the command line tools is the most likely cause if you - get an error about the compilers not being able to create - executables. - - - install third party libraries from homebrew or macports. - - - home brew - - Install science tap : - - brew install gcc --without-multilib cmake mpich hdf5 --enable-fortran netcdf --enable-fortran - - - - macports - - sudo port install mpich +gcc48 hdf5-18 +mpich netcdf-fortran +gcc48 +mpich cmake - - Note: If you see an error while running create_newcase that - indicates perl can't find XML::LibXML, you may need to install - p5-xml-libxml as well. - - - - Some of the shell scripts used by cesm hard code "gmake" instead - of using the GMAKE variable from env_build.xml. To work around - this, you should install gnu make, or simply create a link from - make to gmake in you path. 
- - mkdir -p ${HOME}/local/bin - ln -s `whereis make` ${HOME}/local/bin/gmake - cat >> ${HOME}/.bashrc < - - - - - - -DFORTRANUNDERSCORE -DNO_R16 - - - -fopenmp - - - /usr/local/bin/gfortran - /usr/bin/cc - /usr/bin/c++ - /usr/local/bin/mpif90 - /usr/local/bin/mpicc - /usr/local/bin/mpicxx - FORTRAN - TRUE - /usr/local - - $(shell $(NETCDF_PATH)/bin/nf-config --flibs) -framework Accelerate - - - - - diff --git a/CIME/data/config/cesm/machines/userdefined_laptop_template/config_machines.xml b/CIME/data/config/cesm/machines/userdefined_laptop_template/config_machines.xml deleted file mode 100644 index 2d1005c5d0d..00000000000 --- a/CIME/data/config/cesm/machines/userdefined_laptop_template/config_machines.xml +++ /dev/null @@ -1,33 +0,0 @@ - - - - - __USEFUL_DESCRIPTION__ - something.matching.your.machine.hostname - Darwin - gnu - mpich - $ENV{HOME}/projects/scratch - $ENV{HOME}/projects/cesm-inputdata - $ENV{HOME}/projects/ptclm-data - $ENV{HOME}/projects/scratch/archive/$CASE - $ENV{HOME}/projects/baselines - $CIMEROOT/tools/cprnc/build/cprnc - 4 - none - __YOUR_NAME_HERE__ - 4 - 4 - - mpiexec_mpt - - -np $TOTALPES - --prepend-rank - - - - - - diff --git a/CIME/data/config/cesm/machines/userdefined_laptop_template/config_pes.xml b/CIME/data/config/cesm/machines/userdefined_laptop_template/config_pes.xml deleted file mode 100644 index 0464137703c..00000000000 --- a/CIME/data/config/cesm/machines/userdefined_laptop_template/config_pes.xml +++ /dev/null @@ -1,31 +0,0 @@ - - - - - - $MAX_TASKS_PER_NODE 1 0 - - - - 1 1 0 - - - - 1 1 0 - 1 1 0 - 1 1 0 - 1 1 0 - 1 1 0 - 1 1 0 - 1 1 0 - 1 1 0 - - - 2 1 - 2 - - - - diff --git a/CIME/data/config/ufs/config_archive.xml b/CIME/data/config/ufs/config_archive.xml deleted file mode 100644 index 04bd730620e..00000000000 --- a/CIME/data/config/ufs/config_archive.xml +++ /dev/null @@ -1,105 +0,0 @@ - - - [ri] - h\d*.*\.nc$ - unset - - rpointer.ice$NINST_STRING - ./$CASE.cice$NINST_STRING.r.$DATENAME.nc - - - rpointer.ice - casename.cice.r.1976-01-01-00000.nc - casename.cice.h.1976-01-01-00000.nc - - - - - - r - r[ho] - h\d*.*\.nc$ - d[dovt] - unset - - rpointer.ocn$NINST_STRING.restart - ./$CASE.pop$NINST_STRING.r.$DATENAME.nc,RESTART_FMT=nc - - - rpointer.ocn$NINST_STRING.ovf - ./$CASE.pop$NINST_STRING.ro.$DATENAME - - - rpointer.ocn$NINST_STRING.tavg - ./$CASE.pop$NINST_STRING.rh.$DATENAME.nc - - - rpointer.pop - casename.pop_0001.r.1976-01-01-00000.nc - casename.pop.r.1976-01-01-00000.nc - casename.pop.h.1976-01-01-00000.nc - casename.pop.h.1975-02-01-00000.nc - casename.pop.h0.1976-01-01-00000.nc - casename.pop.dd.1976-01-01-00000.nc - casename.pop.r.1975-01-01-00000.nc - anothercasename.pop.r.1976-01-01-00000.nc - - - - - [ri] - h\d*.*\.nc$ - initial_hist - unset - - rpointer.glc$NINST_STRING - ./$CASE.cism$NINST_STRING.r.$DATENAME.nc - - - - rpointer.glc - rpointer.glc_9999 - - casename.cism.r.1975-01-01-00000.nc - casename.cism.r.1976-01-01-00000.nc - - casename.cism.initial_hist.0001-01-01-00000.nc - casename.cism.h.1975-01-01-00000.nc - casename.cism.h.1976-01-01-00000.nc - - casename.cism.h.1976-01-01-00000.nc.base - anothercasename.cism.r.1976-01-01-00000.nc - - - - - r - hi.*\.nc$ - unset - - rpointer.wav$NINST_STRING - unset - - - - - r - rh\d? 
- [ei] - restart_hist - - rpointer.unset - unset - - - - casename.dart.r.1976-01-01-00000.nc - casename.dart.rh.pop_preassim_priorinf_mean.1976-01-01-00000.nc - casename.dart.rh.cam_preassim_priorinf_mean.1976-01-01-00000.nc - - casename.dart.e.cam_postassim_mean.1976-01-01-00000.nc - casename.dart.i.cam_output_mean.1976-01-01-00000.nc - casename.dart.e.cam_obs_seq_final.1976-01-01-00000.nc - - - diff --git a/CIME/data/config/ufs/config_files.xml b/CIME/data/config/ufs/config_files.xml index a278fb74046..0ec22004a2c 100644 --- a/CIME/data/config/ufs/config_files.xml +++ b/CIME/data/config/ufs/config_files.xml @@ -26,7 +26,7 @@ char - $CIMEROOT/CIME/data/config/$MODEL/config_grids.xml + $SRCROOT/ccs_config_ufs/config_grids.xml case_last env_case.xml file containing specification of all supported model grids, domains and mapping files (for documentation only - DO NOT EDIT) @@ -35,7 +35,7 @@ char - $CIMEROOT/CIME/data/config/$MODEL/machines/config_machines.xml + $SRCROOT/ccs_config_ufs/machines/config_machines.xml case_last env_case.xml file containing machine specifications for target model primary component (for documentation only - DO NOT EDIT) @@ -44,7 +44,7 @@ char - $CIMEROOT/CIME/data/config/$MODEL/machines/config_batch.xml + $SRCROOT/ccs_config_ufs/machines/config_batch.xml case_last env_case.xml file containing batch system details for target system (for documentation only - DO NOT EDIT) @@ -53,7 +53,7 @@ char - $CIMEROOT/CIME/data/config/$MODEL/machines/config_workflow.xml + $SRCROOT/ccs_config_ufs/machines/config_workflow.xml case_last env_case.xml file containing workflow (for documentation only - DO NOT EDIT) @@ -62,7 +62,7 @@ char - $CIMEROOT/CIME/data/config/$MODEL/config_inputdata.xml + $SRCROOT/ccs_config_ufs/config_inputdata.xml case_last env_case.xml file containing inputdata server descriptions (for documentation only - DO NOT EDIT) @@ -71,7 +71,7 @@ char - $CIMEROOT/CIME/data/config/$MODEL/machines/config_compilers.xml + $SRCROOT/ccs_config_ufs/machines/config_compilers.xml case_last env_case.xml file containing compiler specifications for target model primary component (for documentation only - DO NOT EDIT) @@ -80,7 +80,7 @@ char - $CIMEROOT/CIME/data/config/$MODEL/machines/config_pio.xml + $SRCROOT/ccs_config_ufs/machines/config_pio.xml case_last env_case.xml file containing specification of pio settings for target model possible machine, compiler, mpilib, compset and/or grid attributes (for documentation only - DO NOT EDIT) @@ -298,7 +298,7 @@ char - $CIMEROOT/CIME/data/config/$MODEL/config_archive.xml + $SRCROOT/ccs_config_ufs/config_archive.xml $COMP_ROOT_DIR_CPL/cime_config/config_archive.xml $COMP_ROOT_DIR_ROF/cime_config/config_archive.xml diff --git a/CIME/data/config/ufs/config_grids.xml b/CIME/data/config/ufs/config_grids.xml deleted file mode 100644 index fda55ba81c5..00000000000 --- a/CIME/data/config/ufs/config_grids.xml +++ /dev/null @@ -1,275 +0,0 @@ - - - - - - ========================================= - GRID naming convention - ========================================= - The notation for the grid longname is - a%name_l%name_oi%name_r%name_m%mask_g%name_w%name - where - a% => atm, l% => lnd, oi% => ocn/ice, r% => river, m% => mask, g% => glc, w% => wav - - Supported out of the box grid configurations are given via alias specification in - the file "config_grids.xml". 
Each grid alias can also be associated with the - following optional attributes - - - - - null - null - null - null - null - null - null - - - - C96 - - - - C192 - - - - C384 - - - - C768 - - - - C768r - - - - T62 - T62 - Atlantic8 - - - - TL319 - TL319 - Atlantic8 - - - - TL639 - TL639 - Atlantic8 - - - - C768r - C768r - Atlantic8 - - - - C96 - C96 - tx0.66v1 - tx0.66v1 - - - - C96 - C96 - tx0.25v1 - tx0.25v1 - - - - C192 - C192 - tx0.66v1 - tx0.66v1 - - - - C192 - C192 - tx0.25v1 - tx0.25v1 - - - - C384 - C384 - tx0.66v1 - tx0.66v1 - - - - C384 - C384 - tx0.25v1 - tx0.25v1 - - - - C768 - C768 - tx0.66v1 - tx0.66v1 - - - - C768 - C768 - tx0.25v1 - tx0.25v1 - - - - C768r - C768r - Atlantic8 - - - - - - - - - - - 0 0 - unset - null is no grid: - - - - - 55296 1 - $DIN_LOC_ROOT/share/domains/domain.lnd.C96_gx1v6.181018.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.C96_gx1v6.181018.nc - $DIN_LOC_ROOT/share/domains/domain.lnd.C96_tx0.66v1.181210.nc - $DIN_LOC_ROOT/share/domains/domain.ocn.C96_tx0.66v1.181210.nc - $DIN_LOC_ROOT/share/meshes/C96_181018_ESMFmesh.nc - C96 is a fvcubed xx-deg grid: - Experimental for fv3 dycore - - - - 221184 1 - C192 is a fvcubed 50 km resolution grid: - Experimental for fv3 dycore - - - - 884736 1 - C384 is a fvcubed 25 km resolution grid: - Experimental for fv3 dycore - - - - 3538944 1 - C768 is a fvcubed 13 km resolution grid: - Experimental for fv3 dycore - - - - 1440 1050 - $DIN_LOC_ROOT/share/domains/domain.ocn.tn0.25v3.160721.nc - tn0.25v3 is NEMO ORCA1 tripole grid at 1/4 deg (reduced eORCA): - NEMO ORCA1 tripole ocean grid - - - - - 1440 1080 - $DIN_LOC_ROOT/share/domains/domain.ocn.tx0.25v1.190207.nc - $DIN_LOC_ROOT/share/meshes/tx0.25v1_190204_ESMFmesh.nc - tx0.25v1 is tripole v1 0.25-deg MOM6 grid: - Experimental for MOM6 experiments - - - - 540 458 - $DIN_LOC_ROOT/share/domains/domain.ocn.tx0.66v1.190425.nc - $DIN_LOC_ROOT/share/meshes/tx0.66v1_190314_ESMFmesh.nc - tx0.66v1 is tripole v1 0.66-deg MOM6 grid: - Experimental for MOM6 experiments - - - - - - 192 94 - $DIN_LOC_ROOT/hafs/share/domains/domain.lnd.T62_Atlantic8.200423.nc - $DIN_LOC_ROOT/hafs/share/domains/domain.ocn.T62_Atlantic8.200423.nc - $DIN_LOC_ROOT/share/meshes/T62_040121_ESMFmesh.nc - T62 is Gaussian grid: - - - - 640 320 - $DIN_LOC_ROOT/hafs/share/domains/domain.lnd.TL319_Atlantic8.200527.nc - $DIN_LOC_ROOT/hafs/share/domains/domain.ocn.TL319_Atlantic8.200527.nc - $DIN_LOC_ROOT/share/meshes/TL319_151007_ESMFmesh.nc - TL319 grid for JRA55 - - - - 1440 721 - $DIN_LOC_ROOT/hafs/share/domains/domain.lnd.TL639_Atlantic8.200618.nc - $DIN_LOC_ROOT/hafs/share/domains/domain.ocn.TL639_Atlantic8.200618.nc - $DIN_LOC_ROOT/share/meshes/TL639_200618_ESMFmesh.nc - TL639 grid for ERA5 - - - - - 1135 633 - $DIN_LOC_ROOT/hafs/share/domains/domain.ocn.Atlantic8.2004123.nc - $DIN_LOC_ROOT/hafs/share/meshes/Atlantic8_SCRIP_230420_ESMFmesh.nc - Atlantic8 is a Atlantic grid for Dorian case at 7-8 km HYCOM grid: - For regional HYCOM experiments - - - - - 2561 2161 - C768r is a regional grid - Standalone Regional (SAR) domian for HAFS application - - - - - - - - - - - - - ATM2OCN_FMAPNAME - ATM2OCN_SMAPNAME - ATM2OCN_VMAPNAME - OCN2ATM_FMAPNAME - OCN2ATM_SMAPNAME - ATM2LND_FMAPNAME - ATM2LND_SMAPNAME - LND2ATM_FMAPNAME - LND2ATM_SMAPNAME - ATM2WAV_SMAPNAME - OCN2WAV_SMAPNAME - ICE2WAV_SMAPNAME - ROF2OCN_LIQ_RMAPNAME - ROF2OCN_ICE_RMAPNAME - LND2ROF_FMAPNAME - ROF2LND_FMAPNAME - - - - - diff --git a/CIME/data/config/ufs/config_inputdata.xml b/CIME/data/config/ufs/config_inputdata.xml deleted file mode 100644 
index 80410b10178..00000000000 --- a/CIME/data/config/ufs/config_inputdata.xml +++ /dev/null @@ -1,20 +0,0 @@ - - - - - - - - - - - - - ftp site for ufs release - ftp - ftp.emc.ncep.noaa.gov/EIB/UFS/ - anonymous - user@example.edu - /inputdata/ - -
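The entry removed above points at an anonymous FTP server. As a rough illustration of what such an entry encodes, a minimal Python sketch using the standard-library ftplib is shown below; this is not CIME's actual check_input_data logic, the directory layout (address joined with the inputdata path) is an assumption, and the file name in the usage comment is hypothetical.

    from ftplib import FTP

    # Server values taken from the deleted config_inputdata.xml entry above;
    # joining the address and the /inputdata/ path like this is an assumption.
    HOST = "ftp.emc.ncep.noaa.gov"
    ROOT = "/EIB/UFS/inputdata/"

    def fetch(remote_name, local_name):
        """Download one file via anonymous FTP, as the deleted entry implies."""
        with FTP(HOST) as ftp:
            ftp.login(user="anonymous", passwd="user@example.edu")
            ftp.cwd(ROOT)
            with open(local_name, "wb") as fh:
                ftp.retrbinary("RETR " + remote_name, fh.write)

    # fetch("some_input_file.nc", "some_input_file.nc")  # hypothetical file name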
diff --git a/CIME/data/config/ufs/machines/Depends.cray b/CIME/data/config/ufs/machines/Depends.cray deleted file mode 100644 index bbe5a712d97..00000000000 --- a/CIME/data/config/ufs/machines/Depends.cray +++ /dev/null @@ -1,6 +0,0 @@ -NOOPTOBJS= ice_boundary.o dyn_comp.o unicon.o - -$(NOOPTOBJS): %.o: %.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FFLAGS_NOOPT) $(FREEFLAGS) $< - - diff --git a/CIME/data/config/ufs/machines/Depends.gnu b/CIME/data/config/ufs/machines/Depends.gnu deleted file mode 100644 index 2d53247217e..00000000000 --- a/CIME/data/config/ufs/machines/Depends.gnu +++ /dev/null @@ -1,2 +0,0 @@ -geopk.o:geopk.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -fcray-pointer $< diff --git a/CIME/data/config/ufs/machines/Depends.intel b/CIME/data/config/ufs/machines/Depends.intel deleted file mode 100644 index 3dd4e885e27..00000000000 --- a/CIME/data/config/ufs/machines/Depends.intel +++ /dev/null @@ -1,40 +0,0 @@ -# -PERFOBJS=\ -prim_advection_mod.o \ -edge_mod.o \ -derivative_mod.o \ -bndry_mod.o \ -prim_advance_mod.o - -# CLM's SatellitePhenologyMod is compiled incorrectly with intel 15.0.0 at -O2 -REDUCED_OPT_OBJS=\ -SatellitePhenologyMod.o - -# shr_wv_sat_mod does not need to have better than ~0.1% precision, and benefits -# enormously from a lower precision in the vector functions. -REDUCED_PRECISION_OBJS=\ -shr_wv_sat_mod.o - -SHR_RANDNUM_FORT_OBJS=\ -kissvec_mod.o \ -mersennetwister_mod.o \ -dSFMT_interface.o \ -shr_RandNum_mod.o - -SHR_RANDNUM_C_OBJS=\ -dSFMT.o \ -dSFMT_utils.o \ -kissvec.o - -ifeq ($(DEBUG),FALSE) - $(PERFOBJS): %.o: %.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O3 -no-prec-div $< - $(REDUCED_OPT_OBJS): %.o: %.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O1 $< - $(REDUCED_PRECISION_OBJS): %.o: %.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -fimf-precision=low -fp-model fast $< - $(SHR_RANDNUM_FORT_OBJS): %.o: %.F90 - $(FC) -c $(INCLDIR) $(INCS) $(FFLAGS) $(FREEFLAGS) -O3 -fp-model fast -no-prec-div -no-prec-sqrt -qoverride-limits $< - $(SHR_RANDNUM_C_OBJS): %.o: %.c - $(CC) -c $(INCLDIR) $(INCS) $(CFLAGS) -O3 -fp-model fast $< -endif diff --git a/CIME/data/config/ufs/machines/README b/CIME/data/config/ufs/machines/README deleted file mode 100644 index 3bafbe274ee..00000000000 --- a/CIME/data/config/ufs/machines/README +++ /dev/null @@ -1,15 +0,0 @@ -config_pes_pop.xml -current assumptions: - prognostic: pop, cice - data: datm, drof - stub: slnd, sglc -DATM.+XLND.+CICE.+POP.+DROF.+SGLC -The current attributes that are supported are - lcompset_matchN= (where N can be any number) - pecount=[S,M,L,XL] - -Please refer to the documentation in the config_machines.xml and config_compilers.xml files. 
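The lcompset_matchN attributes described in the README above are regular expressions tested against a compset long name. A small sketch of that matching follows, using the DATM/POP pattern quoted in the README; the long name below is a made-up example in the usual component ordering, and the selection logic is deliberately simpler than what CIME really does.

    import re

    # Pattern quoted in the deleted README above.
    PES_PATTERN = re.compile(r"DATM.+XLND.+CICE.+POP.+DROF.+SGLC")

    # Hypothetical compset long name (prognostic pop/cice, data atm/rof, stub lnd/glc).
    longname = "2000_DATM%NYF_XLND_CICE_POP2_DROF%NYF_SGLC_SWAV"

    if PES_PATTERN.search(longname):
        print("this pes entry applies to the compset")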
- - - - diff --git a/CIME/data/config/ufs/machines/config_batch.xml b/CIME/data/config/ufs/machines/config_batch.xml deleted file mode 100644 index 2e302157961..00000000000 --- a/CIME/data/config/ufs/machines/config_batch.xml +++ /dev/null @@ -1,279 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - qstat - qsub - qdel - -v - - (\d+) - --dependencies - %H:%M:%s - -M - - - - - - - - - - - - - - - qstat - qsub - qdel - --env - #COBALT - (\d+) - --dependencies - -M - - - - - - - - - - - - - bjobs - bsub - bkill - < - - #BSUB - <(\d+)> - -w 'done(jobid)' - -w 'ended(jobid)' - && - %H:%M - -u - - - - -J {{ job_id }} - -n {{ total_tasks }} - -W $JOB_WALLCLOCK_TIME - -o {{ job_id }}.%J - -e {{ job_id }}.%J - - - - - qstat - qsub - qdel - -v - #PBS - ^(\S+)$ - -W depend=afterok:jobid - -W depend=afterany:jobid - : - %H:%M:%S - -M - -m - , bea, b, e, a - - - - - - - -N {{ job_id }} - -r {{ rerunnable }} - - -j oe - -V - - - - - squeue - scancel - #SBATCH - (\d+)$ - --dependency=afterok:jobid - --dependency=afterany:jobid - , - %H:%M:%S - --mail-user - --mail-type - none, all, begin, end, fail - - --job-name={{ job_id }} - --nodes={{ num_nodes }} - --ntasks-per-node={{ tasks_per_node }} - --output={{ job_id }} - --exclusive - - - - - - -S {{ shell }} - -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }} - - - - -S {{ shell }} - -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }} - - - - -S {{ shell }} - -l select={{ num_nodes }}:ncpus={{ max_tasks_per_node }}:mpiprocs={{ tasks_per_node }}:ompthreads={{ thread_count }} - - - - -S {{ shell }} - -l select=1:mpiprocs={{ total_tasks }}:ompthreads={{ thread_count }} - - - - regular - - - regular - premium - share - economy - - - - - sbatch - - - - - - - --partition=hera - - - batch - debug - - - - - sbatch - - - - - - - --partition=orion - - - batch - debug - - - - - sbatch - - - - - - - --partition=kjet - - - batch - - - - - ssh login1.stampede2.tacc.utexas.edu cd $CASEROOT ; sbatch - - - - - - - skx-normal - skx-dev - - - - - sbatch - Submitted batch job (\d+) on cluster .* - - - - - - - --clusters=c4 - - - debug - - - diff --git a/CIME/data/config/ufs/machines/config_compilers.xml b/CIME/data/config/ufs/machines/config_compilers.xml deleted file mode 100644 index e3bd7a0997c..00000000000 --- a/CIME/data/config/ufs/machines/config_compilers.xml +++ /dev/null @@ -1,590 +0,0 @@ - - - - - - - - - -D_USE_FLOW_CONTROL - -DSPMD - - - - -I$(EXEROOT)/atm/obj/FMS - - - $(FC_AUTO_R8) - $(FC_AUTO_R8) -Duse_LARGEFILE - - FALSE - - - - - -h noomp - -g -O0 - -O2 - - - - -DFORTRANUNDERSCORE -DNO_R16 -DCPRCRAY - -DDIR=NOOP - -DDIR=NOOP - - - -s real64 - - - -f free -N 255 -h byteswapio -x dir - -h noomp - -g -O0 -K trap=fp -m1 - -O2,ipa2 -em - - - -O1,fp2,ipa0,scalar0,vector0 - - TRUE - - -Wl,--allow-multiple-definition -h byteswapio - - - - - - -std=gnu99 - -fopenmp - -g -Wall -Og -fbacktrace -ffpe-trap=invalid,zero,overflow -fcheck=bounds - -O - - - - -DFORTRANUNDERSCORE -DNO_R16 -DCPRGNU - - FORTRAN - - -fdefault-real-8 - - - -fconvert=big-endian -ffree-line-length-none -fcray-pointer -fno-range-check -fbacktrace - -fopenmp - - -g -Wall -Og -fbacktrace -ffpe-trap=zero,overflow -fcheck=bounds - -O - - - -DOVERLOAD_R4 -DOVERLOAD_R8 - - - -fdefault-real-8 -fdefault-double-8 - - - -O0 - - - -ffixed-form - - - -ffree-form - - FALSE - - -fopenmp - - mpicc - mpicxx - mpif90 - gcc - g++ - gfortran - TRUE - - - - - -g -qfullpath -qmaxmem=-1 
- -O3 - -qsmp=omp - -qsmp=omp:noopt - - - - -DFORTRAN_SAME -DCPRIBM - - -WF,-D - - -qrealsize=8 - - - -g -qfullpath -qmaxmem=-1 - -O2 -qstrict -qinline=auto - -qsmp=omp - -qinitauto=7FF7FFFF -qflttrap=ov:zero:inv:en - -qsmp=omp:noopt - -C - - - -qsuffix=f=f -qfixed=132 - - - -qsuffix=f=f90:cpp=F90 - - TRUE - - -qsmp=omp - -qsmp=omp:noopt - - - - - - -fp-model consistent -sox -std=gnu99 - -qopenmp - -O2 -debug minimal - -O0 -g - - - - -DFORTRANUNDERSCORE -DCPRINTEL - - - -cxxlib - - FORTRAN - - -r8 - - - -i4 -real-size 32 - - - -i4 -real-size 64 - - - -qno-opt-dynamic-align -fp-model consistent - - - -xCORE-AVX2 -no-prec-div -no-prec-sqrt - - - -fpp -fno-alias -auto -safe-cray-ptr -ftz -assume byterecl -nowarn -sox -align array64byte - -qopenmp - - -g -O0 -check -check noarg_temp_created -check nopointer -warn -warn noerrors -fp-stack-check -fstack-protector-all -debug -traceback -fpe0 -ftrapuv - -O2 -debug minimal - - - -O0 - - - -fixed - - - -free - - - -qopenmp - - mpicc - mpicxx - mpif90 - icc - icpc - ifort - - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl=cluster - -mkl - - TRUE - - - - - -std=gnu99 - -g - - - -DFORTRANUNDERSCORE -DNO_CRAY_POINTERS -DNO_SHR_VMATH -DCPRNAG - - - -r8 - - - - - -Wp,-macro=no_com -convert=BIG_ENDIAN -indirect $ENV{CIMEROOT}/config/cesm/machines/nag_mpi_argument.txt - - -ieee=full -O2 - - - -C=all -g -time -f2003 -ieee=stop - -gline - - -mismatch_all - - - -O0 - - - -fixed - - - -free - - FALSE - mpicc - mpif90 - gcc - nagfor - - - -lpthread - - - FCLIBS="-Wl,--as-needed,--allow-shlib-undefined -L$(COMPILER_PATH)/lib/NAG_Fortran -lf62rts" - - - - - - - -gopt -time - -mp - - - - - - - - - - - - - - - - - - - - - - - - - - - - -DFORTRANUNDERSCORE -DNO_SHR_VMATH -DNO_R16 -DCPRPGI - - CXX - - -r8 - - - -i4 -gopt -time -Mextend -byteswapio -Mflushz -Kieee - -mp - -O0 -g -Ktrap=fp -Mbounds -Kieee - -Mnovect - -Mnovect - -Mnovect - -Mnovect - -Mnovect - -Mnovect - - - -O0 - - - -Mfixed - - - -Mfree - - - - FALSE - - -time -Wl,--allow-multiple-definition - -mp - - mpicc - mpicxx - mpif90 - pgcc - pgc++ - pgf95 - - - - - -qarch=auto -qtune=auto -qcache=auto - - /usr/bin/bash - - -qarch=auto -qtune=auto -qcache=auto -qsclk=micro - -qspill=6000 - - - -qsigtrap=xl__trcedump - -bdatapsize:64K -bstackpsize:64K -btextpsize:32K - - mpcc_r - mpxlf2003_r - cc_r - xlf2003_r - - -lmassv -lessl - -lmass - - - - - - --build=powerpc-bgp-linux --host=powerpc64-suse-linux - - - -DLINUX - - - -g -qfullpath -qmaxmem=-1 -qspillsize=2500 -qextname=flush - -O3 -qstrict -qinline=auto - -qsmp=omp - -qsmp=omp:noopt - - - -Wl,--relax -Wl,--allow-multiple-definition - - - - - - -DCMAKE_SYSTEM_NAME=Catamount - - - -DLINUX - -DHAVE_NANOTIME -DBIT64 -DHAVE_VPRINTF -DHAVE_BACKTRACE -DHAVE_SLASHPROC -DHAVE_COMM_F2C -DHAVE_TIMES -DHAVE_GETTIMEOFDAY - - cc - CC - ftn - $ENV{NETCDF_DIR} - lustre - $ENV{PARALLEL_NETCDF_DIR} - cc - CC - ftn - - - - - -DSYSDARWIN - - - -Wl,-rpath $ENV{NETCDF}/lib - - - - - - -heap-arrays - - - - - - - -DHAVE_NANOTIME -DBIT64 -DHAVE_VPRINTF -DHAVE_BACKTRACE -DHAVE_SLASHPROC -DHAVE_COMM_F2C -DHAVE_TIMES -DHAVE_GETTIMEOFDAY - - $ENV{NETCDF} - gpfs - $ENV{PNETCDF} - - - - - -DNO_MPIMOD - - - -ldl - - - - - - -qopt-report -xCORE-AVX2 - - - -qopt-report -xCORE-AVX2 - - - -DPIO_ENABLE_LOGGING=ON - - $ENV{CESMDATAROOT}/tools/pFUnit/pFUnit3.2.8_cheyenne_Intel17.0.1_noMPI_noOpenMP - $ENV{CESMDATAROOT}/tools/pFUnit/pFUnit3.2.8_cheyenne_Intel17.0.1_MPI_openMP - - TRUE - - - - - -qopt-report -xCORE-AVX2 - - - -qopt-report 
-xCORE-AVX2 - - - - - mpiicc - mpiicpc - mpiifort - - - - mpiicc - mpiicpc - mpif90 - - - - - - -framework Accelerate - - - - - - -DHAVE_NANOTIME - - $ENV{TACC_NETCDF_DIR} - lustre - $ENV{TACC_PNETCDF_DIR} - - - - - -xCOMMON-AVX512 -no-fma - - - -xCOMMON-AVX512 -no-fma - -mcmodel medium - - - -L$ENV{TACC_HDF5_LIB} -lhdf5 -zmuldefs -xCOMMON-AVX512 - - - $SHELL{${NETCDF_PATH}/bin/nf-config --flibs} -L$ENV{TACC_HDF5_LIB} -lhdf5 - - $ENV{TRILINOS_PATH} - FALSE - - - - - $(FC_AUTO_R8) - - - diff --git a/CIME/data/config/ufs/machines/config_machines.xml b/CIME/data/config/ufs/machines/config_machines.xml deleted file mode 100644 index 11bdcd6750c..00000000000 --- a/CIME/data/config/ufs/machines/config_machines.xml +++ /dev/null @@ -1,604 +0,0 @@ - - - - - - - NCAR SGI platform, os is Linux, 36 pes/node, batch system is PBS - .*.?cheyenne\d?.ucar.edu - - MPT: Launcher network accept (MPI_LAUNCH_TIMEOUT) timed out - 10 - LINUX - intel,gnu - mpt - mpt,impi - $ENV{UFS_SCRATCH} - $ENV{UFS_INPUT}/ufs_inputdata - /glade/p/cgd/tss/CTSM_datm_forcing_data - $CIME_OUTPUT_ROOT/archive/$CASE - $ENV{UFS_INPUT}/ufs_baselines - 8 - pbs - cseg - - 36 - 36 - TRUE - - mpiexec_mpt - - -p "%g:" - -np {{ total_tasks }} - - omplace -tm open64 - - - - mpirun `hostname` - - -np {{ total_tasks }} - - omplace -tm open64 - - - - mpirun - - -np {{ total_tasks }} - - - - /glade/u/apps/ch/opt/lmod/8.1.7/lmod/lmod/init/perl - /glade/u/apps/ch/opt/lmod/8.1.7/lmod/lmod/init/env_modules_python.py - /glade/u/apps/ch/opt/lmod/8.1.7/lmod/lmod/init/csh - /glade/u/apps/ch/opt/lmod/8.1.7/lmod/lmod/init/sh - /glade/u/apps/ch/opt/lmod/8.1.7/lmod/lmod/libexec/lmod perl - /glade/u/apps/ch/opt/lmod/8.1.7/lmod/lmod/libexec/lmod python - module - module - - - ncarenv/1.3 - cmake/3.16.4 - - - intel/19.0.5 - mkl - - - gnu/8.3.0 - openblas/0.3.6 - - - mpt/2.19 - netcdf/4.7.3 - - - mpt/2.19 - pnetcdf/1.12.1 - - - - impi - netcdf-mpi/4.7.3 - pnetcdf/1.12.1 - - - ncarcompilers/0.5.0 - - - /glade/p/ral/jntp/GMTB/tools/modulefiles/gnu-8.3.0/mpt-2.19 - NCEPlibs/1.1.0 - - - /glade/p/ral/jntp/GMTB/tools/ufs-stack-20200909/intel-19.0.5/mpt-2.19/modules - libpng/1.6.35 - esmf/8.1.0bs27 - netcdf/4.7.4 - bacio/2.4.0 - crtm/2.3.0 - g2/3.4.0 - g2tmpl/1.9.0 - ip/3.3.0 - nceppost/dceca26 - nemsio/2.5.1 - sp/2.3.0 - w3emc/2.7.0 - w3nco/2.4.0 - gfsio/1.4.0 - sfcio/1.4.0 - sigio/2.3.0 - - - /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/intel/19.0.5 - esmf-8.1.0b24-ncdfio-intelmpi-O - - - /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/intel/19.0.5 - esmf-8.1.0b24-ncdfio-intelmpi-g - - - netcdf/4.7.3 - - - netcdf/4.7.3 - - - - false - - - ON - SUMMARY - 1 - 1024M - 16 - 1 - - - - -1 - - - - - - port to gaea - - gaea* - CNL - intel - mpt - esrl_bmcs - - $ENV{UFS_SCRATCH} - $ENV{UFS_INPUT}/ufs_inputdata - ${DIN_LOC_ROOT}/lmwg - ${CIME_OUTPUT_ROOT}/ufs-runs/archive/$CASE - $ENV{UFS_INPUT}/ufs-runs/baselines - make - 8 - slurm - NOAA - 16 - 16 - FALSE - - srun - - --label - -n {{ total_tasks }} - - - - /opt/cray/pe/modules/3.2.10.5/init/perl.pm - /opt/cray/pe/modules/3.2.10.5/init/python.py - /opt/cray/pe/modules/3.2.10.5/init/csh - /opt/cray/pe/modules/3.2.10.5/init/sh - /opt/cray/pe/modules/4.1.3.1/bin/modulecmd perl - /opt/cray/pe/modules/4.1.3.1/bin/modulecmd python - module - module - - PrgEnv-intel/6.0.3 - intel/18.0.3.222 - cray-mpich/7.4.0 - cray-mpich/7.7.3 - cray-netcdf - /lustre/f2/pdata/esrl/gsd/ufs/modules/modulefiles/generic - cmake/3.16.4 - /lustre/f2/pdata/esrl/gsd/ufs/modules/modulefiles/intel-18.0.3.222 - NCEPlibs/1.0.0 - - - - 256M - 
$ENV{NCEPLIBS_DIR}/bin:$ENV{PATH} - - - -1 - - - - - NOAA hera system - hfe[0-9][0-9]\.hera - LINUX - intel - impi - nems - - $ENV{UFS_SCRATCH} - $ENV{UFS_INPUT}/ufs_inputdata - ${DIN_LOC_ROOT}/atm/datm7 - $CIME_OUTPUT_ROOT/archive/$CASE - $ENV{UFS_INPUT}/UFS_BASELINES - make - 8 - slurm - NCEP - 80 - 40 - TRUE - - srun - - -n $TOTALPES - - - - - - - /apps/lmod/lmod/init/sh - /apps/lmod/lmod/init/csh - /apps/lmod/lmod/init/env_modules_python.py - module - module - /apps/lmod/lmod/libexec/lmod python - - /contrib/sutils/modulefiles - sutils - - - - intel/18.0.5.274 - netcdf/4.7.0 - - - impi/2018.0.4 - /scratch1/BMC/gmtb/software/modulefiles/intel-18.0.5.274/impi-2018.0.4 - NCEPlibs/1.1.0 - - - /scratch1/BMC/gmtb/software/modulefiles/generic - cmake/3.16.3 - - - - ON - SUMMARY - - - - - NOAA orion system - Orion-login-\d+.HPC.MsState.Edu - LINUX - intel - impi - nems - - $ENV{UFS_SCRATCH} - $ENV{UFS_INPUT}/ufs_inputdata - ${DIN_LOC_ROOT}/atm/datm7 - $CIME_OUTPUT_ROOT/archive/$CASE - $ENV{UFS_INPUT}/UFS_BASELINES - make - 8 - slurm - NCEP - 80 - 40 - TRUE - - srun - - -n $TOTALPES - - - - - - - /apps/lmod/lmod/init/sh - /apps/lmod/lmod/init/csh - /apps/lmod/lmod/init/env_modules_python.py - module - module - /apps/lmod/lmod/libexec/lmod python - - - intel/2018 - - - impi/2018 - - - contrib noaatools - cmake/3.15.4 - /apps/contrib/NCEPLIBS/orion/cmake/install/NCEPLIBS/modules - bacio/2.4.0 - crtm_dev/2.3.0 - g2/3.4.0 - g2tmpl/1.9.0 - ip/3.3.0 - nceppost/dceca26 - nemsio/2.5.1 - sp/2.3.0 - w3emc/2.7.0 - w3nco/2.4.0 - - gfsio/1.4.0 - sfcio/1.4.0 - sigio/2.3.0 - - /apps/contrib/NCEPLIBS/orion/modulefiles - jasper/1.900.2 - png/1.2.44 - z/1.2.6 - - /apps/contrib/NCEPLIBS/lib/modulefiles - - netcdfp/4.7.4.release - esmflocal/8.1.0.27bs.release - - - - cmake/3.15.4 - - - - ON - SUMMARY - $ENV{NETCDF} - - - -1 - - - - - NOAA JET system - fe\d?.fsl.noaa.gov - LINUX - intel - impi - wrfruc - $ENV{UFS_SCRATCH} - $ENV{UFS_INPUT}/ufs_inputdata - ${DIN_LOC_ROOT}/lmwg - ${CIME_OUTPUT_ROOT}/$CASE - $ENV{UFS_INPUT}/ufs_baselines - 4 - slurm - NOAA - 40 - 40 - TRUE - - srun - - -n $TOTALPES - - - - - - - /apps/lmod/lmod/init/perl - /apps/lmod/lmod/init/env_modules_python.py - /apps/lmod/lmod/init/sh - /apps/lmod/lmod/init/csh - /apps/lmod/lmod/libexec/lmod perl - /apps/lmod/lmod/libexec/lmod python - module - module - - /contrib/sutils/modulefiles - sutils - - - - intel/18.0.5.274 - netcdf/4.7.0 - - - impi/2018.4.274 - pnetcdf/1.6.1 - /lfs3/projects/hfv3gfs/GMTB/modulefiles/intel-18.0.5.274/impi-2018.4.274 - NCEPlibs/1.0.0 - - - /lfs3/projects/hfv3gfs/GMTB/modulefiles/generic - cmake/3.16.4 - - - - ON - SUMMARY - - - - - - Customize these fields as appropriate for your system, - particularly changing MAX_TASKS_PER_NODE and MAX_MPITASKS_PER_NODE to the - number of cores on your machine. - - something.matching.your.machine.hostname - LINUX - gnu - mpich - $ENV{UFS_SCRATCH} - $ENV{UFS_INPUT}/ufs_inputdata - $ENV{UFS_SCRATCH}/archive/$CASE - $ENV{UFS_INPUT}/baselines - make - 4 - none - DTC - 8 - 8 - - mpirun - - -np {{ total_tasks }} - -prepend-rank - - - - - - - - Customize these fields as appropriate for your system, - particularly changing MAX_TASKS_PER_NODE and MAX_MPITASKS_PER_NODE to the - number of cores on your machine. 
- - something.matching.your.machine.hostname - Darwin - gnu - mpich - $ENV{UFS_SCRATCH} - $ENV{UFS_INPUT}/ufs_inputdata - $ENV{UFS_SCRATCH}/archive/$CASE - $ENV{UFS_INPUT}/baselines - make - 4 - none - DTC - 8 - 8 - - mpirun - - -np {{ total_tasks }} - -prepend-rank - - - - - - - Intel Xeon Platinum 8160 ("Skylake"),48 cores on two sockets (24 cores/socket) , batch system is SLURM - .*stampede2 - LINUX - intel - impi - TG-ATM190009 - $ENV{SCRATCH} - $ENV{UFS_INPUT}/ufs_inputdata - ${DIN_LOC_ROOT}/lmwg - $ENV{WORK}/archive/$CASE - $ENV{WORK}/ufs_baselines - 4 - slurm - cseg - 96 - 48 - - ibrun - - -n {{ total_tasks }} - - - - ibrun - - -n {{ total_tasks }} - - - - /opt/apps/lmod/lmod/init/perl - /opt/apps/lmod/lmod/init/env_modules_python.py - /opt/apps/lmod/lmod/init/sh - /opt/apps/lmod/lmod/init/csh - /opt/apps/lmod/lmod/libexec/lmod perl - /opt/apps/lmod/lmod/libexec/lmod python - module - module - - - TACC - python/2.7.15 - intel/18.0.2 - cmake - - - mvapich2/2.3b - pnetcdf/1.11.0 - netcdf/4.6.2 - - - mvapich2 - impi/18.0.2 - pnetcdf/1.11.0 - - - netcdf/4.6.2 - - - - 256M - - - ON - SUMMARY - - - $ENV{nemsio_DIR}/lib/libnemsio.a - $ENV{bacio_DIR}/lib/libbacio_4.a - $ENV{w3emc_DIR}/lib/libw3emc_d.a - $ENV{w3nco_DIR}/lib/libw3nco_d.a - $ENV{sp_DIR}/lib/libsp_d.a - - - - - ${EXEROOT}/ufs.exe - >> ufs.log.$LID 2>&1 - - - diff --git a/CIME/data/config/ufs/machines/config_pio.xml b/CIME/data/config/ufs/machines/config_pio.xml deleted file mode 100644 index 40f22b32a07..00000000000 --- a/CIME/data/config/ufs/machines/config_pio.xml +++ /dev/null @@ -1,347 +0,0 @@ - - - - - - - - - - - $MAX_MPITASKS_PER_NODE - 60 - - - - - - - - - - - - pnetcdf - netcdf - - - - - - - - 1 - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - netcdf - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/CIME/data/config/ufs/machines/config_workflow.xml b/CIME/data/config/ufs/machines/config_workflow.xml deleted file mode 100644 index 9047355a97c..00000000000 --- a/CIME/data/config/ufs/machines/config_workflow.xml +++ /dev/null @@ -1,130 +0,0 @@ - - - - - - - - $BUILD_COMPLETE and not $TEST - - - - $BUILD_COMPLETE and $TEST - - - - - case.run or case.test - $DOUT_S - - 1 - 1 - 0:20:00 - - - - - - - - - $BUILD_COMPLETE - - 72 - $MAX_MPITASKS_PER_NODE - 1:00:00 - - - - - case.chgres - $BUILD_COMPLETE and not $TEST - - - - case.chgres - $BUILD_COMPLETE and $TEST - - - - case.run or case.test - $BUILD_COMPLETE - - 72 - $MAX_MPITASKS_PER_NODE - 1:00:00 - - - - - - case.run or case.test - $DOUT_S - - 1 - 1 - 0:20:00 - - - - - - - - - $BUILD_COMPLETE - - 72 - $MAX_MPITASKS_PER_NODE - 1:00:00 - - - - - case.chgres - $BUILD_COMPLETE and not $TEST - - - - case.chgres - $BUILD_COMPLETE and $TEST - - - - - case.run or case.test - $DOUT_S - - 1 - 1 - 0:20:00 - - - - diff --git a/CIME/data/config/ufs/machines/cylc_suite.rc.template b/CIME/data/config/ufs/machines/cylc_suite.rc.template deleted file mode 100644 index 5511f6ab2e3..00000000000 --- a/CIME/data/config/ufs/machines/cylc_suite.rc.template +++ /dev/null @@ -1,24 +0,0 @@ -[meta] - title = CESM CYLC workflow for {{ workflow_description }} -[cylc] - [[parameters]] - member = {{ members }} - -[scheduling] - cycling mode = integer - initial cycle point = 1 - final cycle point = {{ cycles }} - - [[dependencies]] - [[[R1]]] - graph = "set_external_workflow => run => st_archive " - [[[R/P1]]] # Integer Cycling - graph = """ - st_archive[-P1] => run - run => st_archive - """ -[runtime] - [[set_external_workflow]] - 
script = cd {{ case_path_string }} ./xmlchange EXTERNAL_WORKFLOW=TRUE - [[st_archive]] - script = cd {{ case_path_string }} ./case.submit --job case.st_archive; ./xmlchange CONTINUE_RUN=TRUE diff --git a/CIME/data/config/ufs/machines/template.case.run b/CIME/data/config/ufs/machines/template.case.run deleted file mode 100755 index b18a2988f29..00000000000 --- a/CIME/data/config/ufs/machines/template.case.run +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/env python3 -# Batch system directives -{{ batchdirectives }} - -""" -template to create a case run script. This should only ever be called -by case.submit when on batch system. This script only exists as a way of providing -batch directives. Use case.submit from the command line to run your case. - -DO NOT RUN THIS SCRIPT MANUALLY -""" - -import os, sys -os.chdir( '{{ caseroot }}') - -_LIBDIR = os.path.join("{{ cimeroot }}", "scripts", "Tools") -sys.path.append(_LIBDIR) - -from standard_script_setup import * - -from CIME.case import Case - -logger = logging.getLogger(__name__) - -import argparse - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser(description=description, - formatter_class=argparse.RawTextHelpFormatter) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("--caseroot", - help="Case directory to build") - - parser.add_argument("--skip-preview-namelist", action="store_true", - help="Skip calling preview-namelist during case.run") - - parser.add_argument("--completion-sets-continue-run", action="store_true", - help="This is used to ensure CONTINUE_RUN is cleared for an initial run, " - "but set for subsequent runs.") - - parser.add_argument("--resubmit", default=False, action="store_true", - help="If RESUBMIT is set, this performs the resubmissions.") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - if args.caseroot is not None: - os.chdir(args.caseroot) - - if args.skip_preview_namelist is None: - args.skip_preview_namelist = False - - return args.caseroot, args.skip_preview_namelist, args.completion_sets_continue_run, args.resubmit - -############################################################################### -def _main_func(description): -############################################################################### - sys.argv.extend([] if "ARGS_FOR_SCRIPT" not in os.environ else os.environ["ARGS_FOR_SCRIPT"].split()) - - caseroot, skip_pnl, set_continue_run, resubmit = parse_command_line(sys.argv, description) - with Case(caseroot, read_only=False) as case: - success = case.case_run(skip_pnl=skip_pnl, set_continue_run=set_continue_run, submit_resubmits=resubmit) - - sys.exit(0 if success else 1) - -if __name__ == "__main__": - _main_func(__doc__) diff --git a/CIME/data/config/ufs/machines/template.case.test b/CIME/data/config/ufs/machines/template.case.test deleted file mode 100755 index 6f44dc1baf2..00000000000 --- a/CIME/data/config/ufs/machines/template.case.test +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/env python3 -{{ batchdirectives }} -""" -This is the system test submit script for CIME. This should only ever be called -by case.submit when on batch system. This script only exists as a way of providing -batch directives. Use case.submit from the command line to run your case. 
- -DO NOT RUN THIS SCRIPT MANUALLY -""" -import os, sys -os.chdir( '{{ caseroot }}') - -_LIBDIR = os.path.join("{{ cimeroot }}", "scripts", "Tools") -sys.path.append(_LIBDIR) - -from standard_script_setup import * - -from CIME.case import Case - -import argparse - -############################################################################### -def parse_command_line(args, description): -############################################################################### - parser = argparse.ArgumentParser(description=description, - formatter_class=argparse.RawTextHelpFormatter) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("testname", nargs="?",default=None, - help="Name of the test to run, default is set in TESTCASE in env_test.xml") - - parser.add_argument("--caseroot", - help="Case directory to build") - - parser.add_argument("--reset", action="store_true", - help="Reset the case to its original state as defined by config_tests.xml") - - parser.add_argument("--resubmit", action="store_true", - help="Ignored in tests, but needed for all templates") - - parser.add_argument("--skip-preview-namelist", action="store_true", - help="Skip calling preview-namelist during case.run") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - if args.caseroot is not None: - os.chdir(args.caseroot) - - return args.caseroot, args.testname, args.reset, args.skip_preview_namelist - -############################################################################### -def _main_func(description): -############################################################################### - sys.argv.extend([] if "ARGS_FOR_SCRIPT" not in os.environ else os.environ["ARGS_FOR_SCRIPT"].split()) - - caseroot, testname, reset, skip_pnl = parse_command_line(sys.argv, description) - with Case(caseroot, read_only=False) as case: - success = case.case_test(testname=testname, reset=reset, skip_pnl=skip_pnl) - - sys.exit(0 if success else 1) - -if (__name__ == "__main__"): - _main_func(__doc__) diff --git a/CIME/data/config/ufs/machines/template.chgres.run b/CIME/data/config/ufs/machines/template.chgres.run deleted file mode 100755 index 3ed5da2c798..00000000000 --- a/CIME/data/config/ufs/machines/template.chgres.run +++ /dev/null @@ -1,90 +0,0 @@ -#!/bin/bash -{{ batchdirectives }} - -# Check environment variable -if [ -z "${NCEPLIBS_DIR}" ]; then - echo "NCEPLIBS_DIR needs to be set! Exiting ..." - exit -fi - -# Set environment variables and load modules -source {{ caseroot }}/.env_mach_specific.sh - -# Query run directory -rundir=`./xmlquery --value RUNDIR` -echo "Run directory is $rundir" - -# Query build directory -blddir="$rundir/../bld" - -# Link chgres executable to build directory -cd $blddir -ln -sf $NCEPLIBS_DIR/bin/chgres_cube.exe . -cd - - -# Find date command -OS=`uname -a` -MACOS='Darwin' -if [[ "$OS" == *"$MACOS"* ]]; then - date_cmd=gdate -else - date_cmd=date -fi - -# Query resolution and date -atm_grid=`./xmlquery --value ATM_GRID` -start_date=`./xmlquery --value RUN_STARTDATE` -start_tod=`./xmlquery --value START_TOD` -start_hh=$((start_tod/3600)) -echo "ATM_GRID = $atm_grid" -echo "RUN_STARTDATE = $start_date + $start_hh hours" - -# Prefix for file names -prefix=`printf "${atm_grid}.${start_date}_%02d" "$start_hh"` -echo "FILE PREFIX = $prefix" - -# Query it is a restart or not -isrestart=`./xmlquery --value CONTINUE_RUN` -echo "Is this run restart? 
$isrestart" - -# Make sure namelists are up to date -./preview_namelists - -# Query required number of task -np=`./xmlquery task_count --subgroup case.chgres --value` - -# Goto run directory -cd $rundir - -# Run it only if it is not restart and INPUT/ directory has no input for the given date -if [ "$isrestart" != "TRUE" -a ! -f "$rundir/INPUT/${prefix}.gfs_ctrl.nc" ]; then - # Link namelist file - ln -sf config.nml fort.41 - - # Get current date - LID=`${date_cmd} +%y%m%d-%H%M%S` - - # Run chgres - runcmd='{{ mpirun }}' - mpirun=`echo $runcmd | awk '{print $1}'` - eval "$mpirun -n $np $blddir/chgres_cube.exe 1> chgres_cube.$LID.log 2>&1" - - # Move output files to input directory - mv -f gfs_ctrl.nc INPUT/${prefix}.gfs_ctrl.nc - mv -f out.atm.tile1.nc INPUT/${prefix}.gfs_data.tile1.nc - mv -f out.atm.tile2.nc INPUT/${prefix}.gfs_data.tile2.nc - mv -f out.atm.tile3.nc INPUT/${prefix}.gfs_data.tile3.nc - mv -f out.atm.tile4.nc INPUT/${prefix}.gfs_data.tile4.nc - mv -f out.atm.tile5.nc INPUT/${prefix}.gfs_data.tile5.nc - mv -f out.atm.tile6.nc INPUT/${prefix}.gfs_data.tile6.nc - mv -f out.sfc.tile1.nc INPUT/${prefix}.sfc_data.tile1.nc - mv -f out.sfc.tile2.nc INPUT/${prefix}.sfc_data.tile2.nc - mv -f out.sfc.tile3.nc INPUT/${prefix}.sfc_data.tile3.nc - mv -f out.sfc.tile4.nc INPUT/${prefix}.sfc_data.tile4.nc - mv -f out.sfc.tile5.nc INPUT/${prefix}.sfc_data.tile5.nc - mv -f out.sfc.tile6.nc INPUT/${prefix}.sfc_data.tile6.nc -else - echo "Skip running CHGRES!" - echo "Restarted? - $isrestart" - echo "Input already exists or processed? - $rundir/INPUT/${prefix}.gfs_ctrl.nc" -fi diff --git a/CIME/data/config/ufs/machines/template.gfs_post.run b/CIME/data/config/ufs/machines/template.gfs_post.run deleted file mode 100755 index 466fad5daed..00000000000 --- a/CIME/data/config/ufs/machines/template.gfs_post.run +++ /dev/null @@ -1,102 +0,0 @@ -#!/bin/bash -{{ batchdirectives }} - -# Check environment variable -if [ -z "${NCEPLIBS_DIR}" ]; then - echo "NCEPLIBS_DIR needs to be set! Exiting ..." - exit -fi - -# Set environment variables and load modules -source {{ caseroot }}/.env_mach_specific.sh - -# Query run directory -rundir=`./xmlquery --value RUNDIR` -echo "run dir is $rundir" -cd $rundir - -# Query build directory -blddir="$rundir/../bld" - -# Link chgres executable to build directory -cd $blddir -ln -sf $NCEPLIBS_DIR/bin/ncep_post . 
-cd - - -# Find date command -OS=`uname -a` -MACOS='Darwin' -if [[ "$OS" == *"$MACOS"* ]]; then - date_cmd=gdate -else - date_cmd=date -fi - -# Query mpirun command -runcmd='{{ mpirun }}' -mpirun=`echo $runcmd | awk '{print $1}'` - -# Query file prefixes -dyn_prefix=`cat model_configure | grep filename_base | awk -F: '{print $2}' | awk '{print $1}'` -phy_prefix=`cat model_configure | grep filename_base | awk -F: '{print $2}' | awk '{print $2}'` - -# Query output format -file_ext=`cat model_configure | grep output_file | awk -F: '{print $2}' | tr -d " "` -if [ "$file_ext" == 'netcdf' ];then - file_ext="nc" -fi -if [ "$file_ext" == 'netcdf_esmf' ];then - file_ext="nc" -fi - -# Query template namelist file -date_str=`head -n 4 itag.tmp | tail -n 1` -date_str=${date_str/_/" "} -nlines=`cat itag.tmp | wc -l` -echo $nlines - -# Run post to process all output files one by one -for f in ${dyn_prefix}f*.$file_ext -do - # Query time before starting post - export T1=$SECONDS - - # Query time step and create new date for cycle - time_step=`echo $f | sed "s/${dyn_prefix}f//" | sed "s/\.$file_ext//"` - time_step_num=`expr $time_step + 0` - date_str_cycle=`${date_cmd} -d "$date_str $time_step_num hours" +%Y-%m-%d_%H:%M:%S` - - # Create namelist for specific date - head -n 3 itag.tmp > itaga.$time_step - echo $date_str_cycle >> itaga.$time_step - tail -n $((nlines-4)) itag.tmp >> itaga.$time_step - - # Update used flat file based on time step - if [ "$time_step_num" -eq "0" ]; then - ln -sf postxconfig-NT-GFS-F00.txt postxconfig-NT.txt - #sed 's/postxconfig-NT.txt/postxconfig-NT-GFS-F00.txt/' itaga.$time_step > .itaga.$time_step - #mv .itaga.$time_step itaga.$time_step - else - ln -sf postxconfig-NT-GFS.txt postxconfig-NT.txt - #sed 's/postxconfig-NT.txt/postxconfig-NT-GFS.txt/' itaga.$time_step > .itaga.$time_step - #mv .itaga.$time_step itaga.$time_step - fi - - # Copy files with common name - cp ${dyn_prefix}f${time_step}.${file_ext} nemsfile - echo "cp ${dyn_prefix}f${time_step}.${file_ext} nemsfile" - cp ${phy_prefix}f${time_step}.${file_ext} flxfile - echo "cp ${phy_prefix}f${time_step}.${file_ext} flxfile" - - # Copy cycle namelist with common name - cp itaga.$time_step itag - - # Run post - eval "time $mpirun $blddir/ncep_post >oi.$time_step 2>ei.$time_step" - - # Get timing resuts - export T2=$SECONDS - let dff=$T2'-'$T1 - echo ran $DECOMP on `date` in $dff seconds >>TIMES - echo done >>DONEOUT -done diff --git a/CIME/data/config/ufs/machines/template.st_archive b/CIME/data/config/ufs/machines/template.st_archive deleted file mode 100755 index 8100e1ff5f6..00000000000 --- a/CIME/data/config/ufs/machines/template.st_archive +++ /dev/null @@ -1,97 +0,0 @@ -#!/usr/bin/env python3 -# Batch system directives -{{ batchdirectives }} - -""" -Performs short term archiving for restart files, history and rpointer -files in the $RUNDIR associated with $CASEROOT. Normally this script -is called by case.submit on batch systems. 
- -""" - -import sys, os, time -os.chdir( '{{ caseroot }}') - -_LIBDIR = os.path.join("{{ cimeroot }}", "scripts", "Tools") -sys.path.append(_LIBDIR) - -from standard_script_setup import * -from CIME.case import Case - -logger = logging.getLogger(__name__) - - -############################################################################### -def parse_command_line(args, description): -############################################################################### - - parser = argparse.ArgumentParser(description=description, - formatter_class=argparse.RawTextHelpFormatter) - - CIME.utils.setup_standard_logging_options(parser) - - parser.add_argument("--caseroot", default=os.getcwd(), - help="Case directory to build") - - parser.add_argument("--no-incomplete-logs", default=False, action="store_true", - help="Whether to archive logs which have been completed or not") - - parser.add_argument("--copy-only", default=False, action="store_true", - help="Copy instead of move the files to be archived") - - parser.add_argument("--last-date", default=None, - help="WARNING: This option with --force-move may corrupt your run directory! Use at your own risk! " - "Last simulation date to archive, specified as 'Year-Month-Day'. " - "Year must be specified with 4 digits, while month and day can be specified without zero padding. " - "'0003-11-4' would archive at most files for the simulated year 3, month 11, day 4. " - "This option implies --copy-only unless --force-move is specified.") - - parser.add_argument("--force-move", default=False, action="store_true", - help="Move the files even if it's unsafe to do so, dangerous if used with --copy-only.") - - parser.add_argument("--test-all", default=False, action="store_true", - help="Run tests of st_archiver functionality on config_archive.xml") - - parser.add_argument("--test-case", default=False, action="store_true", - help="Run tests of st_archiver functionality on env_archive.xml") - - parser.add_argument("--resubmit", default=False, action="store_true", - help="If RESUBMIT is set, this performs the resubmissions."
- "This is primarily meant for use by case.submit") - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - - if args.caseroot is not None: - os.chdir(args.caseroot) - - if args.last_date is not None and args.force_move is False: - args.copy_only = True - - if args.force_move is True: - args.copy_only = False - - return (args.caseroot, args.last_date, args.no_incomplete_logs, args.copy_only, - args.test_all, args.test_case, args.resubmit) - - -############################################################################### -def _main_func(description): -############################################################################### - sys.argv.extend([] if "ARGS_FOR_SCRIPT" not in os.environ else os.environ["ARGS_FOR_SCRIPT"].split()) - caseroot, last_date, no_incomplete_logs, copy_only, testall, testcase, resubmit = parse_command_line(sys.argv, description) - with Case(caseroot, read_only=False) as case: - if testall: - success = case.test_st_archive() - elif testcase: - success = case.test_env_archive() - else: - success = case.case_st_archive(last_date_str=last_date, - archive_incomplete_logs=not no_incomplete_logs, - copy_only=copy_only, resubmit=resubmit) - - sys.exit(0 if success else 1) - -############################################################################### - -if __name__ == "__main__": - _main_func(__doc__) diff --git a/CIME/data/config/ufs/machines/userdefined_laptop_template/README.md b/CIME/data/config/ufs/machines/userdefined_laptop_template/README.md deleted file mode 100644 index 1a0e03b663f..00000000000 --- a/CIME/data/config/ufs/machines/userdefined_laptop_template/README.md +++ /dev/null @@ -1,131 +0,0 @@ -Building CIME on an UNSUPPORTED local machine ---------------------------------------------- - -These directions are for a Mac OS X 10.9 or 10.10 laptop using -homebrew or macports to install the required software. The procedure -is similar for a linux workstation or cluster, you will just use -different package management tools to install the third party -libraries. - -Setup -===== - - - install xcode, including the command line tools. Failure to - install the command line tools is the most likely cause if you - get an error about the compilers not being able to create - executables. - - - install third party libraries from homebrew or macports. - - - home brew - - Install science tap : - - brew install gcc --without-multilib cmake mpich hdf5 --enable-fortran netcdf --enable-fortran - - - - macports - - sudo port install mpich +gcc48 hdf5-18 +mpich netcdf-fortran +gcc48 +mpich cmake - - Note: If you see an error while running create_newcase that - indicates perl can't find XML::LibXML, you may need to install - p5-xml-libxml as well. - - - - Some of the shell scripts used by cesm hard code "gmake" instead - of using the GMAKE variable from env_build.xml. To work around - this, you should install gnu make, or simply create a link from - make to gmake in you path. 
- - mkdir -p ${HOME}/local/bin - ln -s `whereis make` ${HOME}/local/bin/gmake - cat >> ${HOME}/.bashrc < - - - - - - -DFORTRANUNDERSCORE -DNO_R16 - - - -fopenmp - - - /usr/local/bin/gfortran - /usr/bin/cc - /usr/bin/c++ - /usr/local/bin/mpif90 - /usr/local/bin/mpicc - /usr/local/bin/mpicxx - FORTRAN - TRUE - /usr/local - - $(shell $(NETCDF_PATH)/bin/nf-config --flibs) -framework Accelerate - - - - - diff --git a/CIME/data/config/ufs/machines/userdefined_laptop_template/config_machines.xml b/CIME/data/config/ufs/machines/userdefined_laptop_template/config_machines.xml deleted file mode 100644 index 2d1005c5d0d..00000000000 --- a/CIME/data/config/ufs/machines/userdefined_laptop_template/config_machines.xml +++ /dev/null @@ -1,33 +0,0 @@ - - - - - __USEFUL_DESCRIPTION__ - something.matching.your.machine.hostname - Darwin - gnu - mpich - $ENV{HOME}/projects/scratch - $ENV{HOME}/projects/cesm-inputdata - $ENV{HOME}/projects/ptclm-data - $ENV{HOME}/projects/scratch/archive/$CASE - $ENV{HOME}/projects/baselines - $CIMEROOT/tools/cprnc/build/cprnc - 4 - none - __YOUR_NAME_HERE__ - 4 - 4 - - mpiexec_mpt - - -np $TOTALPES - --prepend-rank - - - - - - diff --git a/CIME/data/config/ufs/machines/userdefined_laptop_template/config_pes.xml b/CIME/data/config/ufs/machines/userdefined_laptop_template/config_pes.xml deleted file mode 100644 index 0464137703c..00000000000 --- a/CIME/data/config/ufs/machines/userdefined_laptop_template/config_pes.xml +++ /dev/null @@ -1,31 +0,0 @@ - - - - - - $MAX_TASKS_PER_NODE 1 0 - - - - 1 1 0 - - - - 1 1 0 - 1 1 0 - 1 1 0 - 1 1 0 - 1 1 0 - 1 1 0 - 1 1 0 - 1 1 0 - - - 2 1 - 2 - - - - diff --git a/CIME/data/config/xml_schemas/config_grids_v2.2.xsd b/CIME/data/config/xml_schemas/config_grids_v2.2.xsd index 26d77f878f3..655b4759e7c 100644 --- a/CIME/data/config/xml_schemas/config_grids_v2.2.xsd +++ b/CIME/data/config/xml_schemas/config_grids_v2.2.xsd @@ -42,6 +42,7 @@ + @@ -51,6 +52,7 @@ + diff --git a/CIME/data/config/xml_schemas/config_machines.xsd b/CIME/data/config/xml_schemas/config_machines.xsd index 3dd64f15eea..59a5890bf0a 100644 --- a/CIME/data/config/xml_schemas/config_machines.xsd +++ b/CIME/data/config/xml_schemas/config_machines.xsd @@ -66,6 +66,7 @@ + @@ -181,6 +182,7 @@ + diff --git a/CIME/data/genf90/ChangeLog b/CIME/data/genf90/ChangeLog index 32db415b0ce..15c6f6929cf 100644 --- a/CIME/data/genf90/ChangeLog +++ b/CIME/data/genf90/ChangeLog @@ -59,4 +59,3 @@ Version: genf90_130320 One-line summary: Move to new directory =========================== - diff --git a/CIME/date.py b/CIME/date.py index c2adea8e90f..0d12b2ceede 100644 --- a/CIME/date.py +++ b/CIME/date.py @@ -1,9 +1,10 @@ import re from CIME.XML.standard_module_setup import * + logger = logging.getLogger(__name__) ############################################################################### def get_file_date(filename): -############################################################################### + ############################################################################### """ Returns the date associated with the filename as a date object representing the correct date Formats supported: @@ -34,10 +35,11 @@ def get_file_date(filename): # TODO: Add these to config_archive.xml, instead of here # Note these must be in order of most specific to least # so that lesser specificities aren't used to parse greater ones - re_formats = [r"[0-9]*[0-9]{4}-[0-9]{1,2}-[0-9]{1,2}_[0-9]{1,2}\.[0-9]{1,2}\.[0-9]{1,2}", # [yy...]yyyy-mm-dd_hh.MM.ss - r"[0-9]*[0-9]{4}-[0-9]{1,2}-[0-9]{1,2}[\-_][0-9]{1,5}", # 
[yy...]yyyy-mm-dd_sssss - r"[0-9]*[0-9]{4}-[0-9]{1,2}-[0-9]{1,2}", # [yy...]yyyy-mm-dd - r"[0-9]*[0-9]{4}[\-\.][0-9]{1,2}", # [yy...]yyyy-mm + re_formats = [ + r"[0-9]*[0-9]{4}-[0-9]{1,2}-[0-9]{1,2}_[0-9]{1,2}\.[0-9]{1,2}\.[0-9]{1,2}", # [yy...]yyyy-mm-dd_hh.MM.ss + r"[0-9]*[0-9]{4}-[0-9]{1,2}-[0-9]{1,2}[\-_][0-9]{1,5}", # [yy...]yyyy-mm-dd_sssss + r"[0-9]*[0-9]{4}-[0-9]{1,2}-[0-9]{1,2}", # [yy...]yyyy-mm-dd + r"[0-9]*[0-9]{4}[\-\.][0-9]{1,2}", # [yy...]yyyy-mm ] for re_str in re_formats: @@ -57,15 +59,16 @@ def get_file_date(filename): elif len(date_tuple) == 6: # Create a date object with arbitrary year, month, day, but the correct time of day # Then use _get_day_second to get the time of day in seconds - second = date.hms_to_second(hour = date_tuple[3], - minute = date_tuple[4], - second = date_tuple[5]) + second = date.hms_to_second( + hour=date_tuple[3], minute=date_tuple[4], second=date_tuple[5] + ) return date(year, month, day, 0, 0, second) # Not a valid filename date format logger.debug("{} is a filename without a supported date!".format(filename)) return None + class date: """ Simple struct for holding dates and the time of day and performing comparisons @@ -178,20 +181,21 @@ class date: >>> date(3, 5, 6, 8) > date(4, 5, 6, 8) False """ + @staticmethod def hms_to_second(hour, minute, second): _SECONDS_PER_HOUR = 3600 _SECONDS_PER_MINUTE = 60 - return (hour * _SECONDS_PER_HOUR + minute * _SECONDS_PER_MINUTE - + second) + return hour * _SECONDS_PER_HOUR + minute * _SECONDS_PER_MINUTE + second @staticmethod def second_to_hms(second): _SECONDS_PER_HOUR = 3600 _SECONDS_PER_MINUTE = 60 - return { 'hour': second // _SECONDS_PER_HOUR, - 'minute': (second % _SECONDS_PER_HOUR) // _SECONDS_PER_MINUTE, - 'second': second % _SECONDS_PER_MINUTE + return { + "hour": second // _SECONDS_PER_HOUR, + "minute": (second % _SECONDS_PER_HOUR) // _SECONDS_PER_MINUTE, + "second": second % _SECONDS_PER_MINUTE, } def __init__(self, year=1, month=1, day=1, hour=0, minute=0, second=0): @@ -206,12 +210,14 @@ def __str__(self): 'date(4, 5, 7, 0, 1, 4)' """ fmt_str = "date({year:d}, {month:d}, {day:d}, {hour:d}, {minute:d}, {second:d})" - return fmt_str.format(year = self.year(), - month = self.month(), - day = self.day(), - hour = self.hour(), - minute = self.minute(), - second = self.second()) + return fmt_str.format( + year=self.year(), + month=self.month(), + day=self.day(), + hour=self.hour(), + minute=self.minute(), + second=self.second(), + ) def year(self): return self._year @@ -223,13 +229,13 @@ def day(self): return self._day def hour(self): - return self.second_to_hms(self._second)['hour'] + return self.second_to_hms(self._second)["hour"] def minute(self): - return self.second_to_hms(self._second)['minute'] + return self.second_to_hms(self._second)["minute"] def second(self): - return self.second_to_hms(self._second)['second'] + return self.second_to_hms(self._second)["second"] def second_of_day(self): return self._second @@ -238,9 +244,12 @@ def __repr__(self): return str(self) def __eq__(self, other): - return ((self.year() == other.year()) and (self.month() == other.month()) - and (self.day() == other.day()) - and (self.second_of_day() == other.second_of_day())) + return ( + (self.year() == other.year()) + and (self.month() == other.month()) + and (self.day() == other.day()) + and (self.second_of_day() == other.second_of_day()) + ) def __ne__(self, other): return not (self == other) @@ -268,7 +277,7 @@ def __lt__(self, other): return False def __le__(self, other): - return ((self < other) or (self 
== other)) + return (self < other) or (self == other) def __ge__(self, other): return not (self < other) diff --git a/CIME/expected_fails.py b/CIME/expected_fails.py index 4fbe9eeba7f..f8e5339702b 100644 --- a/CIME/expected_fails.py +++ b/CIME/expected_fails.py @@ -5,17 +5,17 @@ from CIME.XML.standard_module_setup import * EXPECTED_FAILURE_COMMENT = "(EXPECTED FAILURE)" -UNEXPECTED_FAILURE_COMMENT_START = "(UNEXPECTED" # There will be some additional text after this, before the end parentheses +UNEXPECTED_FAILURE_COMMENT_START = "(UNEXPECTED" # There will be some additional text after this, before the end parentheses -class ExpectedFails(object): +class ExpectedFails(object): def __init__(self): """Initialize an empty ExpectedFails object""" self._fails = {} def __eq__(self, rhs): expect(isinstance(rhs, ExpectedFails), "Wrong type") - return self._fails == rhs._fails # pylint: disable=protected-access + return self._fails == rhs._fails # pylint: disable=protected-access def __ne__(self, rhs): result = self.__eq__(rhs) @@ -26,16 +26,19 @@ def __repr__(self): def add_failure(self, phase, expected_status): """Add an expected failure to the list""" - expect(phase not in self._fails, "Phase {} already present in list".format(phase)) + expect( + phase not in self._fails, "Phase {} already present in list".format(phase) + ) self._fails[phase] = expected_status def expected_fails_comment(self, phase, status): """Returns a string giving the expected fails comment for this phase and status""" if phase not in self._fails: - return '' + return "" if self._fails[phase] == status: return EXPECTED_FAILURE_COMMENT else: - return "{}: expected {})".format(UNEXPECTED_FAILURE_COMMENT_START, - self._fails[phase]) + return "{}: expected {})".format( + UNEXPECTED_FAILURE_COMMENT_START, self._fails[phase] + ) diff --git a/CIME/get_tests.py b/CIME/get_tests.py index b3cb0782495..12c80501a85 100644 --- a/CIME/get_tests.py +++ b/CIME/get_tests.py @@ -10,7 +10,8 @@ sys.path.insert(0, os.path.join(get_cime_root(), "../cime_config")) _ALL_TESTS = {} try: - from tests import _TESTS # pylint: disable=import-error + from tests import _TESTS # pylint: disable=import-error + _ALL_TESTS.update(_TESTS) except ImportError: pass @@ -26,36 +27,32 @@ # } _CIME_TESTS = { - - "cime_tiny" : { - "time" : "0:10:00", - "tests" : ( + "cime_tiny": { + "time": "0:10:00", + "tests": ( "ERS.f19_g16_rx1.A", "NCK.f19_g16_rx1.A", - ) - }, - - "cime_test_only_pass" : { - "time" : "0:10:00", - "tests" : ( + ), + }, + "cime_test_only_pass": { + "time": "0:10:00", + "tests": ( "TESTRUNPASS_P1.f19_g16_rx1.A", "TESTRUNPASS_P1.ne30_g16_rx1.A", "TESTRUNPASS_P1.f45_g37_rx1.A", - ) - }, - - "cime_test_only_slow_pass" : { - "time" : "0:10:00", - "tests" : ( + ), + }, + "cime_test_only_slow_pass": { + "time": "0:10:00", + "tests": ( "TESTRUNSLOWPASS_P1.f19_g16_rx1.A", "TESTRUNSLOWPASS_P1.ne30_g16_rx1.A", "TESTRUNSLOWPASS_P1.f45_g37_rx1.A", - ) - }, - - "cime_test_only" : { - "time" : "0:10:00", - "tests" : ( + ), + }, + "cime_test_only": { + "time": "0:10:00", + "tests": ( "TESTBUILDFAIL_P1.f19_g16_rx1.A", "TESTBUILDFAILEXC_P1.f19_g16_rx1.A", "TESTRUNFAIL_P1.f19_g16_rx1.A", @@ -65,59 +62,50 @@ "TESTTESTDIFF_P1.f19_g16_rx1.A", "TESTMEMLEAKFAIL_P1.f09_g16.X", "TESTMEMLEAKPASS_P1.f09_g16.X", - ) - }, - - "cime_test_all" : { - "inherit" : "cime_test_only", - "time" : "0:10:00", - "tests" : "TESTRUNDIFF_P1.f19_g16_rx1.A" - }, - - "cime_test_share" : { - "time" : "0:10:00", - "share" : True, - "tests" : ( + ), + }, + "cime_test_all": { + "inherit": 
"cime_test_only", + "time": "0:10:00", + "tests": "TESTRUNDIFF_P1.f19_g16_rx1.A", + }, + "cime_test_share": { + "time": "0:10:00", + "share": True, + "tests": ( "SMS_P2.f19_g16_rx1.A", "SMS_P4.f19_g16_rx1.A", "SMS_P8.f19_g16_rx1.A", "SMS_P16.f19_g16_rx1.A", - ) - }, - - "cime_test_share2" : { - "time" : "0:10:00", - "share" : True, - "tests" : ( + ), + }, + "cime_test_share2": { + "time": "0:10:00", + "share": True, + "tests": ( "SMS_P2.f19_g16_rx1.X", "SMS_P4.f19_g16_rx1.X", "SMS_P8.f19_g16_rx1.X", "SMS_P16.f19_g16_rx1.X", - ) - }, - - "cime_test_repeat" : { - "tests" : ( + ), + }, + "cime_test_repeat": { + "tests": ( "TESTRUNPASS_P1.f19_g16_rx1.A", "TESTRUNPASS_P2.ne30_g16_rx1.A", "TESTRUNPASS_P4.f45_g37_rx1.A", - ) - }, - - "cime_test_time" : { - "time" : "0:13:00", - "tests" : ( - "TESTRUNPASS_P69.f19_g16_rx1.A.testmod", - ) - }, - - "cime_test_multi_inherit" : { - "inherit" : ("cime_test_repeat", "cime_test_only_pass", "cime_test_all") - }, - - "cime_developer" : { - "time" : "0:15:00", - "tests" : ( + ) + }, + "cime_test_time": { + "time": "0:13:00", + "tests": ("TESTRUNPASS_P69.f19_g16_rx1.A.testmod",), + }, + "cime_test_multi_inherit": { + "inherit": ("cime_test_repeat", "cime_test_only_pass", "cime_test_all") + }, + "cime_developer": { + "time": "0:15:00", + "tests": ( "NCK_Ld3.f45_g37_rx1.A", "ERI_Ln9.f09_g16.X", "ERIO_Ln11.f09_g16.X", @@ -135,16 +123,15 @@ "PRE.f19_f19.ADESP_TEST", "MCC_P1.f19_g16_rx1.A", "LDSTA.f45_g37_rx1.A", - ) - }, - + ), + }, } _ALL_TESTS.update(_CIME_TESTS) ############################################################################### def _get_key_data(raw_dict, key, the_type): -############################################################################### + ############################################################################### if key not in raw_dict: if the_type is tuple: return () @@ -157,33 +144,50 @@ def _get_key_data(raw_dict, key, the_type): else: val = raw_dict[key] if the_type is tuple and isinstance(val, CIME.six.string_types): - val = (val, ) + val = (val,) - expect(isinstance(val, the_type), - "Wrong type for {}, {} is a {} but expected {}".format(key, val, type(val), the_type)) + expect( + isinstance(val, the_type), + "Wrong type for {}, {} is a {} but expected {}".format( + key, val, type(val), the_type + ), + ) return val + ############################################################################### def get_test_data(suite): -############################################################################### + ############################################################################### """ For a given suite, returns (inherit, time, share, tests) """ raw_dict = _ALL_TESTS[suite] for key in raw_dict.keys(): - expect(key in ["inherit", "time", "share", "tests"], "Unexpected test key '{}'".format(key)) + expect( + key in ["inherit", "time", "share", "tests"], + "Unexpected test key '{}'".format(key), + ) + + return ( + _get_key_data(raw_dict, "inherit", tuple), + _get_key_data(raw_dict, "time", str), + _get_key_data(raw_dict, "share", bool), + _get_key_data(raw_dict, "tests", tuple), + ) - return _get_key_data(raw_dict, "inherit", tuple), _get_key_data(raw_dict, "time", str), _get_key_data(raw_dict, "share", bool), _get_key_data(raw_dict, "tests", tuple) ############################################################################### def get_test_suites(): -############################################################################### + ############################################################################### return 
list(_ALL_TESTS.keys()) + ############################################################################### -def get_test_suite(suite, machine=None, compiler=None, skip_inherit=False, skip_tests=None): -############################################################################### +def get_test_suite( + suite, machine=None, compiler=None, skip_inherit=False, skip_tests=None +): + ############################################################################### """ Return a list of FULL test names for a suite. """ @@ -191,26 +195,39 @@ def get_test_suite(suite, machine=None, compiler=None, skip_inherit=False, skip_ machobj = Machines(machine=machine) machine = machobj.get_machine_name() - if(compiler is None): + if compiler is None: compiler = machobj.get_default_compiler() - expect(machobj.is_valid_compiler(compiler),"Compiler {} not valid for machine {}".format(compiler,machine)) + expect( + machobj.is_valid_compiler(compiler), + "Compiler {} not valid for machine {}".format(compiler, machine), + ) inherits_from, _, _, tests_raw = get_test_data(suite) tests = [] for item in tests_raw: - expect(isinstance(item, CIME.six.string_types), "Bad type of test {}, expected string".format(item)) + expect( + isinstance(item, CIME.six.string_types), + "Bad type of test {}, expected string".format(item), + ) test_mods = None test_components = item.split(".") expect(len(test_components) in [3, 4], "Bad test name {}".format(item)) - if (len(test_components) == 4): + if len(test_components) == 4: test_name = ".".join(test_components[:-1]) test_mods = test_components[-1] else: test_name = item if not skip_tests or not test_name in skip_tests: - tests.append(CIME.utils.get_full_test_name(test_name, machine=machine, compiler=compiler, testmods_string=test_mods)) + tests.append( + CIME.utils.get_full_test_name( + test_name, + machine=machine, + compiler=compiler, + testmods_string=test_mods, + ) + ) if not skip_inherit: for inherits in inherits_from: @@ -222,18 +239,22 @@ def get_test_suite(suite, machine=None, compiler=None, skip_inherit=False, skip_ return tests + ############################################################################### def suite_has_test(suite, test_full_name, skip_inherit=False): -############################################################################### + ############################################################################### _, _, _, _, machine, compiler, _ = CIME.utils.parse_test_name(test_full_name) expect(machine is not None, "{} is not a full test name".format(test_full_name)) - tests = get_test_suite(suite, machine=machine, compiler=compiler, skip_inherit=skip_inherit) + tests = get_test_suite( + suite, machine=machine, compiler=compiler, skip_inherit=skip_inherit + ) return test_full_name in tests + ############################################################################### def get_build_groups(tests): -############################################################################### + ############################################################################### """ Given a list of tests, return a list of lists, with each list representing a group of tests that can share executables. 
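The suite dictionaries above follow a small schema: each entry may carry "inherit", "time", "share", and "tests" keys, _get_key_data coerces a bare string into a one-element tuple, and get_test_suite walks the "inherit" tuple recursively so a suite picks up every ancestor's tests. A minimal, self-contained sketch of that expansion; the SUITES data and expand_suite helper are illustrative stand-ins, not the CIME API (which also folds machine and compiler names into the full test names):

# Hypothetical suite data in the _ALL_TESTS style; suite names are made up.
SUITES = {
    "demo_base": {"tests": ("ERS.f19_g16_rx1.A", "NCK.f19_g16_rx1.A")},
    "demo_child": {
        "inherit": ("demo_base",),
        "tests": ("TESTRUNDIFF_P1.f19_g16_rx1.A",),
    },
}

def expand_suite(suite, seen=None):
    """Collect a suite's own tests plus everything reachable via 'inherit'."""
    seen = set() if seen is None else seen
    if suite in seen:  # tolerate accidental inheritance cycles
        return set()
    seen.add(suite)
    entry = SUITES[suite]
    tests = set(entry.get("tests", ()))
    for parent in entry.get("inherit", ()):
        tests |= expand_suite(parent, seen)
    return tests

assert expand_suite("demo_child") == {
    "ERS.f19_g16_rx1.A",
    "NCK.f19_g16_rx1.A",
    "TESTRUNDIFF_P1.f19_g16_rx1.A",
}

Threading the seen set through the recursion is what keeps a multi-parent suite like cime_test_multi_inherit from visiting (and double-counting) a shared ancestor twice.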
@@ -242,7 +263,7 @@ def get_build_groups(tests): >>> get_build_groups(tests) [('SMS_P2.f19_g16_rx1.A.melvin_gnu', 'SMS_P4.f19_g16_rx1.A.melvin_gnu'), ('SMS_P2.f19_g16_rx1.X.melvin_gnu', 'SMS_P4.f19_g16_rx1.X.melvin_gnu'), ('TESTRUNSLOWPASS_P1.f19_g16_rx1.A.melvin_gnu',), ('TESTRUNSLOWPASS_P1.ne30_g16_rx1.A.melvin_gnu',)] """ - build_groups = [] # list of tuples ([tests], set(suites)) + build_groups = [] # list of tuples ([tests], set(suites)) # Get a list of suites that share exes suites = get_test_suites() @@ -277,9 +298,10 @@ def get_build_groups(tests): return [tuple(item[0]) for item in build_groups] + ############################################################################### def infer_machine_name_from_tests(testargs): -############################################################################### + ############################################################################### """ >>> infer_machine_name_from_tests(["NCK.f19_g16_rx1.A.melvin_gnu"]) 'melvin' @@ -303,13 +325,18 @@ def infer_machine_name_from_tests(testargs): if machine is None: machine = machine_for_this_test else: - expect(machine == machine_for_this_test, "Must have consistent machine '%s' != '%s'" % (machine, machine_for_this_test)) + expect( + machine == machine_for_this_test, + "Must have consistent machine '%s' != '%s'" + % (machine, machine_for_this_test), + ) return machine + ############################################################################### def get_full_test_names(testargs, machine, compiler): -############################################################################### + ############################################################################### """ Return full test names in the form: TESTCASE.GRID.COMPSET.MACHINE_COMPILER.TESTMODS @@ -342,13 +369,17 @@ def get_full_test_names(testargs, machine, compiler): for testarg in testargs: # remove any whitespace in name testarg = testarg.strip() - if (testarg.startswith("^")): + if testarg.startswith("^"): negations.add(testarg[1:]) - elif (testarg in e3sm_test_suites): + elif testarg in e3sm_test_suites: tests_to_run.update(get_test_suite(testarg, machine, compiler)) else: try: - tests_to_run.add(CIME.utils.get_full_test_name(testarg, machine=machine, compiler=compiler)) + tests_to_run.add( + CIME.utils.get_full_test_name( + testarg, machine=machine, compiler=compiler + ) + ) except Exception: if "." 
not in testarg: expect(False, "Unrecognized test suite '{}'".format(testarg)) @@ -356,18 +387,21 @@ def get_full_test_names(testargs, machine, compiler): raise for negation in negations: - if (negation in e3sm_test_suites): + if negation in e3sm_test_suites: tests_to_run -= set(get_test_suite(negation, machine, compiler)) else: - fullname = CIME.utils.get_full_test_name(negation, machine=machine, compiler=compiler) - if (fullname in tests_to_run): + fullname = CIME.utils.get_full_test_name( + negation, machine=machine, compiler=compiler + ) + if fullname in tests_to_run: tests_to_run.remove(fullname) return list(sorted(tests_to_run)) + ############################################################################### def get_recommended_test_time(test_full_name): -############################################################################### + ############################################################################### """ >>> get_recommended_test_time("ERS.f19_g16_rx1.A.melvin_gnu") '0:10:00' @@ -382,14 +416,21 @@ def get_recommended_test_time(test_full_name): suites = get_test_suites() for suite in suites: rec_time = get_test_data(suite)[1] - if suite_has_test(suite, test_full_name, skip_inherit=True) and rec_time is not None and \ - (best_time is None or convert_to_seconds(rec_time) < convert_to_seconds(best_time)): + if ( + suite_has_test(suite, test_full_name, skip_inherit=True) + and rec_time is not None + and ( + best_time is None + or convert_to_seconds(rec_time) < convert_to_seconds(best_time) + ) + ): best_time = rec_time return best_time + ############################################################################### def key_test_time(test_full_name): -############################################################################### + ############################################################################### result = get_recommended_test_time(test_full_name) return 99999999 if result is None else convert_to_seconds(result) diff --git a/CIME/get_timing.py b/CIME/get_timing.py index cd121093c2e..5a176e21d56 100644 --- a/CIME/get_timing.py +++ b/CIME/get_timing.py @@ -12,6 +12,7 @@ logger = logging.getLogger(__name__) + class _GetTimingInfo: def __init__(self, name): self.name = name @@ -19,6 +20,7 @@ def __init__(self, name): self.tmax = 0 self.adays = 0 + class _TimingParser: def __init__(self, case, lid="999999-999999"): self.case = case @@ -26,18 +28,19 @@ def __init__(self, case, lid="999999-999999"): self.lid = lid self.finlines = None self.fout = None - self.adays=0 + self.adays = 0 self._driver = case.get_value("COMP_INTERFACE") self.models = {} self.ncount = 0 self.nprocs = 0 + self.version = -1 def write(self, text): self.fout.write(text) def prttime(self, label, offset=None, div=None, coff=-999): if offset is None: - offset=self.models['CPL'].offset + offset = self.models["CPL"].offset if div is None: div = self.adays datalen = 20 @@ -46,8 +49,8 @@ def prttime(self, label, offset=None, div=None, coff=-999): minval, maxval, found = self.gettime(label) if div >= 1.0: - mind = minval/div - maxd = maxval/div + mind = minval / div + maxd = maxval / div else: mind = minval maxd = maxval @@ -55,17 +58,32 @@ def prttime(self, label, offset=None, div=None, coff=-999): pstrlen = 25 if mind >= 0 and maxd >= 0 and found: if coff >= 0: - zoff = pstrlen + coff + int((datalen-clen)/2) - csp = offset - coff - int((datalen-clen)/2) - self.write(" {label:<{width1}}{cstr:<{width2}} {minv:8.3f}:{maxv:8.3f} \n".format(label=label, width1=zoff, cstr=cstr, width2=csp, minv=mind, 
maxv=maxd)) + zoff = pstrlen + coff + int((datalen - clen) / 2) + csp = offset - coff - int((datalen - clen) / 2) + self.write( + " {label:<{width1}}{cstr:<{width2}} {minv:8.3f}:{maxv:8.3f} \n".format( + label=label, + width1=zoff, + cstr=cstr, + width2=csp, + minv=mind, + maxv=maxd, + ) + ) else: zoff = pstrlen + offset - self.write(" {label:<{width1}} {minv:8.3f}:{maxv:8.3f} \n".format(label=label, width1=zoff, minv=mind, maxv=maxd)) + self.write( + " {label:<{width1}} {minv:8.3f}:{maxv:8.3f} \n".format( + label=label, width1=zoff, minv=mind, maxv=maxd + ) + ) def gettime2(self, heading_padded): - if self._driver == 'mct': + if self._driver == "mct": return self._gettime2_mct(heading_padded) - elif self._driver == 'nuopc': + elif self._driver == "nuopc": + if self.version < 0: + self._get_esmf_profile_version() return self._gettime2_nuopc() def _gettime2_mct(self, heading_padded): @@ -74,13 +92,13 @@ def _gettime2_mct(self, heading_padded): heading = '"' + heading_padded.strip() + '"' for line in self.finlines: - m = re.match(r'\s*{}\s+\S\s+(\d+)\s*\d+\s*(\S+)'.format(heading), line) + m = re.match(r"\s*{}\s+\S\s+(\d+)\s*\d+\s*(\S+)".format(heading), line) if m: nprocs = int(float(m.groups()[0])) ncount = int(float(m.groups()[1])) return (nprocs, ncount) else: - m = re.match(r'\s*{}\s+\S\s+(\d+)\s'.format(heading), line) + m = re.match(r"\s*{}\s+\S\s+(\d+)\s".format(heading), line) if m: nprocs = 1 ncount = int(float(m.groups()[0])) @@ -90,8 +108,8 @@ def _gettime2_mct(self, heading_padded): def _gettime2_nuopc(self): self.nprocs = 0 self.ncount = 0 -# expression = re.compile(r'\s*\MED:\s*\(med_phases_profile\)\s+(\d+)\s+(\d+)') - expression = re.compile(r'\s*\[ATM]\s*RunPhase1\s+(\d+)\s+(\d+)') + # expression = re.compile(r'\s*\MED:\s*\(med_phases_profile\)\s+(\d+)\s+(\d+)') + expression = re.compile(r"\s*\[ATM]\s*RunPhase1\s+(\d+)\s+(\d+)") for line in self.finlines: match = expression.match(line) @@ -103,20 +121,25 @@ def _gettime2_nuopc(self): return (0, 0) def gettime(self, heading_padded): - if self._driver == 'mct': + if self._driver == "mct": return self._gettime_mct(heading_padded) - elif self._driver == 'nuopc': + elif self._driver == "nuopc": + if self.version < 0: + self._get_esmf_profile_version() return self._gettime_nuopc(heading_padded) - def _gettime_mct(self, heading_padded): found = False heading = '"' + heading_padded.strip() + '"' minval = 0 maxval = 0 - for line in self.finlines: - m = re.match(r'\s*{}\s+\S\s+\d+\s*\d+\s*\S+\s*\S+\s*(\d*\.\d+)\s*\(.*\)\s*(\d*\.\d+)\s*\(.*\)'.format(heading), line) + m = re.match( + r"\s*{}\s+\S\s+\d+\s*\d+\s*\S+\s*\S+\s*(\d*\.\d+)\s*\(.*\)\s*(\d*\.\d+)\s*\(.*\)".format( + heading + ), + line, + ) if m: maxval = float(m.groups()[0]) minval = float(m.groups()[1]) @@ -124,25 +147,56 @@ def _gettime_mct(self, heading_padded): return (minval, maxval, found) return (0, 0, False) - def _gettime_nuopc(self, heading, instance='0001'): - if instance == '': - instance = '0001' + def _get_esmf_profile_version(self): + """ + Prior to ESMF8_3_0_beta_snapshot_04 the PEs column was not in ESMF_Profile.summary + this routine looks for that in the header field to determine if this file was produced + by a newer (version 1) or older (version 0) ESMF library. 
+ """ + expect(self.finlines, " No ESMF_Profile.summary file found") + for line in self.finlines: + if line.startswith("Region"): + if "PEs" in line: + self.version = 1 + else: + self.version = 0 + + def _gettime_nuopc(self, heading, instance="0001"): + if instance == "": + instance = "0001" minval = 0 maxval = 0 m = None + timeline = [] # PETs Count Mean (s) Min (s) Min PET Max (s) Max PET - timeline = re.compile(r'\s*{}\s+\d+\s+\d+\s+(\d*\.\d+)\s+(\d*\.\d+)\s+\d+\s+(\d*\.\d+)\s+\d+'.format(re.escape(heading))) + timeline.append( + re.compile( + r"\s*{}\s+\d+\s+\d+\s+(\d*\.\d+)\s+(\d*\.\d+)\s+\d+\s+(\d*\.\d+)\s+\d+".format( + re.escape(heading) + ) + ) + ) + # PETs PEs Count Mean (s) Min (s) Min PET Max (s) Max PET + timeline.append( + re.compile( + r"\s*{}\s+\d+\s+\d+\s+\d+\s+(\d*\.\d+)\s+(\d*\.\d+)\s+\d+\s+(\d*\.\d+)\s+\d+".format( + re.escape(heading) + ) + ) + ) phase = None for line in self.finlines: phase = self._get_nuopc_phase(line, instance, phase) if phase != "run" and not "[ensemble]" in heading: continue if heading in line: - m = timeline.match(line) + m = timeline[self.version].match(line) if m: minval = float(m.group(2)) maxval = float(m.group(3)) return (minval, maxval, True) + else: + expect(False, "Parsing error in ESMF_Profile.summary file") return (0, 0, False) @@ -150,20 +204,43 @@ def _gettime_nuopc(self, heading, instance='0001'): def _get_nuopc_phase(line, instance, phase): if "[ensemble] Init 1" in line: phase = "init" - elif "[ESM"+instance+"] RunPhase1" in line: + elif "[ESM" + instance + "] RunPhase1" in line: phase = "run" - elif "[ESM"+instance+"] Finalize" in line: + elif "[ESM" + instance + "] Finalize" in line: phase = "finalize" elif "[ESM" in line and "RunPhase1" in line: phase = "other" return phase def getMEDtime(self, instance): - if instance == '': - instance = '0001' - med_phase_line = re.compile(r'\s*(\[MED\] med_phases\S+)\s+\d+\s+\d+\s+(\d*\.\d+)\s+') - med_connector_line = re.compile(r'\s*(\[MED\] med_connectors\S+)\s+\d+\s+\d+\s+(\d*\.\d+)\s+') - med_fraction_line = re.compile(r'\s*(\[MED\] med_fraction\S+)\s+\d+\s+\d+\s+(\d*\.\d+)\s+') + if instance == "": + instance = "0001" + + med_phase_line = [] + med_connector_line = [] + med_fraction_line = [] + med_phase_line.append( + re.compile(r"\s*(\[MED\] med_phases\S+)\s+\d+\s+\d+\s+(\d*\.\d+)\s+") + ) + med_connector_line.append( + re.compile(r"\s*(\[MED\] med_connectors\S+)\s+\d+\s+\d+\s+(\d*\.\d+)\s+") + ) + med_fraction_line.append( + re.compile(r"\s*(\[MED\] med_fraction\S+)\s+\d+\s+\d+\s+(\d*\.\d+)\s+") + ) + med_phase_line.append( + re.compile(r"\s*(\[MED\] med_phases\S+)\s+\d+\s+\d+\s+\d+\s+(\d*\.\d+)\s+") + ) + med_connector_line.append( + re.compile( + r"\s*(\[MED\] med_connectors\S+)\s+\d+\s+\d+\s+\d+\s+(\d*\.\d+)\s+" + ) + ) + med_fraction_line.append( + re.compile( + r"\s*(\[MED\] med_fraction\S+)\s+\d+\s+\d+\s+\d+\s+(\d*\.\d+)\s+" + ) + ) m = None minval = 0 @@ -173,21 +250,29 @@ def getMEDtime(self, instance): phase = self._get_nuopc_phase(line, instance, phase) if phase != "run": continue - m = med_phase_line.match(line) + m = med_phase_line[self.version].match(line) if not m: - m = med_connector_line.match(line) + m = med_connector_line[self.version].match(line) if not m: - m = med_fraction_line.match(line) + m = med_fraction_line[self.version].match(line) if m: minval += float(m.group(2)) maxval += float(m.group(2)) - return(minval, maxval) + return (minval, maxval) def getCOMMtime(self, instance): - if instance == '': - instance = '0001' - comm_line = 
re.compile(r'\s*(\[\S+-TO-\S+\] RunPhase1)\s+\d+\s+\d+\s+(\d*\.\d+)\s+') + if instance == "": + instance = "0001" + comm_line = [] + comm_line.append( + re.compile(r"\s*(\[\S+-TO-\S+\] RunPhase1)\s+\d+\s+\d+\s+(\d*\.\d+)\s+") + ) + comm_line.append( + re.compile( + r"\s*(\[\S+-TO-\S+\] RunPhase1)\s+\d+\s+\d+\s+\d+\s+(\d*\.\d+)\s+" + ) + ) m = None maxval = 0 phase = None @@ -195,7 +280,7 @@ def getCOMMtime(self, instance): phase = self._get_nuopc_phase(line, instance, phase) if phase != "run": continue - m = comm_line.match(line) + m = comm_line[self.version].match(line) if m: heading = m.group(1) maxv = float(m.group(2)) @@ -203,7 +288,6 @@ def getCOMMtime(self, instance): logger.debug("{} time={} sum={}".format(heading, maxv, maxval)) return maxval - def getTiming(self): ninst = 1 multi_driver = self.case.get_value("MULTI_DRIVER") @@ -212,12 +296,12 @@ def getTiming(self): if ninst > 1: for inst in range(ninst): - self._getTiming(inst+1) + self._getTiming(inst + 1) else: self._getTiming() def _getTiming(self, inst=0): - components=self.case.get_values("COMP_CLASSES") + components = self.case.get_values("COMP_CLASSES") for s in components: self.models[s] = _GetTimingInfo(s) atm = None @@ -227,20 +311,20 @@ def _getTiming(self, inst=0): ocn = None glc = None cpl = None - if 'ATM' in self.models: - atm = self.models['ATM'] - if 'LND' in self.models: - lnd = self.models['LND'] - if 'ROF' in self.models: - rof = self.models['ROF'] - if 'ICE' in self.models: - ice = self.models['ICE'] - if 'OCN' in self.models: - ocn = self.models['OCN'] - if 'GLC' in self.models: - glc = self.models['GLC'] - if 'CPL' in self.models: - cpl = self.models['CPL'] + if "ATM" in self.models: + atm = self.models["ATM"] + if "LND" in self.models: + lnd = self.models["LND"] + if "ROF" in self.models: + rof = self.models["ROF"] + if "ICE" in self.models: + ice = self.models["ICE"] + if "OCN" in self.models: + ocn = self.models["OCN"] + if "GLC" in self.models: + glc = self.models["GLC"] + if "CPL" in self.models: + cpl = self.models["CPL"] cime_model = self.case.get_value("MODEL") caseid = self.case.get_value("CASE") @@ -272,7 +356,9 @@ def _getTiming(self, inst=0): totalpes = self.case.get_value("TOTALPES") max_mpitasks_per_node = self.case.get_value("MAX_MPITASKS_PER_NODE") - smt_factor = max(1,int(self.case.get_value("MAX_TASKS_PER_NODE") / max_mpitasks_per_node)) + smt_factor = max( + 1, int(self.case.get_value("MAX_TASKS_PER_NODE") / max_mpitasks_per_node) + ) if cost_pes > 0: pecost = cost_pes @@ -286,33 +372,46 @@ def _getTiming(self, inst=0): if key == "NINST" and m.name == "CPL": m.ninst = 1 else: - setattr(m, key.lower(), - int(self.case.get_value("{}_{}".format(key, m.name)))) + setattr( + m, + key.lower(), + int(self.case.get_value("{}_{}".format(key, m.name))), + ) m.comp = self.case.get_value("COMP_{}".format(m.name)) m.pemax = m.rootpe + m.ntasks * m.pstrid - 1 now = datetime.datetime.ctime(datetime.datetime.now()) inittype = "FALSE" - if (run_type == "startup" or run_type == "hybrid") and \ - not continue_run: + if (run_type == "startup" or run_type == "hybrid") and not continue_run: inittype = "TRUE" if inst > 0: - inst_label = '_{:04d}'.format(inst) + inst_label = "_{:04d}".format(inst) else: - inst_label = '' - if self._driver == 'mct': - binfilename = os.path.join(rundir, "timing", "model_timing{}_stats" . 
format(inst_label)) - finfilename = os.path.join(self.caseroot, "timing", - "{}_timing{}_stats.{}".format(cime_model, inst_label, self.lid)) - elif self._driver == 'nuopc': + inst_label = "" + if self._driver == "mct": + binfilename = os.path.join( + rundir, "timing", "model_timing{}_stats".format(inst_label) + ) + finfilename = os.path.join( + self.caseroot, + "timing", + "{}_timing{}_stats.{}".format(cime_model, inst_label, self.lid), + ) + elif self._driver == "nuopc": binfilename = os.path.join(rundir, "ESMF_Profile.summary") - finfilename = os.path.join(self.caseroot, "timing", - "{}.ESMF_Profile.summary.{}".format(cime_model, self.lid)) - - foutfilename = os.path.join(self.caseroot, "timing", - "{}_timing{}.{}.{}".format(cime_model, inst_label, caseid, self.lid)) + finfilename = os.path.join( + self.caseroot, + "timing", + "{}.ESMF_Profile.summary.{}".format(cime_model, self.lid), + ) + + foutfilename = os.path.join( + self.caseroot, + "timing", + "{}_timing{}.{}.{}".format(cime_model, inst_label, caseid, self.lid), + ) timingDir = os.path.join(self.caseroot, "timing") if not os.path.isfile(binfilename): @@ -341,25 +440,24 @@ def _getTiming(self, inst=0): elif ncpl_base_period == "day": tlen = 1.0 elif ncpl_base_period == "hour": - tlen = 1.0/24.0 + tlen = 1.0 / 24.0 else: logger.warning("Unknown NCPL_BASE_PERIOD={}".format(ncpl_base_period)) - # at this point the routine becomes driver specific - if self._driver == 'mct': - nprocs, ncount = self.gettime2('CPL:CLOCK_ADVANCE ') + if self._driver == "mct": + nprocs, ncount = self.gettime2("CPL:CLOCK_ADVANCE ") nsteps = ncount / nprocs - elif self._driver == 'nuopc': - nprocs, nsteps = self.gettime2('') + elif self._driver == "nuopc": + nprocs, nsteps = self.gettime2("") - adays = nsteps*tlen/ncpl - odays = nsteps*tlen/ncpl + adays = nsteps * tlen / ncpl + odays = nsteps * tlen / ncpl if ocn_ncpl and inittype == "TRUE": - odays = odays - (tlen/ocn_ncpl) + odays = odays - (tlen / ocn_ncpl) - peminmax = max([m.rootpe for m in self.models.values()])+1 - if ncpl_base_period in ["decade","year","day"] and int(adays) > 0: + peminmax = max([m.rootpe for m in self.models.values()]) + 1 + if ncpl_base_period in ["decade", "year", "day"] and int(adays) > 0: adays = int(adays) if tlen % ocn_ncpl == 0: odays = int(odays) @@ -367,7 +465,7 @@ def _getTiming(self, inst=0): maxoffset = 40 extraoff = 20 for m in self.models.values(): - m.offset = int((maxoffset*m.rootpe)/peminmax) + extraoff + m.offset = int((maxoffset * m.rootpe) / peminmax) + extraoff if cpl: cpl.offset = 0 try: @@ -388,15 +486,23 @@ def _getTiming(self, inst=0): self.write(" grid : {}\n".format(grid)) self.write(" compset : {}\n".format(compset)) - self.write(" run type : {}, continue_run = {} (inittype = {})\n".format(run_type, str(continue_run).upper(), inittype)) + self.write( + " run type : {}, continue_run = {} (inittype = {})\n".format( + run_type, str(continue_run).upper(), inittype + ) + ) self.write(" stop option : {}, stop_n = {}\n".format(stop_option, stop_n)) self.write(" run length : {} days ({} for ocean)\n\n".format(adays, odays)) - self.write(" component comp_pes root_pe tasks " - "x threads" - " instances (stride) \n") - self.write(" --------- ------ ------- ------ " - "------ --------- ------ \n") + self.write( + " component comp_pes root_pe tasks " + "x threads" + " instances (stride) \n" + ) + self.write( + " --------- ------ ------- ------ " + "------ --------- ------ \n" + ) maxthrds = 0 xmax = 0 for k in self.case.get_values("COMP_CLASSES"): @@ -405,14 +511,27 
@@ def _getTiming(self, inst=0): comp_label = m.comp + inst_label else: comp_label = m.comp - self.write(" {} = {:<8s} {:<6d} {:<6d} {:<6d} x {:<6d} {:<6d} ({:<6d}) \n".format(m.name.lower(), comp_label, (m.ntasks*m.nthrds), m.rootpe, m.ntasks, m.nthrds, m.ninst, m.pstrid)) + self.write( + " {} = {:<8s} {:<6d} {:<6d} {:<6d} x {:<6d} {:<6d} ({:<6d}) \n".format( + m.name.lower(), + comp_label, + (m.ntasks * m.nthrds), + m.rootpe, + m.ntasks, + m.nthrds, + m.ninst, + m.pstrid, + ) + ) if m.nthrds > maxthrds: maxthrds = m.nthrds - if self._driver == 'nuopc': + if self._driver == "nuopc": for k in components: m = self.models[k] if k != "CPL": - m.tmin, m.tmax, _ = self._gettime_nuopc(' [{}] RunPhase1 '.format(m.name), inst_label[1:]) + m.tmin, m.tmax, _ = self._gettime_nuopc( + " [{}] RunPhase1 ".format(m.name), inst_label[1:] + ) else: m.tmin, m.tmax = self.getMEDtime(inst_label[1:]) nmax = self.gettime("[ensemble] Init 1")[1] @@ -420,28 +539,28 @@ def _getTiming(self, inst=0): fmax = self.gettime("[ensemble] FinalizePhase1")[1] xmax = self.getCOMMtime(inst_label[1:]) - if self._driver == 'mct': + if self._driver == "mct": for k in components: if k != "CPL": m = self.models[k] - m.tmin, m.tmax, _ = self.gettime(' CPL:{}_RUN '.format(m.name)) - nmax = self.gettime(' CPL:INIT ')[1] - tmax = self.gettime(' CPL:RUN_LOOP ')[1] - wtmin = self.gettime(' CPL:TPROF_WRITE ')[0] - fmax = self.gettime(' CPL:FINAL ')[1] - otmin, otmax, _ = self.gettime(' CPL:OCNT_RUN ') + m.tmin, m.tmax, _ = self.gettime(" CPL:{}_RUN ".format(m.name)) + nmax = self.gettime(" CPL:INIT ")[1] + tmax = self.gettime(" CPL:RUN_LOOP ")[1] + wtmin = self.gettime(" CPL:TPROF_WRITE ")[0] + fmax = self.gettime(" CPL:FINAL ")[1] + otmin, otmax, _ = self.gettime(" CPL:OCNT_RUN ") # pick OCNT_RUN for tight coupling if otmax > ocn.tmax: ocn.tmin = otmin ocn.tmax = otmax - cpl.tmin, cpl.tmax, _ = self.gettime(' CPL:RUN ') - xmax = self.gettime(' CPL:COMM ')[1] - ocnwaittime = self.gettime(' CPL:C2O_INITWAIT')[0] + cpl.tmin, cpl.tmax, _ = self.gettime(" CPL:RUN ") + xmax = self.gettime(" CPL:COMM ")[1] + ocnwaittime = self.gettime(" CPL:C2O_INITWAIT")[0] if odays != 0: - ocnrunitime = ocn.tmax * (adays/odays - 1.0) + ocnrunitime = ocn.tmax * (adays / odays - 1.0) else: ocnrunitime = 0.0 @@ -453,56 +572,97 @@ def _getTiming(self, inst=0): for m in self.models.values(): m.tmaxr = 0 if m.tmax > 0: - m.tmaxr = adays*86400.0/(m.tmax*365.0) + m.tmaxr = adays * 86400.0 / (m.tmax * 365.0) xmaxr = 0 if xmax > 0: - xmaxr = adays*86400.0/(xmax*365.0) + xmaxr = adays * 86400.0 / (xmax * 365.0) tmaxr = 0 if tmax > 0: - tmaxr = adays*86400.0/(tmax*365.0) + tmaxr = adays * 86400.0 / (tmax * 365.0) self.write("\n") - self.write(" total pes active : {} \n".format(totalpes*smt_factor )) + self.write(" total pes active : {} \n".format(totalpes * smt_factor)) self.write(" mpi tasks per node : {} \n".format(max_mpitasks_per_node)) self.write(" pe count for cost estimate : {} \n".format(pecost)) self.write("\n") self.write(" Overall Metrics: \n") if adays > 0: - self.write(" Model Cost: {:10.2f} pe-hrs/simulated_year \n".format((tmax*365.0*pecost)/(3600.0*adays))) + self.write( + " Model Cost: {:10.2f} pe-hrs/simulated_year \n".format( + (tmax * 365.0 * pecost) / (3600.0 * adays) + ) + ) if tmax > 0: - self.write(" Model Throughput: {:10.2f} simulated_years/day \n".format((86400.0*adays)/(tmax*365.0)) ) + self.write( + " Model Throughput: {:10.2f} simulated_years/day \n".format( + (86400.0 * adays) / (tmax * 365.0) + ) + ) self.write("\n") self.write(" Init 
Time : {:10.3f} seconds \n".format(nmax)) if adays > 0: - self.write(" Run Time : {:10.3f} seconds {:10.3f} seconds/day \n".format(tmax, tmax/adays)) + self.write( + " Run Time : {:10.3f} seconds {:10.3f} seconds/day \n".format( + tmax, tmax / adays + ) + ) self.write(" Final Time : {:10.3f} seconds \n".format(fmax)) self.write("\n") - if self._driver == 'mct': - self.write(" Actual Ocn Init Wait Time : {:10.3f} seconds \n".format(ocnwaittime)) - self.write(" Estimated Ocn Init Run Time : {:10.3f} seconds \n".format(ocnrunitime)) - self.write(" Estimated Run Time Correction : {:10.3f} seconds \n".format(correction)) - self.write(" (This correction has been applied to the ocean and" - " total run times) \n") + if self._driver == "mct": + self.write( + " Actual Ocn Init Wait Time : {:10.3f} seconds \n".format( + ocnwaittime + ) + ) + self.write( + " Estimated Ocn Init Run Time : {:10.3f} seconds \n".format( + ocnrunitime + ) + ) + self.write( + " Estimated Run Time Correction : {:10.3f} seconds \n".format( + correction + ) + ) + self.write( + " (This correction has been applied to the ocean and" + " total run times) \n" + ) self.write("\n") - self.write("Runs Time in total seconds, seconds/model-day, and" - " model-years/wall-day \n") - self.write("CPL Run Time represents time in CPL pes alone, " - "not including time associated with data exchange " - "with other components \n") + self.write( + "Runs Time in total seconds, seconds/model-day, and" + " model-years/wall-day \n" + ) + self.write( + "CPL Run Time represents time in CPL pes alone, " + "not including time associated with data exchange " + "with other components \n" + ) self.write("\n") - if adays > 0: - self.write(" TOT Run Time: {:10.3f} seconds {:10.3f} seconds/mday {:10.2f} myears/wday \n".format(tmax, tmax/adays, tmaxr)) + self.write( + " TOT Run Time: {:10.3f} seconds {:10.3f} seconds/mday {:10.2f} myears/wday \n".format( + tmax, tmax / adays, tmaxr + ) + ) for k in self.case.get_values("COMP_CLASSES"): m = self.models[k] - self.write(" {} Run Time: {:10.3f} seconds {:10.3f} seconds/mday {:10.2f} myears/wday \n".format(k, m.tmax, m.tmax/adays, m.tmaxr)) - self.write(" CPL COMM Time: {:10.3f} seconds {:10.3f} seconds/mday {:10.2f} myears/wday \n".format(xmax, xmax/adays, xmaxr)) + self.write( + " {} Run Time: {:10.3f} seconds {:10.3f} seconds/mday {:10.2f} myears/wday \n".format( + k, m.tmax, m.tmax / adays, m.tmaxr + ) + ) + self.write( + " CPL COMM Time: {:10.3f} seconds {:10.3f} seconds/mday {:10.2f} myears/wday \n".format( + xmax, xmax / adays, xmaxr + ) + ) pstrlen = 25 hoffset = 1 @@ -510,249 +670,254 @@ def _getTiming(self, inst=0): for k in self.case.get_values("COMP_CLASSES"): m = self.models[k] - xspace = (pstrlen+hoffset+m.offset) * ' ' - self.write(" {} {} (pes {:d} to {:d}) \n".format(xspace, k, m.rootpe, m.pemax)) + xspace = (pstrlen + hoffset + m.offset) * " " + self.write( + " {} {} (pes {:d} to {:d}) \n".format(xspace, k, m.rootpe, m.pemax) + ) self.write("\n") - self.prttime(' CPL:CLOCK_ADVANCE ') - self.prttime(' CPL:OCNPRE1_BARRIER ') - self.prttime(' CPL:OCNPRE1 ') - self.prttime(' CPL:ATMOCN1_BARRIER ') - self.prttime(' CPL:ATMOCN1 ') - self.prttime(' CPL:OCNPREP_BARRIER ') - self.prttime(' CPL:OCNPREP ') - self.prttime(' CPL:C2O_BARRIER ', offset=ocn.offset, div=odays, - coff=cpl.offset) - self.prttime(' CPL:C2O ', offset=ocn.offset, div=odays, coff=cpl.offset) - self.prttime(' CPL:LNDPREP_BARRIER ') - self.prttime(' CPL:LNDPREP ') - self.prttime(' CPL:C2L_BARRIER ', offset=lnd.offset, coff=cpl.offset) - 
self.prttime(' CPL:C2L ', offset=lnd.offset, coff=cpl.offset) - self.prttime(' CPL:ICEPREP_BARRIER ') - self.prttime(' CPL:ICEPREP ') - self.prttime(' CPL:C2I_BARRIER ', offset=ice.offset, coff=cpl.offset) - self.prttime(' CPL:C2I ', offset=ice.offset, coff=cpl.offset) - self.prttime(' CPL:WAVPREP_BARRIER ') - self.prttime(' CPL:WAVPREP ') - self.prttime(' CPL:C2W_BARRIER ', offset=ice.offset, coff=cpl.offset) - self.prttime(' CPL:C2W ', offset=ice.offset, coff=cpl.offset) - self.prttime(' CPL:ROFPREP_BARRIER ') - self.prttime(' CPL:ROFPREP ') - self.prttime(' CPL:C2R_BARRIER ', offset=rof.offset, coff=cpl.offset) - self.prttime(' CPL:C2R ', offset=rof.offset, coff=cpl.offset) - self.prttime(' CPL:ICE_RUN_BARRIER ', offset=ice.offset) - self.prttime(' CPL:ICE_RUN ', offset=ice.offset) - self.prttime(' CPL:LND_RUN_BARRIER ', offset=lnd.offset) - self.prttime(' CPL:LND_RUN ', offset=lnd.offset) - self.prttime(' CPL:ROF_RUN_BARRIER ', offset=rof.offset) - self.prttime(' CPL:ROF_RUN ', offset=rof.offset) - self.prttime(' CPL:WAV_RUN_BARRIER ', offset=rof.offset) - self.prttime(' CPL:WAV_RUN ', offset=rof.offset) - self.prttime(' CPL:OCNT_RUN_BARRIER ', offset=ocn.offset, div=odays) - self.prttime(' CPL:OCNT_RUN ', offset=ocn.offset, div=odays) - self.prttime(' CPL:O2CT_BARRIER ', offset=ocn.offset, div=odays, - coff=cpl.offset) - self.prttime(' CPL:O2CT ', offset=ocn.offset, div=odays, - coff=cpl.offset) - self.prttime(' CPL:OCNPOSTT_BARRIER ') - self.prttime(' CPL:OCNPOSTT ') - self.prttime(' CPL:ATMOCNP_BARRIER ') - self.prttime(' CPL:ATMOCNP ') - self.prttime(' CPL:L2C_BARRIER ', offset=lnd.offset, coff=cpl.offset) - self.prttime(' CPL:L2C ', offset=lnd.offset, div=cpl.offset) - self.prttime(' CPL:LNDPOST_BARRIER ') - self.prttime(' CPL:LNDPOST ') - self.prttime(' CPL:GLCPREP_BARRIER ') - self.prttime(' CPL:GLCPREP ') - self.prttime(' CPL:C2G_BARRIER ', offset=glc.offset, coff=cpl.offset) - self.prttime(' CPL:C2G ', offset=glc.offset, coff=cpl.offset) - self.prttime(' CPL:R2C_BARRIER ', offset=rof.offset, coff=cpl.offset) - self.prttime(' CPL:R2C ', offset=rof.offset, coff=cpl.offset) - self.prttime(' CPL:ROFPOST_BARRIER ') - self.prttime(' CPL:ROFPOST ') - self.prttime(' CPL:BUDGET1_BARRIER ') - self.prttime(' CPL:BUDGET1 ') - self.prttime(' CPL:I2C_BARRIER ', offset=ice.offset, coff=cpl.offset) - self.prttime(' CPL:I2C ', offset=ice.offset, coff=cpl.offset) - self.prttime(' CPL:ICEPOST_BARRIER ') - self.prttime(' CPL:ICEPOST ') - self.prttime(' CPL:FRACSET_BARRIER ') - self.prttime(' CPL:FRACSET ') - self.prttime(' CPL:ATMOCN2_BARRIER ') - self.prttime(' CPL:ATMOCN2 ') - self.prttime(' CPL:OCNPRE2_BARRIER ') - self.prttime(' CPL:OCNPRE2 ') - self.prttime(' CPL:C2O2_BARRIER ', offset=ocn.offset, div=odays, - coff=cpl.offset) - self.prttime(' CPL:C2O2 ', offset=ocn.offset, div=odays, - coff=cpl.offset) - self.prttime(' CPL:ATMOCNQ_BARRIER') - self.prttime(' CPL:ATMOCNQ ') - self.prttime(' CPL:ATMPREP_BARRIER ') - self.prttime(' CPL:ATMPREP ') - self.prttime(' CPL:C2A_BARRIER ', offset=atm.offset, coff=cpl.offset) - self.prttime(' CPL:C2A ', offset=atm.offset, coff=cpl.offset) - self.prttime(' CPL:OCN_RUN_BARRIER ', offset=ocn.offset, div=odays) - self.prttime(' CPL:OCN_RUN ', offset=ocn.offset, div=odays) - self.prttime(' CPL:ATM_RUN_BARRIER ', offset=atm.offset) - self.prttime(' CPL:ATM_RUN ', offset=atm.offset) - self.prttime(' CPL:GLC_RUN_BARRIER ', offset=glc.offset) - self.prttime(' CPL:GLC_RUN ', offset=glc.offset) - self.prttime(' CPL:W2C_BARRIER ', offset=glc.offset, 
coff=cpl.offset) - self.prttime(' CPL:W2C ', offset=glc.offset, coff=cpl.offset) - self.prttime(' CPL:WAVPOST_BARRIER ') - self.prttime(' CPL:WAVPOST ', cpl.offset) - self.prttime(' CPL:G2C_BARRIER ', offset=glc.offset, coff=cpl.offset) - self.prttime(' CPL:G2C ', offset=glc.offset, coff=cpl.offset) - self.prttime(' CPL:GLCPOST_BARRIER ') - self.prttime(' CPL:GLCPOST ') - self.prttime(' CPL:A2C_BARRIER ', offset=atm.offset, coff=cpl.offset) - self.prttime(' CPL:A2C ', offset=atm.offset, coff=cpl.offset) - self.prttime(' CPL:ATMPOST_BARRIER ') - self.prttime(' CPL:ATMPOST ') - self.prttime(' CPL:BUDGET2_BARRIER ') - self.prttime(' CPL:BUDGET2 ') - self.prttime(' CPL:BUDGET3_BARRIER ') - self.prttime(' CPL:BUDGET3 ') - self.prttime(' CPL:BUDGETF_BARRIER ') - self.prttime(' CPL:BUDGETF ') - self.prttime(' CPL:O2C_BARRIER ', offset=ocn.offset, - div=odays, coff=cpl.offset) - self.prttime(' CPL:O2C ', offset=ocn.offset, div=odays, coff=cpl.offset) - self.prttime(' CPL:OCNPOST_BARRIER ') - self.prttime(' CPL:OCNPOST ') - self.prttime(' CPL:RESTART_BARRIER ') - self.prttime(' CPL:RESTART') - self.prttime(' CPL:HISTORY_BARRIER ') - self.prttime(' CPL:HISTORY ') - self.prttime(' CPL:TSTAMP_WRITE ') - self.prttime(' CPL:TPROF_WRITE ') - self.prttime(' CPL:RUN_LOOP_BSTOP ') + self.prttime(" CPL:CLOCK_ADVANCE ") + self.prttime(" CPL:OCNPRE1_BARRIER ") + self.prttime(" CPL:OCNPRE1 ") + self.prttime(" CPL:ATMOCN1_BARRIER ") + self.prttime(" CPL:ATMOCN1 ") + self.prttime(" CPL:OCNPREP_BARRIER ") + self.prttime(" CPL:OCNPREP ") + self.prttime( + " CPL:C2O_BARRIER ", offset=ocn.offset, div=odays, coff=cpl.offset + ) + self.prttime(" CPL:C2O ", offset=ocn.offset, div=odays, coff=cpl.offset) + self.prttime(" CPL:LNDPREP_BARRIER ") + self.prttime(" CPL:LNDPREP ") + self.prttime(" CPL:C2L_BARRIER ", offset=lnd.offset, coff=cpl.offset) + self.prttime(" CPL:C2L ", offset=lnd.offset, coff=cpl.offset) + self.prttime(" CPL:ICEPREP_BARRIER ") + self.prttime(" CPL:ICEPREP ") + self.prttime(" CPL:C2I_BARRIER ", offset=ice.offset, coff=cpl.offset) + self.prttime(" CPL:C2I ", offset=ice.offset, coff=cpl.offset) + self.prttime(" CPL:WAVPREP_BARRIER ") + self.prttime(" CPL:WAVPREP ") + self.prttime(" CPL:C2W_BARRIER ", offset=ice.offset, coff=cpl.offset) + self.prttime(" CPL:C2W ", offset=ice.offset, coff=cpl.offset) + self.prttime(" CPL:ROFPREP_BARRIER ") + self.prttime(" CPL:ROFPREP ") + self.prttime(" CPL:C2R_BARRIER ", offset=rof.offset, coff=cpl.offset) + self.prttime(" CPL:C2R ", offset=rof.offset, coff=cpl.offset) + self.prttime(" CPL:ICE_RUN_BARRIER ", offset=ice.offset) + self.prttime(" CPL:ICE_RUN ", offset=ice.offset) + self.prttime(" CPL:LND_RUN_BARRIER ", offset=lnd.offset) + self.prttime(" CPL:LND_RUN ", offset=lnd.offset) + self.prttime(" CPL:ROF_RUN_BARRIER ", offset=rof.offset) + self.prttime(" CPL:ROF_RUN ", offset=rof.offset) + self.prttime(" CPL:WAV_RUN_BARRIER ", offset=rof.offset) + self.prttime(" CPL:WAV_RUN ", offset=rof.offset) + self.prttime(" CPL:OCNT_RUN_BARRIER ", offset=ocn.offset, div=odays) + self.prttime(" CPL:OCNT_RUN ", offset=ocn.offset, div=odays) + self.prttime( + " CPL:O2CT_BARRIER ", offset=ocn.offset, div=odays, coff=cpl.offset + ) + self.prttime(" CPL:O2CT ", offset=ocn.offset, div=odays, coff=cpl.offset) + self.prttime(" CPL:OCNPOSTT_BARRIER ") + self.prttime(" CPL:OCNPOSTT ") + self.prttime(" CPL:ATMOCNP_BARRIER ") + self.prttime(" CPL:ATMOCNP ") + self.prttime(" CPL:L2C_BARRIER ", offset=lnd.offset, coff=cpl.offset) + self.prttime(" CPL:L2C ", offset=lnd.offset, div=cpl.offset) 
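# A note on the prttime() calls in this hunk, inferred from the surrounding
# code only (prttime()'s definition lies outside this diff): `offset` and
# `coff` appear to set the column indentation of a timer line, using the
# per-component offsets computed from each model's root PE above, so each
# timer prints under the "(pes X to Y)" column header of the component that
# owns those PEs; `div=odays` appears to rescale per-day rates for the
# ocean-coupled timers, which advance odays rather than adays of simulated
# time per run.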
+ self.prttime(" CPL:LNDPOST_BARRIER ") + self.prttime(" CPL:LNDPOST ") + self.prttime(" CPL:GLCPREP_BARRIER ") + self.prttime(" CPL:GLCPREP ") + self.prttime(" CPL:C2G_BARRIER ", offset=glc.offset, coff=cpl.offset) + self.prttime(" CPL:C2G ", offset=glc.offset, coff=cpl.offset) + self.prttime(" CPL:R2C_BARRIER ", offset=rof.offset, coff=cpl.offset) + self.prttime(" CPL:R2C ", offset=rof.offset, coff=cpl.offset) + self.prttime(" CPL:ROFPOST_BARRIER ") + self.prttime(" CPL:ROFPOST ") + self.prttime(" CPL:BUDGET1_BARRIER ") + self.prttime(" CPL:BUDGET1 ") + self.prttime(" CPL:I2C_BARRIER ", offset=ice.offset, coff=cpl.offset) + self.prttime(" CPL:I2C ", offset=ice.offset, coff=cpl.offset) + self.prttime(" CPL:ICEPOST_BARRIER ") + self.prttime(" CPL:ICEPOST ") + self.prttime(" CPL:FRACSET_BARRIER ") + self.prttime(" CPL:FRACSET ") + self.prttime(" CPL:ATMOCN2_BARRIER ") + self.prttime(" CPL:ATMOCN2 ") + self.prttime(" CPL:OCNPRE2_BARRIER ") + self.prttime(" CPL:OCNPRE2 ") + self.prttime( + " CPL:C2O2_BARRIER ", offset=ocn.offset, div=odays, coff=cpl.offset + ) + self.prttime(" CPL:C2O2 ", offset=ocn.offset, div=odays, coff=cpl.offset) + self.prttime(" CPL:ATMOCNQ_BARRIER") + self.prttime(" CPL:ATMOCNQ ") + self.prttime(" CPL:ATMPREP_BARRIER ") + self.prttime(" CPL:ATMPREP ") + self.prttime(" CPL:C2A_BARRIER ", offset=atm.offset, coff=cpl.offset) + self.prttime(" CPL:C2A ", offset=atm.offset, coff=cpl.offset) + self.prttime(" CPL:OCN_RUN_BARRIER ", offset=ocn.offset, div=odays) + self.prttime(" CPL:OCN_RUN ", offset=ocn.offset, div=odays) + self.prttime(" CPL:ATM_RUN_BARRIER ", offset=atm.offset) + self.prttime(" CPL:ATM_RUN ", offset=atm.offset) + self.prttime(" CPL:GLC_RUN_BARRIER ", offset=glc.offset) + self.prttime(" CPL:GLC_RUN ", offset=glc.offset) + self.prttime(" CPL:W2C_BARRIER ", offset=glc.offset, coff=cpl.offset) + self.prttime(" CPL:W2C ", offset=glc.offset, coff=cpl.offset) + self.prttime(" CPL:WAVPOST_BARRIER ") + self.prttime(" CPL:WAVPOST ", cpl.offset) + self.prttime(" CPL:G2C_BARRIER ", offset=glc.offset, coff=cpl.offset) + self.prttime(" CPL:G2C ", offset=glc.offset, coff=cpl.offset) + self.prttime(" CPL:GLCPOST_BARRIER ") + self.prttime(" CPL:GLCPOST ") + self.prttime(" CPL:A2C_BARRIER ", offset=atm.offset, coff=cpl.offset) + self.prttime(" CPL:A2C ", offset=atm.offset, coff=cpl.offset) + self.prttime(" CPL:ATMPOST_BARRIER ") + self.prttime(" CPL:ATMPOST ") + self.prttime(" CPL:BUDGET2_BARRIER ") + self.prttime(" CPL:BUDGET2 ") + self.prttime(" CPL:BUDGET3_BARRIER ") + self.prttime(" CPL:BUDGET3 ") + self.prttime(" CPL:BUDGETF_BARRIER ") + self.prttime(" CPL:BUDGETF ") + self.prttime( + " CPL:O2C_BARRIER ", offset=ocn.offset, div=odays, coff=cpl.offset + ) + self.prttime(" CPL:O2C ", offset=ocn.offset, div=odays, coff=cpl.offset) + self.prttime(" CPL:OCNPOST_BARRIER ") + self.prttime(" CPL:OCNPOST ") + self.prttime(" CPL:RESTART_BARRIER ") + self.prttime(" CPL:RESTART") + self.prttime(" CPL:HISTORY_BARRIER ") + self.prttime(" CPL:HISTORY ") + self.prttime(" CPL:TSTAMP_WRITE ") + self.prttime(" CPL:TPROF_WRITE ") + self.prttime(" CPL:RUN_LOOP_BSTOP ") self.write("\n\n") self.write("More info on coupler timing:\n") self.write("\n") - self.prttime(' CPL:OCNPRE1 ') - self.prttime(' CPL:ocnpre1_atm2ocn ') + self.prttime(" CPL:OCNPRE1 ") + self.prttime(" CPL:ocnpre1_atm2ocn ") self.write("\n") - self.prttime(' CPL:OCNPREP ') - self.prttime(' CPL:OCNPRE2 ') - self.prttime(' CPL:ocnprep_avg ') - self.prttime(' CPL:ocnprep_diagav ') + self.prttime(" CPL:OCNPREP ") + self.prttime(" 
CPL:OCNPRE2 ") + self.prttime(" CPL:ocnprep_avg ") + self.prttime(" CPL:ocnprep_diagav ") self.write("\n") - self.prttime(' CPL:LNDPREP ') - self.prttime(' CPL:lndprep_atm2lnd ') - self.prttime(' CPL:lndprep_mrgx2l ') - self.prttime(' CPL:lndprep_diagav ') + self.prttime(" CPL:LNDPREP ") + self.prttime(" CPL:lndprep_atm2lnd ") + self.prttime(" CPL:lndprep_mrgx2l ") + self.prttime(" CPL:lndprep_diagav ") self.write("\n") - self.prttime(' CPL:ICEPREP ') - self.prttime(' CPL:iceprep_ocn2ice ') - self.prttime(' CPL:iceprep_atm2ice ') - self.prttime(' CPL:iceprep_mrgx2i ') - self.prttime(' CPL:iceprep_diagav ') + self.prttime(" CPL:ICEPREP ") + self.prttime(" CPL:iceprep_ocn2ice ") + self.prttime(" CPL:iceprep_atm2ice ") + self.prttime(" CPL:iceprep_mrgx2i ") + self.prttime(" CPL:iceprep_diagav ") self.write("\n") - self.prttime(' CPL:WAVPREP ') - self.prttime(' CPL:wavprep_atm2wav ') - self.prttime(' CPL:wavprep_ocn2wav ') - self.prttime(' CPL:wavprep_ice2wav ') - self.prttime(' CPL:wavprep_mrgx2w ') - self.prttime(' CPL:wavprep_diagav ') + self.prttime(" CPL:WAVPREP ") + self.prttime(" CPL:wavprep_atm2wav ") + self.prttime(" CPL:wavprep_ocn2wav ") + self.prttime(" CPL:wavprep_ice2wav ") + self.prttime(" CPL:wavprep_mrgx2w ") + self.prttime(" CPL:wavprep_diagav ") self.write("\n") - self.prttime(' CPL:ROFPREP ') - self.prttime(' CPL:rofprep_l2xavg ') - self.prttime(' CPL:rofprep_lnd2rof ') - self.prttime(' CPL:rofprep_mrgx2r ') - self.prttime(' CPL:rofprep_diagav ') + self.prttime(" CPL:ROFPREP ") + self.prttime(" CPL:rofprep_l2xavg ") + self.prttime(" CPL:rofprep_lnd2rof ") + self.prttime(" CPL:rofprep_mrgx2r ") + self.prttime(" CPL:rofprep_diagav ") self.write("\n") - self.prttime(' CPL:GLCPREP ') - self.prttime(' CPL:glcprep_avg ') - self.prttime(' CPL:glcprep_lnd2glc ') - self.prttime(' CPL:glcprep_mrgx2g ') - self.prttime(' CPL:glcprep_diagav ') + self.prttime(" CPL:GLCPREP ") + self.prttime(" CPL:glcprep_avg ") + self.prttime(" CPL:glcprep_lnd2glc ") + self.prttime(" CPL:glcprep_mrgx2g ") + self.prttime(" CPL:glcprep_diagav ") self.write("\n") - self.prttime(' CPL:ATMPREP ') - self.prttime(' CPL:atmprep_xao2atm ') - self.prttime(' CPL:atmprep_ocn2atm ') - self.prttime(' CPL:atmprep_alb2atm ') - self.prttime(' CPL:atmprep_ice2atm ') - self.prttime(' CPL:atmprep_lnd2atm ') - self.prttime(' CPL:atmprep_mrgx2a ') - self.prttime(' CPL:atmprep_diagav ') + self.prttime(" CPL:ATMPREP ") + self.prttime(" CPL:atmprep_xao2atm ") + self.prttime(" CPL:atmprep_ocn2atm ") + self.prttime(" CPL:atmprep_alb2atm ") + self.prttime(" CPL:atmprep_ice2atm ") + self.prttime(" CPL:atmprep_lnd2atm ") + self.prttime(" CPL:atmprep_mrgx2a ") + self.prttime(" CPL:atmprep_diagav ") self.write("\n") - self.prttime(' CPL:ATMOCNP ') - self.prttime(' CPL:ATMOCN1 ') - self.prttime(' CPL:ATMOCN2 ') - self.prttime(' CPL:atmocnp_ice2ocn ') - self.prttime(' CPL:atmocnp_wav2ocn ') - self.prttime(' CPL:atmocnp_fluxo ') - self.prttime(' CPL:atmocnp_fluxe ') - self.prttime(' CPL:atmocnp_mrgx2o ') - self.prttime(' CPL:atmocnp_accum ') - self.prttime(' CPL:atmocnp_ocnalb ') + self.prttime(" CPL:ATMOCNP ") + self.prttime(" CPL:ATMOCN1 ") + self.prttime(" CPL:ATMOCN2 ") + self.prttime(" CPL:atmocnp_ice2ocn ") + self.prttime(" CPL:atmocnp_wav2ocn ") + self.prttime(" CPL:atmocnp_fluxo ") + self.prttime(" CPL:atmocnp_fluxe ") + self.prttime(" CPL:atmocnp_mrgx2o ") + self.prttime(" CPL:atmocnp_accum ") + self.prttime(" CPL:atmocnp_ocnalb ") self.write("\n") - self.prttime(' CPL:ATMOCNQ ') - self.prttime(' CPL:atmocnq_ocn2atm ') - 
self.prttime(' CPL:atmocnq_fluxa ') - self.prttime(' CPL:atmocnq_atm2ocnf ') + self.prttime(" CPL:ATMOCNQ ") + self.prttime(" CPL:atmocnq_ocn2atm ") + self.prttime(" CPL:atmocnq_fluxa ") + self.prttime(" CPL:atmocnq_atm2ocnf ") self.write("\n") - self.prttime(' CPL:OCNPOSTT ') - self.prttime(' CPL:OCNPOST ') - self.prttime(' CPL:ocnpost_diagav ') + self.prttime(" CPL:OCNPOSTT ") + self.prttime(" CPL:OCNPOST ") + self.prttime(" CPL:ocnpost_diagav ") self.write("\n") - self.prttime(' CPL:LNDPOST ') - self.prttime(' CPL:lndpost_diagav ') - self.prttime(' CPL:lndpost_acc2lr ') - self.prttime(' CPL:lndpost_acc2lg ') + self.prttime(" CPL:LNDPOST ") + self.prttime(" CPL:lndpost_diagav ") + self.prttime(" CPL:lndpost_acc2lr ") + self.prttime(" CPL:lndpost_acc2lg ") self.write("\n") - self.prttime(' CPL:ROFOST ') - self.prttime(' CPL:rofpost_diagav ') - self.prttime(' CPL:rofpost_histaux ') - self.prttime(' CPL:rofpost_rof2lnd ') - self.prttime(' CPL:rofpost_rof2ice ') - self.prttime(' CPL:rofpost_rof2ocn ') + self.prttime(" CPL:ROFOST ") + self.prttime(" CPL:rofpost_diagav ") + self.prttime(" CPL:rofpost_histaux ") + self.prttime(" CPL:rofpost_rof2lnd ") + self.prttime(" CPL:rofpost_rof2ice ") + self.prttime(" CPL:rofpost_rof2ocn ") self.write("\n") - self.prttime(' CPL:ICEPOST ') - self.prttime(' CPL:icepost_diagav ') + self.prttime(" CPL:ICEPOST ") + self.prttime(" CPL:icepost_diagav ") self.write("\n") - self.prttime(' CPL:WAVPOST ') - self.prttime(' CPL:wavpost_diagav ') + self.prttime(" CPL:WAVPOST ") + self.prttime(" CPL:wavpost_diagav ") self.write("\n") - self.prttime(' CPL:GLCPOST ') - self.prttime(' CPL:glcpost_diagav ') - self.prttime(' CPL:glcpost_glc2lnd ') - self.prttime(' CPL:glcpost_glc2ice ') - self.prttime(' CPL:glcpost_glc2ocn ') + self.prttime(" CPL:GLCPOST ") + self.prttime(" CPL:glcpost_diagav ") + self.prttime(" CPL:glcpost_glc2lnd ") + self.prttime(" CPL:glcpost_glc2ice ") + self.prttime(" CPL:glcpost_glc2ocn ") self.write("\n") - self.prttime(' CPL:ATMPOST ') - self.prttime(' CPL:atmpost_diagav ') + self.prttime(" CPL:ATMPOST ") + self.prttime(" CPL:atmpost_diagav ") self.write("\n") - self.prttime(' CPL:BUDGET ') - self.prttime(' CPL:BUDGET1 ') - self.prttime(' CPL:BUDGET2 ') - self.prttime(' CPL:BUDGET3 ') - self.prttime(' CPL:BUDGETF ') + self.prttime(" CPL:BUDGET ") + self.prttime(" CPL:BUDGET1 ") + self.prttime(" CPL:BUDGET2 ") + self.prttime(" CPL:BUDGET3 ") + self.prttime(" CPL:BUDGETF ") self.write("\n\n") self.fout.close() + def get_timing(case, lid): parser = _TimingParser(case, lid) parser.getTiming() diff --git a/CIME/hist_utils.py b/CIME/hist_utils.py index 718fab5e4d1..61a073753ba 100644 --- a/CIME/hist_utils.py +++ b/CIME/hist_utils.py @@ -3,9 +3,17 @@ """ from CIME.XML.standard_module_setup import * from CIME.test_status import TEST_NO_BASELINES_COMMENT, TEST_STATUS_FILENAME -from CIME.utils import get_current_commit, get_timestamp, get_model, safe_copy, SharedArea, parse_test_name +from CIME.utils import ( + get_current_commit, + get_timestamp, + get_model, + safe_copy, + SharedArea, + parse_test_name, +) import logging, os, re, filecmp + logger = logging.getLogger(__name__) BLESS_LOG_NAME = "bless_log" @@ -20,26 +28,30 @@ # Strings used in the comments generated by _compare_hists # ------------------------------------------------------------------------ -NO_COMPARE = "had no compare counterpart" -NO_ORIGINAL = "had no original counterpart" +NO_COMPARE = "had no compare counterpart" +NO_ORIGINAL = "had no original counterpart" FIELDLISTS_DIFFER = "had a 
different field list from" -DIFF_COMMENT = "did NOT match" +DIFF_COMMENT = "did NOT match" # COMPARISON_COMMENT_OPTIONS should include all of the above: these are any of the special # comment strings that describe the reason for a comparison failure -COMPARISON_COMMENT_OPTIONS = set([NO_COMPARE, - NO_ORIGINAL, - FIELDLISTS_DIFFER, - DIFF_COMMENT]) +COMPARISON_COMMENT_OPTIONS = set( + [NO_COMPARE, NO_ORIGINAL, FIELDLISTS_DIFFER, DIFF_COMMENT] +) # Comments that indicate a true baseline comparison failure -COMPARISON_FAILURE_COMMENT_OPTIONS = (COMPARISON_COMMENT_OPTIONS - - set([NO_COMPARE, FIELDLISTS_DIFFER])) +COMPARISON_FAILURE_COMMENT_OPTIONS = COMPARISON_COMMENT_OPTIONS - set( + [NO_COMPARE, FIELDLISTS_DIFFER] +) + +NO_HIST_TESTS = ["IRT", "PFS", "TSC"] + def _iter_model_file_substrs(case): models = case.get_compset_components() - models.append('cpl') + models.append("cpl") for model in models: yield model + def copy_histfiles(case, suffix): """Copy the most recent batch of hist files in a case, adding the given suffix. @@ -49,7 +61,7 @@ def copy_histfiles(case, suffix): case - The case containing the files you want to save suffix - The string suffix you want to add to saved files, this can be used to find them later. """ - rundir = case.get_value("RUNDIR") + rundir = case.get_value("RUNDIR") ref_case = case.get_value("RUN_REFCASE") casename = case.get_value("CASE") # Loop over models @@ -58,11 +70,13 @@ def copy_histfiles(case, suffix): num_copied = 0 for model in _iter_model_file_substrs(case): comments += " Copying hist files for model '{}'\n".format(model) - test_hists = archive.get_latest_hist_files(casename, model, rundir, ref_case=ref_case) + test_hists = archive.get_latest_hist_files( + casename, model, rundir, ref_case=ref_case + ) num_copied += len(test_hists) for test_hist in test_hists: - test_hist = os.path.join(rundir,test_hist) - if not test_hist.endswith('.nc') or 'once' in os.path.basename(test_hist): + test_hist = os.path.join(rundir, test_hist) + if not test_hist.endswith(".nc") or "once" in os.path.basename(test_hist): logger.info("Will not compare non-netcdf file {}".format(test_hist)) continue new_file = "{}.{}".format(test_hist, suffix) @@ -85,17 +99,21 @@ def copy_histfiles(case, suffix): # noted above.) safe_copy(test_hist, new_file) - expect(num_copied > 0, "copy_histfiles failed: no hist files found in rundir '{}'".format(rundir)) + expect( + num_copied > 0, + "copy_histfiles failed: no hist files found in rundir '{}'".format(rundir), + ) return comments + def rename_all_hist_files(case, suffix): """Renaming all hist files in a case, adding the given suffix. case - The case containing the files you want to save suffix - The string suffix you want to add to saved files, this can be used to find them later. 
""" - rundir = case.get_value("RUNDIR") + rundir = case.get_value("RUNDIR") ref_case = case.get_value("RUN_REFCASE") # Loop over models archive = case.get_env("archive") @@ -104,11 +122,13 @@ def rename_all_hist_files(case, suffix): for model in _iter_model_file_substrs(case): comments += " Renaming hist files for model '{}'\n".format(model) - if model == 'cpl': - mname = 'drv' + if model == "cpl": + mname = "drv" else: mname = model - test_hists = archive.get_all_hist_files(case.get_value("CASE"), mname, rundir, ref_case=ref_case) + test_hists = archive.get_all_hist_files( + case.get_value("CASE"), mname, rundir, ref_case=ref_case + ) num_renamed += len(test_hists) for test_hist in test_hists: test_hist = os.path.join(rundir, test_hist) @@ -120,10 +140,14 @@ def rename_all_hist_files(case, suffix): os.rename(test_hist, new_file) - expect(num_renamed > 0, "renaming failed: no hist files found in rundir '{}'".format(rundir)) + expect( + num_renamed > 0, + "renaming failed: no hist files found in rundir '{}'".format(rundir), + ) return comments + def _hists_match(model, hists1, hists2, suffix1="", suffix2=""): """ return (num in set 1 but not 2 , num in set 2 but not 1, matchups) @@ -149,20 +173,34 @@ def _hists_match(model, hists1, hists2, suffix1="", suffix2=""): multi_normalized1, multi_normalized2 = [], [] multiinst = False - for hists, suffix, normalized, multi_normalized in [(hists1, suffix1, normalized1, multi_normalized1), (hists2, suffix2, normalized2, multi_normalized2)]: + if model == "ww3dev": + model = "ww3" + + for hists, suffix, normalized, multi_normalized in [ + (hists1, suffix1, normalized1, multi_normalized1), + (hists2, suffix2, normalized2, multi_normalized2), + ]: for hist in hists: hist_basename = os.path.basename(hist) offset = hist_basename.rfind(model) - expect(offset >= 0,"ERROR: cant find model name {} in {}".format(model, hist_basename)) + expect( + offset >= 0, + "ERROR: cant find model name {} in {}".format(model, hist_basename), + ) normalized_name = os.path.basename(hist_basename[offset:]) if suffix != "": - expect(normalized_name.endswith(suffix), "How did '{}' not have suffix '{}'".format(hist, suffix)) - normalized_name = normalized_name[:len(normalized_name) - len(suffix) - 1] - - m = re.search("(.+)_[0-9]{4}(.+.nc)",normalized_name) + expect( + normalized_name.endswith(suffix), + "How did '{}' not have suffix '{}'".format(hist, suffix), + ) + normalized_name = normalized_name[ + : len(normalized_name) - len(suffix) - 1 + ] + + m = re.search("(.+)_[0-9]{4}(.+.nc)", normalized_name) if m is not None: multiinst = True - multi_normalized.append(m.group(1)+m.group(2)) + multi_normalized.append(m.group(1) + m.group(2)) normalized.append(normalized_name) @@ -174,7 +212,12 @@ def _hists_match(model, hists1, hists2, suffix1="", suffix2=""): both = set(normalized1) & set(normalized2) - match_ups = sorted([ (hists1[normalized1.index(item)], hists2[normalized2.index(item)]) for item in both]) + match_ups = sorted( + [ + (hists1[normalized1.index(item)], hists2[normalized2.index(item)]) + for item in both + ] + ) # Special case - comparing multiinstance to single instance files @@ -203,13 +246,25 @@ def _hists_match(model, hists1, hists2, suffix1="", suffix2=""): two_not_one.remove(hists2[idx]) if not multiinst: - expect(len(match_ups) + len(set_of_1_not_2) == len(hists1), "Programming error1") - expect(len(match_ups) + len(set_of_2_not_1) == len(hists2), "Programming error2") + expect( + len(match_ups) + len(set_of_1_not_2) == len(hists1), "Programming error1" + ) + 
expect( + len(match_ups) + len(set_of_2_not_1) == len(hists2), "Programming error2" + ) return one_not_two, two_not_one, match_ups -def _compare_hists(case, from_dir1, from_dir2, suffix1="", suffix2="", outfile_suffix="", - ignore_fieldlist_diffs=False): + +def _compare_hists( + case, + from_dir1, + from_dir2, + suffix1="", + suffix2="", + outfile_suffix="", + ignore_fieldlist_diffs=False, +): if from_dir1 == from_dir2: expect(suffix1 != suffix2, "Comparing files to themselves?") @@ -218,45 +273,62 @@ def _compare_hists(case, from_dir1, from_dir2, suffix1="", suffix2="", outfile_s casedir = case.get_value("CASEROOT") all_success = True num_compared = 0 - comments = "Comparing hists for case '{}' dir1='{}', suffix1='{}', dir2='{}' suffix2='{}'\n".format(casename, from_dir1, suffix1, from_dir2, suffix2) + comments = "Comparing hists for case '{}' dir1='{}', suffix1='{}', dir2='{}' suffix2='{}'\n".format( + casename, from_dir1, suffix1, from_dir2, suffix2 + ) multiinst_driver_compare = False - archive = case.get_env('archive') + archive = case.get_env("archive") ref_case = case.get_value("RUN_REFCASE") for model in _iter_model_file_substrs(case): - if model == 'cpl' and suffix2 == 'multiinst': + if model == "cpl" and suffix2 == "multiinst": multiinst_driver_compare = True comments += " comparing model '{}'\n".format(model) - hists1 = archive.get_latest_hist_files(casename, model, from_dir1, suffix=suffix1, ref_case=ref_case) - hists2 = archive.get_latest_hist_files(casename, model, from_dir2, suffix=suffix2, ref_case=ref_case) + hists1 = archive.get_latest_hist_files( + casename, model, from_dir1, suffix=suffix1, ref_case=ref_case + ) + hists2 = archive.get_latest_hist_files( + casename, model, from_dir2, suffix=suffix2, ref_case=ref_case + ) if len(hists1) == 0 and len(hists2) == 0: comments += " no hist files found for model {}\n".format(model) continue - one_not_two, two_not_one, match_ups = _hists_match(model, hists1, hists2, suffix1, suffix2) + one_not_two, two_not_one, match_ups = _hists_match( + model, hists1, hists2, suffix1, suffix2 + ) for item in one_not_two: - if 'initial' in item: + if "initial" in item: continue - comments += " File '{}' {} in '{}' with suffix '{}'\n".format(item, NO_COMPARE, from_dir2, suffix2) + comments += " File '{}' {} in '{}' with suffix '{}'\n".format( + item, NO_COMPARE, from_dir2, suffix2 + ) all_success = False for item in two_not_one: - if 'initial' in item: + if "initial" in item: continue - comments += " File '{}' {} in '{}' with suffix '{}'\n".format(item, NO_ORIGINAL, from_dir1, suffix1) + comments += " File '{}' {} in '{}' with suffix '{}'\n".format( + item, NO_ORIGINAL, from_dir1, suffix1 + ) all_success = False num_compared += len(match_ups) for hist1, hist2 in match_ups: - if not '.nc' in hist1: + if not ".nc" in hist1: logger.info("Ignoring non-netcdf file {}".format(hist1)) continue - success, cprnc_log_file, cprnc_comment = cprnc(model, os.path.join(from_dir1,hist1), - os.path.join(from_dir2,hist2), case, from_dir1, - multiinst_driver_compare=multiinst_driver_compare, - outfile_suffix=outfile_suffix, - ignore_fieldlist_diffs=ignore_fieldlist_diffs) + success, cprnc_log_file, cprnc_comment = cprnc( + model, + os.path.join(from_dir1, hist1), + os.path.join(from_dir2, hist2), + case, + from_dir1, + multiinst_driver_compare=multiinst_driver_compare, + outfile_suffix=outfile_suffix, + ignore_fieldlist_diffs=ignore_fieldlist_diffs, + ) if success: comments += " {} matched {}\n".format(hist1, hist2) else: @@ -265,16 +337,23 @@ def 
_compare_hists(case, from_dir1, from_dir2, suffix1="", suffix2="", outfile_s else: comments += " {} {} {}\n".format(hist1, DIFF_COMMENT, hist2) comments += " cat " + cprnc_log_file + "\n" - expected_log_file = os.path.join(casedir, os.path.basename(cprnc_log_file)) - if not (os.path.exists(expected_log_file) and filecmp.cmp(cprnc_log_file, expected_log_file)): + expected_log_file = os.path.join( + casedir, os.path.basename(cprnc_log_file) + ) + if not ( + os.path.exists(expected_log_file) + and filecmp.cmp(cprnc_log_file, expected_log_file) + ): try: safe_copy(cprnc_log_file, casedir) except (OSError, IOError) as _: - logger.warning("Could not copy {} to {}".format(cprnc_log_file, casedir)) + logger.warning( + "Could not copy {} to {}".format(cprnc_log_file, casedir) + ) all_success = False - # PFS test may not have any history files to compare. - if num_compared == 0 and testcase != "PFS": + # Some tests don't save history files. + if num_compared == 0 and testcase not in NO_HIST_TESTS: all_success = False comments += "Did not compare any hist files! Missing baselines?\n" @@ -282,6 +361,7 @@ def _compare_hists(case, from_dir1, from_dir2, suffix1="", suffix2="", outfile_s return all_success, comments + def compare_test(case, suffix1, suffix2, ignore_fieldlist_diffs=False): """ Compares two sets of component history files in the testcase directory @@ -296,13 +376,29 @@ def compare_test(case, suffix1, suffix2, ignore_fieldlist_diffs=False): returns (SUCCESS, comments) """ - rundir = case.get_value("RUNDIR") - - return _compare_hists(case, rundir, rundir, suffix1, suffix2, - ignore_fieldlist_diffs=ignore_fieldlist_diffs) - -def cprnc(model, file1, file2, case, rundir, multiinst_driver_compare=False, outfile_suffix="", - ignore_fieldlist_diffs=False, cprnc_exe=None): + rundir = case.get_value("RUNDIR") + + return _compare_hists( + case, + rundir, + rundir, + suffix1, + suffix2, + ignore_fieldlist_diffs=ignore_fieldlist_diffs, + ) + + +def cprnc( + model, + file1, + file2, + case, + rundir, + multiinst_driver_compare=False, + outfile_suffix="", + ignore_fieldlist_diffs=False, + cprnc_exe=None, +): """ Run cprnc to compare two individual nc files @@ -325,10 +421,10 @@ def cprnc(model, file1, file2, case, rundir, multiinst_driver_compare=False, out if not cprnc_exe: cprnc_exe = case.get_value("CCSM_CPRNC") basename = os.path.basename(file1) - multiinst_regex = re.compile(r'.*%s[^_]*(_[0-9]{4})[.]h.?[.][^.]+?[.]nc' % model) - mstr = '' - mstr1 = '' - mstr2 = '' + multiinst_regex = re.compile(r".*%s[^_]*(_[0-9]{4})[.]h.?[.][^.]+?[.]nc" % model) + mstr = "" + mstr1 = "" + mstr2 = "" # If one is a multiinstance file but the other is not add an instance string m1 = multiinst_regex.match(file1) m2 = multiinst_regex.match(file2) @@ -337,24 +433,30 @@ def cprnc(model, file1, file2, case, rundir, multiinst_driver_compare=False, out if m2 is not None: mstr2 = m2.group(1) if mstr1 != mstr2: - mstr = mstr1+mstr2 + mstr = mstr1 + mstr2 output_filename = os.path.join(rundir, "{}{}.cprnc.out".format(basename, mstr)) if outfile_suffix: output_filename += ".{}".format(outfile_suffix) if outfile_suffix is None: - cpr_stat, out, _ = run_cmd("{} -m {} {}".format(cprnc_exe, file1, file2), combine_output=True) + cpr_stat, out, _ = run_cmd( + "{} -m {} {}".format(cprnc_exe, file1, file2), combine_output=True + ) else: # Remove existing output file if it exists if os.path.exists(output_filename): os.remove(output_filename) - cpr_stat = run_cmd("{} -m {} {}".format(cprnc_exe, file1, file2), combine_output=True, 
arg_stdout=output_filename)[0] - with open(output_filename, "r") as fd: + cpr_stat = run_cmd( + "{} -m {} {}".format(cprnc_exe, file1, file2), + combine_output=True, + arg_stdout=output_filename, + )[0] + with open(output_filename, "r", encoding="utf-8") as fd: out = fd.read() - comment = '' + comment = "" if cpr_stat == 0: # Successful exit from cprnc if multiinst_driver_compare: @@ -375,13 +477,19 @@ def cprnc(model, file1, file2, case, rundir, multiinst_driver_compare=False, out elif "files seem to be IDENTICAL" in out: files_match = True else: - expect(False, "Did not find an expected summary string in cprnc output:\n{}".format(out)) + expect( + False, + "Did not find an expected summary string in cprnc output:\n{}".format( + out + ), + ) else: # If there is an error in cprnc, we do the safe thing of saying the comparison failed files_match = False return (files_match, output_filename, comment) + def compare_baseline(case, baseline_dir=None, outfile_suffix=""): """ compare the current test output to a baseline result @@ -394,7 +502,7 @@ def compare_baseline(case, baseline_dir=None, outfile_suffix=""): returns (SUCCESS, comments) SUCCESS means all hist files matched their corresponding baseline """ - rundir = case.get_value("RUNDIR") if baseline_dir is None: baselineroot = case.get_value("BASELINE_ROOT") basecmp_dir = os.path.join(baselineroot, case.get_value("BASECMP_CASE")) @@ -405,19 +513,24 @@ for bdir in dirs_to_check: if not os.path.isdir(bdir): - return False, "ERROR {} baseline directory '{}' does not exist".format(TEST_NO_BASELINES_COMMENT,bdir) + return False, "ERROR {} baseline directory '{}' does not exist".format( + TEST_NO_BASELINES_COMMENT, bdir + ) - success, comments = _compare_hists(case, rundir, basecmp_dir, outfile_suffix=outfile_suffix) + success, comments = _compare_hists( + case, rundir, basecmp_dir, outfile_suffix=outfile_suffix + ) if get_model() == "e3sm": bless_log = os.path.join(basecmp_dir, BLESS_LOG_NAME) if os.path.exists(bless_log): - lines = open(bless_log, "r").readlines() + lines = open(bless_log, "r", encoding="utf-8").readlines() if lines: last_line = lines[-1] comments += "\n Most recent bless: {}".format(last_line) return success, comments + def generate_teststatus(testdir, baseline_dir): """ CESM stores its TestStatus file in baselines.
Do not let exceptions @@ -429,9 +542,18 @@ def generate_teststatus(testdir, baseline_dir): if not os.path.isdir(baseline_dir): os.makedirs(baseline_dir) - safe_copy(os.path.join(testdir, TEST_STATUS_FILENAME), baseline_dir, preserve_meta=False) + safe_copy( + os.path.join(testdir, TEST_STATUS_FILENAME), + baseline_dir, + preserve_meta=False, + ) except Exception as e: - logger.warning("Could not copy {} to baselines, {}".format(os.path.join(testdir, TEST_STATUS_FILENAME), str(e))) + logger.warning( + "Could not copy {} to baselines, {}".format( + os.path.join(testdir, TEST_STATUS_FILENAME), str(e) + ) + ) + def _generate_baseline_impl(case, baseline_dir=None, allow_baseline_overwrite=False): """ @@ -443,7 +565,7 @@ def _generate_baseline_impl(case, baseline_dir=None, allow_baseline_overwrite=Fa returns (SUCCESS, comments) """ - rundir = case.get_value("RUNDIR") + rundir = case.get_value("RUNDIR") ref_case = case.get_value("RUN_REFCASE") if baseline_dir is None: baselineroot = case.get_value("BASELINE_ROOT") @@ -451,32 +573,43 @@ def _generate_baseline_impl(case, baseline_dir=None, allow_baseline_overwrite=Fa else: basegen_dir = baseline_dir testcase = case.get_value("CASE") - archive = case.get_env('archive') + archive = case.get_env("archive") if not os.path.isdir(basegen_dir): os.makedirs(basegen_dir) - if (os.path.isdir(os.path.join(basegen_dir,testcase)) and - not allow_baseline_overwrite): + if ( + os.path.isdir(os.path.join(basegen_dir, testcase)) + and not allow_baseline_overwrite + ): expect(False, " Cowardly refusing to overwrite existing baseline directory") comments = "Generating baselines into '{}'\n".format(basegen_dir) num_gen = 0 for model in _iter_model_file_substrs(case): + + if model == "ww3dev": + model = "ww3" comments += " generating for model '{}'\n".format(model) - hists = archive.get_latest_hist_files(testcase, model, rundir, ref_case=ref_case) + hists = archive.get_latest_hist_files( + testcase, model, rundir, ref_case=ref_case + ) logger.debug("latest_files: {}".format(hists)) num_gen += len(hists) for hist in hists: offset = hist.rfind(model) - expect(offset >= 0,"ERROR: cant find model name {} in {}".format(model, hist)) + expect( + offset >= 0, "ERROR: cant find model name {} in {}".format(model, hist) + ) baseline = os.path.join(basegen_dir, hist[offset:]) if os.path.exists(baseline): os.remove(baseline) - safe_copy(os.path.join(rundir,hist), baseline, preserve_meta=False) - comments += " generating baseline '{}' from file {}\n".format(baseline, hist) + safe_copy(os.path.join(rundir, hist), baseline, preserve_meta=False) + comments += " generating baseline '{}' from file {}\n".format( + baseline, hist + ) # copy latest cpl log to baseline # drop the date so that the name is generic @@ -485,29 +618,53 @@ def _generate_baseline_impl(case, baseline_dir=None, allow_baseline_overwrite=Fa else: cplname = "cpl" - newestcpllogfile = case.get_latest_cpl_log(coupler_log_path=case.get_value("RUNDIR"), cplname=cplname) + newestcpllogfile = case.get_latest_cpl_log( + coupler_log_path=case.get_value("RUNDIR"), cplname=cplname + ) if newestcpllogfile is None: - logger.warning("No {}.log file found in directory {}".format(cplname,case.get_value("RUNDIR"))) + logger.warning( + "No {}.log file found in directory {}".format( + cplname, case.get_value("RUNDIR") + ) + ) else: - safe_copy(newestcpllogfile, os.path.join(basegen_dir, "{}.log.gz".format(cplname)), preserve_meta=False) + safe_copy( + newestcpllogfile, + os.path.join(basegen_dir, "{}.log.gz".format(cplname)), + 
preserve_meta=False, + ) testname = case.get_value("TESTCASE") testopts = parse_test_name(case.get_value("CASEBASEID"))[1] testopts = [] if testopts is None else testopts - expect(num_gen > 0 or (testname in ["PFS", "TSC"] or "B" in testopts), - "Could not generate any hist files for case '{}', something is seriously wrong".format(os.path.join(rundir, testcase))) + expect( + num_gen > 0 or (testname in NO_HIST_TESTS or "B" in testopts), + "Could not generate any hist files for case '{}', something is seriously wrong".format( + os.path.join(rundir, testcase) + ), + ) if get_model() == "e3sm": bless_log = os.path.join(basegen_dir, BLESS_LOG_NAME) - with open(bless_log, "a") as fd: - fd.write("sha:{} date:{}\n".format(get_current_commit(repo=case.get_value("SRCROOT")), - get_timestamp(timestamp_format="%Y-%m-%d_%H:%M:%S"))) + with open(bless_log, "a", encoding="utf-8") as fd: + fd.write( + "sha:{} date:{}\n".format( + get_current_commit(repo=case.get_value("SRCROOT")), + get_timestamp(timestamp_format="%Y-%m-%d_%H:%M:%S"), + ) + ) return True, comments + def generate_baseline(case, baseline_dir=None, allow_baseline_overwrite=False): with SharedArea(): - return _generate_baseline_impl(case, baseline_dir=baseline_dir, allow_baseline_overwrite=allow_baseline_overwrite) + return _generate_baseline_impl( + case, + baseline_dir=baseline_dir, + allow_baseline_overwrite=allow_baseline_overwrite, + ) + def get_ts_synopsis(comments): r""" @@ -571,6 +728,8 @@ def get_ts_synopsis(comments): elif has_fieldlist_differences: return "FIELDLIST field lists differ (otherwise bit-for-bit)" elif has_bfails: - return "ERROR {} some baseline files were missing".format(TEST_NO_BASELINES_COMMENT) + return "ERROR {} some baseline files were missing".format( + TEST_NO_BASELINES_COMMENT + ) else: return "" diff --git a/CIME/jenkins_generic_job.py b/CIME/jenkins_generic_job.py index 5fcd8e94a95..def0c930a63 100644 --- a/CIME/jenkins_generic_job.py +++ b/CIME/jenkins_generic_job.py @@ -6,7 +6,7 @@ ############################################################################## def cleanup_queue(test_root, test_id): -############################################################################### + ############################################################################### """ Delete all jobs left in the queue """ @@ -16,30 +16,49 @@ def cleanup_queue(test_root, test_id): jobmap = case.get_job_info() jobkills = [] for jobname, jobid in jobmap.items(): - logging.warning("Found leftover batch job {} ({}) that need to be deleted".format(jobid, jobname)) + logging.warning( + "Found leftover batch job {} ({}) that need to be deleted".format( + jobid, jobname + ) + ) jobkills.append(jobid) case.cancel_batch_jobs(jobkills) + ############################################################################### -def delete_old_test_data(mach_comp, test_id_root, scratch_root, test_root, run_area, build_area, archive_area, avoid_test_id): -############################################################################### +def delete_old_test_data( + mach_comp, + test_id_root, + scratch_root, + test_root, + run_area, + build_area, + archive_area, + avoid_test_id, +): + ############################################################################### # Remove old dirs for clutter_area in [scratch_root, test_root, run_area, build_area, archive_area]: - for old_file in glob.glob("{}/*{}*{}*".format(clutter_area, mach_comp, test_id_root)): + for old_file in glob.glob( + "{}/*{}*{}*".format(clutter_area, mach_comp, test_id_root) + ): if 
avoid_test_id not in old_file: logging.info("TEST ARCHIVER: Removing {}".format(old_file)) - if (os.path.isdir(old_file)): + if os.path.isdir(old_file): shutil.rmtree(old_file) else: os.remove(old_file) + ############################################################################### def scan_for_test_ids(old_test_archive, mach_comp, test_id_root): -############################################################################### + ############################################################################### results = set([]) test_id_re = re.compile(".+[.]([^.]+)") - for item in glob.glob("{}/{}/*{}*{}*".format(old_test_archive, "old_cases", mach_comp, test_id_root)): + for item in glob.glob( + "{}/{}/*{}*{}*".format(old_test_archive, "old_cases", mach_comp, test_id_root) + ): filename = os.path.basename(item) the_match = test_id_re.match(filename) if the_match: @@ -48,14 +67,26 @@ def scan_for_test_ids(old_test_archive, mach_comp, test_id_root): return list(results) -############################################################################### -def archive_old_test_data(machine, mach_comp, test_id_root, scratch_root, test_root, old_test_archive, avoid_test_id): -############################################################################### - gb_allowed = machine.get_value("MAX_GB_OLD_TEST_DATA") - gb_allowed = 500 if gb_allowed is None else gb_allowed +############################################################################### +def archive_old_test_data( + machine, + mach_comp, + test_id_root, + scratch_root, + test_root, + old_test_archive, + avoid_test_id, +): + ############################################################################### + + gb_allowed = machine.get_value("MAX_GB_OLD_TEST_DATA") + gb_allowed = 500 if gb_allowed is None else gb_allowed bytes_allowed = gb_allowed * 1000000000 - expect(bytes_allowed > 0, "Machine {} does not support test archiving".format(machine.get_machine_name())) + expect( + bytes_allowed > 0, + "Machine {} does not support test archiving".format(machine.get_machine_name()), + ) # Remove old cs.status, cs.submit. 
I don't think there's any value to leaving these around # or archiving them @@ -65,8 +96,10 @@ def archive_old_test_data(machine, mach_comp, test_id_root, scratch_root, test_r os.remove(old_cs_file) # Remove the old CTest XML, same reason as above - if (os.path.isdir("Testing")): - logging.info("TEST ARCHIVER: Removing {}".format(os.path.join(os.getcwd(), "Testing"))) + if os.path.isdir("Testing"): + logging.info( + "TEST ARCHIVER: Removing {}".format(os.path.join(os.getcwd(), "Testing")) + ) shutil.rmtree("Testing") if not os.path.exists(old_test_archive): @@ -76,76 +109,163 @@ def archive_old_test_data(machine, mach_comp, test_id_root, scratch_root, test_r for old_case in glob.glob("{}/*{}*{}*".format(test_root, mach_comp, test_id_root)): if avoid_test_id not in old_case: logging.info("TEST ARCHIVER: archiving case {}".format(old_case)) - exeroot, rundir, archdir = run_cmd_no_fail("./xmlquery EXEROOT RUNDIR DOUT_S_ROOT --value", from_dir=old_case).split(",") - - for the_dir, target_area in [(exeroot, "old_builds"), (rundir, "old_runs"), (archdir, "old_archives"), (old_case, "old_cases")]: + exeroot, rundir, archdir = run_cmd_no_fail( + "./xmlquery EXEROOT RUNDIR DOUT_S_ROOT --value", from_dir=old_case + ).split(",") + + for the_dir, target_area in [ + (exeroot, "old_builds"), + (rundir, "old_runs"), + (archdir, "old_archives"), + (old_case, "old_cases"), + ]: if os.path.exists(the_dir): start_time = time.time() - logging.info("TEST ARCHIVER: archiving {} to {}".format(the_dir, os.path.join(old_test_archive, target_area))) + logging.info( + "TEST ARCHIVER: archiving {} to {}".format( + the_dir, os.path.join(old_test_archive, target_area) + ) + ) if not os.path.exists(os.path.join(old_test_archive, target_area)): os.mkdir(os.path.join(old_test_archive, target_area)) old_case_name = os.path.basename(old_case) - with tarfile.open(os.path.join(old_test_archive, target_area, "{}.tar.gz".format(old_case_name)), "w:gz") as tfd: + with tarfile.open( + os.path.join( + old_test_archive, + target_area, + "{}.tar.gz".format(old_case_name), + ), + "w:gz", + ) as tfd: tfd.add(the_dir, arcname=old_case_name) shutil.rmtree(the_dir) # Remove parent dir if it's empty parent_dir = os.path.dirname(the_dir) - if not os.listdir(parent_dir) or os.listdir(parent_dir) == ["case2_output_root"]: + if not os.listdir(parent_dir) or os.listdir(parent_dir) == [ + "case2_output_root" + ]: shutil.rmtree(parent_dir) end_time = time.time() - logging.info("TEST ARCHIVER: archiving {} took {} seconds".format(the_dir, int(end_time - start_time))) + logging.info( + "TEST ARCHIVER: archiving {} took {} seconds".format( + the_dir, int(end_time - start_time) + ) + ) # Check size of archive - bytes_of_old_test_data = int(run_cmd_no_fail("du -sb {}".format(old_test_archive)).split()[0]) + bytes_of_old_test_data = int( + run_cmd_no_fail("du -sb {}".format(old_test_archive)).split()[0] + ) if bytes_of_old_test_data > bytes_allowed: - logging.info("TEST ARCHIVER: Too much test data, {}GB (actual) > {}GB (limit)".format(bytes_of_old_test_data / 1000000000, bytes_allowed / 1000000000)) + logging.info( + "TEST ARCHIVER: Too much test data, {}GB (actual) > {}GB (limit)".format( + bytes_of_old_test_data / 1000000000, bytes_allowed / 1000000000 + ) + ) old_test_ids = scan_for_test_ids(old_test_archive, mach_comp, test_id_root) for old_test_id in sorted(old_test_ids): - logging.info("TEST ARCHIVER: Removing old data for test {}".format(old_test_id)) + logging.info( + "TEST ARCHIVER: Removing old data for test {}".format(old_test_id) + ) for 
item in ["old_cases", "old_builds", "old_runs", "old_archives"]: - for dir_to_rm in glob.glob("{}/{}/*{}*{}*".format(old_test_archive, item, mach_comp, old_test_id)): + for dir_to_rm in glob.glob( + "{}/{}/*{}*{}*".format( + old_test_archive, item, mach_comp, old_test_id + ) + ): logging.info("TEST ARCHIVER: Removing {}".format(dir_to_rm)) - if (os.path.isdir(dir_to_rm)): + if os.path.isdir(dir_to_rm): shutil.rmtree(dir_to_rm) else: os.remove(dir_to_rm) - bytes_of_old_test_data = int(run_cmd_no_fail("du -sb {}".format(old_test_archive)).split()[0]) + bytes_of_old_test_data = int( + run_cmd_no_fail("du -sb {}".format(old_test_archive)).split()[0] + ) if bytes_of_old_test_data < bytes_allowed: break else: - logging.info("TEST ARCHIVER: Test data is within accepted bounds, {}GB (actual) < {}GB (limit)".format(bytes_of_old_test_data / 1000000000, bytes_allowed / 1000000000)) + logging.info( + "TEST ARCHIVER: Test data is within accepted bounds, {}GB (actual) < {}GB (limit)".format( + bytes_of_old_test_data / 1000000000, bytes_allowed / 1000000000 + ) + ) + ############################################################################### -def handle_old_test_data(machine, compiler, test_id_root, scratch_root, test_root, avoid_test_id): -############################################################################### - run_area = os.path.dirname(os.path.dirname(machine.get_value("RUNDIR"))) # Assumes XXX/$CASE/run - build_area = os.path.dirname(os.path.dirname(machine.get_value("EXEROOT"))) # Assumes XXX/$CASE/build - archive_area = os.path.dirname(machine.get_value("DOUT_S_ROOT")) # Assumes XXX/archive/$CASE +def handle_old_test_data( + machine, compiler, test_id_root, scratch_root, test_root, avoid_test_id +): + ############################################################################### + run_area = os.path.dirname( + os.path.dirname(machine.get_value("RUNDIR")) + ) # Assumes XXX/$CASE/run + build_area = os.path.dirname( + os.path.dirname(machine.get_value("EXEROOT")) + ) # Assumes XXX/$CASE/build + archive_area = os.path.dirname( + machine.get_value("DOUT_S_ROOT") + ) # Assumes XXX/archive/$CASE old_test_archive = os.path.join(scratch_root, "old_test_archive") mach_comp = "{}_{}".format(machine.get_machine_name(), compiler) try: - archive_old_test_data(machine, mach_comp, test_id_root, scratch_root, test_root, old_test_archive, avoid_test_id) + archive_old_test_data( + machine, + mach_comp, + test_id_root, + scratch_root, + test_root, + old_test_archive, + avoid_test_id, + ) except Exception: - logging.warning("TEST ARCHIVER: Archiving of old test data FAILED: {}\nDeleting data instead".format(sys.exc_info()[1])) - delete_old_test_data(mach_comp, test_id_root, scratch_root, test_root, run_area, build_area, archive_area, avoid_test_id) + logging.warning( + "TEST ARCHIVER: Archiving of old test data FAILED: {}\nDeleting data instead".format( + sys.exc_info()[1] + ) + ) + delete_old_test_data( + mach_comp, + test_id_root, + scratch_root, + test_root, + run_area, + build_area, + archive_area, + avoid_test_id, + ) + ############################################################################### -def jenkins_generic_job(generate_baselines, submit_to_cdash, no_batch, - baseline_name, - arg_cdash_build_name, cdash_project, - arg_test_suite, - cdash_build_group, baseline_compare, - scratch_root, parallel_jobs, walltime, - machine, compiler, real_baseline_name, baseline_root, update_success): -############################################################################### +def jenkins_generic_job( + 
generate_baselines, + submit_to_cdash, + no_batch, + baseline_name, + arg_cdash_build_name, + cdash_project, + arg_test_suite, + cdash_build_group, + baseline_compare, + scratch_root, + parallel_jobs, + walltime, + machine, + compiler, + real_baseline_name, + baseline_root, + update_success, + check_throughput, + check_memory, +): + ############################################################################### """ Return True if all tests passed """ @@ -155,18 +275,21 @@ def jenkins_generic_job(generate_baselines, submit_to_cdash, no_batch, test_suite = test_suite if arg_test_suite is None else arg_test_suite test_root = os.path.join(scratch_root, "J") - if (use_batch): + if use_batch: batch_system = machine.get_value("BATCH_SYSTEM") - expect(batch_system is not None, "Bad XML. Batch machine has no batch_system configuration.") + expect( + batch_system is not None, + "Bad XML. Batch machine has no batch_system configuration.", + ) # # Env changes # - if (submit_to_cdash and proxy is not None): + if submit_to_cdash and proxy is not None: os.environ["http_proxy"] = proxy - if (not os.path.isdir(scratch_root)): + if not os.path.isdir(scratch_root): os.makedirs(scratch_root) # Important, need to set up signal handlers before we officially @@ -180,19 +303,30 @@ def jenkins_generic_job(generate_baselines, submit_to_cdash, no_batch, # the Jenkins jobs with timeouts to avoid this. # - test_id_root = "J{}{}".format(baseline_name.capitalize(), test_suite.replace("e3sm_", "").capitalize()) + test_id_root = "J{}{}".format( + baseline_name.capitalize(), test_suite.replace("e3sm_", "").capitalize() + ) test_id = "%s%s" % (test_id_root, CIME.utils.get_timestamp()) - archiver_thread = threading.Thread(target=handle_old_test_data, args=(machine, compiler, test_id_root, scratch_root, test_root, test_id)) + archiver_thread = threading.Thread( + target=handle_old_test_data, + args=(machine, compiler, test_id_root, scratch_root, test_root, test_id), + ) archiver_thread.start() # # Set up create_test command and run it # - create_test_args = [test_suite, "--test-root %s" % test_root, "-t %s" % test_id, "--machine %s" % machine.get_machine_name(), "--compiler %s" % compiler] - if (generate_baselines): + create_test_args = [ + test_suite, + "--test-root %s" % test_root, + "-t %s" % test_id, + "--machine %s" % machine.get_machine_name(), + "--compiler %s" % compiler, + ] + if generate_baselines: create_test_args.append("-g -b " + real_baseline_name) - elif (baseline_compare): + elif baseline_compare: create_test_args.append("-c -b " + real_baseline_name) if scratch_root != machine.get_value("CIME_OUTPUT_ROOT"): @@ -212,36 +346,55 @@ def jenkins_generic_job(generate_baselines, submit_to_cdash, no_batch, create_test_cmd = "./create_test " + " ".join(create_test_args) - if (not CIME.wait_for_tests.SIGNAL_RECEIVED): - create_test_stat = CIME.utils.run_cmd(create_test_cmd, from_dir=CIME.utils.get_scripts_root(), - verbose=True, arg_stdout=None, arg_stderr=None)[0] + if not CIME.wait_for_tests.SIGNAL_RECEIVED: + create_test_stat = CIME.utils.run_cmd( + create_test_cmd, + from_dir=CIME.utils.get_scripts_root(), + verbose=True, + arg_stdout=None, + arg_stderr=None, + )[0] # Create_test should have either passed, detected failing tests, or timed out - expect(create_test_stat in [0, CIME.utils.TESTS_FAILED_ERR_CODE, -signal.SIGTERM], - "Create_test script FAILED with error code '{:d}'!".format(create_test_stat)) + expect( + create_test_stat in [0, CIME.utils.TESTS_FAILED_ERR_CODE, -signal.SIGTERM], + "Create_test script 
FAILED with error code '{:d}'!".format( + create_test_stat + ), + ) # # Wait for tests # - if (submit_to_cdash): - cdash_build_name = "_".join([test_suite, baseline_name, compiler]) if arg_cdash_build_name is None else arg_cdash_build_name + if submit_to_cdash: + cdash_build_name = ( + "_".join([test_suite, baseline_name, compiler]) + if arg_cdash_build_name is None + else arg_cdash_build_name + ) else: cdash_build_name = None os.environ["CIME_MACHINE"] = machine.get_machine_name() if submit_to_cdash: - logging.info("To resubmit to dashboard: wait_for_tests {}/*{}/TestStatus --no-wait -b {}".format(test_root, test_id, cdash_build_name)) - - tests_passed = CIME.wait_for_tests.wait_for_tests(glob.glob("{}/*{}/TestStatus".format(test_root, test_id)), - no_wait=not use_batch, # wait if using queue - check_throughput=False, # don't check throughput - check_memory=False, # don't check memory - ignore_namelists=False, # don't ignore namelist diffs - cdash_build_name=cdash_build_name, - cdash_project=cdash_project, - cdash_build_group=cdash_build_group, - update_success=update_success) + logging.info( + "To resubmit to dashboard: wait_for_tests {}/*{}/TestStatus --no-wait -b {}".format( + test_root, test_id, cdash_build_name + ) + ) + + tests_passed = CIME.wait_for_tests.wait_for_tests( + glob.glob("{}/*{}/TestStatus".format(test_root, test_id)), + no_wait=not use_batch, # wait if using queue + check_throughput=check_throughput, + check_memory=check_memory, + ignore_namelists=False, # don't ignore namelist diffs + cdash_build_name=cdash_build_name, + cdash_project=cdash_project, + cdash_build_group=cdash_build_group, + update_success=update_success, + ) logging.info("TEST ARCHIVER: Waiting for archiver thread") archiver_thread.join() diff --git a/CIME/locked_files.py b/CIME/locked_files.py index 4d30c1cfe7a..784b5674941 100644 --- a/CIME/locked_files.py +++ b/CIME/locked_files.py @@ -6,6 +6,7 @@ LOCKED_DIR = "LockedFiles" + def lock_file(filename, caseroot=None, newname=None): expect("/" not in filename, "Please just provide basename of locked file") caseroot = os.getcwd() if caseroot is None else caseroot @@ -24,6 +25,7 @@ def lock_file(filename, caseroot=None, newname=None): safe_copy(os.path.join(caseroot, filename), os.path.join(fulllockdir, newname)) GenericXML.invalidate(os.path.join(fulllockdir, newname)) + def unlock_file(filename, caseroot=None): expect("/" not in filename, "Please just provide basename of locked file") caseroot = os.getcwd() if caseroot is None else caseroot @@ -33,6 +35,7 @@ def unlock_file(filename, caseroot=None): logging.debug("Unlocking file {}".format(filename)) + def is_locked(filename, caseroot=None): expect("/" not in filename, "Please just provide basename of locked file") caseroot = os.getcwd() if caseroot is None else caseroot diff --git a/CIME/namelist.py b/CIME/namelist.py index 80df47976e6..ef70f47f5e0 100644 --- a/CIME/namelist.py +++ b/CIME/namelist.py @@ -114,39 +114,45 @@ # Fortran syntax regular expressions. # Variable names. 
-#FORTRAN_NAME_REGEX = re.compile(r"(^[a-z][a-z0-9_]{0,62})(\([+-]?\d*:?[+-]?\d*:?[+-]?\d*\))?$", re.IGNORECASE) -FORTRAN_NAME_REGEX = re.compile(r"""(^[a-z][a-z0-9_@]{0,62}) # The variable name +# FORTRAN_NAME_REGEX = re.compile(r"(^[a-z][a-z0-9_]{0,62})(\([+-]?\d*:?[+-]?\d*:?[+-]?\d*\))?$", re.IGNORECASE) +FORTRAN_NAME_REGEX = re.compile( + r"""(^[a-z][a-z0-9_@]{0,62}) # The variable name (\( # begin optional index expression (([+-]?\d+) # Single valued index | # or (([+-]?\d+)?:([+-]?\d+)?:?([+-]?\d+)?)) # colon seperated triplet - \))?\s*$""" # end optional index expression - , re.IGNORECASE | re.VERBOSE) + \))?\s*$""", # end optional index expression + re.IGNORECASE | re.VERBOSE, +) FORTRAN_LITERAL_REGEXES = {} # Integer literals. _int_re_string = r"(\+|-)?[0-9]+" -FORTRAN_LITERAL_REGEXES['integer'] = re.compile("^" + _int_re_string + "$") +FORTRAN_LITERAL_REGEXES["integer"] = re.compile("^" + _int_re_string + "$") # Real/complex literals. _ieee_exceptional_re_string = r"inf(inity)?|nan(\([^)]+\))?" -_float_re_string = r"((\+|-)?([0-9]+(\.[0-9]*)?|\.[0-9]+)([ed]?{})?|{})".format(_int_re_string, _ieee_exceptional_re_string) -FORTRAN_LITERAL_REGEXES['real'] = re.compile("^" + _float_re_string + "$", - re.IGNORECASE) -FORTRAN_LITERAL_REGEXES['complex'] = re.compile(r"^\([ \n]*" + - _float_re_string + - r"[ \n]*,[ \n]*" + - _float_re_string + - r"[ \n]*\)$", re.IGNORECASE) +_float_re_string = r"((\+|-)?([0-9]+(\.[0-9]*)?|\.[0-9]+)([ed]?{})?|{})".format( + _int_re_string, _ieee_exceptional_re_string +) +FORTRAN_LITERAL_REGEXES["real"] = re.compile( + "^" + _float_re_string + "$", re.IGNORECASE +) +FORTRAN_LITERAL_REGEXES["complex"] = re.compile( + r"^\([ \n]*" + + _float_re_string + + r"[ \n]*,[ \n]*" + + _float_re_string + + r"[ \n]*\)$", + re.IGNORECASE, +) # Character literals. _char_single_re_string = r"'[^']*(''[^']*)*'" _char_double_re_string = r'"[^"]*(""[^"]*)*"' -FORTRAN_LITERAL_REGEXES['character'] = re.compile("^(" + - _char_single_re_string + "|" + - _char_double_re_string + - ")$") +FORTRAN_LITERAL_REGEXES["character"] = re.compile( + "^(" + _char_single_re_string + "|" + _char_double_re_string + ")$" +) # Logical literals. -FORTRAN_LITERAL_REGEXES['logical'] = re.compile(r"^\.?[tf][^=/ \n]*$", - re.IGNORECASE) +FORTRAN_LITERAL_REGEXES["logical"] = re.compile(r"^\.?[tf][^=/ \n]*$", re.IGNORECASE) # Repeated value prefix. 
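The "N*value" repetition prefix named in the header comment above is matched by the pattern that follows; a self-contained sketch of how such a prefix is stripped and expanded (hypothetical helper names, mirroring fortran_namelist_base_value and expand_literal_list elsewhere in this file):

    import re

    # Same pattern as FORTRAN_REPEAT_PREFIX_REGEX below.
    REPEAT_PREFIX = re.compile(r"^[0-9]*[1-9]+[0-9]*\*")

    def strip_repeat(value):
        # "6*2" -> "2"; values without a prefix pass through unchanged.
        if REPEAT_PREFIX.search(value) is not None:
            return value[value.find("*") + 1 :]
        return value

    def expand_repeat(value):
        # "3*1.5" -> ["1.5", "1.5", "1.5"]
        if REPEAT_PREFIX.search(value) is not None:
            num, _, literal = value.partition("*")
            return int(num) * [literal]
        return [value]

    assert strip_repeat("6*2") == "2"
    assert expand_repeat("3*1.5") == ["1.5", "1.5", "1.5"]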
FORTRAN_REPEAT_PREFIX_REGEX = re.compile(r"^[0-9]*[1-9]+[0-9]*\*") @@ -195,8 +201,9 @@ def is_valid_fortran_name(string): """ return FORTRAN_NAME_REGEX.search(string) is not None + def get_fortran_name_only(full_var): - """ remove array section if any and return only the variable name + """remove array section if any and return only the variable name >>> get_fortran_name_only('foo') 'foo' >>> get_fortran_name_only('foo(3)') @@ -215,8 +222,9 @@ def get_fortran_name_only(full_var): m = FORTRAN_NAME_REGEX.search(full_var) return m.group(1) + def get_fortran_variable_indices(varname, varlen=1, allow_any_len=False): - """ get indices from a fortran namelist variable as a triplet of minindex, maxindex and step + """get indices from a fortran namelist variable as a triplet of minindex, maxindex and step >>> get_fortran_variable_indices('foo(3)') (3, 3, 1) @@ -248,10 +256,11 @@ def get_fortran_variable_indices(varname, varlen=1, allow_any_len=False): if allow_any_len and maxindex == minindex: maxindex = -1 - expect(step != 0,"Step size 0 not allowed") + expect(step != 0, "Step size 0 not allowed") return (minindex, maxindex, step) + def fortran_namelist_base_value(string): r"""Strip off whitespace and repetition syntax from a namelist value. @@ -272,7 +281,7 @@ def fortran_namelist_base_value(string): string = string.strip(" \n") # Strip off repeated value prefix. if FORTRAN_REPEAT_PREFIX_REGEX.search(string) is not None: - string = string[string.find('*') + 1:] + string = string[string.find("*") + 1 :] return string @@ -298,7 +307,7 @@ def character_literal_to_string(literal): # Find left and right edges of the string, extract middle. left_pos = literal.find(delimiter) right_pos = literal.rfind(delimiter) - new_literal = literal[left_pos+1:right_pos] + new_literal = literal[left_pos + 1 : right_pos] # Replace escaped quote and apostrophe characters. return new_literal.replace(delimiter * 2, delimiter) @@ -318,6 +327,7 @@ def string_to_character_literal(string): string = string.replace('"', '""') return '"' + string + '"' + def is_valid_fortran_namelist_literal(type_, string): r"""Determine whether a literal is valid in a Fortran namelist. @@ -573,12 +583,14 @@ def is_valid_fortran_namelist_literal(type_, string): >>> is_valid_fortran_namelist_literal("logical", ".t2 ") True """ - expect(type_ in FORTRAN_LITERAL_REGEXES, - "Invalid Fortran type for a namelist: {!r}".format(str(type_))) + expect( + type_ in FORTRAN_LITERAL_REGEXES, + "Invalid Fortran type for a namelist: {!r}".format(str(type_)), + ) # Strip off whitespace and repetition. string = fortran_namelist_base_value(string) # Null values are always allowed. - if string == '': + if string == "": return True return FORTRAN_LITERAL_REGEXES[type_].search(string) is not None @@ -643,37 +655,45 @@ def literal_to_python_value(literal, type_=None): -10000000000.0 >>> shouldRaise(ValueError, literal_to_python_value, "nan(1234)") """ - expect(FORTRAN_REPEAT_PREFIX_REGEX.search(literal) is None, - "Cannot use repetition syntax in literal_to_python_value") + expect( + FORTRAN_REPEAT_PREFIX_REGEX.search(literal) is None, + "Cannot use repetition syntax in literal_to_python_value", + ) # Handle null value. - if fortran_namelist_base_value(literal) == '': + if fortran_namelist_base_value(literal) == "": return None if type_ is None: # Autodetect type. 
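Type autodetection, announced by the comment above, amounts to trying each literal pattern in a fixed order. A simplified standalone sketch; the patterns here are reduced stand-ins for the FORTRAN_LITERAL_REGEXES table, which also covers complex numbers and IEEE exceptional values:

    import re

    # Reduced stand-ins for the real literal regexes.
    LITERALS = {
        "character": re.compile(r"^('[^']*(''[^']*)*'|\"[^\"]*(\"\"[^\"]*)*\")$"),
        "integer": re.compile(r"^[+-]?[0-9]+$"),
        "logical": re.compile(r"^\.?[tf][^=/ \n]*$", re.IGNORECASE),
        "real": re.compile(
            r"^[+-]?([0-9]+\.?[0-9]*|\.[0-9]+)([ed][+-]?[0-9]+)?$", re.IGNORECASE
        ),
    }

    def classify(token):
        # Try the narrower types first, as literal_to_python_value does below.
        for type_ in ("character", "integer", "logical", "real"):
            if LITERALS[type_].search(token) is not None:
                return type_
        return None

    assert classify("42") == "integer"
    assert classify("'don''t'") == "character"
    assert classify(".false.") == "logical"
    assert classify("1.5e3") == "real"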
- for test_type in ('character', 'complex', 'integer', 'logical', 'real'): + for test_type in ("character", "complex", "integer", "logical", "real"): if is_valid_fortran_namelist_literal(test_type, literal): type_ = test_type break - expect(type_ is not None, - "{!r} is not a valid literal for any Fortran type.".format(str(literal))) + expect( + type_ is not None, + "{!r} is not a valid literal for any Fortran type.".format(str(literal)), + ) else: # Check that type is valid. - expect(is_valid_fortran_namelist_literal(type_, literal), - "{!r} is not a valid literal of type {!r}.".format(str(literal), str(type_))) + expect( + is_valid_fortran_namelist_literal(type_, literal), + "{!r} is not a valid literal of type {!r}.".format( + str(literal), str(type_) + ), + ) # Conversion for each type. - if type_ == 'character': + if type_ == "character": return character_literal_to_string(literal) - elif type_ == 'complex': - literal = literal.lstrip(' \n(').rstrip(' \n)') - real_part, _, imag_part = literal.partition(',') + elif type_ == "complex": + literal = literal.lstrip(" \n(").rstrip(" \n)") + real_part, _, imag_part = literal.partition(",") return complex(float(real_part), float(imag_part)) - elif type_ == 'integer': + elif type_ == "integer": return int(literal) - elif type_ == 'logical': - literal = literal.lstrip(' \n.') - return literal[0] in 'tT' - elif type_ == 'real': - literal = literal.lower().replace('d', 'e') + elif type_ == "logical": + literal = literal.lstrip(" \n.") + return literal[0] in "tT" + elif type_ == "real": + literal = literal.lower().replace("d", "e") return float(literal) @@ -692,7 +712,7 @@ def expand_literal_list(literals): expanded = [] for literal in literals: if FORTRAN_REPEAT_PREFIX_REGEX.search(literal) is not None: - num, _, value = literal.partition('*') + num, _, value = literal.partition("*") expanded += int(num) * [value] else: expanded.append(literal) @@ -729,14 +749,14 @@ def compress_literal_list(literals): else: # Otherwise, write out the previous literal and start tracking the # new one. - rep_str = str(num_reps) + '*' if num_reps > 1 else '' + rep_str = str(num_reps) + "*" if num_reps > 1 else "" if isinstance(old_literal, CIME.six.string_types): compressed.append(rep_str + old_literal) else: compressed.append(rep_str + str(old_literal)) old_literal = literal num_reps = 1 - rep_str = str(num_reps) + '*' if num_reps > 1 else '' + rep_str = str(num_reps) + "*" if num_reps > 1 else "" if isinstance(old_literal, CIME.six.string_types): compressed.append(rep_str + old_literal) else: @@ -750,6 +770,7 @@ def compress_literal_list(literals): compressed.append(str(literal)) return compressed + def merge_literal_lists(default, overwrite): """Merge two lists of literal value strings. @@ -780,7 +801,7 @@ def merge_literal_lists(default, overwrite): overwrite = expand_literal_list(overwrite) for default_elem, elem in zip(default, overwrite): - if elem == '': + if elem == "": merged.append(default_elem) else: merged.append(elem) @@ -827,10 +848,14 @@ def parse(in_file=None, text=None, groupless=False, convert_tab_to_space=True): of "6*2" is returned as that string; it is not converted to 6 copies of the Python integer `2`. Null values are returned as the empty string (""). 
""" - expect(in_file is not None or text is not None, - "Must specify an input file or text to the namelist parser.") - expect(in_file is None or text is None, - "Cannot specify both input file and text to the namelist parser.") + expect( + in_file is not None or text is not None, + "Must specify an input file or text to the namelist parser.", + ) + expect( + in_file is None or text is None, + "Cannot specify both input file and text to the namelist parser.", + ) if isinstance(in_file, CIME.six.string_types): logger.debug("Reading namelist at: {}".format(in_file)) with open(in_file) as in_file_obj: @@ -839,7 +864,7 @@ def parse(in_file=None, text=None, groupless=False, convert_tab_to_space=True): logger.debug("Reading namelist from file object") text = in_file.read() if convert_tab_to_space: - text = text.replace('\t', ' ') + text = text.replace("\t", " ") try: namelist_dict = _NamelistParser(text, groupless).parse_namelist() except (_NamelistEOF, _NamelistParseError) as error: @@ -863,8 +888,8 @@ def shouldRaise(eclass, method, *args, **kw): if not isinstance(e, eclass): raise return - raise Exception("Expected exception %s not raised" % - str(eclass)) + raise Exception("Expected exception %s not raised" % str(eclass)) + class Namelist(object): @@ -898,7 +923,9 @@ def __init__(self, groups=None): expect(group_name is not None, " Got None in groups {}".format(groups)) self._groups[group_name] = collections.OrderedDict() for variable_name in groups[group_name]: - self._groups[group_name][variable_name] = groups[group_name][variable_name] + self._groups[group_name][variable_name] = groups[group_name][ + variable_name + ] def clean_groups(self): self._groups = collections.OrderedDict() @@ -930,7 +957,7 @@ def get_variable_names(self, group_name): >>> sorted(x.get_variable_names('fOo')) ['bar(::)', 'bazz', 'bazz(2)', 'bazz(:2:)'] """ - gn = string_in_list(group_name,self._groups) + gn = string_in_list(group_name, self._groups) if not gn: return [] return list(self._groups[gn].keys()) @@ -949,14 +976,14 @@ def get_variable_value(self, group_name, variable_name): >>> parse(text='&foo bar=1,2 /').get_variable_value('foO', 'Bar') ['1', '2'] """ - gn = string_in_list(group_name,self._groups) + gn = string_in_list(group_name, self._groups) if gn: - vn = string_in_list(variable_name,self._groups[gn]) + vn = string_in_list(variable_name, self._groups[gn]) if vn: # Make a copy of the list so that any modifications done by the caller # don't modify the internal values. return self._groups[gn][vn][:] - return [''] + return [""] def get_value(self, variable_name): """Return the value of a uniquely-named variable. @@ -983,13 +1010,15 @@ def get_value(self, variable_name): if vnt: vn = vnt possible_groups.append(group_name) - expect(len(possible_groups) <= 1, - "Namelist.get_value: Variable {} is present in multiple groups: " - + str(possible_groups)) + expect( + len(possible_groups) <= 1, + "Namelist.get_value: Variable {} is present in multiple groups: " + + str(possible_groups), + ) if possible_groups: return self._groups[possible_groups[0]][vn] else: - return [''] + return [""] def set_variable_value(self, group_name, variable_name, value, var_size=1): """Set the value of the specified variable. 
@@ -1019,8 +1048,13 @@ def set_variable_value(self, group_name, variable_name, value, var_size=1): minindex, maxindex, step = get_fortran_variable_indices(variable_name, var_size) variable_name = get_fortran_name_only(variable_name) - expect(minindex > 0, "Indices < 1 not supported in CIME interface to fortran namelists... lower bound={}".format(minindex)) - gn = string_in_list(group_name,self._groups) + expect( + minindex > 0, + "Indices < 1 not supported in CIME interface to fortran namelists... lower bound={}".format( + minindex + ), + ) + gn = string_in_list(group_name, self._groups) if not gn: gn = group_name self._groups[gn] = {} @@ -1032,15 +1066,15 @@ def set_variable_value(self, group_name, variable_name, value, var_size=1): else: vn = variable_name tlen = 1 - self._groups[gn][vn] = [''] + self._groups[gn][vn] = [""] if minindex > tlen: - self._groups[gn][vn].extend(['']*(minindex-tlen-1)) + self._groups[gn][vn].extend([""] * (minindex - tlen - 1)) - for i in range(minindex, maxindex+2*step, step): + for i in range(minindex, maxindex + 2 * step, step): while len(self._groups[gn][vn]) < i: - self._groups[gn][vn].append('') - self._groups[gn][vn][i-1] = value.pop(0) + self._groups[gn][vn].append("") + self._groups[gn][vn][i - 1] = value.pop(0) if len(value) == 0: break @@ -1058,9 +1092,9 @@ def delete_variable(self, group_name, variable_name): >>> x.get_variable_names('brack') [] """ - gn = string_in_list(group_name,self._groups) + gn = string_in_list(group_name, self._groups) if gn: - vn=string_in_list(variable_name,self._groups[gn]) + vn = string_in_list(variable_name, self._groups[gn]) if vn: del self._groups[gn][vn] @@ -1122,8 +1156,9 @@ def merge_nl(self, other, overwrite=False): merged_val = merge_literal_lists(self_val, other_val) else: merged_val = merge_literal_lists(other_val, self_val) - self.set_variable_value(group_name, variable_name, merged_val, - var_size=len(merged_val)) + self.set_variable_value( + group_name, variable_name, merged_val, var_size=len(merged_val) + ) def get_group_variables(self, group_name): group_variables = {} @@ -1133,7 +1168,9 @@ def get_group_variables(self, group_name): group_variables[name] = value return group_variables - def write(self, out_file, groups=None, append=False, format_='nml', sorted_groups=True): + def write( + self, out_file, groups=None, append=False, format_="nml", sorted_groups=True + ): """Write a the output data (normally fortran namelist) to the out_file @@ -1149,11 +1186,13 @@ def write(self, out_file, groups=None, append=False, format_='nml', sorted_group specifies the file format. Formats other than 'nml' may not support all possible output values. 
""" - expect(format_ in ('nml', 'rc', 'nmlcontents'), - "Namelist.write: unexpected output format {!r}".format(str(format_))) + expect( + format_ in ("nml", "rc", "nmlcontents"), + "Namelist.write: unexpected output format {!r}".format(str(format_)), + ) if isinstance(out_file, CIME.six.string_types): logger.debug("Writing namelist to: {}".format(out_file)) - flag = 'a' if append else 'w' + flag = "a" if append else "w" with open(out_file, flag) as file_obj: self._write(file_obj, groups, format_, sorted_groups=sorted_groups) else: @@ -1164,16 +1203,16 @@ def _write(self, out_file, groups, format_, sorted_groups): """Unwrapped version of `write` assuming that a file object is input.""" if groups is None: groups = list(self._groups.keys()) - if format_ == 'nml' or format_ == 'nmlcontents': - equals = ' =' - elif format_ == 'rc': - equals = ':' - if (sorted_groups): + if format_ == "nml" or format_ == "nmlcontents": + equals = " =" + elif format_ == "rc": + equals = ":" + if sorted_groups: group_names = sorted(group for group in groups) else: group_names = groups for group_name in group_names: - if format_ == 'nml': + if format_ == "nml": out_file.write("&{}\n".format(group_name)) # allow empty group if group_name in self._groups: @@ -1185,16 +1224,22 @@ def _write(self, out_file, groups, format_, sorted_groups): # in the write phase, all characters in the namelist variable name after # the @ and including the @ should be removed if "@" in name: - name = re.sub('@.+$', "", name) + name = re.sub("@.+$", "", name) # To prettify things for long lists of values, build strings # line-by-line. if values[0] == "True" or values[0] == "False": - values[0] = values[0].replace("True",".true.").replace("False",".false.") + values[0] = ( + values[0] + .replace("True", ".true.") + .replace("False", ".false.") + ) lines = [" {}{} {}".format(name, equals, values[0])] for value in values[1:]: if value == "True" or value == "False": - value = value.replace("True",".true.").replace("False",".false.") + value = value.replace("True", ".true.").replace( + "False", ".false." 
+ ) if len(lines[-1]) + len(value) <= 77: lines[-1] += ", " + value else: @@ -1203,9 +1248,9 @@ def _write(self, out_file, groups, format_, sorted_groups): lines[-1] += "\n" for line in lines: out_file.write(line) - if format_ == 'nml': + if format_ == "nml": out_file.write("/\n") - if format_ == 'nmlcontents': + if format_ == "nmlcontents": out_file.write("\n") def write_nuopc(self, out_file, groups=None, sorted_groups=True): @@ -1217,26 +1262,29 @@ def write_nuopc(self, out_file, groups=None, sorted_groups=True): """ if isinstance(out_file, CIME.six.string_types): logger.debug("Writing nuopc config file to: {}".format(out_file)) - flag = 'w' + flag = "w" with open(out_file, flag) as file_obj: self._write_nuopc(file_obj, groups, sorted_groups=sorted_groups) else: logger.debug("Writing nuopc config data to file object") self._write_nuopc(out_file, groups, sorted_groups=sorted_groups) - - def _write_nuopc(self, out_file, groups, sorted_groups): + def _write_nuopc(self, out_file, groups, sorted_groups): """Unwrapped version of `write` assuming that a file object is input.""" if groups is None: groups = self._groups.keys() - if (sorted_groups): + if sorted_groups: group_names = sorted(group for group in groups) else: group_names = groups for group_name in group_names: - if "_attributes" not in group_name and "nuopc_" not in group_name and "_no_group" not in group_name: + if ( + "_attributes" not in group_name + and "nuopc_" not in group_name + and "_no_group" not in group_name + ): continue if "_attributes" in group_name: out_file.write("{}::\n".format(group_name)) @@ -1249,16 +1297,18 @@ def _write_nuopc(self, out_file, groups, sorted_groups): # in the write phase, all characters in the namelist variable name after # the @ and including the @ should be removed if "@" in name: - name = re.sub('@.+$', "", name) + name = re.sub("@.+$", "", name) equals = " =" if "_var" in group_name: - equals = ':' + equals = ":" # To prettify things for long lists of values, build strings # line-by-line. if values[0] == "True" or values[0] == "False": - values[0] = values[0].replace("True",".true.").replace("False",".false.") + values[0] = ( + values[0].replace("True", ".true.").replace("False", ".false.") + ) if "_attribute" in group_name: lines = [" {}{} {}".format(name, equals, values[0])] @@ -1267,7 +1317,9 @@ def _write_nuopc(self, out_file, groups, sorted_groups): for value in values[1:]: if value == "True" or value == "False": - value = value.replace("True",".true.").replace("False",".false.") + value = value.replace("True", ".true.").replace( + "False", ".false." + ) if len(lines[-1]) + len(value) <= 77: lines[-1] += ", " + value else: @@ -1275,12 +1327,13 @@ def _write_nuopc(self, out_file, groups, sorted_groups): lines.append(" " + value) lines[-1] += "\n" for line in lines: - line = line.replace('"','') + line = line.replace('"', "") out_file.write(line) if "_attribute" in group_name: out_file.write("::\n\n") + class _NamelistEOF(Exception): """Exception thrown for an unexpected end-of-file in a namelist. @@ -1325,7 +1378,7 @@ def __str__(self): return string -class _NamelistParser(object): # pylint:disable=too-few-public-methods +class _NamelistParser(object): # pylint:disable=too-few-public-methods """Class to validate and read from Fortran namelist input. @@ -1375,7 +1428,7 @@ def _next(self): # way to do this is to just advance. 
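Stepping back to the value formatting shown in the writer hunks above: lists are packed onto lines of at most about 77 characters before continuing on the next line. A standalone sketch of that wrapping rule (hypothetical helper, not part of the module):

    def wrap_values(name, values, width=77):
        # Start with "  name = first" and append ", value" while it fits,
        # otherwise end the line with a comma and continue indented.
        lines = ["  {} = {}".format(name, values[0])]
        for value in values[1:]:
            if len(lines[-1]) + len(value) <= width:
                lines[-1] += ", " + value
            else:
                lines[-1] += ",\n"
                lines.append("      " + value)
        return "".join(lines) + "\n"

    print(wrap_values("bar", [str(i * 7) for i in range(30)]))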
if self._pos == self._len - 1: self._advance() - return self._text[self._pos+1] + return self._text[self._pos + 1] def _advance(self, nchars=1, check_eof=False): r"""Advance the parser's current position by `nchars` characters. @@ -1420,17 +1473,16 @@ def _advance(self, nchars=1, check_eof=False): >>> x._advance(check_eof=True) True """ - assert nchars >= 0, \ - "_NamelistParser attempted to 'advance' backwards" + assert nchars >= 0, "_NamelistParser attempted to 'advance' backwards" new_pos = min(self._pos + nchars, self._len) - consumed_text = self._text[self._pos:new_pos] + consumed_text = self._text[self._pos : new_pos] self._pos = new_pos - lines = consumed_text.count('\n') + lines = consumed_text.count("\n") self._line += lines # If we started a new line, set self._col to be relative to the start of # the current line. if lines > 0: - self._col = -(consumed_text.rfind('\n') + 1) + self._col = -(consumed_text.rfind("\n") + 1) self._col += len(consumed_text) end_of_file = new_pos == self._len if check_eof: @@ -1480,8 +1532,8 @@ def _eat_whitespace(self, allow_initial_comment=False): eaten = False comment_allowed = allow_initial_comment while True: - while self._curr() in (' ', '\n'): - comment_allowed |= self._curr() == '\n' + while self._curr() in (" ", "\n"): + comment_allowed |= self._curr() == "\n" eaten = True self._advance() # Note the reliance on short-circuit `and` here. @@ -1515,9 +1567,9 @@ def _eat_comment(self): >>> shouldRaise(_NamelistEOF, x._eat_comment) """ - if self._curr() != '!': + if self._curr() != "!": return False - newline_pos = self._text[self._pos:].find('\n') + newline_pos = self._text[self._pos :].find("\n") if newline_pos == -1: # This is the last line. self._advance(self._len - self._pos) @@ -1546,7 +1598,9 @@ def _expect_char(self, chars): char_description = repr(str(chars)) else: char_description = "one of the characters in {!r}".format(str(chars)) - raise _NamelistParseError("expected {} but found {!r}".format(char_description, str(self._curr()))) + raise _NamelistParseError( + "expected {} but found {!r}".format(char_description, str(self._curr())) + ) def _parse_namelist_group_name(self): r"""Parses and returns a namelist group name at the current position. 
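The bookkeeping in _advance above reduces to counting newlines in the consumed text and, when at least one is present, measuring the column from the last newline. A self-contained sketch of that rule:

    def advance(text, pos, line, col, nchars):
        # Consume nchars characters, updating (pos, line, col) the way
        # _NamelistParser._advance does above.
        consumed = text[pos : pos + nchars]
        line += consumed.count("\n")
        if "\n" in consumed:
            # Column restarts after the last newline in the consumed span.
            col = -(consumed.rfind("\n") + 1)
        col += len(consumed)
        return pos + nchars, line, col

    # Consuming "ab\nc" lands on 'd': line 2, column 1.
    assert advance("ab\ncd", 0, 1, 0, 4) == (4, 2, 1)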
@@ -1614,28 +1668,30 @@ def _parse_variable_name(self, allow_equals=True): 'foo' """ old_pos = self._pos - separators = (' ', '\n', '=', '+') if allow_equals else (' ', '\n') + separators = (" ", "\n", "=", "+") if allow_equals else (" ", "\n") while self._curr() not in separators: self._advance() - text = self._text[old_pos:self._pos] - if '(' in text: - expect(')' in text,"Parsing error ") - elif ')' in text: - expect(False,"Parsing error ") + text = self._text[old_pos : self._pos] + if "(" in text: + expect(")" in text, "Parsing error ") + elif ")" in text: + expect(False, "Parsing error ") # @ is used in a namelist to put the same namelist variable in multiple groups # in the write phase, all characters in the namelist variable name after # the @ and including the @ should be removed if "%" in text: - text_check = re.sub('%.+$', "", text) + text_check = re.sub("%.+$", "", text) elif "@" in text: - text_check = re.sub('@.+$', "", text) + text_check = re.sub("@.+$", "", text) else: text_check = text if not is_valid_fortran_name(text_check): if re.search(r".*\(.*\,.*\)", text_check): - err_str = "Multiple dimensions not supported in CIME namelist variables {!r}".format(str(text)) + err_str = "Multiple dimensions not supported in CIME namelist variables {!r}".format( + str(text) + ) else: err_str = "{!r} is not a valid variable name".format(str(text)) raise _NamelistParseError(err_str) @@ -1676,9 +1732,11 @@ def _parse_character_literal(self): self._advance(2) else: break - text = self._text[old_pos:self._pos+1] + text = self._text[old_pos : self._pos + 1] if not is_valid_fortran_namelist_literal("character", text): - raise _NamelistParseError("{} is not a valid character literal".format(text)) + raise _NamelistParseError( + "{} is not a valid character literal".format(text) + ) return text def _parse_complex_literal(self): @@ -1695,11 +1753,13 @@ def _parse_complex_literal(self): """ old_pos = self._pos - while self._curr() != ')': + while self._curr() != ")": self._advance() - text = self._text[old_pos:self._pos+1] + text = self._text[old_pos : self._pos + 1] if not is_valid_fortran_namelist_literal("complex", text): - raise _NamelistParseError("{!r} is not a valid complex literal".format(str(text))) + raise _NamelistParseError( + "{!r} is not a valid complex literal".format(str(text)) + ) return text def _look_ahead_for_equals(self, pos): @@ -1718,8 +1778,8 @@ def _look_ahead_for_equals(self, pos): False """ for test_pos in range(pos, self._len): - if self._text[test_pos] not in (' ', '\n'): - if self._text[test_pos] == '=': + if self._text[test_pos] not in (" ", "\n"): + if self._text[test_pos] == "=": return True else: break @@ -1741,8 +1801,8 @@ def _look_ahead_for_plusequals(self, pos): False """ for test_pos in range(pos, self._len): - if self._text[test_pos] not in (' ', '\n'): - if self._text[test_pos] == '+': + if self._text[test_pos] not in (" ", "\n"): + if self._text[test_pos] == "+": return self._look_ahead_for_equals(test_pos + 1) else: break @@ -1842,38 +1902,38 @@ def _parse_literal(self, allow_name=False, allow_eof_end=False): """ # Deal with empty input string. if allow_eof_end and self._pos == self._len: - return '' + return "" # Deal with a repeated value prefix. 
old_pos = self._pos - if FORTRAN_REPEAT_PREFIX_REGEX.search(self._text[self._pos:]): + if FORTRAN_REPEAT_PREFIX_REGEX.search(self._text[self._pos :]): allow_name = False - while self._curr() != '*': + while self._curr() != "*": self._advance() if self._advance(check_eof=allow_eof_end): # In case the file ends with the 'r*' form of null value. return self._text[old_pos:] - prefix = self._text[old_pos:self._pos] + prefix = self._text[old_pos : self._pos] # Deal with delimited literals. if self._curr() in ('"', "'"): literal = self._parse_character_literal() self._advance(check_eof=allow_eof_end) return prefix + literal - if self._curr() == '(': + if self._curr() == "(": literal = self._parse_complex_literal() self._advance(check_eof=allow_eof_end) return prefix + literal # Deal with non-delimited literals. new_pos = self._pos - separators = [' ', '\n', ',', '/'] + separators = [" ", "\n", ",", "/"] if allow_name: - separators.append('=') - separators.append('+') + separators.append("=") + separators.append("+") while new_pos != self._len and self._text[new_pos] not in separators: # allow commas if they are inside () - if self._text[new_pos] == '(': - separators.remove(',') - elif self._text[new_pos] == ')': - separators.append(',') + if self._text[new_pos] == "(": + separators.remove(",") + elif self._text[new_pos] == ")": + separators.append(",") new_pos += 1 if not allow_eof_end and new_pos == self._len: @@ -1887,10 +1947,14 @@ def _parse_literal(self, allow_name=False, allow_eof_end=False): return self._advance(new_pos - self._pos, check_eof=allow_eof_end) - text = self._text[old_pos:self._pos] - if not any(is_valid_fortran_namelist_literal(type_, text) - for type_ in ("integer", "logical", "real")): - raise _NamelistParseError("expected literal value, but got {!r}".format(str(text))) + text = self._text[old_pos : self._pos] + if not any( + is_valid_fortran_namelist_literal(type_, text) + for type_ in ("integer", "logical", "real") + ): + raise _NamelistParseError( + "expected literal value, but got {!r}".format(str(text)) + ) return text def _expect_separator(self, allow_eof=False): @@ -1969,10 +2033,10 @@ def _expect_separator(self, allow_eof=False): if allow_eof and self._pos == self._len: return False # Must actually be at a value separator. - self._expect_char(' \n,/') + self._expect_char(" \n,/") try: self._eat_whitespace() - if self._curr() == '/': + if self._curr() == "/": if allow_eof: raise _NamelistParseError(errstring) else: @@ -1983,7 +2047,7 @@ def _expect_separator(self, allow_eof=False): else: raise try: - if self._curr() == ',': + if self._curr() == ",": self._advance() self._eat_whitespace(allow_initial_comment=True) except _NamelistEOF: @@ -2051,9 +2115,9 @@ def _parse_name_and_values(self, allow_eof_end=False): self._eat_whitespace() # check to see if we have a "+=" - if self._curr() == '+': + if self._curr() == "+": self._advance() - addto=True # tell parser that we want to add to dictionary values + addto = True # tell parser that we want to add to dictionary values self._expect_char("=") try: self._advance() @@ -2061,7 +2125,7 @@ def _parse_name_and_values(self, allow_eof_end=False): except _NamelistEOF: # If we hit the end of file, return a name assigned to a null value. if allow_eof_end: - return name, [''], addto + return name, [""], addto else: raise # Expect at least one literal, even if it's a null value. @@ -2069,15 +2133,16 @@ def _parse_name_and_values(self, allow_eof_end=False): # While we haven't reached the end of the namelist group... 
while self._expect_separator(allow_eof=allow_eof_end): # see if we can parse a literal (we might get a variable name)... - literal = self._parse_literal(allow_name=True, - allow_eof_end=allow_eof_end) + literal = self._parse_literal(allow_name=True, allow_eof_end=allow_eof_end) if literal is None: break # and if it really is a literal, add it. values.append(literal) - (minindex, maxindex, step) = get_fortran_variable_indices(name,allow_any_len=True) + (minindex, maxindex, step) = get_fortran_variable_indices( + name, allow_any_len=True + ) if (minindex > 1 or maxindex > minindex or step > 1) and maxindex > 0: - arraylen =max(0,1 + ((maxindex - minindex)/step)) + arraylen = max(0, 1 + ((maxindex - minindex) / step)) expect(len(values) <= arraylen, "Too many values for array {}".format(name)) return name, values, addto @@ -2142,10 +2207,12 @@ def _parse_namelist_group(self): if not self._groupless: # Make sure that this is the first time we've seen this group. if group_name in self._settings: - raise _NamelistParseError("Namelist group {!r} encountered twice.".format(str(group_name))) + raise _NamelistParseError( + "Namelist group {!r} encountered twice.".format(str(group_name)) + ) self._settings[group_name] = {} self._eat_whitespace() - while self._curr() != '/': + while self._curr() != "/": name, values, addto = self._parse_name_and_values() dsettings = [] if self._groupless: @@ -2219,7 +2286,7 @@ def parse_namelist(self): except _NamelistEOF: return self._settings # Handle case with no namelist groups. - if self._groupless and self._curr() != '&': + if self._groupless and self._curr() != "&": while self._pos < self._len: name, values, addto = self._parse_name_and_values(allow_eof_end=True) if name in self._settings: diff --git a/CIME/nmlgen.py b/CIME/nmlgen.py index bc61d58a135..149f2f142dd 100644 --- a/CIME/nmlgen.py +++ b/CIME/nmlgen.py @@ -11,9 +11,15 @@ import hashlib from CIME.XML.standard_module_setup import * -from CIME.namelist import Namelist, parse, \ - character_literal_to_string, string_to_character_literal, \ - expand_literal_list, compress_literal_list, merge_literal_lists +from CIME.namelist import ( + Namelist, + parse, + character_literal_to_string, + string_to_character_literal, + expand_literal_list, + compress_literal_list, + merge_literal_lists, +) from CIME.XML.namelist_definition import NamelistDefinition from CIME.utils import expect, safe_copy from CIME.XML.stream import Stream @@ -58,13 +64,14 @@
""" + class NamelistGenerator(object): """Utility class for generating namelists for a given component.""" _streams_variables = [] - #pylint:disable=too-many-arguments + # pylint:disable=too-many-arguments def __init__(self, case, definition_files, files=None): """Construct a namelist generator. @@ -76,7 +83,7 @@ def __init__(self, case, definition_files, files=None): """ # Save off important information from inputs. self._case = case - self._din_loc_root = case.get_value('DIN_LOC_ROOT') + self._din_loc_root = case.get_value("DIN_LOC_ROOT") # Create definition object - this will validate the xml schema in the definition file self._definition = NamelistDefinition(definition_files[0], files=files) @@ -102,8 +109,14 @@ def __enter__(self): def __exit__(self, *_): return False - def init_defaults(self, infiles, config, skip_groups=None, skip_entry_loop=False, - skip_default_for_groups=None): + def init_defaults( + self, + infiles, + config, + skip_groups=None, + skip_entry_loop=False, + skip_default_for_groups=None, + ): """Return array of names of all definition nodes infiles should be a list of file paths, each one giving namelist settings that @@ -193,23 +206,26 @@ def quote_string(string): Does nothing if the string appears to be quoted already. """ - if string == "" or \ - (string[0] not in ('"', "'") or string[0] != string[-1]): + if string == "" or (string[0] not in ('"', "'") or string[0] != string[-1]): string = string_to_character_literal(string) return string def _to_python_value(self, name, literals): """Transform a literal list as needed for `get_value`.""" - var_type, _, var_size, = self._definition.split_type_string(name) + ( + var_type, + _, + var_size, + ) = self._definition.split_type_string(name) if len(literals) > 0 and literals[0] is not None: values = expand_literal_list(literals) else: return "" for i, scalar in enumerate(values): - if scalar == '': + if scalar == "": values[i] = None - elif var_type == 'character': + elif var_type == "character": values[i] = character_literal_to_string(scalar) if var_size == 1: @@ -223,14 +239,18 @@ def _to_namelist_literals(self, name, values): This is the inverse of `_to_python_value`, except that many of the changes have potentially already been performed. """ - var_type, _, var_size, = self._definition.split_type_string(name) + ( + var_type, + _, + var_size, + ) = self._definition.split_type_string(name) if var_size == 1 and not isinstance(values, list): values = [values] for i, scalar in enumerate(values): if scalar is None: values[i] = "" - elif var_type == 'character': + elif var_type == "character": expect(not isinstance(scalar, list), name) values[i] = self.quote_string(scalar) @@ -275,7 +295,11 @@ def set_value(self, name, value): """ var_group = self._definition.get_group(name) literals = self._to_namelist_literals(name, value) - _, _, var_size, = self._definition.split_type_string(name) + ( + _, + _, + var_size, + ) = self._definition.split_type_string(name) if len(literals) > 0 and literals[0] is not None: self._namelist.set_variable_value(var_group, name, literals, var_size) @@ -308,38 +332,46 @@ def get_default(self, name, config=None, allow_none=False): exists. This behavior is suppressed within single-quoted strings (similar to parameter expansion in shell scripts). 
""" - default = self._definition.get_value_match(name, attributes=config, exact_match=False) + default = self._definition.get_value_match( + name, attributes=config, exact_match=False + ) if default is None: expect(allow_none, "No default value found for {}.".format(name)) return None default = expand_literal_list(default) - var_type,_,_ = self._definition.split_type_string(name) + var_type, _, _ = self._definition.split_type_string(name) for i, scalar in enumerate(default): # Skip single-quoted strings. - if var_type == 'character' and scalar != '' and \ - scalar[0] == scalar[-1] == "'": + if ( + var_type == "character" + and scalar != "" + and scalar[0] == scalar[-1] == "'" + ): continue match = _var_ref_re.search(scalar) while match: - env_val = self._case.get_value(match.group('name')) + env_val = self._case.get_value(match.group("name")) if env_val is not None: scalar = scalar.replace(match.group(0), str(env_val), 1) match = _var_ref_re.search(scalar) else: scalar = None - logger.warning("Namelist default for variable {} refers to unknown XML variable {}.". - format(name, match.group('name'))) + logger.warning( + "Namelist default for variable {} refers to unknown XML variable {}.".format( + name, match.group("name") + ) + ) match = None default[i] = scalar # Deal with missing quotes. - if var_type == 'character': + if var_type == "character": for i, scalar in enumerate(default): # Preserve null values. - if scalar != '': + if scalar != "": default[i] = self.quote_string(scalar) default = self._to_python_value(name, default) @@ -356,7 +388,7 @@ def clean_streams(self): self._streams_namelists["streams"] = [] def new_instance(self): - """ Clean the object just enough to introduce a new instance """ + """Clean the object just enough to introduce a new instance""" self.clean_streams() self._namelist.clean_groups() @@ -391,10 +423,10 @@ def _sub_fields(self, varnames): if not line: continue if "%glc" in line: - if self._case.get_value('GLC_NEC') == 0: + if self._case.get_value("GLC_NEC") == 0: glc_nec_indices = [] else: - glc_nec_indices = range(self._case.get_value('GLC_NEC')+1) + glc_nec_indices = range(self._case.get_value("GLC_NEC") + 1) for i in glc_nec_indices: new_lines.append(line.replace("%glc", "{:02d}".format(i))) else: @@ -410,7 +442,7 @@ def _days_in_month(month, year=1): """ month_start = datetime.date(year, month, 1) if month == 12: - next_year = year+1 + next_year = year + 1 next_month = 1 else: next_year = year @@ -452,19 +484,21 @@ def _sub_paths(self, filenames, year_start, year_end): if match is None: new_lines.append(line) continue - if match.group('digits'): - year_format = "{:0"+match.group('digits')+"d}" + if match.group("digits"): + year_format = "{:0" + match.group("digits") + "d}" else: year_format = "{:04d}" - for year in range(year_start, year_end+1): - if match.group('day'): + for year in range(year_start, year_end + 1): + if match.group("day"): for month in range(1, 13): days = self._days_in_month(month) - for day in range(1, days+1): - date_string = (year_format + "-{:02d}-{:02d}").format(year, month, day) + for day in range(1, days + 1): + date_string = (year_format + "-{:02d}-{:02d}").format( + year, month, day + ) new_line = line.replace(match.group(0), date_string) new_lines.append(new_line) - elif match.group('month'): + elif match.group("month"): for month in range(1, 13): date_string = (year_format + "-{:02d}").format(year, month) new_line = line.replace(match.group(0), date_string) @@ -480,13 +514,18 @@ def _add_xml_delimiter(list_to_deliminate, 
delimiter): expect(delimiter and not " " in delimiter, "Missing or badly formed delimiter") pred = "<{}>".format(delimiter) postd = "".format(delimiter) - for n,_ in enumerate(list_to_deliminate): + for n, _ in enumerate(list_to_deliminate): list_to_deliminate[n] = pred + list_to_deliminate[n].strip() + postd return "\n ".join(list_to_deliminate) - - def create_stream_file_and_update_shr_strdata_nml(self, config, caseroot, #pylint:disable=too-many-locals - stream, stream_path, data_list_path): + def create_stream_file_and_update_shr_strdata_nml( + self, + config, + caseroot, # pylint:disable=too-many-locals + stream, + stream_path, + data_list_path, + ): """Write the pseudo-XML file corresponding to a given stream. Arguments: @@ -501,13 +540,14 @@ def create_stream_file_and_update_shr_strdata_nml(self, config, caseroot, #pylin if os.path.exists(stream_path): os.unlink(stream_path) - user_stream_path = os.path.join(caseroot, "user_"+os.path.basename(stream_path)) + user_stream_path = os.path.join( + caseroot, "user_" + os.path.basename(stream_path) + ) # Use the user's stream file, or create one if necessary. config = config.copy() config["stream"] = stream - # Stream-specific configuration. if os.path.exists(user_stream_path): safe_copy(user_stream_path, stream_path) @@ -539,7 +579,7 @@ def create_stream_file_and_update_shr_strdata_nml(self, config, caseroot, #pylin domain_filenames = self._sub_paths(domain_filenames, year_start, year_end) # Overwrite domain_file if should be set from stream data - if domain_filenames == 'null': + if domain_filenames == "null": domain_filepath = data_filepath domain_filenames = data_filenames.splitlines()[0] @@ -553,27 +593,27 @@ def create_stream_file_and_update_shr_strdata_nml(self, config, caseroot, #pylin offset=offset, ) - with open(stream_path, 'w') as stream_file: + with open(stream_path, "w") as stream_file: stream_file.write(stream_file_text) lines_hash = self._get_input_file_hash(data_list_path) - with open(data_list_path, 'a') as input_data_list: + with open(data_list_path, "a") as input_data_list: for i, filename in enumerate(domain_filenames.split("\n")): - if filename.strip() == '': + if filename.strip() == "": continue filepath, filename = os.path.split(filename) if not filepath: filepath = os.path.join(domain_filepath, filename.strip()) - string = "domain{:d} = {}\n".format(i+1, filepath) - hashValue = hashlib.md5(string.rstrip().encode('utf-8')).hexdigest() + string = "domain{:d} = {}\n".format(i + 1, filepath) + hashValue = hashlib.md5(string.rstrip().encode("utf-8")).hexdigest() if hashValue not in lines_hash: input_data_list.write(string) for i, filename in enumerate(data_filenames.split("\n")): - if filename.strip() == '': + if filename.strip() == "": continue filepath = os.path.join(data_filepath, filename.strip()) - string = "file{:d} = {}\n".format(i+1, filepath) - hashValue = hashlib.md5(string.rstrip().encode('utf-8')).hexdigest() + string = "file{:d} = {}\n".format(i + 1, filepath) + hashValue = hashlib.md5(string.rstrip().encode("utf-8")).hexdigest() if hashValue not in lines_hash: input_data_list.write(string) self.update_shr_strdata_nml(config, stream, stream_path) @@ -584,23 +624,34 @@ def update_shr_strdata_nml(self, config, stream, stream_path): This should be done once per stream, and it shouldn't usually be called directly, since `create_stream_file` calls this method itself. 
""" - assert config['stream'] == stream, \ - "config stream is {}, but input stream is {}".format(config['stream'], stream) + assert ( + config["stream"] == stream + ), "config stream is {}, but input stream is {}".format( + config["stream"], stream + ) # Double-check the years for sanity. year_start = int(self.get_default("strm_year_start", config)) year_end = int(self.get_default("strm_year_end", config)) year_align = int(self.get_default("strm_year_align", config)) - expect(year_end >= year_start, - "Stream {} starts at year {:d}, but ends at earlier year {:d}.".format(stream, year_start, year_end)) + expect( + year_end >= year_start, + "Stream {} starts at year {:d}, but ends at earlier year {:d}.".format( + stream, year_start, year_end + ), + ) # Add to streams file. - stream_string = "{} {:d} {:d} {:d}".format(os.path.basename(stream_path), - year_align, year_start, - year_end) + stream_string = "{} {:d} {:d} {:d}".format( + os.path.basename(stream_path), year_align, year_start, year_end + ) self._streams_namelists["streams"].append(stream_string) for variable in self._streams_variables: default = self.get_default(variable, config) - expect(len(default) == 1, - "Stream {} had multiple settings for variable {}.".format(stream, variable)) + expect( + len(default) == 1, + "Stream {} had multiple settings for variable {}.".format( + stream, variable + ), + ) self._streams_namelists[variable].append(default[0]) def set_abs_file_path(self, file_path): @@ -649,18 +700,22 @@ def add_default(self, name, value=None, ignore_abs_path=None): have_value = True default_literals = self._to_namelist_literals(name, default) current_literals = merge_literal_lists(default_literals, current_literals) - expect(have_value, "No default value found for {} with attributes {}.".format( - name, self._definition.get_attributes())) + expect( + have_value, + "No default value found for {} with attributes {}.".format( + name, self._definition.get_attributes() + ), + ) # Go through file names and prepend input data root directory for # absolute pathnames. var_type, _, var_size = self._definition.split_type_string(name) if var_type == "character" and ignore_abs_path is None: var_input_pathname = self._definition.get_input_pathname(name) - if var_input_pathname == 'abs': + if var_input_pathname == "abs": current_literals = expand_literal_list(current_literals) for i, literal in enumerate(current_literals): - if literal == '': + if literal == "": continue file_path = character_literal_to_string(literal) abs_file_path = self._convert_to_abs_file_path(file_path, name) @@ -685,28 +740,34 @@ def _convert_to_abs_file_path(self, file_path, name): # components with multiple grids (e.g., GLC). 
for one_file_path in file_path.split(GRID_SEP): # NOTE - these are hard-coded here and a better way is to make these extensible - if one_file_path == 'UNSET' or one_file_path == 'idmap' or one_file_path == 'idmap_ignore' or one_file_path == 'unset': + if ( + one_file_path == "UNSET" + or one_file_path == "idmap" + or one_file_path == "idmap_ignore" + or one_file_path == "unset" + ): abs_file_paths.append(one_file_path) - elif one_file_path in ('null','create_mesh'): + elif one_file_path in ("null", "create_mesh"): abs_file_paths.append(one_file_path) else: one_abs_file_path = self.set_abs_file_path(one_file_path) if not os.path.exists(one_abs_file_path): - logger.warning("File not found: {} = {}, will attempt to download in check_input_data phase".format( - name, one_abs_file_path)) + logger.warning( + "File not found: {} = {}, will attempt to download in check_input_data phase".format( + name, one_abs_file_path + ) + ) abs_file_paths.append(one_abs_file_path) return GRID_SEP.join(abs_file_paths) def create_shr_strdata_nml(self): - """Set defaults for `shr_strdata_nml` variables other than the variable domainfile """ + """Set defaults for `shr_strdata_nml` variables other than the variable domainfile""" self.add_default("datamode") - if self.get_value("datamode") != 'NULL': - self.add_default("streams", - value=self._streams_namelists["streams"]) + if self.get_value("datamode") != "NULL": + self.add_default("streams", value=self._streams_namelists["streams"]) for variable in self._streams_variables: - self.add_default(variable, - value=self._streams_namelists[variable]) + self.add_default(variable, value=self._streams_namelists[variable]) def get_group_variables(self, group_name): return self._namelist.get_group_variables(group_name) @@ -716,8 +777,8 @@ def _get_input_file_hash(self, data_list_path): if os.path.isfile(data_list_path): with open(data_list_path, "r") as input_data_list: for line in input_data_list: - hashValue = hashlib.md5(line.rstrip().encode('utf-8')).hexdigest() - logger.debug( "Found line {} with hash {}".format(line,hashValue)) + hashValue = hashlib.md5(line.rstrip().encode("utf-8")).hexdigest() + logger.debug("Found line {} with hash {}".format(line, hashValue)) lines_hash.add(hashValue) return lines_hash @@ -728,20 +789,28 @@ def _write_input_files(self, data_list_path): with open(data_list_path, "a") as input_data_list: for group_name in self._namelist.get_group_names(): for variable_name in self._namelist.get_variable_names(group_name): - input_pathname = self._definition.get_node_element_info(variable_name, "input_pathname") + input_pathname = self._definition.get_node_element_info( + variable_name, "input_pathname" + ) if input_pathname is not None: # This is where we end up for all variables that are paths # to input data files. 
- literals = self._namelist.get_variable_value(group_name, variable_name) + literals = self._namelist.get_variable_value( + group_name, variable_name + ) for literal in literals: file_path = character_literal_to_string(literal) - self._add_file_to_input_data_list(input_data_list=input_data_list, - variable_name=variable_name, - file_path=file_path, - input_pathname=input_pathname, - lines_hash=lines_hash) - - def _add_file_to_input_data_list(self, input_data_list, variable_name, file_path, input_pathname, lines_hash): + self._add_file_to_input_data_list( + input_data_list=input_data_list, + variable_name=variable_name, + file_path=file_path, + input_pathname=input_pathname, + lines_hash=lines_hash, + ) + + def _add_file_to_input_data_list( + self, input_data_list, variable_name, file_path, input_pathname, lines_hash + ): """Add one file to the input data list, if needed It's possible that file_path actually contains multiple files delimited by @@ -759,24 +828,27 @@ def _add_file_to_input_data_list(self, input_data_list, variable_name, file_path """ for one_file_path in file_path.split(GRID_SEP): # NOTE - these are hard-coded here and a better way is to make these extensible - if one_file_path == 'UNSET' or one_file_path == 'idmap' or one_file_path == 'idmap_ignore': + if ( + one_file_path == "UNSET" + or one_file_path == "idmap" + or one_file_path == "idmap_ignore" + ): continue - if input_pathname == 'abs': + if input_pathname == "abs": # No further mangling needed for absolute paths. # At this point, there are overwrites that should be ignored if not os.path.isabs(one_file_path): continue else: pass - elif input_pathname.startswith('rel:'): + elif input_pathname.startswith("rel:"): # The part past "rel" is the name of a variable that # this variable specifies its path relative to. root_var = input_pathname[4:] root_dir = self.get_value(root_var) one_file_path = os.path.join(root_dir, one_file_path) else: - expect(False, - "Bad input_pathname value: {}.".format(input_pathname)) + expect(False, "Bad input_pathname value: {}.".format(input_pathname)) # Write to the input data list. # @@ -784,14 +856,16 @@ def _add_file_to_input_data_list(self, input_data_list, variable_name, file_path # seems okay for check_input_data, but if it becomes a problem, we could # change this, e.g., appending an index to the end of variable_name. string = "{} = {}".format(variable_name, one_file_path) - hashValue = hashlib.md5(string.rstrip().encode('utf-8')).hexdigest() + hashValue = hashlib.md5(string.rstrip().encode("utf-8")).hexdigest() if hashValue not in lines_hash: - logger.debug("Adding line {} with hash {}".format(string,hashValue)) - input_data_list.write(string+"\n") + logger.debug("Adding line {} with hash {}".format(string, hashValue)) + input_data_list.write(string + "\n") else: logger.debug("Line already in file {}".format(string)) - def write_output_file(self, namelist_file, data_list_path=None, groups=None, sorted_groups=True): + def write_output_file( + self, namelist_file, data_list_path=None, groups=None, sorted_groups=True + ): """Write out the namelists and input data files. 
The `namelist_file` and `modelio_file` are the locations to which the @@ -815,27 +889,36 @@ def write_output_file(self, namelist_file, data_list_path=None, groups=None, sor if data_list_path is not None: self._write_input_files(data_list_path) - # For MCT - def add_nmlcontents(self, filename, group, append=True, format_="nmlcontents", sorted_groups=True): - """ Write only contents of nml group """ - self._namelist.write(filename, groups=[group], append=append, format_=format_, sorted_groups=sorted_groups) + def add_nmlcontents( + self, filename, group, append=True, format_="nmlcontents", sorted_groups=True + ): + """Write only contents of nml group""" + self._namelist.write( + filename, + groups=[group], + append=append, + format_=format_, + sorted_groups=sorted_groups, + ) def write_seq_maps(self, filename): - """ Write mct out seq_maps.rc""" + """Write mct out seq_maps.rc""" self._namelist.write(filename, groups=["seq_maps"], format_="rc") def write_modelio_file(self, filename): - """ Write mct component modelio files""" + """Write mct component modelio files""" self._namelist.write(filename, groups=["modelio", "pio_inparm"], format_="nml") # For NUOPC def write_nuopc_modelio_file(self, filename): - """ Write nuopc component modelio files""" + """Write nuopc component modelio files""" self._namelist.write(filename, groups=["pio_inparm"], format_="nml") - def write_nuopc_config_file(self, filename, data_list_path=None, sorted_groups=False): - """ Write the nuopc config file""" + def write_nuopc_config_file( + self, filename, data_list_path=None, sorted_groups=False + ): + """Write the nuopc config file""" self._definition.validate(self._namelist) groups = self._namelist.get_group_names() # write the config file diff --git a/CIME/provenance.py b/CIME/provenance.py index cd921c7dfe6..f89f6a68c5d 100644 --- a/CIME/provenance.py +++ b/CIME/provenance.py @@ -5,32 +5,45 @@ """ from CIME.XML.standard_module_setup import * -from CIME.utils import touch, gzip_existing_file, SharedArea, convert_to_babylonian_time, get_current_commit, get_current_submodule_status, indent_string, run_cmd, run_cmd_no_fail, safe_copy +from CIME.utils import ( + touch, + gzip_existing_file, + SharedArea, + convert_to_babylonian_time, + get_current_commit, + get_current_submodule_status, + indent_string, + run_cmd, + run_cmd_no_fail, + safe_copy, +) import tarfile, getpass, signal, glob, shutil, sys logger = logging.getLogger(__name__) + def _get_batch_job_id_for_syslog(case): """ mach_syslog only works on certain machines """ mach = case.get_value("MACH") try: - if mach in ['anvil', 'chrysalis', 'compy', 'cori-haswell', 'cori-knl']: + if mach in ["anvil", "chrysalis", "compy", "cori-haswell", "cori-knl"]: return os.environ["SLURM_JOB_ID"] - elif mach in ['theta']: + elif mach in ["theta"]: return os.environ["COBALT_JOBID"] - elif mach in ['summit']: + elif mach in ["summit"]: return os.environ["LSB_JOBID"] except KeyError: pass return None + def _extract_times(zipfiles, target_file): - contents ="Target Build_time\n" + contents = "Target Build_time\n" for zipfile in zipfiles: stat, output, _ = run_cmd("zgrep 'built in' {}".format(zipfile)) if stat == 0: @@ -44,8 +57,9 @@ def _extract_times(zipfiles, target_file): with open(target_file, "w") as fd: fd.write(contents) + def _run_git_cmd_recursively(cmd, srcroot, output): - """ Runs a git command recursively + """Runs a git command recursively Runs the git command in srcroot then runs it on each submodule. Then output from both commands is written to the output file. 
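# A standalone approximation of the recursive capture described above,
# using plain subprocess instead of CIME's run_cmd; the command list and
# srcroot are placeholders. On failure each call falls back to stderr,
# mirroring the "output if rc == 0 else err" pattern in the hunk that follows.
import subprocess

def example_git_everywhere(cmd, srcroot):
    top = subprocess.run(
        ["git"] + cmd, cwd=srcroot, capture_output=True, text=True
    )
    subs = subprocess.run(
        ["git", "submodule", "foreach", "--recursive", "git " + " ".join(cmd)],
        cwd=srcroot,
        capture_output=True,
        text=True,
    )
    return (
        top.stdout if top.returncode == 0 else top.stderr,
        subs.stdout if subs.returncode == 0 else subs.stderr,
    )

# example_git_everywhere(["status", "--short"], "/path/to/srcroot")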
@@ -53,13 +67,14 @@ def _run_git_cmd_recursively(cmd, srcroot, output): rc1, output1, err1 = run_cmd("git {}".format(cmd), from_dir=srcroot) rc2, output2, err2 = run_cmd( - "git submodule foreach --recursive \"git {}; echo\"".format(cmd), - from_dir=srcroot) + 'git submodule foreach --recursive "git {}; echo"'.format(cmd), from_dir=srcroot + ) with open(output, "w") as fd: fd.write((output1 if rc1 == 0 else err1) + "\n\n") fd.write((output2 if rc2 == 0 else err2) + "\n") + def _parse_dot_git_path(srcroot): dot_git_pattern = r"^(.*/\.git).*" @@ -69,11 +84,14 @@ def _parse_dot_git_path(srcroot): return m.group(1) + def _find_git_root(srcroot): gitroot = f"{srcroot}/.git" - expect(os.path.exists(gitroot), - f"{srcroot!r} is not a git repository, failed to collect provenance") + expect( + os.path.exists(gitroot), + f"{srcroot!r} is not a git repository, failed to collect provenance", + ) # Handle normal git repositories if os.path.isdir(gitroot): @@ -92,8 +110,9 @@ def _find_git_root(srcroot): # First group is the actual gitroot return m.group(1) + def _record_git_provenance(srcroot, exeroot, lid): - """ Records git provenance + """Records git provenance Records git status, diff and logs for main repo and all submodules. """ @@ -122,6 +141,7 @@ def _record_git_provenance(srcroot, exeroot, lid): config_prov = os.path.join(exeroot, "GIT_CONFIG.{}".format(lid)) safe_copy(config_src, config_prov, preserve_meta=False) + def _save_build_provenance_e3sm(case, lid): srcroot = case.get_value("SRCROOT") exeroot = case.get_value("EXEROOT") @@ -178,13 +198,25 @@ def _save_build_provenance_e3sm(case, lid): # For all the just-created post-build provenance files, symlink a generic name # to them to indicate that these are the most recent or active. - for item in ["GIT_DESCRIBE", "GIT_LOGS_HEAD", "GIT_SUBMODULE_STATUS", - "GIT_STATUS", "GIT_DIFF", "GIT_LOG", "GIT_CONFIG", - "GIT_REMOTE", "SourceMods", "build_environment", - "build_times"]: + for item in [ + "GIT_DESCRIBE", + "GIT_LOGS_HEAD", + "GIT_SUBMODULE_STATUS", + "GIT_STATUS", + "GIT_DIFF", + "GIT_LOG", + "GIT_CONFIG", + "GIT_REMOTE", + "SourceMods", + "build_environment", + "build_times", + ]: globstr = "{}/{}.{}*".format(exeroot, item, lid) matches = glob.glob(globstr) - expect(len(matches) < 2, "Multiple matches for glob {} should not have happened".format(globstr)) + expect( + len(matches) < 2, + "Multiple matches for glob {} should not have happened".format(globstr), + ) if matches: the_match = matches[0] generic_name = the_match.replace(".{}".format(lid), "") @@ -192,11 +224,12 @@ def _save_build_provenance_e3sm(case, lid): os.remove(generic_name) os.symlink(the_match, generic_name) -def _save_build_provenance_cesm(case, lid): # pylint: disable=unused-argument + +def _save_build_provenance_cesm(case, lid): # pylint: disable=unused-argument version = case.get_value("MODEL_VERSION") # version has already been recorded srcroot = case.get_value("SRCROOT") - manic = os.path.join("manage_externals","checkout_externals") + manic = os.path.join("manage_externals", "checkout_externals") manic_full_path = os.path.join(srcroot, manic) out = None if os.path.exists(manic_full_path): @@ -216,9 +249,14 @@ def _save_build_provenance_cesm(case, lid): # pylint: disable=unused-argument {manic}{args} (2) If you don't need provenance information, rebuild with --skip-provenance-check -""".format(out=indent_string(out, 4), err=indent_string(err, 4), - srcroot=srcroot, manic=manic, args=args) - expect(stat==0,errmsg) +""".format( + out=indent_string(out, 4), + 
err=indent_string(err, 4), + srcroot=srcroot, + manic=manic, + args=args, + ) + expect(stat == 0, errmsg) caseroot = case.get_value("CASEROOT") with open(os.path.join(caseroot, "CaseStatus"), "a") as fd: @@ -227,6 +265,7 @@ def _save_build_provenance_cesm(case, lid): # pylint: disable=unused-argument if out is not None: fd.write("{}\n".format(out)) + def save_build_provenance(case, lid=None): with SharedArea(): model = case.get_value("MODEL") @@ -237,6 +276,7 @@ def save_build_provenance(case, lid=None): elif model == "cesm": _save_build_provenance_cesm(case, lid) + def _save_prerun_timing_e3sm(case, lid): project = case.get_value("PROJECT", subgroup=case.get_primary_job()) if not case.is_save_timing_dir_project(project): @@ -244,24 +284,40 @@ def _save_prerun_timing_e3sm(case, lid): timing_dir = case.get_value("SAVE_TIMING_DIR") if timing_dir is None or not os.path.isdir(timing_dir): - logger.warning("SAVE_TIMING_DIR {} is not valid. E3SM requires a valid SAVE_TIMING_DIR to archive timing data.".format(timing_dir)) + logger.warning( + "SAVE_TIMING_DIR {} is not valid. E3SM requires a valid SAVE_TIMING_DIR to archive timing data.".format( + timing_dir + ) + ) return - logger.info("Archiving timing data and associated provenance in {}.".format(timing_dir)) + logger.info( + "Archiving timing data and associated provenance in {}.".format(timing_dir) + ) rundir = case.get_value("RUNDIR") blddir = case.get_value("EXEROOT") caseroot = case.get_value("CASEROOT") srcroot = case.get_value("SRCROOT") base_case = case.get_value("CASE") - full_timing_dir = os.path.join(timing_dir, "performance_archive", getpass.getuser(), base_case, lid) + full_timing_dir = os.path.join( + timing_dir, "performance_archive", getpass.getuser(), base_case, lid + ) if os.path.exists(full_timing_dir): - logger.warning("{} already exists. Skipping archive of timing data and associated provenance.".format(full_timing_dir)) + logger.warning( + "{} already exists. Skipping archive of timing data and associated provenance.".format( + full_timing_dir + ) + ) return try: os.makedirs(full_timing_dir) except OSError: - logger.warning("{} cannot be created. Skipping archive of timing data and associated provenance.".format(full_timing_dir)) + logger.warning( + "{} cannot be created. 
Skipping archive of timing data and associated provenance.".format( + full_timing_dir + ) + ) return mach = case.get_value("MACH") @@ -271,33 +327,51 @@ def _save_prerun_timing_e3sm(case, lid): job_id = _get_batch_job_id_for_syslog(case) if job_id is not None: if mach == "theta": - for cmd, filename in [("qstat -l --header JobID:JobName:User:Project:WallTime:QueuedTime:Score:RunTime:TimeRemaining:Nodes:State:Location:Mode:Command:Args:Procs:Queue:StartTime:attrs:Geometry", "qstatf"), - ("qstat -lf %s" % job_id, "qstatf_jobid"), - ("xtnodestat", "xtnodestat"), - ("xtprocadmin", "xtprocadmin")]: + for cmd, filename in [ + ( + "qstat -l --header JobID:JobName:User:Project:WallTime:QueuedTime:Score:RunTime:TimeRemaining:Nodes:State:Location:Mode:Command:Args:Procs:Queue:StartTime:attrs:Geometry", + "qstatf", + ), + ("qstat -lf %s" % job_id, "qstatf_jobid"), + ("xtnodestat", "xtnodestat"), + ("xtprocadmin", "xtprocadmin"), + ]: filename = "%s.%s" % (filename, lid) run_cmd_no_fail(cmd, arg_stdout=filename, from_dir=full_timing_dir) gzip_existing_file(os.path.join(full_timing_dir, filename)) elif mach in ["cori-haswell", "cori-knl"]: - for cmd, filename in [("sinfo -a -l", "sinfol"), ("scontrol show jobid %s" % job_id, "sqsf_jobid"), - # ("sqs -f", "sqsf"), - ("squeue -o '%.10i %.15P %.20j %.10u %.7a %.2t %.6D %.8C %.10M %.10l %.20S %.20V'", "squeuef"), - ("squeue -t R -o '%.10i %R'", "squeues")]: + for cmd, filename in [ + ("sinfo -a -l", "sinfol"), + ("scontrol show jobid %s" % job_id, "sqsf_jobid"), + # ("sqs -f", "sqsf"), + ( + "squeue -o '%.10i %.15P %.20j %.10u %.7a %.2t %.6D %.8C %.10M %.10l %.20S %.20V'", + "squeuef", + ), + ("squeue -t R -o '%.10i %R'", "squeues"), + ]: filename = "%s.%s" % (filename, lid) run_cmd_no_fail(cmd, arg_stdout=filename, from_dir=full_timing_dir) gzip_existing_file(os.path.join(full_timing_dir, filename)) elif mach in ["anvil", "chrysalis", "compy"]: - for cmd, filename in [("sinfo -l", "sinfol"), - ("squeue -o '%all' --job {}".format(job_id), "squeueall_jobid"), - ("squeue -o '%.10i %.10P %.15u %.20a %.2t %.6D %.8C %.12M %.12l %.20S %.20V %j'", "squeuef"), - ("squeue -t R -o '%.10i %R'", "squeues")]: + for cmd, filename in [ + ("sinfo -l", "sinfol"), + ("squeue -o '%all' --job {}".format(job_id), "squeueall_jobid"), + ( + "squeue -o '%.10i %.10P %.15u %.20a %.2t %.6D %.8C %.12M %.12l %.20S %.20V %j'", + "squeuef", + ), + ("squeue -t R -o '%.10i %R'", "squeues"), + ]: filename = "%s.%s" % (filename, lid) run_cmd_no_fail(cmd, arg_stdout=filename, from_dir=full_timing_dir) gzip_existing_file(os.path.join(full_timing_dir, filename)) elif mach == "summit": - for cmd, filename in [("bjobs -u all >", "bjobsu_all"), - ("bjobs -r -u all -o 'jobid slots exec_host' >", "bjobsru_allo"), - ("bjobs -l -UF %s >" % job_id, "bjobslUF_jobid")]: + for cmd, filename in [ + ("bjobs -u all >", "bjobsu_all"), + ("bjobs -r -u all -o 'jobid slots exec_host' >", "bjobsru_allo"), + ("bjobs -l -UF %s >" % job_id, "bjobslUF_jobid"), + ]: full_cmd = cmd + " " + filename run_cmd_no_fail(full_cmd + "." + lid, from_dir=full_timing_dir) gzip_existing_file(os.path.join(full_timing_dir, filename + "." 
+ lid)) @@ -305,7 +379,9 @@ def _save_prerun_timing_e3sm(case, lid): # copy/tar SourceModes source_mods_dir = os.path.join(caseroot, "SourceMods") if os.path.isdir(source_mods_dir): - with tarfile.open(os.path.join(full_timing_dir, "SourceMods.{}.tar.gz".format(lid)), "w:gz") as tfd: + with tarfile.open( + os.path.join(full_timing_dir, "SourceMods.{}.tar.gz".format(lid)), "w:gz" + ) as tfd: tfd.add(source_mods_dir, arcname="SourceMods") # Save various case configuration items @@ -324,24 +400,34 @@ def _save_prerun_timing_e3sm(case, lid): "Depends.{}".format(mach), "Depends.{}".format(compiler), "Depends.{}.{}".format(mach, compiler), - "software_environment.txt" - ] + "software_environment.txt", + ] for glob_to_copy in globs_to_copy: for item in glob.glob(os.path.join(caseroot, glob_to_copy)): - safe_copy(item, os.path.join(case_docs, "{}.{}".format(os.path.basename(item).lstrip("."), lid)), preserve_meta=False) + safe_copy( + item, + os.path.join( + case_docs, "{}.{}".format(os.path.basename(item).lstrip("."), lid) + ), + preserve_meta=False, + ) # Copy some items from build provenance - blddir_globs_to_copy = [ - "GIT_LOGS_HEAD", - "build_environment.txt", - "build_times.txt" - ] + blddir_globs_to_copy = ["GIT_LOGS_HEAD", "build_environment.txt", "build_times.txt"] for blddir_glob_to_copy in blddir_globs_to_copy: for item in glob.glob(os.path.join(blddir, blddir_glob_to_copy)): - safe_copy(item, os.path.join(full_timing_dir, os.path.basename(item) + "." + lid), preserve_meta=False) + safe_copy( + item, + os.path.join(full_timing_dir, os.path.basename(item) + "." + lid), + preserve_meta=False, + ) # Save state of repo - from_repo = srcroot if os.path.exists(os.path.join(srcroot, ".git")) else os.path.dirname(srcroot) + from_repo = ( + srcroot + if os.path.exists(os.path.join(srcroot, ".git")) + else os.path.dirname(srcroot) + ) desc = get_current_commit(tag=True, repo=from_repo) with open(os.path.join(full_timing_dir, "GIT_DESCRIBE.{}".format(lid)), "w") as fd: fd.write(desc) @@ -364,24 +450,58 @@ def _save_prerun_timing_e3sm(case, lid): # If requested, spawn a mach_syslog process to monitor job progress sample_interval = case.get_value("SYSLOG_N") if sample_interval > 0: - archive_checkpoints = os.path.join(full_timing_dir, "checkpoints.{}".format(lid)) + archive_checkpoints = os.path.join( + full_timing_dir, "checkpoints.{}".format(lid) + ) os.mkdir(archive_checkpoints) touch("{}/e3sm.log.{}".format(rundir, lid)) - syslog_jobid = run_cmd_no_fail("./mach_syslog {si} {jobid} {lid} {rundir} {rundir}/timing/checkpoints {ac} >& /dev/null & echo $!".format(si=sample_interval, jobid=job_id, lid=lid, rundir=rundir, ac=archive_checkpoints), - from_dir=os.path.join(caseroot, "Tools")) - with open(os.path.join(rundir, "syslog_jobid.{}".format(job_id)), "w") as fd: + syslog_jobid = run_cmd_no_fail( + "./mach_syslog {si} {jobid} {lid} {rundir} {rundir}/timing/checkpoints {ac} >& /dev/null & echo $!".format( + si=sample_interval, + jobid=job_id, + lid=lid, + rundir=rundir, + ac=archive_checkpoints, + ), + from_dir=os.path.join(caseroot, "Tools"), + ) + with open( + os.path.join(rundir, "syslog_jobid.{}".format(job_id)), "w" + ) as fd: fd.write("{}\n".format(syslog_jobid)) + +def _cleanup_spio_stats(case): + rundir = case.get_value("RUNDIR") + for item in glob.glob(os.path.join(rundir, "io_perf_summary*")): + os.remove(item) + + spio_stats_dir = os.path.join(rundir, "spio_stats") + if os.path.exists(spio_stats_dir): + shutil.rmtree(spio_stats_dir) + + try: + os.makedirs(spio_stats_dir) + except 
OSError: + logger.warning( + "{} could not be created. Scorpio I/O statistics will be stored in the run directory.".format( + spio_stats_dir + ) + ) + + def _save_prerun_provenance_e3sm(case, lid): + _cleanup_spio_stats(case) if case.get_value("SAVE_TIMING"): _save_prerun_timing_e3sm(case, lid) -def _save_prerun_provenance_cesm(case, lid): # pylint: disable=unused-argument + +def _save_prerun_provenance_cesm(case, lid): # pylint: disable=unused-argument pass + def _save_prerun_provenance_common(case, lid): - """ Saves common prerun provenance. - """ + """Saves common prerun provenance.""" run_dir = case.get_value("RUNDIR") base_preview_run = os.path.join(run_dir, "preview_run.log") @@ -396,6 +516,7 @@ def _save_prerun_provenance_common(case, lid): # Create copy rather than symlink, the log is automatically gzipped safe_copy(base_preview_run, preview_run) + def save_prerun_provenance(case, lid=None): with SharedArea(): # Always save env @@ -404,7 +525,9 @@ def save_prerun_provenance(case, lid=None): logdir = os.path.join(case.get_value("CASEROOT"), "logs") if not os.path.isdir(logdir): os.makedirs(logdir) - env_module.save_all_env_info(os.path.join(logdir, "run_environment.txt.{}".format(lid))) + env_module.save_all_env_info( + os.path.join(logdir, "run_environment.txt.{}".format(lid)) + ) _save_prerun_provenance_common(case, lid) @@ -414,13 +537,16 @@ def save_prerun_provenance(case, lid=None): elif model == "cesm": _save_prerun_provenance_cesm(case, lid) + def _save_postrun_provenance_cesm(case, lid): save_timing = case.get_value("SAVE_TIMING") if save_timing: rundir = case.get_value("RUNDIR") timing_dir = os.path.join("timing", case.get_value("CASE")) - shutil.move(os.path.join(rundir,"timing"), - os.path.join(timing_dir,"timing."+lid)) + shutil.move( + os.path.join(rundir, "timing"), os.path.join(timing_dir, "timing." + lid) + ) + def _save_postrun_timing_e3sm(case, lid): caseroot = case.get_value("CASEROOT") @@ -436,26 +562,35 @@ def _save_postrun_timing_e3sm(case, lid): atm_chunk_costs_src_path = os.path.join(rundir, "atm_chunk_costs.txt") if os.path.exists(atm_chunk_costs_src_path): - atm_chunk_costs_dst_path = os.path.join(rundir, "atm_chunk_costs.{}".format(lid)) + atm_chunk_costs_dst_path = os.path.join( + rundir, "atm_chunk_costs.{}".format(lid) + ) shutil.move(atm_chunk_costs_src_path, atm_chunk_costs_dst_path) gzip_existing_file(atm_chunk_costs_dst_path) # gzip memory profile log glob_to_copy = "memory.[0-4].*.log" for item in glob.glob(os.path.join(rundir, glob_to_copy)): - mprof_dst_path = os.path.join(os.path.dirname(item), (os.path.basename(item) + ".{}").format(lid)) + mprof_dst_path = os.path.join( + os.path.dirname(item), (os.path.basename(item) + ".{}").format(lid) + ) shutil.move(item, mprof_dst_path) gzip_existing_file(mprof_dst_path) - # Copy Scorpio I/O performance stats to a separate dir + tar + compress - spio_stats_dir = os.path.join(rundir, "spio_stats." + lid) - os.mkdir(spio_stats_dir) + # Copy Scorpio I/O performance stats in "spio_stats" to "spio_stats.[LID]" + tar + compress + spio_stats_dir = os.path.join(rundir, "spio_stats") + if not os.path.exists(spio_stats_dir): + os.mkdir(spio_stats_dir) + for item in glob.glob(os.path.join(rundir, "io_perf_summary*")): safe_copy(item, spio_stats_dir) - with tarfile.open("%s.tar.gz" % spio_stats_dir, "w:gz") as tfd: - tfd.add(spio_stats_dir, arcname=os.path.basename(spio_stats_dir)) - shutil.rmtree(spio_stats_dir) + spio_stats_job_dir = os.path.join(rundir, "spio_stats." 
+ lid) + shutil.copytree(spio_stats_dir, spio_stats_job_dir) + with tarfile.open("%s.tar.gz" % spio_stats_job_dir, "w:gz") as tfd: + tfd.add(spio_stats_job_dir, arcname=os.path.basename(spio_stats_job_dir)) + + shutil.rmtree(spio_stats_job_dir) gzip_existing_file(os.path.join(caseroot, "timing", "e3sm_timing_stats.%s" % lid)) @@ -473,7 +608,9 @@ def _save_postrun_timing_e3sm(case, lid): mach = case.get_value("MACH") base_case = case.get_value("CASE") - full_timing_dir = os.path.join(timing_dir, "performance_archive", getpass.getuser(), base_case, lid) + full_timing_dir = os.path.join( + timing_dir, "performance_archive", getpass.getuser(), base_case, lid + ) if not os.path.isdir(full_timing_dir): return @@ -520,8 +657,14 @@ def _save_postrun_timing_e3sm(case, lid): globs_to_copy.append(os.path.join(rundir, "spio_stats.{}.tar.gz".format(lid))) globs_to_copy.append(os.path.join(caseroot, "replay.sh")) # Can't use a single glob, similar files e.g. {filename}.{lid} get picked up. - bld_filenames = ["GIT_STATUS", "GIT_DIFF", "GIT_LOG", "GIT_REMOTE", - "GIT_CONFIG", "GIT_SUBMODULE_STATUS"] + bld_filenames = [ + "GIT_STATUS", + "GIT_DIFF", + "GIT_LOG", + "GIT_REMOTE", + "GIT_CONFIG", + "GIT_SUBMODULE_STATUS", + ] bld_globs = map(lambda x: f"bld/{x}", bld_filenames) globs_to_copy.extend(bld_globs) globs_to_copy.append("run/preview_run.log") @@ -531,7 +674,11 @@ def _save_postrun_timing_e3sm(case, lid): basename = os.path.basename(item) if basename != timing_saved_file: if lid not in basename and not basename.endswith(".gz"): - safe_copy(item, os.path.join(full_timing_dir, "{}.{}".format(basename, lid)), preserve_meta=False) + safe_copy( + item, + os.path.join(full_timing_dir, "{}.{}".format(basename, lid)), + preserve_meta=False, + ) else: safe_copy(item, full_timing_dir, preserve_meta=False) @@ -541,10 +688,12 @@ def _save_postrun_timing_e3sm(case, lid): if not filename.endswith(".gz"): gzip_existing_file(os.path.join(root, filename)) + def _save_postrun_provenance_e3sm(case, lid): if case.get_value("SAVE_TIMING"): _save_postrun_timing_e3sm(case, lid) + def save_postrun_provenance(case, lid=None): with SharedArea(): model = case.get_value("MODEL") @@ -555,16 +704,20 @@ def save_postrun_provenance(case, lid=None): elif model == "cesm": _save_postrun_provenance_cesm(case, lid) + _WALLTIME_BASELINE_NAME = "walltimes" -_WALLTIME_FILE_NAME = "walltimes" -_GLOBAL_MINUMUM_TIME = 900 -_GLOBAL_WIGGLE = 1000 -_WALLTIME_TOLERANCE = ( (600, 2.0), (1800, 1.5), (9999999999, 1.25) ) +_WALLTIME_FILE_NAME = "walltimes" +_GLOBAL_MINUMUM_TIME = 900 +_GLOBAL_WIGGLE = 1000 +_WALLTIME_TOLERANCE = ((600, 2.0), (1800, 1.5), (9999999999, 1.25)) + def get_recommended_test_time_based_on_past(baseline_root, test, raw=False): if baseline_root is not None: try: - the_path = os.path.join(baseline_root, _WALLTIME_BASELINE_NAME, test, _WALLTIME_FILE_NAME) + the_path = os.path.join( + baseline_root, _WALLTIME_BASELINE_NAME, test, _WALLTIME_FILE_NAME + ) if os.path.exists(the_path): last_line = int(open(the_path, "r").readlines()[-1].split()[0]) if raw: @@ -588,6 +741,7 @@ def get_recommended_test_time_based_on_past(baseline_root, test, raw=False): return None + def save_test_time(baseline_root, test, time_seconds, commit): if baseline_root is not None: try: @@ -604,16 +758,23 @@ def save_test_time(baseline_root, test, time_seconds, commit): # We NEVER want a failure here to kill the run logger.warning("Failed to store test time: {}".format(sys.exc_info()[1])) + _SUCCESS_BASELINE_NAME = "success-history" -_SUCCESS_FILE_NAME = 
"last-transitions" +_SUCCESS_FILE_NAME = "last-transitions" + def _read_success_data(baseline_root, test): - success_path = os.path.join(baseline_root, _SUCCESS_BASELINE_NAME, test, _SUCCESS_FILE_NAME) + success_path = os.path.join( + baseline_root, _SUCCESS_BASELINE_NAME, test, _SUCCESS_FILE_NAME + ) if os.path.exists(success_path): with open(success_path, "r") as fd: prev_results_raw = fd.read().strip() prev_results = prev_results_raw.split() - expect(len(prev_results) == 2, "Bad success data: '{}'".format(prev_results_raw)) + expect( + len(prev_results) == 2, + "Bad success data: '{}'".format(prev_results_raw), + ) else: prev_results = ["None", "None"] @@ -624,6 +785,7 @@ def _read_success_data(baseline_root, test): return success_path, prev_results + def _is_test_working(prev_results, src_root, testing=False): # If there is no history of success, prev run could not have succeeded and vice versa for failures if prev_results[0] is None: @@ -632,8 +794,14 @@ def _is_test_working(prev_results, src_root, testing=False): return True else: if not testing: - stat, out, err = run_cmd("git merge-base --is-ancestor {}".format(" ".join(prev_results)), from_dir=src_root) - expect(stat in [0, 1], "Unexpected status from ancestor check:\n{}\n{}".format(out, err)) + stat, out, err = run_cmd( + "git merge-base --is-ancestor {}".format(" ".join(prev_results)), + from_dir=src_root, + ) + expect( + stat in [0, 1], + "Unexpected status from ancestor check:\n{}\n{}".format(out, err), + ) else: # Hack for testing stat = 0 if prev_results[0] < prev_results[1] else 1 @@ -641,6 +809,7 @@ def _is_test_working(prev_results, src_root, testing=False): # stat == 0 tells us that pass is older than fail, so we must have failed, otherwise we passed return stat != 0 + def get_test_success(baseline_root, src_root, test, testing=False): """ Returns (was prev run success, commit when test last passed, commit when test last transitioned from pass to fail) @@ -659,6 +828,7 @@ def get_test_success(baseline_root, src_root, test, testing=False): return False, None, None + def save_test_success(baseline_root, src_root, test, succeeded, force_commit_test=None): """ Update success data accordingly based on succeeded flag @@ -672,19 +842,32 @@ def save_test_success(baseline_root, src_root, test, succeeded, force_commit_tes if not os.path.exists(the_dir): os.makedirs(the_dir) - prev_succeeded = _is_test_working(prev_results, src_root, testing=(force_commit_test is not None)) + prev_succeeded = _is_test_working( + prev_results, src_root, testing=(force_commit_test is not None) + ) # if no transition occurred then no update is needed - if succeeded or succeeded != prev_succeeded or (prev_results[0] is None and succeeded) or (prev_results[1] is None and not succeeded): + if ( + succeeded + or succeeded != prev_succeeded + or (prev_results[0] is None and succeeded) + or (prev_results[1] is None and not succeeded) + ): new_results = list(prev_results) - my_commit = force_commit_test if force_commit_test else get_current_commit(repo=src_root) + my_commit = ( + force_commit_test + if force_commit_test + else get_current_commit(repo=src_root) + ) if succeeded: - new_results[0] = my_commit # we passed + new_results[0] = my_commit # we passed else: - new_results[1] = my_commit # we transitioned to a failing state + new_results[1] = my_commit # we transitioned to a failing state - str_results = ["None" if item is None else item for item in new_results] + str_results = [ + "None" if item is None else item for item in new_results + ] with 
open(success_path, "w") as fd: fd.write("{}\n".format(" ".join(str_results))) diff --git a/CIME/simple_compare.py b/CIME/simple_compare.py index 99859f579fb..0f9d933fc34 100644 --- a/CIME/simple_compare.py +++ b/CIME/simple_compare.py @@ -4,53 +4,55 @@ ############################################################################### def _normalize_string_value(value, case): -############################################################################### + ############################################################################### """ Some of the strings are inherently prone to diffs, like file paths, etc. This function attempts to normalize that data so that it will not cause diffs. """ # Any occurance of case must be normalized because test-ids might not match - if (case is not None): - case_re = re.compile(r'{}[.]([GC])[.]([^./\s]+)'.format(case)) + if case is not None: + case_re = re.compile(r"{}[.]([GC])[.]([^./\s]+)".format(case)) value = case_re.sub("{}.ACTION.TESTID".format(case), value) - if ("/" in value): + if "/" in value: # File path, just return the basename return os.path.basename(value) - elif ("username" in value): - return '' - elif (".log." in value): + elif "username" in value: + return "" + elif ".log." in value: # Remove the part that's prone to diff components = value.split(".") return os.path.basename(".".join(components[0:-1])) else: return value + ############################################################################### def _skip_comments_and_whitespace(lines, idx): -############################################################################### + ############################################################################### """ Starting at idx, return next valid idx of lines that contains real data """ - if (idx == len(lines)): + if idx == len(lines): return idx - comment_re = re.compile(r'^[#!]') + comment_re = re.compile(r"^[#!]") lines_slice = lines[idx:] for line in lines_slice: line = line.strip() - if (comment_re.match(line) is not None or line == ""): + if comment_re.match(line) is not None or line == "": idx += 1 else: return idx return idx + ############################################################################### def _compare_data(gold_lines, comp_lines, case, offset_method=False): -############################################################################### + ############################################################################### """ >>> teststr = ''' ... 
data1 @@ -93,33 +95,35 @@ def _compare_data(gold_lines, comp_lines, case, offset_method=False): cnt = 0 gidx, cidx = 0, 0 gnum, cnum = len(gold_lines), len(comp_lines) - while (gidx < gnum or cidx < cnum): + while gidx < gnum or cidx < cnum: gidx = _skip_comments_and_whitespace(gold_lines, gidx) cidx = _skip_comments_and_whitespace(comp_lines, cidx) - if (gidx == gnum): - if (cidx == cnum): + if gidx == gnum: + if cidx == cnum: return comments, cnt else: comments += "Found extra lines\n" comments += "\n".join(comp_lines[cidx:]) + "\n" return comments, cnt - elif (cidx == cnum): + elif cidx == cnum: comments += "Missing lines\n" comments += "\n".join(gold_lines[gidx:1]) + "\n" return comments, cnt gold_value = gold_lines[gidx].strip() - gold_value = gold_value.replace('"',"'") + gold_value = gold_value.replace('"', "'") comp_value = comp_lines[cidx].strip() - comp_value = comp_value.replace('"',"'") + comp_value = comp_value.replace('"', "'") norm_gold_value = _normalize_string_value(gold_value, case) norm_comp_value = _normalize_string_value(comp_value, case) - if (norm_gold_value != norm_comp_value): + if norm_gold_value != norm_comp_value: comments += "Inequivalent lines {} != {}\n".format(gold_value, comp_value) - comments += " NORMALIZED: {} != {}\n".format(norm_gold_value, norm_comp_value) + comments += " NORMALIZED: {} != {}\n".format( + norm_gold_value, norm_comp_value + ) cnt += 1 if offset_method and (norm_gold_value != norm_comp_value): if gnum > cnum: @@ -132,9 +136,10 @@ def _compare_data(gold_lines, comp_lines, case, offset_method=False): return comments, cnt + ############################################################################### def compare_files(gold_file, compare_file, case=None): -############################################################################### + ############################################################################### """ Returns true if files are the same, comments are returned too: (success, comments) @@ -142,21 +147,26 @@ def compare_files(gold_file, compare_file, case=None): expect(os.path.exists(gold_file), "File not found: {}".format(gold_file)) expect(os.path.exists(compare_file), "File not found: {}".format(compare_file)) - comments, cnt = _compare_data(open(gold_file, "r").readlines(), - open(compare_file, "r").readlines(), case) + comments, cnt = _compare_data( + open(gold_file, "r").readlines(), open(compare_file, "r").readlines(), case + ) if cnt > 0: - comments2, cnt2 = _compare_data(open(gold_file, "r").readlines(), - open(compare_file, "r").readlines(), - case, offset_method=True) + comments2, cnt2 = _compare_data( + open(gold_file, "r").readlines(), + open(compare_file, "r").readlines(), + case, + offset_method=True, + ) if cnt2 < cnt: comments = comments2 return comments == "", comments + ############################################################################### def compare_runconfigfiles(gold_file, compare_file, case=None): -############################################################################### + ############################################################################### """ Returns true if files are the same, comments are returned too: (success, comments) @@ -164,7 +174,7 @@ def compare_runconfigfiles(gold_file, compare_file, case=None): expect(os.path.exists(gold_file), "File not found: {}".format(gold_file)) expect(os.path.exists(compare_file), "File not found: {}".format(compare_file)) - #create dictionary's of the runconfig files and compare them + # create dictionary's of the runconfig files and 
compare them gold_dict = _parse_runconfig(gold_file) compare_dict = _parse_runconfig(compare_file) @@ -179,27 +189,28 @@ def compare_runconfigfiles(gold_file, compare_file, case=None): return comments == "", comments + def _parse_runconfig(filename): runconfig = {} inrunseq = False insubsection = None - subsection_re = re.compile(r'\s*(\S+)::') - group_re = re.compile(r'\s*(\S+)\s*:\s*(\S+)') - var_re = re.compile(r'\s*(\S+)\s*=\s*(\S+)') + subsection_re = re.compile(r"\s*(\S+)::") + group_re = re.compile(r"\s*(\S+)\s*:\s*(\S+)") + var_re = re.compile(r"\s*(\S+)\s*=\s*(\S+)") with open(filename, "r") as fd: for line in fd: # remove comments - line = line.split('#')[0] + line = line.split("#")[0] subsection_match = subsection_re.match(line) group_match = group_re.match(line) var_match = var_re.match(line) - if re.match(r'\s*runSeq\s*::', line): - runconfig['runSeq'] = [] + if re.match(r"\s*runSeq\s*::", line): + runconfig["runSeq"] = [] inrunseq = True - elif re.match(r'\s*::\s*', line): + elif re.match(r"\s*::\s*", line): inrunseq = False elif inrunseq: - runconfig['runSeq'].append(line) + runconfig["runSeq"].append(line) elif subsection_match: insubsection = subsection_match.group(1) runconfig[insubsection] = {} @@ -209,19 +220,20 @@ def _parse_runconfig(filename): runconfig[insubsection][var_match.group(1)] = var_match.group(2) return runconfig + def findDiff(d1, d2, path="", case=None): comment = "" for k in d1.keys(): if not k in d2: comment += path + ":\n" - comment += k + " as key not in d2\n" + comment += k + " as key not in d2\n" else: if type(d1[k]) is dict: if path == "": path = k else: path = path + "->" + k - comment += findDiff(d1[k],d2[k], path=path, case=case) + comment += findDiff(d1[k], d2[k], path=path, case=case) else: if case in d1[k]: pass @@ -230,7 +242,7 @@ def findDiff(d1, d2, path="", case=None): elif "logfile" in k: pass elif d1[k] != d2[k]: - comment += path+":\n" - comment += " - {} : {}\n".format(k,d1[k]) - comment += " + {} : {}\n".format(k,d2[k]) + comment += path + ":\n" + comment += " - {} : {}\n".format(k, d1[k]) + comment += " + {} : {}\n".format(k, d2[k]) return comment diff --git a/CIME/six.py b/CIME/six.py index 81633a67164..4ea6fdc526d 100644 --- a/CIME/six.py +++ b/CIME/six.py @@ -38,15 +38,15 @@ PY34 = sys.version_info[0:2] >= (3, 4) if PY3: - string_types = str, - integer_types = int, - class_types = type, + string_types = (str,) + integer_types = (int,) + class_types = (type,) text_type = str binary_type = bytes MAXSIZE = sys.maxsize else: - string_types = basestring, + string_types = (basestring,) integer_types = (int, long) class_types = (type, types.ClassType) text_type = unicode @@ -58,9 +58,9 @@ else: # It's possible to have sizeof(long) != sizeof(Py_ssize_t). 
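# Why the probe below works: CPython coerces __len__'s return value to
# Py_ssize_t, so len(X()) with a reported length of 1 << 31 raises
# OverflowError exactly when Py_ssize_t is 32 bits wide. That detects the
# size type directly, which sys.maxint (a C long) cannot, e.g. on 64-bit
# Windows where long is 32 bits but Py_ssize_t is 64.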
class X(object): - def __len__(self): return 1 << 31 + try: len(X()) except OverflowError: @@ -84,7 +84,6 @@ def _import_module(name): class _LazyDescr(object): - def __init__(self, name): self.name = name @@ -101,7 +100,6 @@ def __get__(self, obj, tp): class MovedModule(_LazyDescr): - def __init__(self, name, old, new=None): super(MovedModule, self).__init__(name) if PY3: @@ -122,7 +120,6 @@ def __getattr__(self, attr): class _LazyModule(types.ModuleType): - def __init__(self, name): super(_LazyModule, self).__init__(name) self.__doc__ = self.__class__.__doc__ @@ -137,7 +134,6 @@ def __dir__(self): class MovedAttribute(_LazyDescr): - def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): super(MovedAttribute, self).__init__(name) if PY3: @@ -221,21 +217,26 @@ def get_code(self, fullname): Required, if is_package is implemented""" self.__get_module(fullname) # eventually raises ImportError return None + get_source = get_code # same as get_code + _importer = _SixMetaPathImporter(__name__) class _MovedItems(_LazyModule): """Lazy loading of moved objects""" + __path__ = [] # mark as package _moved_attributes = [ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), - MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), + MovedAttribute( + "filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse" + ), MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), MovedAttribute("intern", "__builtin__", "sys"), MovedAttribute("map", "itertools", "builtins", "imap", "map"), @@ -243,7 +244,9 @@ class _MovedItems(_LazyModule): MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), MovedAttribute("getoutput", "commands", "subprocess"), MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), - MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), + MovedAttribute( + "reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload" + ), MovedAttribute("reduce", "__builtin__", "functools"), MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), MovedAttribute("StringIO", "StringIO", "io"), @@ -252,7 +255,9 @@ class _MovedItems(_LazyModule): MovedAttribute("UserString", "UserString", "collections"), MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), - MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), + MovedAttribute( + "zip_longest", "itertools", "itertools", "izip_longest", "zip_longest" + ), MovedModule("builtins", "__builtin__"), MovedModule("configparser", "ConfigParser"), MovedModule("copyreg", "copy_reg"), @@ -266,7 +271,9 @@ class _MovedItems(_LazyModule): MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"), MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), - MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), + MovedModule( + "email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart" + ), MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), @@ -285,15 +292,12 @@ class _MovedItems(_LazyModule): MovedModule("tkinter_ttk", "ttk", 
"tkinter.ttk"), MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), - MovedModule("tkinter_colorchooser", "tkColorChooser", - "tkinter.colorchooser"), - MovedModule("tkinter_commondialog", "tkCommonDialog", - "tkinter.commondialog"), + MovedModule("tkinter_colorchooser", "tkColorChooser", "tkinter.colorchooser"), + MovedModule("tkinter_commondialog", "tkCommonDialog", "tkinter.commondialog"), MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), MovedModule("tkinter_font", "tkFont", "tkinter.font"), MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), - MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", - "tkinter.simpledialog"), + MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", "tkinter.simpledialog"), MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), @@ -339,7 +343,9 @@ class Module_six_moves_urllib_parse(_LazyModule): MovedAttribute("quote_plus", "urllib", "urllib.parse"), MovedAttribute("unquote", "urllib", "urllib.parse"), MovedAttribute("unquote_plus", "urllib", "urllib.parse"), - MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"), + MovedAttribute( + "unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes" + ), MovedAttribute("urlencode", "urllib", "urllib.parse"), MovedAttribute("splitquery", "urllib", "urllib.parse"), MovedAttribute("splittag", "urllib", "urllib.parse"), @@ -357,8 +363,11 @@ class Module_six_moves_urllib_parse(_LazyModule): Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes -_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), - "moves.urllib_parse", "moves.urllib.parse") +_importer._add_module( + Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), + "moves.urllib_parse", + "moves.urllib.parse", +) class Module_six_moves_urllib_error(_LazyModule): @@ -377,8 +386,11 @@ class Module_six_moves_urllib_error(_LazyModule): Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes -_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), - "moves.urllib_error", "moves.urllib.error") +_importer._add_module( + Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), + "moves.urllib_error", + "moves.urllib.error", +) class Module_six_moves_urllib_request(_LazyModule): @@ -429,8 +441,11 @@ class Module_six_moves_urllib_request(_LazyModule): Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes -_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), - "moves.urllib_request", "moves.urllib.request") +_importer._add_module( + Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), + "moves.urllib_request", + "moves.urllib.request", +) class Module_six_moves_urllib_response(_LazyModule): @@ -450,8 +465,11 @@ class Module_six_moves_urllib_response(_LazyModule): Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes -_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), - "moves.urllib_response", "moves.urllib.response") +_importer._add_module( + Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), + 
"moves.urllib_response", + "moves.urllib.response", +) class Module_six_moves_urllib_robotparser(_LazyModule): @@ -466,15 +484,21 @@ class Module_six_moves_urllib_robotparser(_LazyModule): setattr(Module_six_moves_urllib_robotparser, attr.name, attr) del attr -Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes +Module_six_moves_urllib_robotparser._moved_attributes = ( + _urllib_robotparser_moved_attributes +) -_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), - "moves.urllib_robotparser", "moves.urllib.robotparser") +_importer._add_module( + Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), + "moves.urllib_robotparser", + "moves.urllib.robotparser", +) class Module_six_moves_urllib(types.ModuleType): """Create a CIME.six.moves.urllib namespace that resembles the Python 3 namespace""" + __path__ = [] # mark as package parse = _importer._get_module("moves.urllib_parse") error = _importer._get_module("moves.urllib_error") @@ -483,10 +507,12 @@ class Module_six_moves_urllib(types.ModuleType): robotparser = _importer._get_module("moves.urllib_robotparser") def __dir__(self): - return ['parse', 'error', 'request', 'response', 'robotparser'] + return ["parse", "error", "request", "response", "robotparser"] + -_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), - "moves.urllib") +_importer._add_module( + Module_six_moves_urllib(__name__ + ".moves.urllib"), "moves.urllib" +) def add_move(move): @@ -526,19 +552,24 @@ def remove_move(name): try: advance_iterator = next except NameError: + def advance_iterator(it): return it.next() + + next = advance_iterator try: callable = callable except NameError: + def callable(obj): return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) if PY3: + def get_unbound_function(unbound): return unbound @@ -549,6 +580,7 @@ def create_unbound_method(func, cls): Iterator = object else: + def get_unbound_function(unbound): return unbound.im_func @@ -559,13 +591,13 @@ def create_unbound_method(func, cls): return types.MethodType(func, None, cls) class Iterator(object): - def next(self): return type(self).__next__(self) callable = callable -_add_doc(get_unbound_function, - """Get the function out of a possibly unbound function""") +_add_doc( + get_unbound_function, """Get the function out of a possibly unbound function""" +) get_method_function = operator.attrgetter(_meth_func) @@ -577,6 +609,7 @@ def next(self): if PY3: + def iterkeys(d, **kw): return iter(d.keys(**kw)) @@ -595,6 +628,7 @@ def iterlists(d, **kw): viewitems = operator.methodcaller("items") else: + def iterkeys(d, **kw): return d.iterkeys(**kw) @@ -615,26 +649,30 @@ def iterlists(d, **kw): _add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") _add_doc(itervalues, "Return an iterator over the values of a dictionary.") -_add_doc(iteritems, - "Return an iterator over the (key, value) pairs of a dictionary.") -_add_doc(iterlists, - "Return an iterator over the (key, [values]) pairs of a dictionary.") +_add_doc(iteritems, "Return an iterator over the (key, value) pairs of a dictionary.") +_add_doc( + iterlists, "Return an iterator over the (key, [values]) pairs of a dictionary." 
+) if PY3: + def b(s): return s.encode("latin-1") def u(s): return s + unichr = chr import struct + int2byte = struct.Struct(">B").pack del struct byte2int = operator.itemgetter(0) indexbytes = operator.getitem iterbytes = iter import io + StringIO = io.StringIO BytesIO = io.BytesIO _assertCountEqual = "assertCountEqual" @@ -645,12 +683,15 @@ def u(s): _assertRaisesRegex = "assertRaisesRegex" _assertRegex = "assertRegex" else: + def b(s): return s + # Workaround for standalone backslash def u(s): - return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") + return unicode(s.replace(r"\\", r"\\\\"), "unicode_escape") + unichr = unichr int2byte = chr @@ -659,8 +700,10 @@ def byte2int(bs): def indexbytes(buf, i): return ord(buf[i]) + iterbytes = functools.partial(itertools.imap, ord) import StringIO + StringIO = BytesIO = StringIO.StringIO _assertCountEqual = "assertItemsEqual" _assertRaisesRegex = "assertRaisesRegexp" @@ -695,7 +738,9 @@ def reraise(tp, value, tb=None): value = None tb = None + else: + def exec_(_code_, _globs_=None, _locs_=None): """Execute code in a namespace.""" if _globs_ is None: @@ -708,37 +753,45 @@ def exec_(_code_, _globs_=None, _locs_=None): _locs_ = _globs_ exec("""exec _code_ in _globs_, _locs_""") - exec_("""def reraise(tp, value, tb=None): + exec_( + """def reraise(tp, value, tb=None): try: raise tp, value, tb finally: tb = None -""") +""" + ) if sys.version_info[:2] == (3, 2): - exec_("""def raise_from(value, from_value): + exec_( + """def raise_from(value, from_value): try: if from_value is None: raise value raise value from from_value finally: value = None -""") +""" + ) elif sys.version_info[:2] > (3, 2): - exec_("""def raise_from(value, from_value): + exec_( + """def raise_from(value, from_value): try: raise value from from_value finally: value = None -""") +""" + ) else: + def raise_from(value, from_value): raise value print_ = getattr(moves.builtins, "print", None) if print_ is None: + def print_(*args, **kwargs): """The new-style print function for Python 2.4 and 2.5.""" fp = kwargs.pop("file", sys.stdout) @@ -749,14 +802,17 @@ def write(data): if not isinstance(data, basestring): data = str(data) # If the file has an encoding, encode unicode with it. - if (isinstance(fp, file) and - isinstance(data, unicode) and - fp.encoding is not None): + if ( + isinstance(fp, file) + and isinstance(data, unicode) + and fp.encoding is not None + ): errors = getattr(fp, "errors", None) if errors is None: errors = "strict" data = data.encode(fp.encoding, errors) fp.write(data) + want_unicode = False sep = kwargs.pop("sep", None) if sep is not None: @@ -792,6 +848,8 @@ def write(data): write(sep) write(arg) write(end) + + if sys.version_info[:2] < (3, 3): _print = print_ @@ -802,16 +860,24 @@ def print_(*args, **kwargs): if flush and fp is not None: fp.flush() + _add_doc(reraise, """Reraise an exception.""") if sys.version_info[0:2] < (3, 4): - def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, - updated=functools.WRAPPER_UPDATES): + + def wraps( + wrapped, + assigned=functools.WRAPPER_ASSIGNMENTS, + updated=functools.WRAPPER_UPDATES, + ): def wrapper(f): f = functools.wraps(wrapped, assigned, updated)(f) f.__wrapped__ = wrapped return f + return wrapper + + else: wraps = functools.wraps @@ -822,29 +888,31 @@ def with_metaclass(meta, *bases): # metaclass for one level of class instantiation that replaces itself with # the actual metaclass. 
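# Usage sketch for the temporary-metaclass trick described in the comment
# above, shown commented out so it stays separate from the implementation;
# it assumes CIME.six is importable and uses a made-up Tagging metaclass.
#
#     from CIME.six import with_metaclass
#
#     class Tagging(type):
#         def __new__(mcs, name, bases, d):
#             d["tagged"] = True
#             return super(Tagging, mcs).__new__(mcs, name, bases, d)
#
#     class Widget(with_metaclass(Tagging, object)):
#         pass
#
#     assert Widget.tagged and type(Widget) is Tagging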
class metaclass(type): - def __new__(cls, name, this_bases, d): return meta(name, bases, d) @classmethod def __prepare__(cls, name, this_bases): return meta.__prepare__(name, bases) - return type.__new__(metaclass, 'temporary_class', (), {}) + + return type.__new__(metaclass, "temporary_class", (), {}) def add_metaclass(metaclass): """Class decorator for creating a class with a metaclass.""" + def wrapper(cls): orig_vars = cls.__dict__.copy() - slots = orig_vars.get('__slots__') + slots = orig_vars.get("__slots__") if slots is not None: if isinstance(slots, str): slots = [slots] for slots_var in slots: orig_vars.pop(slots_var) - orig_vars.pop('__dict__', None) - orig_vars.pop('__weakref__', None) + orig_vars.pop("__dict__", None) + orig_vars.pop("__weakref__", None) return metaclass(cls.__name__, cls.__bases__, orig_vars) + return wrapper @@ -857,14 +925,16 @@ def python_2_unicode_compatible(klass): returning text and apply this decorator to the class. """ if PY2: - if '__str__' not in klass.__dict__: - raise ValueError("@python_2_unicode_compatible cannot be applied " - "to %s because it doesn't define __str__()." % - klass.__name__) + if "__str__" not in klass.__dict__: + raise ValueError( + "@python_2_unicode_compatible cannot be applied " + "to %s because it doesn't define __str__()." % klass.__name__ + ) klass.__unicode__ = klass.__str__ - klass.__str__ = lambda self: self.__unicode__().encode('utf-8') + klass.__str__ = lambda self: self.__unicode__().encode("utf-8") return klass + # Complete the moves implementation. # This code is at the end of this module to speed up module loading. # Turn this module into a package. @@ -881,8 +951,10 @@ def python_2_unicode_compatible(klass): # be floating around. Therefore, we can't use isinstance() to check for # the CIME.six meta path importer, since the other CIME.six instance will have # inserted an importer with different class. 
- if (type(importer).__name__ == "_SixMetaPathImporter" and - importer.name == __name__): + if ( + type(importer).__name__ == "_SixMetaPathImporter" + and importer.name == __name__ + ): del sys.meta_path[i] break del i, importer diff --git a/CIME/six_additions.py b/CIME/six_additions.py index 6f7634b6ff0..b7975edd793 100644 --- a/CIME/six_additions.py +++ b/CIME/six_additions.py @@ -9,5 +9,6 @@ else: _assertNotRegex = "assertNotRegexpMatches" + def assertNotRegex(self, *args, **kwargs): return getattr(self, _assertNotRegex)(*args, **kwargs) diff --git a/CIME/test_scheduler.py b/CIME/test_scheduler.py index 0ec9c0bb5ad..ee19dc35aff 100644 --- a/CIME/test_scheduler.py +++ b/CIME/test_scheduler.py @@ -15,9 +15,21 @@ from CIME.XML.standard_module_setup import * import CIME.six from CIME.get_tests import get_recommended_test_time, get_build_groups -from CIME import utils -from CIME.utils import append_status, append_testlog, TESTS_FAILED_ERR_CODE, parse_test_name, get_full_test_name, get_model, \ - convert_to_seconds, get_cime_root, get_project, get_timestamp, get_cime_default_driver, get_template_path +from CIME.utils import ( + append_status, + append_testlog, + TESTS_FAILED_ERR_CODE, + parse_test_name, + get_full_test_name, + get_model, + convert_to_seconds, + get_cime_root, + get_project, + get_timestamp, + get_python_libs_root, + get_cime_default_driver, + clear_folder, +) from CIME.test_status import * from CIME.XML.machines import Machines from CIME.XML.generic_xml import GenericXML @@ -37,17 +49,32 @@ logger = logging.getLogger(__name__) # Phases managed by TestScheduler -TEST_START = "INIT" # Special pseudo-phase just for test_scheduler bookkeeping -PHASES = [TEST_START, CREATE_NEWCASE_PHASE, XML_PHASE, SETUP_PHASE, - SHAREDLIB_BUILD_PHASE, MODEL_BUILD_PHASE, RUN_PHASE] # Order matters +TEST_START = "INIT" # Special pseudo-phase just for test_scheduler bookkeeping +PHASES = [ + TEST_START, + CREATE_NEWCASE_PHASE, + XML_PHASE, + SETUP_PHASE, + SHAREDLIB_BUILD_PHASE, + MODEL_BUILD_PHASE, + RUN_PHASE, +] # Order matters ############################################################################### def _translate_test_names_for_new_pecount(test_names, force_procs, force_threads): -############################################################################### + ############################################################################### new_test_names = [] caseopts = [] for test_name in test_names: - testcase, caseopts, grid, compset, machine, compiler, testmods = parse_test_name(test_name) + ( + testcase, + caseopts, + grid, + compset, + machine, + compiler, + testmods, + ) = parse_test_name(test_name) rewrote_caseopt = False if caseopts is not None: for idx, caseopt in enumerate(caseopts): @@ -59,9 +86,15 @@ def _translate_test_names_for_new_pecount(test_names, force_procs, force_threads old_procs, old_thrds = caseopt, None new_procs = force_procs if force_procs is not None else old_procs - new_thrds = force_threads if force_threads is not None else old_thrds - - newcaseopt = ("P{}".format(new_procs)) if new_thrds is None else ("P{}x{}".format(new_procs, new_thrds)) + new_thrds = ( + force_threads if force_threads is not None else old_thrds + ) + + newcaseopt = ( + ("P{}".format(new_procs)) + if new_thrds is None + else ("P{}x{}".format(new_procs, new_thrds)) + ) caseopts[idx] = newcaseopt rewrote_caseopt = True @@ -69,25 +102,40 @@ def _translate_test_names_for_new_pecount(test_names, force_procs, force_threads if not rewrote_caseopt: force_procs = "M" if force_procs is None else 
force_procs - newcaseopt = ("P{}".format(force_procs)) if force_threads is None else ("P{}x{}".format(force_procs, force_threads)) + newcaseopt = ( + ("P{}".format(force_procs)) + if force_threads is None + else ("P{}x{}".format(force_procs, force_threads)) + ) if caseopts is None: caseopts = [newcaseopt] else: caseopts.append(newcaseopt) - new_test_name = get_full_test_name(testcase, caseopts=caseopts, grid=grid, compset=compset, machine=machine, compiler=compiler, testmods_list=testmods) + new_test_name = get_full_test_name( + testcase, + caseopts=caseopts, + grid=grid, + compset=compset, + machine=machine, + compiler=compiler, + testmods_list=testmods, + ) new_test_names.append(new_test_name) return new_test_names + _TIME_CACHE = {} ############################################################################### def _get_time_est(test, baseline_root, as_int=False, use_cache=False, raw=False): -############################################################################### + ############################################################################### if test in _TIME_CACHE and use_cache: return _TIME_CACHE[test] - recommended_time = get_recommended_test_time_based_on_past(baseline_root, test, raw=raw) + recommended_time = get_recommended_test_time_based_on_past( + baseline_root, test, raw=raw + ) if recommended_time is None: recommended_time = get_recommended_test_time(test) @@ -103,45 +151,80 @@ def _get_time_est(test, baseline_root, as_int=False, use_cache=False, raw=False) return recommended_time + ############################################################################### def _order_tests_by_runtime(tests, baseline_root): -############################################################################### - tests.sort(key=lambda x: _get_time_est(x, baseline_root, as_int=True, use_cache=True, raw=True), reverse=True) + ############################################################################### + tests.sort( + key=lambda x: _get_time_est( + x, baseline_root, as_int=True, use_cache=True, raw=True + ), + reverse=True, + ) + ############################################################################### class TestScheduler(object): -############################################################################### + ############################################################################### ########################################################################### - def __init__(self, test_names, test_data=None, - no_run=False, no_build=False, no_setup=False, no_batch=None, - test_root=None, test_id=None, - machine_name=None, compiler=None, - baseline_root=None, baseline_cmp_name=None, baseline_gen_name=None, - clean=False, namelists_only=False, - project=None, parallel_jobs=None, - walltime=None, proc_pool=None, - use_existing=False, save_timing=False, queue=None, - allow_baseline_overwrite=False, output_root=None, - force_procs=None, force_threads=None, mpilib=None, - input_dir=None, pesfile=None, mail_user=None, mail_type=None, allow_pnl=False, - non_local=False, single_exe=False, workflow=None, chksum=False): - ########################################################################### - self._cime_root = get_cime_root() - self._cime_model = get_model() - self._cime_driver = get_cime_default_driver() - self._save_timing = save_timing - self._queue = queue - self._test_data = {} if test_data is None else test_data # Format: {test_name -> {data_name -> data}} - self._mpilib = mpilib # allow override of default mpilib + def __init__( + self, + test_names, + test_data=None, + 
no_run=False, + no_build=False, + no_setup=False, + no_batch=None, + test_root=None, + test_id=None, + machine_name=None, + compiler=None, + baseline_root=None, + baseline_cmp_name=None, + baseline_gen_name=None, + clean=False, + namelists_only=False, + project=None, + parallel_jobs=None, + walltime=None, + proc_pool=None, + use_existing=False, + save_timing=False, + queue=None, + allow_baseline_overwrite=False, + output_root=None, + force_procs=None, + force_threads=None, + mpilib=None, + input_dir=None, + pesfile=None, + mail_user=None, + mail_type=None, + allow_pnl=False, + non_local=False, + single_exe=False, + workflow=None, + chksum=False, + ): + ########################################################################### + self._cime_root = get_cime_root() + self._cime_model = get_model() + self._cime_driver = get_cime_default_driver() + self._save_timing = save_timing + self._queue = queue + self._test_data = ( + {} if test_data is None else test_data + ) # Format: {test_name -> {data_name -> data}} + self._mpilib = mpilib # allow override of default mpilib self._completed_tests = 0 - self._input_dir = input_dir - self._pesfile = pesfile + self._input_dir = input_dir + self._pesfile = pesfile self._allow_baseline_overwrite = allow_baseline_overwrite - self._allow_pnl = allow_pnl - self._non_local = non_local - self._build_groups = [] - self._workflow = workflow + self._allow_pnl = allow_pnl + self._non_local = non_local + self._build_groups = [] + self._workflow = workflow self._mail_user = mail_user self._mail_type = mail_type @@ -150,17 +233,21 @@ def __init__(self, test_names, test_data=None, if get_model() == "e3sm": # Current build system is unlikely to be able to productively use more than 16 cores - self._model_build_cost = min(16, int((self._machobj.get_value("GMAKE_J") * 2) / 3) + 1) + self._model_build_cost = min( + 16, int((self._machobj.get_value("GMAKE_J") * 2) / 3) + 1 + ) else: self._model_build_cost = 4 # If user is forcing procs or threads, re-write test names to reflect this. 
if force_procs or force_threads: - test_names = _translate_test_names_for_new_pecount(test_names, force_procs, force_threads) + test_names = _translate_test_names_for_new_pecount( + test_names, force_procs, force_threads + ) self._no_setup = no_setup self._no_build = no_build or no_setup or namelists_only - self._no_run = no_run or self._no_build + self._no_run = no_run or self._no_build self._output_root = output_root # Figure out what project to use if project is None: @@ -173,8 +260,10 @@ def __init__(self, test_names, test_data=None, # We will not use batch system if user asked for no_batch or if current # machine is not a batch machine self._no_batch = no_batch or not self._machobj.has_batch_system() - expect(not (self._no_batch and self._queue is not None), - "Does not make sense to request a queue without batch system") + expect( + not (self._no_batch and self._queue is not None), + "Does not make sense to request a queue without batch system", + ) # Determine and resolve test_root if test_root is not None: @@ -188,11 +277,13 @@ def __init__(self, test_names, test_data=None, self._test_root = self._test_root.replace("$PROJECT", self._project) self._test_root = os.path.abspath(self._test_root) - self._test_id = test_id if test_id is not None else get_timestamp() + self._test_id = test_id if test_id is not None else get_timestamp() - self._compiler = self._machobj.get_default_compiler() if compiler is None else compiler + self._compiler = ( + self._machobj.get_default_compiler() if compiler is None else compiler + ) - self._clean = clean + self._clean = clean self._namelists_only = namelists_only self._walltime = walltime @@ -205,36 +296,58 @@ def __init__(self, test_names, test_data=None, else: self._parallel_jobs = parallel_jobs - logger.info("create_test will do up to {} tasks simultaneously".format(self._parallel_jobs)) + logger.info( + "create_test will do up to {} tasks simultaneously".format( + self._parallel_jobs + ) + ) - self._baseline_cmp_name = baseline_cmp_name # Implies comparison should be done if not None - self._baseline_gen_name = baseline_gen_name # Implies generation should be done if not None + self._baseline_cmp_name = ( + baseline_cmp_name # Implies comparison should be done if not None + ) + self._baseline_gen_name = ( + baseline_gen_name # Implies generation should be done if not None + ) # Compute baseline_root. Need to set some properties on machobj in order for # the baseline_root to resolve correctly. 
self._machobj.set_value("COMPILER", self._compiler) self._machobj.set_value("PROJECT", self._project) - self._baseline_root = os.path.abspath(baseline_root) if baseline_root is not None \ - else self._machobj.get_value("BASELINE_ROOT") + self._baseline_root = ( + os.path.abspath(baseline_root) + if baseline_root is not None + else self._machobj.get_value("BASELINE_ROOT") + ) if baseline_cmp_name or baseline_gen_name: if self._baseline_cmp_name: - full_baseline_dir = os.path.join(self._baseline_root, self._baseline_cmp_name) - expect(os.path.isdir(full_baseline_dir), - "Missing baseline comparison directory {}".format(full_baseline_dir)) + full_baseline_dir = os.path.join( + self._baseline_root, self._baseline_cmp_name + ) + expect( + os.path.isdir(full_baseline_dir), + "Missing baseline comparison directory {}".format( + full_baseline_dir + ), + ) # the following is to assure that the existing generate directory is not overwritten if self._baseline_gen_name: - full_baseline_dir = os.path.join(self._baseline_root, self._baseline_gen_name) + full_baseline_dir = os.path.join( + self._baseline_root, self._baseline_gen_name + ) existing_baselines = [] for test_name in test_names: test_baseline = os.path.join(full_baseline_dir, test_name) if os.path.isdir(test_baseline): existing_baselines.append(test_baseline) - - expect(allow_baseline_overwrite or len(existing_baselines) == 0, - "Baseline directories already exists {}\n" \ - "Use -o to avoid this error".format(existing_baselines)) + if allow_baseline_overwrite: + clear_folder(test_baseline) + expect( + allow_baseline_overwrite or len(existing_baselines) == 0, + "Baseline directories already exists {}\n" + "Use -o to avoid this error".format(existing_baselines), + ) if self._cime_model == "e3sm": _order_tests_by_runtime(test_names, self._baseline_root) @@ -254,7 +367,9 @@ def __init__(self, test_names, test_data=None, else: self._proc_pool = int(proc_pool) - logger.info("create_test will use up to {} cores simultaneously".format(self._proc_pool)) + logger.info( + "create_test will use up to {} cores simultaneously".format(self._proc_pool) + ) self._procs_avail = self._proc_pool @@ -276,7 +391,9 @@ def __init__(self, test_names, test_data=None, if status in [TEST_PEND_STATUS, TEST_FAIL_STATUS]: if status == TEST_FAIL_STATUS: # Import for potential subsequent waits - ts.set_status(phase, TEST_PEND_STATUS, TEST_RERUN_COMMENT) + ts.set_status( + phase, TEST_PEND_STATUS, TEST_RERUN_COMMENT + ) # We need to pick up here break @@ -285,20 +402,32 @@ def __init__(self, test_names, test_data=None, if phase != SUBMIT_PHASE: # Somewhat subtle. Create_test considers submit/run to be the run phase, # so don't try to update test status for a passed submit phase - self._update_test_status(test, phase, TEST_PEND_STATUS) + self._update_test_status( + test, phase, TEST_PEND_STATUS + ) self._update_test_status(test, phase, status) if phase == RUN_PHASE: - logger.info("Test {} passed and will not be re-run".format(test)) - - logger.info("Using existing test directory {}".format(self._get_test_dir(test))) + logger.info( + "Test {} passed and will not be re-run".format( + test + ) + ) + + logger.info( + "Using existing test directory {}".format(self._get_test_dir(test)) + ) else: # None of the test directories should already exist. for test in self._tests: - expect(not os.path.exists(self._get_test_dir(test)), - "Cannot create new case in directory '{}', it already exists." 
- " Pick a different test-id".format(self._get_test_dir(test))) - logger.info("Creating test directory {}".format(self._get_test_dir(test))) + expect( + not os.path.exists(self._get_test_dir(test)), + "Cannot create new case in directory '{}', it already exists." + " Pick a different test-id".format(self._get_test_dir(test)), + ) + logger.info( + "Creating test directory {}".format(self._get_test_dir(test)) + ) # Setup build groups if single_exe: @@ -307,7 +436,7 @@ def __init__(self, test_names, test_data=None, # Any test that's in a shared-enabled suite with other tests should share exes self._build_groups = get_build_groups(self._tests) else: - self._build_groups = [ (item,) for item in self._tests ] + self._build_groups = [(item,) for item in self._tests] # Build group to exeroot map self._build_group_exeroots = {} @@ -317,7 +446,11 @@ def __init__(self, test_names, test_data=None, logger.debug("Build groups are:") for build_group in self._build_groups: for test_name in build_group: - logger.debug("{}{}".format(" " if test_name == build_group[0] else " ", test_name)) + logger.debug( + "{}{}".format( + " " if test_name == build_group[0] else " ", test_name + ) + ) self._chksum = chksum # By the end of this constructor, this program should never hard abort, @@ -326,12 +459,12 @@ def __init__(self, test_names, test_data=None, ########################################################################### def get_testnames(self): - ########################################################################### + ########################################################################### return list(self._tests.keys()) ########################################################################### def _log_output(self, test, output): - ########################################################################### + ########################################################################### test_dir = self._get_test_dir(test) if not os.path.isdir(test_dir): # Note: making this directory could cause create_newcase to fail @@ -341,7 +474,7 @@ def _log_output(self, test, output): ########################################################################### def _get_case_id(self, test): - ########################################################################### + ########################################################################### baseline_action_code = "" if self._baseline_gen_name: baseline_action_code += "G" @@ -354,37 +487,40 @@ def _get_case_id(self, test): ########################################################################### def _get_test_dir(self, test): - ########################################################################### + ########################################################################### return os.path.join(self._test_root, self._get_case_id(test)) ########################################################################### def _get_test_data(self, test): - ########################################################################### + ########################################################################### # Must be atomic return self._tests[test] ########################################################################### def _is_broken(self, test): - ########################################################################### + ########################################################################### status = self._get_test_status(test) return status != TEST_PASS_STATUS and status != TEST_PEND_STATUS 
########################################################################### def _work_remains(self, test): - ########################################################################### + ########################################################################### test_phase, test_status = self._get_test_data(test) - return (test_status == TEST_PASS_STATUS or test_status == TEST_PEND_STATUS) and\ - test_phase != self._phases[-1] + return ( + test_status == TEST_PASS_STATUS or test_status == TEST_PEND_STATUS + ) and test_phase != self._phases[-1] ########################################################################### def _get_test_status(self, test, phase=None): - ########################################################################### + ########################################################################### curr_phase, curr_status = self._get_test_data(test) if phase is None or phase == curr_phase: return curr_status else: # Assume all future phases are PEND - if phase is not None and self._phases.index(phase) > self._phases.index(curr_phase): + if phase is not None and self._phases.index(phase) > self._phases.index( + curr_phase + ): return TEST_PEND_STATUS # Assume all older phases PASSed @@ -392,27 +528,35 @@ def _get_test_status(self, test, phase=None): ########################################################################### def _get_test_phase(self, test): - ########################################################################### + ########################################################################### return self._get_test_data(test)[0] ########################################################################### def _update_test_status(self, test, phase, status): - ########################################################################### + ########################################################################### phase_idx = self._phases.index(phase) old_phase, old_status = self._get_test_data(test) if old_phase == phase: - expect(old_status == TEST_PEND_STATUS, - "Only valid to transition from PEND to something else, found '{}' for phase '{}'".format(old_status, phase)) - expect(status != TEST_PEND_STATUS, - "Cannot transition from PEND -> PEND") + expect( + old_status == TEST_PEND_STATUS, + "Only valid to transition from PEND to something else, found '{}' for phase '{}'".format( + old_status, phase + ), + ) + expect(status != TEST_PEND_STATUS, "Cannot transition from PEND -> PEND") else: - expect(old_status == TEST_PASS_STATUS, - "Why did we move on to next phase when prior phase did not pass?") - expect(status == TEST_PEND_STATUS, - "New phase should be set to pending status") - expect(self._phases.index(old_phase) == phase_idx - 1, - "Skipped phase? {} {}".format(old_phase, phase_idx)) + expect( + old_status == TEST_PASS_STATUS, + "Why did we move on to next phase when prior phase did not pass?", + ) + expect( + status == TEST_PEND_STATUS, "New phase should be set to pending status" + ) + expect( + self._phases.index(old_phase) == phase_idx - 1, + "Skipped phase? {} {}".format(old_phase, phase_idx), + ) # Must be atomic self._tests[test] = (phase, status) @@ -426,10 +570,12 @@ def _shell_cmd_for_phase(self, test, cmd, phase, from_dir=None): while True: rc, output, errput = run_cmd(cmd, from_dir=from_dir, env=env) if rc != 0: - self._log_output(test, - "{} FAILED for test '{}'.\nCommand: {}\nOutput: {}\n". 
- format(phase, test, cmd, - output + "\n" + errput)) + self._log_output( + test, + "{} FAILED for test '{}'.\nCommand: {}\nOutput: {}\n".format( + phase, test, cmd, output + "\n" + errput + ), + ) # Temporary hack to get around odd file descriptor use by # buildnml scripts. if "bad interpreter" in output: @@ -441,25 +587,31 @@ def _shell_cmd_for_phase(self, test, cmd, phase, from_dir=None): # We don't want "RUN PASSED" in the TestStatus.log if the only thing that # succeeded was the submission. phase = "SUBMIT" if phase == RUN_PHASE else phase - self._log_output(test, - "{} PASSED for test '{}'.\nCommand: {}\nOutput: {}\n". - format(phase, test, cmd, - output + "\n" + errput)) + self._log_output( + test, + "{} PASSED for test '{}'.\nCommand: {}\nOutput: {}\n".format( + phase, test, cmd, output + "\n" + errput + ), + ) return True, errput ########################################################################### def _create_newcase_phase(self, test): - ########################################################################### + ########################################################################### test_dir = self._get_test_dir(test) - _, case_opts, grid, compset,\ - machine, compiler, test_mods = parse_test_name(test) + _, case_opts, grid, compset, machine, compiler, test_mods = parse_test_name( + test + ) os.environ["FROM_CREATE_TEST"] = "True" create_newcase_cmd = "{} {} --case {} --res {} --compset {} --test".format( sys.executable, os.path.join(self._cime_root, "CIME", "scripts", "create_newcase.py"), - test_dir, grid, compset) + test_dir, + grid, + compset + ) if machine is not None: create_newcase_cmd += " --machine {}".format(machine) @@ -484,58 +636,67 @@ def _create_newcase_phase(self, test): ninst = 1 ncpl = 1 if case_opts is not None: - for case_opt in case_opts: # pylint: disable=not-an-iterable - if case_opt.startswith('M'): + for case_opt in case_opts: # pylint: disable=not-an-iterable + if case_opt.startswith("M"): mpilib = case_opt[1:] create_newcase_cmd += " --mpilib {}".format(mpilib) - logger.debug (" MPILIB set to {}".format(mpilib)) - elif case_opt.startswith('N'): - expect(ncpl == 1,"Cannot combine _C and _N options") + logger.debug(" MPILIB set to {}".format(mpilib)) + elif case_opt.startswith("N"): + expect(ncpl == 1, "Cannot combine _C and _N options") ninst = case_opt[1:] create_newcase_cmd += " --ninst {}".format(ninst) - logger.debug (" NINST set to {}".format(ninst)) - elif case_opt.startswith('C'): - expect(ninst == 1,"Cannot combine _C and _N options") + logger.debug(" NINST set to {}".format(ninst)) + elif case_opt.startswith("C"): + expect(ninst == 1, "Cannot combine _C and _N options") ncpl = case_opt[1:] - create_newcase_cmd += " --ninst {} --multi-driver" .format(ncpl) - logger.debug (" NCPL set to {}" .format(ncpl)) - elif case_opt.startswith('P'): + create_newcase_cmd += " --ninst {} --multi-driver".format(ncpl) + logger.debug(" NCPL set to {}".format(ncpl)) + elif case_opt.startswith("P"): pesize = case_opt[1:] create_newcase_cmd += " --pecount {}".format(pesize) - elif case_opt.startswith('G'): + elif case_opt.startswith("G"): ngpus_per_node = case_opt[1:] create_newcase_cmd += " --ngpus-per-node {}".format(ngpus_per_node) - elif case_opt.startswith('V'): + elif case_opt.startswith("V"): self._cime_driver = case_opt[1:] create_newcase_cmd += " --driver {}".format(self._cime_driver) - if "--ninst" in create_newcase_cmd and not "--multi-driver" in create_newcase_cmd: - if "--driver nuopc" in create_newcase_cmd or ("--driver" not in 
create_newcase_cmd and self._cime_driver == "nuopc"): + if ( + "--ninst" in create_newcase_cmd + and not "--multi-driver" in create_newcase_cmd + ): + if "--driver nuopc" in create_newcase_cmd or ( + "--driver" not in create_newcase_cmd and self._cime_driver == "nuopc" + ): expect(False, "_N option not supported by nuopc driver, use _C instead") - - if test_mods is not None: create_newcase_cmd += " --user-mods-dir " - for one_test_mod in test_mods: # pylint: disable=not-an-iterable - if one_test_mod.find('/') != -1: - (component, modspath) = one_test_mod.split('/', 1) + for one_test_mod in test_mods: # pylint: disable=not-an-iterable + if one_test_mod.find("/") != -1: + (component, modspath) = one_test_mod.split("/", 1) else: error = "Missing testmod component. Testmods are specified as '${component}-${testmod}'" self._log_output(test, error) return False, error files = Files(comp_interface=self._cime_driver) - testmods_dir = files.get_value("TESTS_MODS_DIR", {"component": component}) + testmods_dir = files.get_value( + "TESTS_MODS_DIR", {"component": component} + ) test_mod_file = os.path.join(testmods_dir, component, modspath) # if no testmod is found check if a usermod of the same name exists and # use it if it does. if not os.path.exists(test_mod_file): - usermods_dir = files.get_value("USER_MODS_DIR", {"component": component}) + usermods_dir = files.get_value( + "USER_MODS_DIR", {"component": component} + ) test_mod_file = os.path.join(usermods_dir, modspath) if not os.path.exists(test_mod_file): - error = "Missing testmod file '{}', checked {} and {}".format(modspath, testmods_dir, usermods_dir) + error = "Missing testmod file '{}', checked {} and {}".format( + modspath, testmods_dir, usermods_dir + ) self._log_output(test, error) return False, error @@ -544,7 +705,7 @@ def _create_newcase_phase(self, test): # create_test mpilib option overrides default but not explicitly set case_opt mpilib if mpilib is None and self._mpilib is not None: create_newcase_cmd += " --mpilib {}".format(self._mpilib) - logger.debug (" MPILIB set to {}".format(self._mpilib)) + logger.debug(" MPILIB set to {}".format(self._mpilib)) if self._queue is not None: create_newcase_cmd += " --queue={}".format(self._queue) @@ -553,7 +714,9 @@ def _create_newcase_phase(self, test): # otherwise it runs in share and fails intermittently test_case = parse_test_name(test)[0] if test_case == "NODEFAIL": - machine = machine if machine is not None else self._machobj.get_machine_name() + machine = ( + machine if machine is not None else self._machobj.get_machine_name() + ) if machine == "cheyenne": create_newcase_cmd += " --queue=regular" @@ -568,20 +731,30 @@ def _create_newcase_phase(self, test): create_newcase_cmd += " --walltime {}".format(recommended_time) else: - if test in self._test_data and "options" in self._test_data[test] and \ - "wallclock" in self._test_data[test]['options']: - create_newcase_cmd += " --walltime {}".format(self._test_data[test]['options']['wallclock']) - if test in self._test_data and "options" in self._test_data[test] and \ - "workflow" in self._test_data[test]['options']: - create_newcase_cmd += " --workflow {}".format(self._test_data[test]['options']['workflow']) + if ( + test in self._test_data + and "options" in self._test_data[test] + and "wallclock" in self._test_data[test]["options"] + ): + create_newcase_cmd += " --walltime {}".format( + self._test_data[test]["options"]["wallclock"] + ) + if ( + test in self._test_data + and "options" in self._test_data[test] + and "workflow" in 
self._test_data[test]["options"] + ): + create_newcase_cmd += " --workflow {}".format( + self._test_data[test]["options"]["workflow"] + ) logger.debug("Calling create_newcase: " + create_newcase_cmd) return self._shell_cmd_for_phase(test, create_newcase_cmd, CREATE_NEWCASE_PHASE) ########################################################################### def _xml_phase(self, test): - ########################################################################### - test_case,case_opts,_,_,_,compiler,_ = parse_test_name(test) + ########################################################################### + test_case, case_opts, _, _, _, compiler, _ = parse_test_name(test) # Create, fill and write an envtest object test_dir = self._get_test_dir(test) @@ -593,13 +766,18 @@ def _xml_phase(self, test): ufs_driver = os.environ.get("UFS_DRIVER") attribute = None if ufs_driver: - attribute = {"component":ufs_driver} + attribute = {"component": ufs_driver} drv_config_file = files.get_value("CONFIG_CPL_FILE", attribute=attribute) if self._cime_driver == "nuopc" and not os.path.exists(drv_config_file): - drv_config_file = files.get_value("CONFIG_CPL_FILE", {"component":"cpl"}) - expect(os.path.exists(drv_config_file),"File {} not found, cime driver {}".format(drv_config_file, self._cime_driver)) + drv_config_file = files.get_value("CONFIG_CPL_FILE", {"component": "cpl"}) + expect( + os.path.exists(drv_config_file), + "File {} not found, cime driver {}".format( + drv_config_file, self._cime_driver + ), + ) drv_comp = Component(drv_config_file, "CPL") @@ -608,21 +786,38 @@ def _xml_phase(self, test): envtest.set_value("TESTCASE", test_case) envtest.set_value("TEST_TESTID", self._test_id) envtest.set_value("CASEBASEID", test) - if test in self._test_data and "options" in self._test_data[test] and \ - "memleak_tolerance" in self._test_data[test]['options']: - envtest.set_value("TEST_MEMLEAK_TOLERANCE", self._test_data[test]['options']['memleak_tolerance']) + memleak_tolerance = self._machobj.get_value( + "TEST_MEMLEAK_TOLERANCE", resolved=False + ) + if ( + test in self._test_data + and "options" in self._test_data[test] + and "memleak_tolerance" in self._test_data[test]["options"] + ): + memleak_tolerance = self._test_data[test]["options"]["memleak_tolerance"] + + envtest.set_value( + "TEST_MEMLEAK_TOLERANCE", + 0.10 if memleak_tolerance is None else memleak_tolerance, + ) test_argv = "-testname {} -testroot {}".format(test, self._test_root) if self._baseline_gen_name: test_argv += " -generate {}".format(self._baseline_gen_name) - basegen_case_fullpath = os.path.join(self._baseline_root,self._baseline_gen_name, test) + basegen_case_fullpath = os.path.join( + self._baseline_root, self._baseline_gen_name, test + ) logger.debug("basegen_case is {}".format(basegen_case_fullpath)) envtest.set_value("BASELINE_NAME_GEN", self._baseline_gen_name) - envtest.set_value("BASEGEN_CASE", os.path.join(self._baseline_gen_name, test)) + envtest.set_value( + "BASEGEN_CASE", os.path.join(self._baseline_gen_name, test) + ) if self._baseline_cmp_name: test_argv += " -compare {}".format(self._baseline_cmp_name) envtest.set_value("BASELINE_NAME_CMP", self._baseline_cmp_name) - envtest.set_value("BASECMP_CASE", os.path.join(self._baseline_cmp_name, test)) + envtest.set_value( + "BASECMP_CASE", os.path.join(self._baseline_cmp_name, test) + ) envtest.set_value("TEST_ARGV", test_argv) envtest.set_value("CLEANUP", self._clean) @@ -630,87 +825,104 @@ def _xml_phase(self, test): envtest.set_value("BASELINE_ROOT", self._baseline_root) 
envtest.set_value("GENERATE_BASELINE", self._baseline_gen_name is not None) envtest.set_value("COMPARE_BASELINE", self._baseline_cmp_name is not None) - envtest.set_value("CCSM_CPRNC", self._machobj.get_value("CCSM_CPRNC", resolved=False)) + envtest.set_value( + "CCSM_CPRNC", self._machobj.get_value("CCSM_CPRNC", resolved=False) + ) tput_tolerance = self._machobj.get_value("TEST_TPUT_TOLERANCE", resolved=False) - if test in self._test_data and "options" in self._test_data[test] and \ - "tput_tolerance" in self._test_data[test]['options']: - tput_tolerance = self._test_data[test]['options']['tput_tolerance'] + if ( + test in self._test_data + and "options" in self._test_data[test] + and "tput_tolerance" in self._test_data[test]["options"] + ): + tput_tolerance = self._test_data[test]["options"]["tput_tolerance"] - envtest.set_value("TEST_TPUT_TOLERANCE", 0.25 if tput_tolerance is None else tput_tolerance) + envtest.set_value( + "TEST_TPUT_TOLERANCE", 0.25 if tput_tolerance is None else tput_tolerance + ) # Add the test instructions from config_test to env_test in the case config_test = Tests() testnode = config_test.get_test_node(test_case) envtest.add_test(testnode) - if compiler == 'nag': - envtest.set_value("FORCE_BUILD_SMP","FALSE") + if compiler == "nag": + envtest.set_value("FORCE_BUILD_SMP", "FALSE") # Determine case_opts from the test_case if case_opts is not None: logger.debug("case_opts are {} ".format(case_opts)) - for opt in case_opts: # pylint: disable=not-an-iterable + for opt in case_opts: # pylint: disable=not-an-iterable logger.debug("case_opt is {}".format(opt)) - if opt == 'D': + if opt == "D": envtest.set_test_parameter("DEBUG", "TRUE") - logger.debug (" DEBUG set to TRUE") + logger.debug(" DEBUG set to TRUE") - elif opt == 'E': + elif opt == "E": envtest.set_test_parameter("USE_ESMF_LIB", "TRUE") - logger.debug (" USE_ESMF_LIB set to TRUE") + logger.debug(" USE_ESMF_LIB set to TRUE") - elif opt == 'CG': + elif opt == "CG": envtest.set_test_parameter("CALENDAR", "GREGORIAN") - logger.debug (" CALENDAR set to {}".format(opt)) - - elif opt.startswith('L'): - match = re.match('L([A-Za-z])([0-9]*)', opt) - stop_option = {"y":"nyears", "m":"nmonths", "d":"ndays", "h":"nhours", - "s":"nseconds", "n":"nsteps"} + logger.debug(" CALENDAR set to {}".format(opt)) + + elif opt.startswith("L"): + match = re.match("L([A-Za-z])([0-9]*)", opt) + stop_option = { + "y": "nyears", + "m": "nmonths", + "d": "ndays", + "h": "nhours", + "s": "nseconds", + "n": "nsteps", + } opt = match.group(1) - envtest.set_test_parameter("STOP_OPTION",stop_option[opt]) + envtest.set_test_parameter("STOP_OPTION", stop_option[opt]) opti = match.group(2) envtest.set_test_parameter("STOP_N", opti) - logger.debug (" STOP_OPTION set to {}".format(stop_option[opt])) - logger.debug (" STOP_N set to {}".format(opti)) + logger.debug(" STOP_OPTION set to {}".format(stop_option[opt])) + logger.debug(" STOP_N set to {}".format(opti)) - elif opt.startswith('R'): + elif opt.startswith("R"): # R option is for testing in PTS_MODE or Single Column Model # (SCM) mode envtest.set_test_parameter("PTS_MODE", "TRUE") # For PTS_MODE, set all tasks and threads to 1 - comps=["ATM","LND","ICE","OCN","CPL","GLC","ROF","WAV"] + comps = ["ATM", "LND", "ICE", "OCN", "CPL", "GLC", "ROF", "WAV"] for comp in comps: - envtest.set_test_parameter("NTASKS_"+comp, "1") - envtest.set_test_parameter("NTHRDS_"+comp, "1") - envtest.set_test_parameter("ROOTPE_"+comp, "0") + envtest.set_test_parameter("NTASKS_" + comp, "1") + 
envtest.set_test_parameter("NTHRDS_" + comp, "1") + envtest.set_test_parameter("ROOTPE_" + comp, "0") envtest.set_test_parameter("PIO_TYPENAME", "netcdf") - elif opt.startswith('A'): + elif opt.startswith("A"): # A option is for testing in ASYNC IO mode, only available with nuopc driver and pio2 envtest.set_test_parameter("PIO_ASYNC_INTERFACE", "TRUE") envtest.set_test_parameter("CIME_DRIVER", "nuopc") envtest.set_test_parameter("PIO_VERSION", "2") - match = re.match('A([0-9]+)x?([0-9])*', opt) - envtest.set_test_parameter("PIO_NUMTASKS_CPL", match.group(1)) + match = re.match("A([0-9]+)x?([0-9])*", opt) + envtest.set_test_parameter("PIO_NUMTASKS_CPL", match.group(1)) if match.group(2): - envtest.set_test_parameter("PIO_STRIDE_CPL",match.group(2)) - - elif (opt.startswith('I') or # Marker to distinguish tests with same name - ignored - opt.startswith('M') or # handled in create_newcase - opt.startswith('P') or # handled in create_newcase - opt.startswith('N') or # handled in create_newcase - opt.startswith('C') or # handled in create_newcase - opt.startswith('V') or # handled in create_newcase - opt.startswith('G') or # handled in create_newcase - opt == 'B'): # handled in run_phase + envtest.set_test_parameter("PIO_STRIDE_CPL", match.group(2)) + + elif ( + opt.startswith("I") + or opt.startswith( # Marker to distinguish tests with same name - ignored + "M" + ) + or opt.startswith("P") # handled in create_newcase + or opt.startswith("N") # handled in create_newcase + or opt.startswith("C") # handled in create_newcase + or opt.startswith("V") # handled in create_newcase + or opt.startswith("G") # handled in create_newcase + or opt == "B" # handled in create_newcase + ): # handled in run_phase pass - elif opt.startswith('IOP'): + elif opt.startswith("IOP"): logger.warning("IOP test option not yet implemented") else: expect(False, "Could not parse option '{}' ".format(opt)) @@ -723,9 +935,12 @@ def _xml_phase(self, test): self._output_root = case.get_value("CIME_OUTPUT_ROOT") # if we are running a single test we don't need sharedlibroot if len(self._tests) > 1 and self._cime_model != "e3sm": - case.set_value("SHAREDLIBROOT", - os.path.join(self._output_root, - "sharedlibroot.{}".format(self._test_id))) + case.set_value( + "SHAREDLIBROOT", + os.path.join( + self._output_root, "sharedlibroot.{}".format(self._test_id) + ), + ) envtest.set_initial_values(case) case.set_value("TEST", True) case.set_value("SAVE_TIMING", self._save_timing) @@ -734,7 +949,10 @@ def _xml_phase(self, test): # the first case in the build group is_first_test, _, my_build_group = self._get_build_group(test) if is_first_test: - expect(self._build_group_exeroots[my_build_group] is None, "Should not already have exeroot") + expect( + self._build_group_exeroots[my_build_group] is None, + "Should not already have exeroot", + ) self._build_group_exeroots[my_build_group] = case.get_value("EXEROOT") else: build_group_exeroot = self._build_group_exeroots[my_build_group] @@ -750,37 +968,55 @@ def _xml_phase(self, test): ########################################################################### def _setup_phase(self, test): - ########################################################################### - test_dir = self._get_test_dir(test) - rv = self._shell_cmd_for_phase(test, "./case.setup", SETUP_PHASE, from_dir=test_dir) + ########################################################################### + test_dir = self._get_test_dir(test) + rv = self._shell_cmd_for_phase( + test, "./case.setup", SETUP_PHASE, from_dir=test_dir + ) # 
It's OK for this command to fail with baseline diffs but not catastrophically if rv[0]: env = os.environ.copy() env["PYTHONPATH"] = get_cime_root() - cmdstat, output, _ = run_cmd("./case.cmpgen_namelists", - combine_output=True, from_dir=test_dir, - env=env) - expect(cmdstat in [0, TESTS_FAILED_ERR_CODE], "Fatal error in case.cmpgen_namelists: {}".format(output)) + cmdstat, output, _ = run_cmd( + "./case.cmpgen_namelists", + combine_output=True, + from_dir=test_dir, + env=env + ) + expect( + cmdstat in [0, TESTS_FAILED_ERR_CODE], + "Fatal error in case.cmpgen_namelists: {}".format(output) + ) return rv ########################################################################### def _sharedlib_build_phase(self, test): - ########################################################################### + ########################################################################### is_first_test, first_test, _ = self._get_build_group(test) if not is_first_test: - if self._get_test_status(first_test, phase=SHAREDLIB_BUILD_PHASE) == TEST_PASS_STATUS: + if ( + self._get_test_status(first_test, phase=SHAREDLIB_BUILD_PHASE) + == TEST_PASS_STATUS + ): return True, "" else: - return False, "Cannot use build for test {} because it failed".format(first_test) + return False, "Cannot use build for test {} because it failed".format( + first_test + ) test_dir = self._get_test_dir(test) - return self._shell_cmd_for_phase(test, "./case.build --sharedlib-only", SHAREDLIB_BUILD_PHASE, from_dir=test_dir) + return self._shell_cmd_for_phase( + test, + "./case.build --sharedlib-only", + SHAREDLIB_BUILD_PHASE, + from_dir=test_dir, + ) ########################################################################### def _get_build_group(self, test): - ########################################################################### + ########################################################################### for build_group in self._build_groups: if test in build_group: return test == build_group[0], build_group[0], build_group @@ -789,32 +1025,44 @@ def _get_build_group(self, test): ########################################################################### def _model_build_phase(self, test): - ########################################################################### + ########################################################################### is_first_test, first_test, _ = self._get_build_group(test) test_dir = self._get_test_dir(test) if not is_first_test: - if self._get_test_status(first_test, phase=MODEL_BUILD_PHASE) == TEST_PASS_STATUS: + if ( + self._get_test_status(first_test, phase=MODEL_BUILD_PHASE) + == TEST_PASS_STATUS + ): with Case(test_dir, read_only=False) as case: - post_build(case, [], build_complete=True, save_build_provenance=False) + post_build( + case, [], build_complete=True, save_build_provenance=False + ) return True, "" else: - return False, "Cannot use build for test {} because it failed".format(first_test) + return False, "Cannot use build for test {} because it failed".format( + first_test + ) - return self._shell_cmd_for_phase(test, "./case.build --model-only", MODEL_BUILD_PHASE, from_dir=test_dir) + return self._shell_cmd_for_phase( + test, "./case.build --model-only", MODEL_BUILD_PHASE, from_dir=test_dir + ) ########################################################################### def _run_phase(self, test): - ########################################################################### + ########################################################################### test_dir = self._get_test_dir(test) 
case_opts = parse_test_name(test)[1] - if case_opts is not None and "B" in case_opts: # pylint: disable=unsupported-membership-test + if ( + case_opts is not None + and "B" in case_opts # pylint: disable=unsupported-membership-test + ): self._log_output(test, "{} SKIPPED for test '{}'".format(RUN_PHASE, test)) self._update_test_status_file(test, SUBMIT_PHASE, TEST_PASS_STATUS) - self._update_test_status_file(test, RUN_PHASE, TEST_PASS_STATUS) + self._update_test_status_file(test, RUN_PHASE, TEST_PASS_STATUS) return True, "SKIPPED" else: @@ -834,25 +1082,31 @@ def _run_phase(self, test): ########################################################################### def _run_catch_exceptions(self, test, phase, run): - ########################################################################### + ########################################################################### try: return run(test) except Exception as e: exc_tb = sys.exc_info()[2] - errput = "Test '{}' failed in phase '{}' with exception '{}'\n".format(test, phase, str(e)) - errput += ''.join(traceback.format_tb(exc_tb)) + errput = "Test '{}' failed in phase '{}' with exception '{}'\n".format( + test, phase, str(e) + ) + errput += "".join(traceback.format_tb(exc_tb)) self._log_output(test, errput) return False, errput ########################################################################### def _get_procs_needed(self, test, phase, threads_in_flight=None, no_batch=False): - ########################################################################### + ########################################################################### # For build pools, we must wait for the first case to complete XML, SHAREDLIB, # and MODEL_BUILD phases before the other cases can do those phases is_first_test, first_test, _ = self._get_build_group(test) if not is_first_test: - build_group_dep_phases = [XML_PHASE, SHAREDLIB_BUILD_PHASE, MODEL_BUILD_PHASE] + build_group_dep_phases = [ + XML_PHASE, + SHAREDLIB_BUILD_PHASE, + MODEL_BUILD_PHASE, + ] if phase in build_group_dep_phases: if self._get_test_status(first_test, phase=phase) == TEST_PEND_STATUS: return self._proc_pool + 1 @@ -864,17 +1118,17 @@ def _get_procs_needed(self, test, phase, threads_in_flight=None, no_batch=False) total_pes = EnvMachPes(test_dir, read_only=True).get_value("TOTALPES") return total_pes - elif (phase == SHAREDLIB_BUILD_PHASE): + elif phase == SHAREDLIB_BUILD_PHASE: if self._cime_model != "e3sm": # Will force serialization of sharedlib builds # TODO - instead of serializing, compute all library configs needed and build # them all in parallel for _, _, running_phase in threads_in_flight.values(): - if (running_phase == SHAREDLIB_BUILD_PHASE): + if running_phase == SHAREDLIB_BUILD_PHASE: return self._proc_pool + 1 return 1 - elif (phase == MODEL_BUILD_PHASE): + elif phase == MODEL_BUILD_PHASE: # Model builds now happen in parallel return self._model_build_cost else: @@ -882,7 +1136,7 @@ def _get_procs_needed(self, test, phase, threads_in_flight=None, no_batch=False) ########################################################################### def _wait_for_something_to_finish(self, threads_in_flight): - ########################################################################### + ########################################################################### expect(len(threads_in_flight) <= self._parallel_jobs, "Oversubscribed?") finished_tests = [] while not finished_tests: @@ -899,7 +1153,7 @@ def _wait_for_something_to_finish(self, threads_in_flight): 
########################################################################### def _update_test_status_file(self, test, test_phase, status): - ########################################################################### + ########################################################################### """ In general, test_scheduler should not be responsible for updating the TestStatus file, but there are a few cases where it has to. @@ -910,12 +1164,19 @@ def _update_test_status_file(self, test, test_phase, status): ########################################################################### def _consumer(self, test, test_phase, phase_method): - ########################################################################### + ########################################################################### before_time = time.time() success, errors = self._run_catch_exceptions(test, test_phase, phase_method) elapsed_time = time.time() - before_time - status = (TEST_PEND_STATUS if test_phase == RUN_PHASE and not \ - self._no_batch else TEST_PASS_STATUS) if success else TEST_FAIL_STATUS + status = ( + ( + TEST_PEND_STATUS + if test_phase == RUN_PHASE and not self._no_batch + else TEST_PASS_STATUS + ) + if success + else TEST_FAIL_STATUS + ) if status != TEST_PEND_STATUS: self._update_test_status(test, test_phase, status) @@ -923,38 +1184,61 @@ def _consumer(self, test, test_phase, phase_method): if not self._work_remains(test): self._completed_tests += 1 total = len(self._tests) - status_str = "Finished {} for test {} in {:f} seconds ({}). [COMPLETED {:d} of {:d}]".format(test_phase, test, elapsed_time, status, self._completed_tests, total) + status_str = "Finished {} for test {} in {:f} seconds ({}). [COMPLETED {:d} of {:d}]".format( + test_phase, test, elapsed_time, status, self._completed_tests, total + ) else: - status_str = "Finished {} for test {} in {:f} seconds ({})".format(test_phase, test, elapsed_time, status) + status_str = "Finished {} for test {} in {:f} seconds ({})".format( + test_phase, test, elapsed_time, status + ) if not success: status_str += "\n Case dir: {}\n".format(self._get_test_dir(test)) - status_str += " Errors were:\n {}\n".format("\n ".join(errors.splitlines())) + status_str += " Errors were:\n {}\n".format( + "\n ".join(errors.splitlines()) + ) logger.info(status_str) is_first_test = self._get_build_group(test)[0] - if test_phase in [CREATE_NEWCASE_PHASE, XML_PHASE] or \ - (not is_first_test and test_phase in [SHAREDLIB_BUILD_PHASE, MODEL_BUILD_PHASE]): + if test_phase in [CREATE_NEWCASE_PHASE, XML_PHASE] or ( + not is_first_test + and test_phase in [SHAREDLIB_BUILD_PHASE, MODEL_BUILD_PHASE] + ): # These are the phases where TestScheduler is responsible for # updating the TestStatus file self._update_test_status_file(test, test_phase, status) if test_phase == XML_PHASE: - append_status("Case Created using: "+" ".join(sys.argv), "README.case", caseroot=self._get_test_dir(test)) + append_status( + "Case Created using: " + " ".join(sys.argv), + "README.case", + caseroot=self._get_test_dir(test), + ) # On batch systems, we want to immediately submit to the queue, because # it's very cheap to submit and will get us a better spot in line - if (success and not self._no_run and not self._no_batch and test_phase == MODEL_BUILD_PHASE): - logger.info("Starting {} for test {} with 1 proc on interactive node and {:d} procs on compute nodes".format(RUN_PHASE, test, self._get_procs_needed(test, RUN_PHASE, no_batch=True))) + if ( + success + and not self._no_run + and not self._no_batch + and
test_phase == MODEL_BUILD_PHASE + ): + logger.info( + "Starting {} for test {} with 1 proc on interactive node and {:d} procs on compute nodes".format( + RUN_PHASE, + test, + self._get_procs_needed(test, RUN_PHASE, no_batch=True), + ) + ) self._update_test_status(test, RUN_PHASE, TEST_PEND_STATUS) self._consumer(test, RUN_PHASE, self._run_phase) ########################################################################### def _producer(self): - ########################################################################### - threads_in_flight = {} # test-name -> (thread, procs, phase) + ########################################################################### + threads_in_flight = {} # test-name -> (thread, procs, phase) while True: work_to_do = False num_threads_launched_this_iteration = 0 @@ -972,40 +1256,76 @@ def _producer(self): test_phase, test_status = self._get_test_data(test) expect(test_status != TEST_PEND_STATUS, test) next_phase = self._phases[self._phases.index(test_phase) + 1] - procs_needed = self._get_procs_needed(test, next_phase, threads_in_flight) + procs_needed = self._get_procs_needed( + test, next_phase, threads_in_flight + ) if procs_needed <= self._procs_avail: self._procs_avail -= procs_needed # Necessary to print this way when multiple threads printing - logger.info("Starting {} for test {} with {:d} procs".format(next_phase, test, procs_needed)) + logger.info( + "Starting {} for test {} with {:d} procs".format( + next_phase, test, procs_needed + ) + ) self._update_test_status(test, next_phase, TEST_PEND_STATUS) - new_thread = threading.Thread(target=self._consumer, - args=(test, next_phase, getattr(self, "_{}_phase".format(next_phase.lower())) )) - threads_in_flight[test] = (new_thread, procs_needed, next_phase) + new_thread = threading.Thread( + target=self._consumer, + args=( + test, + next_phase, + getattr( + self, "_{}_phase".format(next_phase.lower()) + ), + ), + ) + threads_in_flight[test] = ( + new_thread, + procs_needed, + next_phase, + ) new_thread.start() num_threads_launched_this_iteration += 1 logger.debug(" Current workload:") total_procs = 0 for the_test, the_data in CIME.six.iteritems(threads_in_flight): - logger.debug(" {}: {} -> {}".format(the_test, the_data[2], the_data[1])) + logger.debug( + " {}: {} -> {}".format( + the_test, the_data[2], the_data[1] + ) + ) total_procs += the_data[1] - logger.debug(" Total procs in use: {}".format(total_procs)) + logger.debug( + " Total procs in use: {}".format(total_procs) + ) else: if not threads_in_flight: - msg = "Phase '{}' for test '{}' required more processors, {:d}, than this machine can provide, {:d}".format(next_phase, test, procs_needed, self._procs_avail) + msg = "Phase '{}' for test '{}' required more processors, {:d}, than this machine can provide, {:d}".format( + next_phase, test, procs_needed, self._procs_avail + ) logger.warning(msg) - self._update_test_status(test, next_phase, TEST_PEND_STATUS) - self._update_test_status(test, next_phase, TEST_FAIL_STATUS) + self._update_test_status( + test, next_phase, TEST_PEND_STATUS + ) + self._update_test_status( + test, next_phase, TEST_FAIL_STATUS + ) self._log_output(test, msg) if next_phase == RUN_PHASE: - self._update_test_status_file(test, SUBMIT_PHASE, TEST_PASS_STATUS) - self._update_test_status_file(test, next_phase, TEST_FAIL_STATUS) + self._update_test_status_file( + test, SUBMIT_PHASE, TEST_PASS_STATUS + ) + self._update_test_status_file( + test, next_phase, TEST_FAIL_STATUS + ) else: - self._update_test_status_file(test, next_phase, 
TEST_FAIL_STATUS) + self._update_test_status_file( + test, next_phase, TEST_FAIL_STATUS + ) num_threads_launched_this_iteration += 1 if not work_to_do: @@ -1020,50 +1340,62 @@ def _producer(self): ########################################################################### def _setup_cs_files(self): - ########################################################################### + ########################################################################### try: template_path = get_template_path() - create_cs_status(test_root=self._test_root, - test_id=self._test_id) + create_cs_status(test_root=self._test_root, test_id=self._test_id) template_file = os.path.join(template_path, "cs.submit.template") template = open(template_file, "r").read() setup_cmd = "./case.setup" if self._no_setup else ":" build_cmd = "./case.build" if self._no_build else ":" test_cmd = "./case.submit" - template = template.replace("<SETUP_CMD>", setup_cmd).\ - replace("<BUILD_CMD>", build_cmd).\ - replace("<RUN_CMD>", test_cmd).\ - replace("<TEST_ID>", self._test_id) + template = ( + template.replace("<SETUP_CMD>", setup_cmd) + .replace("<BUILD_CMD>", build_cmd) + .replace("<RUN_CMD>", test_cmd) + .replace("<TEST_ID>", self._test_id) + ) if self._no_run: - cs_submit_file = os.path.join(self._test_root, "cs.submit.{}".format(self._test_id)) + cs_submit_file = os.path.join( + self._test_root, "cs.submit.{}".format(self._test_id) + ) with open(cs_submit_file, "w") as fd: fd.write(template) - os.chmod(cs_submit_file, - os.stat(cs_submit_file).st_mode | stat.S_IXUSR | stat.S_IXGRP) + os.chmod( + cs_submit_file, + os.stat(cs_submit_file).st_mode | stat.S_IXUSR | stat.S_IXGRP, + ) if self._cime_model == "cesm": template_file = os.path.join(template_path, "testreporter.template") template = open(template_file, "r").read() - template = template.replace("<PATH>", os.path.join(self._cime_root)) + template = template.replace( + "<PATH>", self._cime_root + ) testreporter_file = os.path.join(self._test_root, "testreporter") with open(testreporter_file, "w") as fd: fd.write(template) - os.chmod(testreporter_file, os.stat(testreporter_file).st_mode - | stat.S_IXUSR | stat.S_IXGRP) + os.chmod( + testreporter_file, + os.stat(testreporter_file).st_mode | stat.S_IXUSR | stat.S_IXGRP, + ) except Exception as e: logger.warning("FAILED to set up cs files: {}".format(str(e))) ########################################################################### - def run_tests(self, wait=False, - check_throughput=False, - check_memory=False, - ignore_namelists=False, - ignore_memleak=False): - ########################################################################### + def run_tests( + self, + wait=False, + check_throughput=False, + check_memory=False, + ignore_namelists=False, + ignore_memleak=False, + ): + ########################################################################### """ Main API for this class.
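# --- Editor's sketch (illustrative, not part of the diff) ---------------------
# A hedged example of driving the scheduler whose run_tests() docstring begins
# above; the test name, machine, and flag values here are hypothetical:
from CIME.test_scheduler import TestScheduler

scheduler = TestScheduler(
    ["SMS_D_Ln9.f19_g16.A"],       # hypothetical test name
    machine_name="ubuntu-latest",  # hypothetical machine
    no_batch=True,                 # run interactively rather than via a queue
)
ok = scheduler.run_tests(wait=True)  # propagates the wait_for_tests() result
# --- end sketch ----------------------------------------------------------------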
@@ -1072,9 +1404,9 @@ def run_tests(self, wait=False, start_time = time.time() # Tell user what will be run - logger.info( "RUNNING TESTS:") + logger.info("RUNNING TESTS:") for test in self._tests: - logger.info( " {}".format(test)) + logger.info(" {}".format(test)) # Setup cs files self._setup_cs_files() @@ -1089,8 +1421,13 @@ def run_tests(self, wait=False, if get_model() == "cesm": for test in self._tests: status = self._get_test_data(test)[1] - if status not in [TEST_PASS_STATUS, TEST_PEND_STATUS] and self._baseline_gen_name: - basegen_case_fullpath = os.path.join(self._baseline_root,self._baseline_gen_name, test) + if ( + status not in [TEST_PASS_STATUS, TEST_PEND_STATUS] + and self._baseline_gen_name + ): + basegen_case_fullpath = os.path.join( + self._baseline_root, self._baseline_gen_name, test + ) test_dir = self._get_test_dir(test) generate_teststatus(test_dir, basegen_case_fullpath) @@ -1101,19 +1438,25 @@ def run_tests(self, wait=False, expect_test_complete = not self._no_run and (self._no_batch or wait) logger.info("Waiting for tests to finish") - rv = wait_for_tests(glob.glob(os.path.join(self._test_root, "*{}/TestStatus".format(self._test_id))), - no_wait=not wait, - check_throughput=check_throughput, - check_memory=check_memory, - ignore_namelists=ignore_namelists, - ignore_memleak=ignore_memleak, - no_run=self._no_run, - expect_test_complete=expect_test_complete) + rv = wait_for_tests( + glob.glob( + os.path.join(self._test_root, "*{}/TestStatus".format(self._test_id)) + ), + no_wait=not wait, + check_throughput=check_throughput, + check_memory=check_memory, + ignore_namelists=ignore_namelists, + ignore_memleak=ignore_memleak, + no_run=self._no_run, + expect_test_complete=expect_test_complete, + ) if not no_need_to_wait and not wait: - logger.info("Due to presence of batch system, create_test will exit before tests are complete.\n" \ - "To force create_test to wait for full completion, use --wait") + logger.info( + "Due to presence of batch system, create_test will exit before tests are complete.\n" + "To force create_test to wait for full completion, use --wait" + ) - logger.info( "test-scheduler took {} seconds".format(time.time() - start_time)) + logger.info("test-scheduler took {} seconds".format(time.time() - start_time)) return rv diff --git a/CIME/test_status.py b/CIME/test_status.py index d68bc6c3d24..20d2cef93a9 100644 --- a/CIME/test_status.py +++ b/CIME/test_status.py @@ -40,79 +40,95 @@ ALL_PHASE_STATUSES = [TEST_PEND_STATUS, TEST_PASS_STATUS, TEST_FAIL_STATUS] # Special statuses that the overall test can be in -TEST_DIFF_STATUS = "DIFF" # Implies a failure in the BASELINE phase -NAMELIST_FAIL_STATUS = "NLFAIL" # Implies a failure in the NLCOMP phase +TEST_DIFF_STATUS = "DIFF" # Implies a failure in the BASELINE phase +NAMELIST_FAIL_STATUS = "NLFAIL" # Implies a failure in the NLCOMP phase # Special strings that can appear in comments, indicating particular types of failures -TEST_NO_BASELINES_COMMENT = "BFAIL" # Implies baseline directory is missing in the - # baseline comparison phase -TEST_RERUN_COMMENT = "RERUN" # Added to a PEND status to indicate that the test - # system has changed this phase to PEND in order to - # rerun it (e.g., to retry a failed test). +TEST_NO_BASELINES_COMMENT = "BFAIL" # Implies baseline directory is missing in the +# baseline comparison phase +TEST_RERUN_COMMENT = "RERUN" # Added to a PEND status to indicate that the test +# system has changed this phase to PEND in order to +# rerun it (e.g., to retry a failed test). 
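# --- Editor's sketch (illustrative, not part of the diff) ---------------------
# A TestStatus file, as parsed by _parse_test_status() further below, is
# line-oriented: "<STATUS> <test name> <PHASE> [comments...]". Hypothetical
# contents showing the RERUN comment attached to a PEND status:
#
#   PASS ERS.f19_g16.A.docker_gnu CREATE_NEWCASE
#   PASS ERS.f19_g16.A.docker_gnu MODEL_BUILD
#   PEND ERS.f19_g16.A.docker_gnu RUN RERUN
#
line = "PEND ERS.f19_g16.A.docker_gnu RUN RERUN"
tokens = line.split()
status, test_name, phase = tokens[:3]  # same slicing _parse_test_status uses
comments = " ".join(tokens[3:])        # -> "RERUN"
# --- end sketch ----------------------------------------------------------------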
# The expected and unexpected failure comments aren't used directly in this module, but # are included here for symmetry, so other modules can access them from here. TEST_EXPECTED_FAILURE_COMMENT = expected_fails.EXPECTED_FAILURE_COMMENT TEST_UNEXPECTED_FAILURE_COMMENT_START = expected_fails.UNEXPECTED_FAILURE_COMMENT_START # The valid phases -CREATE_NEWCASE_PHASE = "CREATE_NEWCASE" -XML_PHASE = "XML" -SETUP_PHASE = "SETUP" -NAMELIST_PHASE = "NLCOMP" +CREATE_NEWCASE_PHASE = "CREATE_NEWCASE" +XML_PHASE = "XML" +SETUP_PHASE = "SETUP" +NAMELIST_PHASE = "NLCOMP" SHAREDLIB_BUILD_PHASE = "SHAREDLIB_BUILD" -MODEL_BUILD_PHASE = "MODEL_BUILD" -SUBMIT_PHASE = "SUBMIT" -RUN_PHASE = "RUN" -THROUGHPUT_PHASE = "TPUTCOMP" -MEMCOMP_PHASE = "MEMCOMP" -MEMLEAK_PHASE = "MEMLEAK" -STARCHIVE_PHASE = "SHORT_TERM_ARCHIVER" -COMPARE_PHASE = "COMPARE" # This is one special, real phase will be COMPARE_$WHAT, this is for internal test comparisons, there could be multiple variations of this phase in one test -BASELINE_PHASE = "BASELINE" -GENERATE_PHASE = "GENERATE" - -ALL_PHASES = [CREATE_NEWCASE_PHASE, - XML_PHASE, - SETUP_PHASE, - NAMELIST_PHASE, - SHAREDLIB_BUILD_PHASE, - MODEL_BUILD_PHASE, - SUBMIT_PHASE, - RUN_PHASE, - COMPARE_PHASE, - BASELINE_PHASE, - THROUGHPUT_PHASE, - MEMCOMP_PHASE, - MEMLEAK_PHASE, - STARCHIVE_PHASE, - GENERATE_PHASE] +MODEL_BUILD_PHASE = "MODEL_BUILD" +SUBMIT_PHASE = "SUBMIT" +RUN_PHASE = "RUN" +THROUGHPUT_PHASE = "TPUTCOMP" +MEMCOMP_PHASE = "MEMCOMP" +MEMLEAK_PHASE = "MEMLEAK" +STARCHIVE_PHASE = "SHORT_TERM_ARCHIVER" +COMPARE_PHASE = "COMPARE" # This one is special: the real phase will be COMPARE_$WHAT; it is used for internal test comparisons, and there may be multiple variations of this phase within one test +BASELINE_PHASE = "BASELINE" +GENERATE_PHASE = "GENERATE" + +ALL_PHASES = [ + CREATE_NEWCASE_PHASE, + XML_PHASE, + SETUP_PHASE, + NAMELIST_PHASE, + SHAREDLIB_BUILD_PHASE, + MODEL_BUILD_PHASE, + SUBMIT_PHASE, + RUN_PHASE, + COMPARE_PHASE, + BASELINE_PHASE, + THROUGHPUT_PHASE, + MEMCOMP_PHASE, + MEMLEAK_PHASE, + STARCHIVE_PHASE, + GENERATE_PHASE, +] # These are mandatory phases that a test must go through -CORE_PHASES = [CREATE_NEWCASE_PHASE, - XML_PHASE, - SETUP_PHASE, - SHAREDLIB_BUILD_PHASE, - MODEL_BUILD_PHASE, - SUBMIT_PHASE, - RUN_PHASE] +CORE_PHASES = [ + CREATE_NEWCASE_PHASE, + XML_PHASE, + SETUP_PHASE, + SHAREDLIB_BUILD_PHASE, + MODEL_BUILD_PHASE, + SUBMIT_PHASE, + RUN_PHASE, +] + def _test_helper1(file_contents): ts = TestStatus(test_dir="/", test_name="ERS.foo.A") - ts._parse_test_status(file_contents) # pylint: disable=protected-access - return ts._phase_statuses # pylint: disable=protected-access - -def _test_helper2(file_contents, wait_for_run=False, check_throughput=False, check_memory=False, ignore_namelists=False, no_run=False, no_perm=False): + ts._parse_test_status(file_contents) # pylint: disable=protected-access + return ts._phase_statuses # pylint: disable=protected-access + + +def _test_helper2( + file_contents, + wait_for_run=False, + check_throughput=False, + check_memory=False, + ignore_namelists=False, + no_run=False, + no_perm=False, +): lines = file_contents.splitlines() rv = None perms = [lines] if no_perm else itertools.permutations(lines) for perm in perms: ts = TestStatus(test_dir="/", test_name="ERS.foo.A") - ts._parse_test_status("\n".join(perm)) # pylint: disable=protected-access - the_status = ts.get_overall_test_status(wait_for_run=wait_for_run, - check_throughput=check_throughput, - check_memory=check_memory, - ignore_namelists=ignore_namelists, -
no_run=no_run) + ts._parse_test_status("\n".join(perm)) # pylint: disable=protected-access + the_status = ts.get_overall_test_status( + wait_for_run=wait_for_run, + check_throughput=check_throughput, + check_memory=check_memory, + ignore_namelists=ignore_namelists, + no_run=no_run, + ) if rv is not None and the_status != rv: return "{} != {}".format(rv, the_status) else: @@ -120,8 +136,8 @@ def _test_helper2(file_contents, wait_for_run=False, check_throughput=False, che return rv -class TestStatus(object): +class TestStatus(object): def __init__(self, test_dir=None, test_name=None, no_io=False): """ Create a TestStatus object @@ -133,7 +149,7 @@ def __init__(self, test_dir=None, test_name=None, no_io=False): """ test_dir = os.getcwd() if test_dir is None else test_dir self._filename = os.path.join(test_dir, TEST_STATUS_FILENAME) - self._phase_statuses = OrderedDict() # {name -> (status, comments)} + self._phase_statuses = OrderedDict() # {name -> (status, comments)} self._test_name = test_name self._ok_to_modify = False self._no_io = no_io @@ -143,7 +159,10 @@ def __init__(self, test_dir=None, test_name=None, no_io=False): if not os.access(self._filename, os.W_OK): self._no_io = True else: - expect(test_name is not None, "Must provide test_name if TestStatus file doesn't exist") + expect( + test_name is not None, + "Must provide test_name if TestStatus file doesn't exist", + ) def __enter__(self): self._ok_to_modify = True @@ -158,7 +177,9 @@ def __iter__(self): yield phase, data[0] def __eq__(self, rhs): - return self._phase_statuses == rhs._phase_statuses # pylint: disable=protected-access + return ( + self._phase_statuses == rhs._phase_statuses + ) # pylint: disable=protected-access def __ne__(self, rhs): return not self.__eq__(rhs) @@ -198,25 +219,38 @@ def set_status(self, phase, status, comments=""): >>> ts._phase_statuses OrderedDict([('CREATE_NEWCASE', ('FAIL', ''))]) """ - expect(self._ok_to_modify, "TestStatus not in a modifiable state, use 'with' syntax") - expect(phase in ALL_PHASES or phase.startswith(COMPARE_PHASE), - "Invalid phase '{}'".format(phase)) + expect( + self._ok_to_modify, + "TestStatus not in a modifiable state, use 'with' syntax", + ) + expect( + phase in ALL_PHASES or phase.startswith(COMPARE_PHASE), + "Invalid phase '{}'".format(phase), + ) expect(status in ALL_PHASE_STATUSES, "Invalid status '{}'".format(status)) if phase in CORE_PHASES and phase != CORE_PHASES[0]: - previous_core_phase = CORE_PHASES[CORE_PHASES.index(phase)-1] - #TODO: enable check below - #expect(previous_core_phase in self._phase_statuses, "Core phase '{}' was skipped".format(previous_core_phase)) + previous_core_phase = CORE_PHASES[CORE_PHASES.index(phase) - 1] + # TODO: enable check below + # expect(previous_core_phase in self._phase_statuses, "Core phase '{}' was skipped".format(previous_core_phase)) if previous_core_phase in self._phase_statuses: - expect(self._phase_statuses[previous_core_phase][0] == TEST_PASS_STATUS, - "Cannot move past core phase '{}', it didn't pass: ".format(previous_core_phase)) - - reran_phase = (phase in self._phase_statuses and self._phase_statuses[phase][0] != TEST_PEND_STATUS and phase in CORE_PHASES) + expect( + self._phase_statuses[previous_core_phase][0] == TEST_PASS_STATUS, + "Cannot move past core phase '{}', it didn't pass: ".format( + previous_core_phase + ), + ) + + reran_phase = ( + phase in self._phase_statuses + and self._phase_statuses[phase][0] != TEST_PEND_STATUS + and phase in CORE_PHASES + ) if reran_phase: # All subsequent phases are 
invalidated phase_idx = ALL_PHASES.index(phase) - for subsequent_phase in ALL_PHASES[phase_idx+1:]: + for subsequent_phase in ALL_PHASES[phase_idx + 1 :]: if subsequent_phase in self._phase_statuses: del self._phase_statuses[subsequent_phase] if subsequent_phase.startswith(COMPARE_PHASE): @@ -224,10 +258,14 @@ def set_status(self, phase, status, comments=""): if stored_phase.startswith(COMPARE_PHASE): del self._phase_statuses[stored_phase] - self._phase_statuses[phase] = (status, comments) # Can overwrite old phase info + self._phase_statuses[phase] = (status, comments) # Can overwrite old phase info - if status == TEST_PASS_STATUS and phase in CORE_PHASES and phase != CORE_PHASES[-1]: - next_core_phase = CORE_PHASES[CORE_PHASES.index(phase)+1] + if ( + status == TEST_PASS_STATUS + and phase in CORE_PHASES + and phase != CORE_PHASES[-1] + ): + next_core_phase = CORE_PHASES[CORE_PHASES.index(phase) + 1] self._phase_statuses[next_core_phase] = (TEST_PEND_STATUS, "") def get_status(self, phase): @@ -236,7 +274,9 @@ def get_status(self, phase): def get_comment(self, phase): return self._phase_statuses[phase][1] if phase in self._phase_statuses else None - def phase_statuses_dump(self, prefix='', skip_passes=False, skip_phase_list=None, xfails=None): + def phase_statuses_dump( + self, prefix="", skip_passes=False, skip_phase_list=None, xfails=None + ): """ Args: prefix: string printed at the start of each line @@ -308,36 +348,65 @@ def _parse_test_status(self, file_contents): line = line.strip() tokens = line.split() if line == "": - pass # skip blank lines + pass # skip blank lines elif len(tokens) >= 3: status, curr_test_name, phase = tokens[:3] - if (self._test_name is None): + if self._test_name is None: self._test_name = curr_test_name else: - expect(self._test_name == curr_test_name, - "inconsistent test name in parse_test_status: '{}' != '{}'".format(self._test_name, curr_test_name)) - - expect(status in ALL_PHASE_STATUSES, - "Unexpected status '{}' in parse_test_status for test '{}'".format(status, self._test_name)) - expect(phase in ALL_PHASES or phase.startswith(COMPARE_PHASE), - "phase '{}' not expected in parse_test_status for test '{}'".format(phase, self._test_name)) - expect(phase not in self._phase_statuses, - "Should not have seen multiple instances of phase '{}' for test '{}'".format(phase, self._test_name)) + expect( + self._test_name == curr_test_name, + "inconsistent test name in parse_test_status: '{}' != '{}'".format( + self._test_name, curr_test_name + ), + ) + + expect( + status in ALL_PHASE_STATUSES, + "Unexpected status '{}' in parse_test_status for test '{}'".format( + status, self._test_name + ), + ) + expect( + phase in ALL_PHASES or phase.startswith(COMPARE_PHASE), + "phase '{}' not expected in parse_test_status for test '{}'".format( + phase, self._test_name + ), + ) + expect( + phase not in self._phase_statuses, + "Should not have seen multiple instances of phase '{}' for test '{}'".format( + phase, self._test_name + ), + ) self._phase_statuses[phase] = (status, " ".join(tokens[3:])) else: - logging.warning("In TestStatus file for test '{}', line '{}' not in expected format".format(self._test_name, line)) + logging.warning( + "In TestStatus file for test '{}', line '{}' not in expected format".format( + self._test_name, line + ) + ) def _parse_test_status_file(self): with open(self._filename, "r") as fd: self._parse_test_status(fd.read()) - def _get_overall_status_based_on_phases(self, phases, wait_for_run=False, check_throughput=False, check_memory=False, 
ignore_namelists=False, ignore_memleak=False, no_run=False): + def _get_overall_status_based_on_phases( + self, + phases, + wait_for_run=False, + check_throughput=False, + check_memory=False, + ignore_namelists=False, + ignore_memleak=False, + no_run=False, + ): rv = TEST_PASS_STATUS run_phase_found = False phase_responsible_for_status = None - for phase in phases: # ensure correct order of processing phases + for phase in phases: # ensure correct order of processing phases if phase in self._phase_statuses: data = self._phase_statuses[phase] else: @@ -345,7 +414,11 @@ def _get_overall_status_based_on_phases(self, phases, wait_for_run=False, check_ status = data[0] - if phase in CORE_PHASES and rv in [TEST_PASS_STATUS, NAMELIST_FAIL_STATUS] and status != TEST_PEND_STATUS: + if ( + phase in CORE_PHASES + and rv in [TEST_PASS_STATUS, NAMELIST_FAIL_STATUS] + and status != TEST_PEND_STATUS + ): phase_responsible_for_status = phase if phase == RUN_PHASE: @@ -354,21 +427,26 @@ def _get_overall_status_based_on_phases(self, phases, wait_for_run=False, check_ if phase in [SUBMIT_PHASE, RUN_PHASE] and no_run: break - if status == TEST_PEND_STATUS and rv in [TEST_PASS_STATUS, NAMELIST_FAIL_STATUS]: + if status == TEST_PEND_STATUS and rv in [ + TEST_PASS_STATUS, + NAMELIST_FAIL_STATUS, + ]: if not no_run: rv = TEST_PEND_STATUS phase_responsible_for_status = phase break - elif (status == TEST_FAIL_STATUS): - if ( (not check_throughput and phase == THROUGHPUT_PHASE) or - (not check_memory and phase == MEMCOMP_PHASE) or - (ignore_namelists and phase == NAMELIST_PHASE) or - (ignore_memleak and phase == MEMLEAK_PHASE) ): + elif status == TEST_FAIL_STATUS: + if ( + (not check_throughput and phase == THROUGHPUT_PHASE) + or (not check_memory and phase == MEMCOMP_PHASE) + or (ignore_namelists and phase == NAMELIST_PHASE) + or (ignore_memleak and phase == MEMLEAK_PHASE) + ): continue if phase == NAMELIST_PHASE: - if (rv == TEST_PASS_STATUS): + if rv == TEST_PASS_STATUS: rv = NAMELIST_FAIL_STATUS elif phase == BASELINE_PHASE: @@ -376,7 +454,7 @@ def _get_overall_status_based_on_phases(self, phases, wait_for_run=False, check_ phase_responsible_for_status = phase rv = TEST_DIFF_STATUS else: - pass # a DIFF does not trump a FAIL + pass # a DIFF does not trump a FAIL elif phase in CORE_PHASES: phase_responsible_for_status = phase @@ -388,13 +466,25 @@ def _get_overall_status_based_on_phases(self, phases, wait_for_run=False, check_ # The test did not fail but the RUN phase was not found, so if the user requested # that we wait for the RUN phase, then the test must still be considered pending. - if rv in [TEST_PASS_STATUS, NAMELIST_FAIL_STATUS] and not run_phase_found and wait_for_run: + if ( + rv in [TEST_PASS_STATUS, NAMELIST_FAIL_STATUS] + and not run_phase_found + and wait_for_run + ): phase_responsible_for_status = RUN_PHASE rv = TEST_PEND_STATUS return rv, phase_responsible_for_status - def get_overall_test_status(self, wait_for_run=False, check_throughput=False, check_memory=False, ignore_namelists=False, ignore_memleak=False, no_run=False): + def get_overall_test_status( + self, + wait_for_run=False, + check_throughput=False, + check_memory=False, + ignore_namelists=False, + ignore_memleak=False, + no_run=False, + ): r""" Given the current phases and statuses, produce a single results for this test. 
Preference is given to PEND since we don't want to stop waiting for a test @@ -476,24 +566,29 @@ def get_overall_test_status(self, wait_for_run=False, check_throughput=False, ch ('PEND', 'SHAREDLIB_BUILD') """ # Core phases take priority - core_rv, phase = self._get_overall_status_based_on_phases(CORE_PHASES, - wait_for_run=wait_for_run, - check_throughput=check_throughput, - check_memory=check_memory, - ignore_namelists=ignore_namelists, - ignore_memleak=ignore_memleak, - no_run=no_run) + core_rv, phase = self._get_overall_status_based_on_phases( + CORE_PHASES, + wait_for_run=wait_for_run, + check_throughput=check_throughput, + check_memory=check_memory, + ignore_namelists=ignore_namelists, + ignore_memleak=ignore_memleak, + no_run=no_run, + ) if core_rv != TEST_PASS_STATUS: return core_rv, phase else: phase_order = list(CORE_PHASES) - phase_order.extend([item for item in self._phase_statuses if item not in CORE_PHASES]) - - return self._get_overall_status_based_on_phases(phase_order, - wait_for_run=wait_for_run, - check_throughput=check_throughput, - check_memory=check_memory, - ignore_namelists=ignore_namelists, - ignore_memleak=ignore_memleak, - no_run=no_run) - + phase_order.extend( + [item for item in self._phase_statuses if item not in CORE_PHASES] + ) + + return self._get_overall_status_based_on_phases( + phase_order, + wait_for_run=wait_for_run, + check_throughput=check_throughput, + check_memory=check_memory, + ignore_namelists=ignore_namelists, + ignore_memleak=ignore_memleak, + no_run=no_run, + ) diff --git a/CIME/test_utils.py b/CIME/test_utils.py index 1f576589919..ac7e98474e4 100644 --- a/CIME/test_utils.py +++ b/CIME/test_utils.py @@ -11,62 +11,88 @@ logger = logging.getLogger(__name__) -def get_tests_from_xml(xml_machine=None,xml_category=None,xml_compiler=None, xml_testlist=None, - machine=None, compiler=None, driver=None): + +def get_tests_from_xml( + xml_machine=None, + xml_category=None, + xml_compiler=None, + xml_testlist=None, + machine=None, + compiler=None, + driver=None, +): """ Parse testlists for a list of tests """ listoftests = [] testlistfiles = [] - if(machine is not None): - thismach=machine - if(compiler is not None): + if machine is not None: + thismach = machine + if compiler is not None: thiscompiler = compiler - if(xml_testlist is not None): - expect(os.path.isfile(xml_testlist), "Testlist not found or not readable "+xml_testlist) + if xml_testlist is not None: + expect( + os.path.isfile(xml_testlist), + "Testlist not found or not readable " + xml_testlist, + ) testlistfiles.append(xml_testlist) else: files = Files() comps = files.get_components("TESTS_SPEC_FILE") for comp in comps: - test_spec_file = files.get_value("TESTS_SPEC_FILE", {"component":comp}) - if(os.path.isfile(test_spec_file)): + test_spec_file = files.get_value("TESTS_SPEC_FILE", {"component": comp}) + if os.path.isfile(test_spec_file): testlistfiles.append(test_spec_file) for testlistfile in testlistfiles: thistestlistfile = Testlist(testlistfile) - logger.debug("Testlist file is "+testlistfile) - logger.debug("xml_machine {} xml_category {} xml_compiler {}".format(xml_machine, xml_category, xml_compiler)) - newtests = thistestlistfile.get_tests(xml_machine, xml_category, xml_compiler) + logger.debug("Testlist file is " + testlistfile) + logger.debug( + "xml_machine {} xml_category {} xml_compiler {}".format( + xml_machine, xml_category, xml_compiler + ) + ) + newtests = thistestlistfile.get_tests(xml_machine, xml_category, xml_compiler) for test in newtests: - if(machine is None): + 
if machine is None: thismach = test["machine"] - if(compiler is None): + if compiler is None: thiscompiler = test["compiler"] - test["name"] = CIME.utils.get_full_test_name(test["testname"], grid=test["grid"], compset=test["compset"], - machine=thismach, compiler=thiscompiler, - testmods_string=None if "testmods" not in test else test["testmods"]) + test["name"] = CIME.utils.get_full_test_name( + test["testname"], + grid=test["grid"], + compset=test["compset"], + machine=thismach, + compiler=thiscompiler, + testmods_string=None if "testmods" not in test else test["testmods"], + ) if driver: # override default or specified driver founddriver = False - for specdriver in ("Vnuopc","Vmct","Vmoab"): + for specdriver in ("Vnuopc", "Vmct", "Vmoab"): if specdriver in test["name"]: - test["name"] = test["name"].replace(specdriver,"V{}".format(driver)) + test["name"] = test["name"].replace( + specdriver, "V{}".format(driver) + ) founddriver = True if not founddriver: name = test["name"] - index = name.find('.') + index = name.find(".") test["name"] = name[:index] + "_V{}".format(driver) + name[index:] - - logger.debug("Adding test {} with compiler {}".format(test["name"], test["compiler"])) + logger.debug( + "Adding test {} with compiler {}".format(test["name"], test["compiler"]) + ) listoftests += newtests logger.debug("Found {:d} tests".format(len(listoftests))) return listoftests -def test_to_string(test, category_field_width=0, test_field_width=0, show_options=False): + +def test_to_string( + test, category_field_width=0, test_field_width=0, show_options=False +): """Given a test dictionary, return a string representation suitable for printing Args: @@ -98,12 +124,17 @@ def test_to_string(test, category_field_width=0, test_field_width=0, show_option 'prealpha : SMS.f19_g16.A.cheyenne_intel # my remarks # memleak_tolerance: 0.2 # wallclock: 0:20' """ - mystr = "%-*s: %-*s"%(category_field_width, test['category'], test_field_width, test['name']) - if 'options' in test: - myopts = test['options'].copy() - comment = myopts.pop('comment', None) + mystr = "%-*s: %-*s" % ( + category_field_width, + test["category"], + test_field_width, + test["name"], + ) + if "options" in test: + myopts = test["options"].copy() + comment = myopts.pop("comment", None) if comment: - comment = comment.replace('\n', ' ') + comment = comment.replace("\n", " ") mystr += " # {}".format(comment) if show_options: for one_opt in sorted(myopts): @@ -111,10 +142,25 @@ def test_to_string(test, category_field_width=0, test_field_width=0, show_option return mystr -def get_test_status_files(test_root, compiler, test_id=None): - test_id_glob = "*{}*".format(compiler) if test_id is None else "*{}*".format(test_id) - test_status_files = glob.glob("{}/{}/{}".format(test_root, test_id_glob, TEST_STATUS_FILENAME)) - test_status_files = [item for item in test_status_files if not os.path.dirname(item).endswith('ref1') and not os.path.dirname(item).endswith('ref2')] - expect(test_status_files, "No matching test cases found in for {}/{}/{}".format(test_root, test_id_glob, TEST_STATUS_FILENAME)) +def get_test_status_files(test_root, compiler, test_id=None): + test_id_glob = ( + "*{}*".format(compiler) if test_id is None else "*{}*".format(test_id) + ) + test_status_files = glob.glob( + "{}/{}/{}".format(test_root, test_id_glob, TEST_STATUS_FILENAME) + ) + test_status_files = [ + item + for item in test_status_files + if not os.path.dirname(item).endswith("ref1") + and not os.path.dirname(item).endswith("ref2") + ] + + expect( + 
test_status_files, + "No matching test cases found in for {}/{}/{}".format( + test_root, test_id_glob, TEST_STATUS_FILENAME + ), + ) return test_status_files diff --git a/CIME/tests/SystemTests/test_system_tests_compare_two.py b/CIME/tests/SystemTests/test_system_tests_compare_two.py index aee0035443f..d455fd8deb4 100755 --- a/CIME/tests/SystemTests/test_system_tests_compare_two.py +++ b/CIME/tests/SystemTests/test_system_tests_compare_two.py @@ -35,7 +35,7 @@ # somecall = Call(method = 'foo', arguments = {'bar': 1, 'baz': 2}) # Or simply: # somecall = Call('foo', {'bar': 1, 'baz': 2}) -Call = namedtuple('Call', ['method', 'arguments']) +Call = namedtuple("Call", ["method", "arguments"]) # ======================================================================== # Names of methods for which we want to record calls @@ -68,17 +68,20 @@ # It logs what stubbed-out methods have been called in its log attribute; this # is a list of Call objects (see above for their definition). + class SystemTestsCompareTwoFake(SystemTestsCompareTwo): - def __init__(self, - case1, - run_one_suffix = 'base', - run_two_suffix = 'test', - separate_builds = False, - multisubmit = False, - case2setup_raises_exception = False, - run_one_should_pass = True, - run_two_should_pass = True, - compare_should_pass = True): + def __init__( + self, + case1, + run_one_suffix="base", + run_two_suffix="test", + separate_builds=False, + multisubmit=False, + case2setup_raises_exception=False, + run_one_should_pass=True, + run_two_should_pass=True, + compare_should_pass=True, + ): """ Initialize a SystemTestsCompareTwoFake object @@ -110,14 +113,15 @@ def __init__(self, # this requirement later: To relax this assumption, remove the following # assertion and add run_one_suffix as an argument to # SystemTestsCompareTwo.__init__ - assert(run_one_suffix == 'base') + assert run_one_suffix == "base" SystemTestsCompareTwo.__init__( self, case1, - separate_builds = separate_builds, - run_two_suffix = run_two_suffix, - multisubmit = multisubmit) + separate_builds=separate_builds, + run_two_suffix=run_two_suffix, + multisubmit=multisubmit, + ) # Need to tell test status that all phases prior to the run phase have # passed, since this is checked in the run call (at least for the build @@ -130,9 +134,9 @@ def __init__(self, self.run_pass_caseroot = [] if run_one_should_pass: - self.run_pass_caseroot.append(self._case1.get_value('CASEROOT')) + self.run_pass_caseroot.append(self._case1.get_value("CASEROOT")) if run_two_should_pass: - self.run_pass_caseroot.append(self._case2.get_value('CASEROOT')) + self.run_pass_caseroot.append(self._case2.get_value("CASEROOT")) self.compare_should_pass = compare_should_pass @@ -167,9 +171,8 @@ def run_indv(self, suffix="base", st_archive=False, submit_resubmits=None): proper suffix is used for the proper case, but this extra check can be removed if it's a maintenance problem.) """ - caseroot = self._case.get_value('CASEROOT') - self.log.append(Call(METHOD_run_indv, - {'suffix': suffix, 'CASEROOT': caseroot})) + caseroot = self._case.get_value("CASEROOT") + self.log.append(Call(METHOD_run_indv, {"suffix": suffix, "CASEROOT": caseroot})) # Determine whether we should raise an exception # @@ -178,7 +181,7 @@ def run_indv(self, suffix="base", st_archive=False, submit_resubmits=None): # for this call to run_indv (e.g., to catch if we forgot to activate # case2 before the second call to run_indv). 
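This `run_indv` stub is the heart of the fake: a spy that records every call in `self.log`, plus a failure injector via the caseroot check completed just below. The same test-double pattern in a standalone sketch (hypothetical names; not CIME code):

```python
from collections import namedtuple

Call = namedtuple("Call", ["method", "arguments"])


class FakeRunner:
    """Spy/failure-injector sketch: records calls, fails on demand."""

    def __init__(self, passing_caseroots):
        self.log = []
        self._passing = set(passing_caseroots)

    def run_indv(self, caseroot, suffix="base"):
        # Record the call first so assertions can inspect it even on failure.
        self.log.append(Call("run_indv", {"suffix": suffix, "CASEROOT": caseroot}))
        if caseroot not in self._passing:
            raise RuntimeError("caseroot not configured to pass")


runner = FakeRunner(passing_caseroots=["/scratch/case1"])
runner.run_indv("/scratch/case1")  # recorded and passes
assert runner.log == [
    Call("run_indv", {"suffix": "base", "CASEROOT": "/scratch/case1"})
]
```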
if caseroot not in self.run_pass_caseroot: - raise RuntimeError('caseroot not in run_pass_caseroot') + raise RuntimeError("caseroot not in run_pass_caseroot") def _do_compare_test(self, suffix1, suffix2, ignore_fieldlist_diffs=False): """ @@ -218,13 +221,13 @@ def _link_to_case2_output(self): # ------------------------------------------------------------------------ def _common_setup(self): - self._case.set_value('var_set_in_common_setup', 'common_val') + self._case.set_value("var_set_in_common_setup", "common_val") def _case_one_setup(self): - self._case.set_value('var_set_in_setup', 'case1val') + self._case.set_value("var_set_in_setup", "case1val") def _case_two_setup(self): - self._case.set_value('var_set_in_setup', 'case2val') + self._case.set_value("var_set_in_setup", "case2val") if self._case2setup_raises_exception: raise RuntimeError @@ -240,12 +243,13 @@ def _case_two_custom_prerun_action(self): def _case_two_custom_postrun_action(self): self.log.append(Call(METHOD_case_two_custom_postrun_action, {})) + # ======================================================================== # Test class itself # ======================================================================== -class TestSystemTestsCompareTwo(unittest.TestCase): +class TestSystemTestsCompareTwo(unittest.TestCase): def setUp(self): self.original_wd = os.getcwd() # create a sandbox in which case directories can be created @@ -258,12 +262,12 @@ def tearDown(self): shutil.rmtree(self.tempdir, ignore_errors=True) - def get_caseroots(self, casename='mytest'): + def get_caseroots(self, casename="mytest"): """ Returns a tuple (case1root, case2root) """ case1root = os.path.join(self.tempdir, casename) - case2root = os.path.join(case1root, 'case2', casename) + case2root = os.path.join(case1root, "case2", casename) return case1root, case2root def get_compare_phase_name(self, mytest): @@ -272,18 +276,18 @@ def get_compare_phase_name(self, mytest): """ run_one_suffix = mytest._run_one_suffix run_two_suffix = mytest._run_two_suffix - compare_phase_name = "{}_{}_{}".format(test_status.COMPARE_PHASE, - run_one_suffix, - run_two_suffix) + compare_phase_name = "{}_{}_{}".format( + test_status.COMPARE_PHASE, run_one_suffix, run_two_suffix + ) return compare_phase_name def test_setup(self): # Ensure that test setup properly sets up case 1 and case 2 # Setup - case1root = os.path.join(self.tempdir, 'case1') + case1root = os.path.join(self.tempdir, "case1") case1 = CaseFake(case1root) - case1.set_value('var_preset', 'preset_value') + case1.set_value("var_preset", "preset_value") # Exercise mytest = SystemTestsCompareTwoFake(case1) @@ -291,21 +295,20 @@ def test_setup(self): # Verify # Make sure that pre-existing values in case1 are copied to case2 (via # clone) - self.assertEqual('preset_value', - mytest._case2.get_value('var_preset')) + self.assertEqual("preset_value", mytest._case2.get_value("var_preset")) # Make sure that _common_setup is called for both - self.assertEqual('common_val', - mytest._case1.get_value('var_set_in_common_setup')) - self.assertEqual('common_val', - mytest._case2.get_value('var_set_in_common_setup')) + self.assertEqual( + "common_val", mytest._case1.get_value("var_set_in_common_setup") + ) + self.assertEqual( + "common_val", mytest._case2.get_value("var_set_in_common_setup") + ) # Make sure that _case_one_setup and _case_two_setup are called # appropriately - self.assertEqual('case1val', - mytest._case1.get_value('var_set_in_setup')) - self.assertEqual('case2val', - mytest._case2.get_value('var_set_in_setup')) + 
self.assertEqual("case1val", mytest._case1.get_value("var_set_in_setup")) + self.assertEqual("case2val", mytest._case2.get_value("var_set_in_setup")) def test_setup_separate_builds_sharedlibroot(self): # If we're using separate_builds, the two cases should still use @@ -317,60 +320,59 @@ def test_setup_separate_builds_sharedlibroot(self): case1.set_value("SHAREDLIBROOT", os.path.join(case1root, "sharedlibroot")) # Exercise - mytest = SystemTestsCompareTwoFake(case1, - separate_builds = True) + mytest = SystemTestsCompareTwoFake(case1, separate_builds=True) # Verify - self.assertEqual(case1.get_value("SHAREDLIBROOT"), - mytest._case2.get_value("SHAREDLIBROOT")) + self.assertEqual( + case1.get_value("SHAREDLIBROOT"), mytest._case2.get_value("SHAREDLIBROOT") + ) def test_setup_case2_exists(self): # If case2 already exists, then setup code should not be called # Setup - case1root = os.path.join(self.tempdir, 'case1') + case1root = os.path.join(self.tempdir, "case1") case1 = CaseFake(case1root) - os.makedirs(os.path.join(case1root, 'case2','case1')) + os.makedirs(os.path.join(case1root, "case2", "case1")) # Exercise - mytest = SystemTestsCompareTwoFake(case1, - run_two_suffix = 'test') + mytest = SystemTestsCompareTwoFake(case1, run_two_suffix="test") # Verify: # Make sure that case2 object is set (i.e., that it doesn't remain None) - self.assertEqual('case1', mytest._case2.get_value('CASE')) + self.assertEqual("case1", mytest._case2.get_value("CASE")) # Variables set in various setup methods should not be set # (In the real world - i.e., outside of this unit testing fakery - these # values would be set when the Case objects are created.) - self.assertIsNone(mytest._case1.get_value('var_set_in_common_setup')) - self.assertIsNone(mytest._case2.get_value('var_set_in_common_setup')) - self.assertIsNone(mytest._case1.get_value('var_set_in_setup')) - self.assertIsNone(mytest._case2.get_value('var_set_in_setup')) + self.assertIsNone(mytest._case1.get_value("var_set_in_common_setup")) + self.assertIsNone(mytest._case2.get_value("var_set_in_common_setup")) + self.assertIsNone(mytest._case1.get_value("var_set_in_setup")) + self.assertIsNone(mytest._case2.get_value("var_set_in_setup")) def test_setup_error(self): # If there is an error in setup, an exception should be raised and the # case2 directory should be removed # Setup - case1root = os.path.join(self.tempdir, 'case1') + case1root = os.path.join(self.tempdir, "case1") case1 = CaseFake(case1root) # Exercise with self.assertRaises(Exception): - SystemTestsCompareTwoFake(case1, - run_two_suffix = 'test', - case2setup_raises_exception = True) + SystemTestsCompareTwoFake( + case1, run_two_suffix="test", case2setup_raises_exception=True + ) # Verify - self.assertFalse(os.path.exists(os.path.join(case1root, 'case1.test'))) + self.assertFalse(os.path.exists(os.path.join(case1root, "case1.test"))) def test_run_phase_passes(self): # Make sure the run phase behaves properly when all runs succeed. 
# Setup - case1root = os.path.join(self.tempdir, 'case1') + case1root = os.path.join(self.tempdir, "case1") case1 = CaseFake(case1root) mytest = SystemTestsCompareTwoFake(case1) @@ -378,8 +380,10 @@ def test_run_phase_passes(self): mytest.run() # Verify - self.assertEqual(test_status.TEST_PASS_STATUS, - mytest._test_status.get_status(test_status.RUN_PHASE)) + self.assertEqual( + test_status.TEST_PASS_STATUS, + mytest._test_status.get_status(test_status.RUN_PHASE), + ) def test_run_phase_internal_calls(self): # Make sure that the correct calls are made to methods stubbed out by @@ -391,13 +395,13 @@ def test_run_phase_internal_calls(self): # sure that those methods actually got called correctly. # Setup - run_one_suffix = 'base' - run_two_suffix = 'run2' + run_one_suffix = "base" + run_two_suffix = "run2" case1root, case2root = self.get_caseroots() case1 = CaseFake(case1root) - mytest = SystemTestsCompareTwoFake(case1, - run_one_suffix = run_one_suffix, - run_two_suffix = run_two_suffix) + mytest = SystemTestsCompareTwoFake( + case1, run_one_suffix=run_one_suffix, run_two_suffix=run_two_suffix + ) # Exercise mytest.run() @@ -405,14 +409,12 @@ def test_run_phase_internal_calls(self): # Verify expected_calls = [ Call(METHOD_case_one_custom_prerun_action, {}), - Call(METHOD_run_indv, - {'suffix': run_one_suffix, 'CASEROOT': case1root}), + Call(METHOD_run_indv, {"suffix": run_one_suffix, "CASEROOT": case1root}), Call(METHOD_case_one_custom_postrun_action, {}), Call(METHOD_case_two_custom_prerun_action, {}), - Call(METHOD_run_indv, - {'suffix': run_two_suffix, 'CASEROOT': case2root}), + Call(METHOD_run_indv, {"suffix": run_two_suffix, "CASEROOT": case2root}), Call(METHOD_case_two_custom_postrun_action, {}), - Call(METHOD_link_to_case2_output, {}) + Call(METHOD_link_to_case2_output, {}), ] self.assertEqual(expected_calls, mytest.log) @@ -422,15 +424,16 @@ def test_run_phase_internal_calls_multisubmit_phase1(self): # multi-submit test, in the first phase # Setup - run_one_suffix = 'base' - run_two_suffix = 'run2' + run_one_suffix = "base" + run_two_suffix = "run2" case1root, _ = self.get_caseroots() case1 = CaseFake(case1root) mytest = SystemTestsCompareTwoFake( - case1 = case1, - run_one_suffix = run_one_suffix, - run_two_suffix = run_two_suffix, - multisubmit = True) + case1=case1, + run_one_suffix=run_one_suffix, + run_two_suffix=run_two_suffix, + multisubmit=True, + ) # RESUBMIT=1 signals first phase case1.set_value("RESUBMIT", 1) @@ -440,15 +443,17 @@ def test_run_phase_internal_calls_multisubmit_phase1(self): # Verify expected_calls = [ Call(METHOD_case_one_custom_prerun_action, {}), - Call(METHOD_run_indv, - {'suffix': run_one_suffix, 'CASEROOT': case1root}), + Call(METHOD_run_indv, {"suffix": run_one_suffix, "CASEROOT": case1root}), Call(METHOD_case_one_custom_postrun_action, {}), ] self.assertEqual(expected_calls, mytest.log) # Also verify that comparison is NOT called: compare_phase_name = self.get_compare_phase_name(mytest) - self.assertEqual(test_status.TEST_PEND_STATUS, mytest._test_status.get_status(compare_phase_name)) + self.assertEqual( + test_status.TEST_PEND_STATUS, + mytest._test_status.get_status(compare_phase_name), + ) def test_run_phase_internal_calls_multisubmit_phase2(self): # Make sure that the correct calls are made to methods stubbed out by @@ -456,16 +461,17 @@ def test_run_phase_internal_calls_multisubmit_phase2(self): # multi-submit test, in the second phase # Setup - run_one_suffix = 'base' - run_two_suffix = 'run2' + run_one_suffix = "base" + run_two_suffix = 
"run2" case1root, case2root = self.get_caseroots() case1 = CaseFake(case1root) mytest = SystemTestsCompareTwoFake( - case1 = case1, - run_one_suffix = run_one_suffix, - run_two_suffix = run_two_suffix, - multisubmit = True, - compare_should_pass = True) + case1=case1, + run_one_suffix=run_one_suffix, + run_two_suffix=run_two_suffix, + multisubmit=True, + compare_should_pass=True, + ) # RESUBMIT=0 signals second phase case1.set_value("RESUBMIT", 0) @@ -475,54 +481,56 @@ def test_run_phase_internal_calls_multisubmit_phase2(self): # Verify expected_calls = [ Call(METHOD_case_two_custom_prerun_action, {}), - Call(METHOD_run_indv, - {'suffix': run_two_suffix, 'CASEROOT': case2root}), + Call(METHOD_run_indv, {"suffix": run_two_suffix, "CASEROOT": case2root}), Call(METHOD_case_two_custom_postrun_action, {}), - Call(METHOD_link_to_case2_output, {}) + Call(METHOD_link_to_case2_output, {}), ] self.assertEqual(expected_calls, mytest.log) # Also verify that comparison is called: compare_phase_name = self.get_compare_phase_name(mytest) - self.assertEqual(test_status.TEST_PASS_STATUS, - mytest._test_status.get_status(compare_phase_name)) + self.assertEqual( + test_status.TEST_PASS_STATUS, + mytest._test_status.get_status(compare_phase_name), + ) def test_internal_calls_multisubmit_failed_state(self): - run_one_suffix = 'base' - run_two_suffix = 'run2' + run_one_suffix = "base" + run_two_suffix = "run2" case1root, _ = self.get_caseroots() case1 = CaseFake(case1root) def _set_initial_test_values(x): - x.set_value('RESUBMIT', 1) + x.set_value("RESUBMIT", 1) case1.set_initial_test_values = functools.partial( - _set_initial_test_values, - case1) + _set_initial_test_values, case1 + ) # Standard first phase - case1.set_value('IS_FIRST_RUN', True) - case1.set_value('RESUBMIT', 1) + case1.set_value("IS_FIRST_RUN", True) + case1.set_value("RESUBMIT", 1) mytest = SystemTestsCompareTwoFake( - case1 = case1, - run_one_suffix = run_one_suffix, - run_two_suffix = run_two_suffix, - multisubmit = True) + case1=case1, + run_one_suffix=run_one_suffix, + run_two_suffix=run_two_suffix, + multisubmit=True, + ) mytest.run() expected_calls = [ Call(METHOD_case_one_custom_prerun_action, {}), - Call(METHOD_run_indv, {'CASEROOT': case1root, 'suffix': 'base'}), - Call(METHOD_case_one_custom_postrun_action, {}) + Call(METHOD_run_indv, {"CASEROOT": case1root, "suffix": "base"}), + Call(METHOD_case_one_custom_postrun_action, {}), ] self.assertEqual(expected_calls, mytest.log) # Emulate a rerun ensure phase 1 still runs - case1.set_value('IS_FIRST_RUN', True) - case1.set_value('RESUBMIT', 0) + case1.set_value("IS_FIRST_RUN", True) + case1.set_value("RESUBMIT", 0) # Reset the log mytest.log = [] @@ -531,8 +539,8 @@ def _set_initial_test_values(x): expected_calls = [ Call(METHOD_case_one_custom_prerun_action, {}), - Call(METHOD_run_indv, {'CASEROOT': case1root, 'suffix': 'base'}), - Call(METHOD_case_one_custom_postrun_action, {}) + Call(METHOD_run_indv, {"CASEROOT": case1root, "suffix": "base"}), + Call(METHOD_case_one_custom_postrun_action, {}), ] self.assertEqual(expected_calls, mytest.log) @@ -541,10 +549,9 @@ def test_run1_fails(self): # Make sure that a failure in run1 is reported correctly # Setup - case1root = os.path.join(self.tempdir, 'case1') + case1root = os.path.join(self.tempdir, "case1") case1 = CaseFake(case1root) - mytest = SystemTestsCompareTwoFake(case1, - run_one_should_pass = False) + mytest = SystemTestsCompareTwoFake(case1, run_one_should_pass=False) # Exercise try: @@ -553,17 +560,18 @@ def test_run1_fails(self): 
pass # Verify - self.assertEqual(test_status.TEST_FAIL_STATUS, - mytest._test_status.get_status(test_status.RUN_PHASE)) + self.assertEqual( + test_status.TEST_FAIL_STATUS, + mytest._test_status.get_status(test_status.RUN_PHASE), + ) def test_run2_fails(self): # Make sure that a failure in run2 is reported correctly # Setup - case1root = os.path.join(self.tempdir, 'case1') + case1root = os.path.join(self.tempdir, "case1") case1 = CaseFake(case1root) - mytest = SystemTestsCompareTwoFake(case1, - run_two_should_pass = False) + mytest = SystemTestsCompareTwoFake(case1, run_two_should_pass=False) # Exercise try: @@ -572,42 +580,47 @@ def test_run2_fails(self): pass # Verify - self.assertEqual(test_status.TEST_FAIL_STATUS, - mytest._test_status.get_status(test_status.RUN_PHASE)) + self.assertEqual( + test_status.TEST_FAIL_STATUS, + mytest._test_status.get_status(test_status.RUN_PHASE), + ) def test_compare_passes(self): # Make sure that a pass in the comparison is reported correctly # Setup - case1root = os.path.join(self.tempdir, 'case1') + case1root = os.path.join(self.tempdir, "case1") case1 = CaseFake(case1root) - mytest = SystemTestsCompareTwoFake(case1, - compare_should_pass = True) + mytest = SystemTestsCompareTwoFake(case1, compare_should_pass=True) # Exercise mytest.run() # Verify compare_phase_name = self.get_compare_phase_name(mytest) - self.assertEqual(test_status.TEST_PASS_STATUS, - mytest._test_status.get_status(compare_phase_name)) + self.assertEqual( + test_status.TEST_PASS_STATUS, + mytest._test_status.get_status(compare_phase_name), + ) def test_compare_fails(self): # Make sure that a failure in the comparison is reported correctly # Setup - case1root = os.path.join(self.tempdir, 'case1') + case1root = os.path.join(self.tempdir, "case1") case1 = CaseFake(case1root) - mytest = SystemTestsCompareTwoFake(case1, - compare_should_pass = False) + mytest = SystemTestsCompareTwoFake(case1, compare_should_pass=False) # Exercise mytest.run() # Verify compare_phase_name = self.get_compare_phase_name(mytest) - self.assertEqual(test_status.TEST_FAIL_STATUS, - mytest._test_status.get_status(compare_phase_name)) + self.assertEqual( + test_status.TEST_FAIL_STATUS, + mytest._test_status.get_status(compare_phase_name), + ) + if __name__ == "__main__": unittest.main(verbosity=2, catchbreak=True) diff --git a/CIME/tests/SystemTests/test_system_tests_compare_two_link_to_case2_output.py b/CIME/tests/SystemTests/test_system_tests_compare_two_link_to_case2_output.py index 8800d99f982..2984fa4c802 100755 --- a/CIME/tests/SystemTests/test_system_tests_compare_two_link_to_case2_output.py +++ b/CIME/tests/SystemTests/test_system_tests_compare_two_link_to_case2_output.py @@ -22,16 +22,13 @@ # the sake of unit testing # ======================================================================== + class SystemTestsCompareTwoFake(SystemTestsCompareTwo): - def __init__(self, - case1, - run_two_suffix = 'test'): + def __init__(self, case1, run_two_suffix="test"): SystemTestsCompareTwo.__init__( - self, - case1, - separate_builds = False, - run_two_suffix = run_two_suffix) + self, case1, separate_builds=False, run_two_suffix=run_two_suffix + ) # ------------------------------------------------------------------------ # Stubs of methods called by SystemTestsCommon.__init__ that interact with @@ -57,10 +54,12 @@ def _case_one_setup(self): def _case_two_setup(self): pass + # ======================================================================== # Test class itself # 
======================================================================== + class TestLinkToCase2Output(unittest.TestCase): # ======================================================================== @@ -86,9 +85,9 @@ def setup_test_and_directories(self, casename1, run2_suffix): case1root = os.path.join(self.tempdir, casename1) case1 = CaseFake(case1root) - mytest = SystemTestsCompareTwoFake(case1, run_two_suffix = run2_suffix) - mytest._case1.make_rundir() #pylint: disable=maybe-no-member - mytest._case2.make_rundir() #pylint: disable=maybe-no-member + mytest = SystemTestsCompareTwoFake(case1, run_two_suffix=run2_suffix) + mytest._case1.make_rundir() # pylint: disable=maybe-no-member + mytest._case2.make_rundir() # pylint: disable=maybe-no-member return mytest @@ -99,11 +98,11 @@ def create_file_in_rundir2(self, mytest, core_filename, run2_suffix): Returns full path to the file created """ - filename = '{}.{}.nc.{}'.format(mytest._case2.get_value('CASE'), - core_filename, - run2_suffix) - filepath = os.path.join(mytest._case2.get_value('RUNDIR'), filename) - open(filepath, 'w').close() + filename = "{}.{}.nc.{}".format( + mytest._case2.get_value("CASE"), core_filename, run2_suffix + ) + filepath = os.path.join(mytest._case2.get_value("RUNDIR"), filename) + open(filepath, "w").close() return filepath # ======================================================================== @@ -112,36 +111,38 @@ def create_file_in_rundir2(self, mytest, core_filename, run2_suffix): def test_basic(self): # Setup - casename1 = 'mytest' - run2_suffix = 'run2' + casename1 = "mytest" + run2_suffix = "run2" mytest = self.setup_test_and_directories(casename1, run2_suffix) - filepath1 = self.create_file_in_rundir2(mytest, 'clm2.h0', run2_suffix) - filepath2 = self.create_file_in_rundir2(mytest, 'clm2.h1', run2_suffix) + filepath1 = self.create_file_in_rundir2(mytest, "clm2.h0", run2_suffix) + filepath2 = self.create_file_in_rundir2(mytest, "clm2.h1", run2_suffix) # Exercise mytest._link_to_case2_output() # Verify - expected_link_filename1 = '{}.clm2.h0.nc.{}'.format(casename1, run2_suffix) - expected_link_filepath1 = os.path.join(mytest._case1.get_value('RUNDIR'), - expected_link_filename1) + expected_link_filename1 = "{}.clm2.h0.nc.{}".format(casename1, run2_suffix) + expected_link_filepath1 = os.path.join( + mytest._case1.get_value("RUNDIR"), expected_link_filename1 + ) self.assertTrue(os.path.islink(expected_link_filepath1)) self.assertEqual(filepath1, os.readlink(expected_link_filepath1)) - expected_link_filename2 = '{}.clm2.h1.nc.{}'.format(casename1, run2_suffix) - expected_link_filepath2 = os.path.join(mytest._case1.get_value('RUNDIR'), - expected_link_filename2) + expected_link_filename2 = "{}.clm2.h1.nc.{}".format(casename1, run2_suffix) + expected_link_filepath2 = os.path.join( + mytest._case1.get_value("RUNDIR"), expected_link_filename2 + ) self.assertTrue(os.path.islink(expected_link_filepath2)) self.assertEqual(filepath2, os.readlink(expected_link_filepath2)) def test_existing_link(self): # Setup - casename1 = 'mytest' - run2_suffix = 'run2' + casename1 = "mytest" + run2_suffix = "run2" mytest = self.setup_test_and_directories(casename1, run2_suffix) - self.create_file_in_rundir2(mytest, 'clm2.h0', run2_suffix) + self.create_file_in_rundir2(mytest, "clm2.h0", run2_suffix) # Create initial link via a call to _link_to_case2_output mytest._link_to_case2_output() @@ -152,5 +153,6 @@ def test_existing_link(self): # (No verification: Test passes if no exception was raised) -if __name__ == '__main__': + +if 
__name__ == "__main__": unittest.main() diff --git a/CIME/tests/SystemTests/test_utils/test_user_nl_utils.py b/CIME/tests/SystemTests/test_utils/test_user_nl_utils.py index 4112001f693..e5f585372b8 100755 --- a/CIME/tests/SystemTests/test_utils/test_user_nl_utils.py +++ b/CIME/tests/SystemTests/test_utils/test_user_nl_utils.py @@ -7,6 +7,7 @@ from CIME.SystemTests.test_utils import user_nl_utils import CIME.six + class TestUserNLCopier(unittest.TestCase): # ======================================================================== @@ -19,7 +20,7 @@ def setUp(self): def tearDown(self): shutil.rmtree(self._caseroot, ignore_errors=True) - def write_user_nl_file(self, component, contents, suffix=''): + def write_user_nl_file(self, component, contents, suffix=""): """Write contents to a user_nl file in the case directory. Returns the basename (i.e., not the full path) of the file that is created. @@ -29,9 +30,9 @@ def write_user_nl_file(self, component, contents, suffix=''): If the suffix is '_0001', the file name will be user_nl_foo_0001 """ - filename = 'user_nl_' + component + suffix + filename = "user_nl_" + component + suffix - with open(os.path.join(self._caseroot, filename), 'w') as user_nl_file: + with open(os.path.join(self._caseroot, filename), "w") as user_nl_file: user_nl_file.write(contents) return filename @@ -41,7 +42,7 @@ def assertFileContentsEqual(self, expected, filepath, msg=None): the string given by 'expected'. 'msg' gives an optional message to be printed if the assertion fails.""" - with open(filepath, 'r') as myfile: + with open(filepath, "r") as myfile: contents = myfile.read() self.assertEqual(expected, contents, msg=msg) @@ -52,63 +53,66 @@ def assertFileContentsEqual(self, expected, filepath, msg=None): def test_append(self): # Define some variables - component = 'foo' + component = "foo" # deliberately exclude new line from file contents, to make sure that's # handled correctly - orig_contents = 'bar = 42' - contents_to_append = 'baz = 101' + orig_contents = "bar = 42" + contents_to_append = "baz = 101" # Setup filename = self.write_user_nl_file(component, orig_contents) # Exercise - user_nl_utils.append_to_user_nl_files(caseroot = self._caseroot, - component = component, - contents = contents_to_append) + user_nl_utils.append_to_user_nl_files( + caseroot=self._caseroot, component=component, contents=contents_to_append + ) # Verify - expected_contents = orig_contents + '\n' + contents_to_append + '\n' - self.assertFileContentsEqual(expected_contents, - os.path.join(self._caseroot, filename)) + expected_contents = orig_contents + "\n" + contents_to_append + "\n" + self.assertFileContentsEqual( + expected_contents, os.path.join(self._caseroot, filename) + ) def test_append_multiple_files(self): # Simulates a multi-instance test - component = 'foo' - orig_contents1 = 'bar = 42' - orig_contents2 = 'bar = 17' - contents_to_append = 'baz = 101' + component = "foo" + orig_contents1 = "bar = 42" + orig_contents2 = "bar = 17" + contents_to_append = "baz = 101" # Setup - filename1 = self.write_user_nl_file(component, orig_contents1, suffix='_0001') - filename2 = self.write_user_nl_file(component, orig_contents2, suffix='_0002') + filename1 = self.write_user_nl_file(component, orig_contents1, suffix="_0001") + filename2 = self.write_user_nl_file(component, orig_contents2, suffix="_0002") # Exercise - user_nl_utils.append_to_user_nl_files(caseroot = self._caseroot, - component = component, - contents = contents_to_append) + user_nl_utils.append_to_user_nl_files( + 
caseroot=self._caseroot, component=component, contents=contents_to_append
+        )

         # Verify
-        expected_contents1 = orig_contents1 + '\n' + contents_to_append + '\n'
-        expected_contents2 = orig_contents2 + '\n' + contents_to_append + '\n'
-        self.assertFileContentsEqual(expected_contents1,
-                                     os.path.join(self._caseroot, filename1))
-        self.assertFileContentsEqual(expected_contents2,
-                                     os.path.join(self._caseroot, filename2))
-
+        expected_contents1 = orig_contents1 + "\n" + contents_to_append + "\n"
+        expected_contents2 = orig_contents2 + "\n" + contents_to_append + "\n"
+        self.assertFileContentsEqual(
+            expected_contents1, os.path.join(self._caseroot, filename1)
+        )
+        self.assertFileContentsEqual(
+            expected_contents2, os.path.join(self._caseroot, filename2)
+        )

     def test_append_without_files_raises_exception(self):
         # This test verifies that you get an exception if you call
         # append_to_user_nl_files when there are no user_nl files of interest

         # Define some variables
-        component_exists = 'foo'
-        component_for_append = 'bar'
+        component_exists = "foo"
+        component_for_append = "bar"

         # Setup
         # Create file in caseroot for component_exists, but not for component_for_append
-        self.write_user_nl_file(component_exists, 'irrelevant contents')
+        self.write_user_nl_file(component_exists, "irrelevant contents")

         # Exercise & verify
-        CIME.six.assertRaisesRegex(self, RuntimeError, "No user_nl files found",
-                                   user_nl_utils.append_to_user_nl_files,
-                                   caseroot = self._caseroot,
@@ -116,4 +120,18 @@ def test_append_without_files_raises_exception(self):
-            contents = 'irrelevant contents to append')
-
-if __name__ == '__main__':
+        CIME.six.assertRaisesRegex(
+            self,
+            RuntimeError,
+            "No user_nl files found",
+            user_nl_utils.append_to_user_nl_files,
+            caseroot=self._caseroot,
+            component=component_for_append,
+            contents="irrelevant contents to append",
+        )
+
+
+if __name__ == "__main__":
     unittest.main()
diff --git a/CIME/tests/XML/test_expected_fails_file.py b/CIME/tests/XML/test_expected_fails_file.py
index ecdb0c00f15..e1094ea7d25 100755
--- a/CIME/tests/XML/test_expected_fails_file.py
+++ b/CIME/tests/XML/test_expected_fails_file.py
@@ -9,8 +9,8 @@
 from CIME.utils import CIMEError
 from CIME.expected_fails import ExpectedFails

-class TestExpectedFailsFile(unittest.TestCase):

+class TestExpectedFailsFile(unittest.TestCase):
     def setUp(self):
         self._workdir = tempfile.mkdtemp()
         self._xml_filepath = os.path.join(self._workdir, "expected_fails.xml")
@@ -45,19 +45,18 @@ def test_basic(self):
""" - with open(self._xml_filepath, 'w') as xml_file: + with open(self._xml_filepath, "w") as xml_file: xml_file.write(contents) expected_fails_file = ExpectedFailsFile(self._xml_filepath) xfails = expected_fails_file.get_expected_fails() expected_test1 = ExpectedFails() - expected_test1.add_failure('RUN', 'FAIL') - expected_test1.add_failure('COMPARE_base_rest', 'PEND') + expected_test1.add_failure("RUN", "FAIL") + expected_test1.add_failure("COMPARE_base_rest", "PEND") expected_test2 = ExpectedFails() - expected_test2.add_failure('GENERATE', 'FAIL') - expected_test2.add_failure('BASELINE', 'FAIL') - expected = {'my.test.1': expected_test1, - 'my.test.2': expected_test2} + expected_test2.add_failure("GENERATE", "FAIL") + expected_test2.add_failure("BASELINE", "FAIL") + expected = {"my.test.1": expected_test1, "my.test.2": expected_test2} self.assertEqual(xfails, expected) @@ -84,15 +83,15 @@ def test_same_test_appears_twice(self):
""" - with open(self._xml_filepath, 'w') as xml_file: + with open(self._xml_filepath, "w") as xml_file: xml_file.write(contents) expected_fails_file = ExpectedFailsFile(self._xml_filepath) xfails = expected_fails_file.get_expected_fails() expected_test1 = ExpectedFails() - expected_test1.add_failure('RUN', 'FAIL') - expected_test1.add_failure('COMPARE_base_rest', 'PEND') - expected = {'my.test.1': expected_test1} + expected_test1.add_failure("RUN", "FAIL") + expected_test1.add_failure("COMPARE_base_rest", "PEND") + expected = {"my.test.1": expected_test1} self.assertEqual(xfails, expected) @@ -111,11 +110,12 @@ def test_invalid_file(self):
""" - with open(self._xml_filepath, 'w') as xml_file: + with open(self._xml_filepath, "w") as xml_file: xml_file.write(contents) with CIME.six.assertRaisesRegex(self, CIMEError, "Schemas validity error"): _ = ExpectedFailsFile(self._xml_filepath) -if __name__ == '__main__': + +if __name__ == "__main__": unittest.main() diff --git a/CIME/tests/XML/test_grids.py b/CIME/tests/XML/test_grids.py index fa091d6d70f..8417177e83e 100755 --- a/CIME/tests/XML/test_grids.py +++ b/CIME/tests/XML/test_grids.py @@ -21,13 +21,15 @@ from CIME.XML.grids import Grids, _ComponentGrids, _add_grid_info, _strip_grid_from_name from CIME.utils import CIMEError + class TestGrids(unittest.TestCase): """Tests some functionality of CIME.XML.grids Note that much of the functionality of CIME.XML.grids is NOT covered here """ - _CONFIG_GRIDS_TEMPLATE = string.Template(""" + _CONFIG_GRIDS_TEMPLATE = string.Template( + """ @@ -68,7 +70,8 @@ class TestGrids(unittest.TestCase): $GRIDMAP_ENTRIES -""") +""" + ) _MODEL_GRID_F09_G17 = """ @@ -93,11 +96,7 @@ class TestGrids(unittest.TestCase): _DOMAIN_F09 = """ 288 192 - domain.lnd.fv0.9x1.25_gx1v6.nc - domain.ocn.fv0.9x1.25_gx1v6.nc - domain.lnd.fv0.9x1.25_gx1v7.nc - domain.ocn.fv0.9x1.25_gx1v7.nc - fv0.9x1.25_ESMFmesh.nc + fv0.9x1.25_ESMFmesh.nc 0.9x1.25 is FV 1-deg grid: """ @@ -105,9 +104,7 @@ class TestGrids(unittest.TestCase): _DOMAIN_G17 = """ 320 384 - domain.ocn.gx1v7.nc - domain.ocn.gx1v7.nc - gx1v7_ESMFmesh.nc + gx1v7_ESMFmesh.nc gx1v7 is displaced Greenland pole 1-deg grid with Caspian as a land feature: """ @@ -115,7 +112,7 @@ class TestGrids(unittest.TestCase): _DOMAIN_GRIS4 = """ 416 704 - greenland_4km_ESMFmesh.nc + greenland_4km_ESMFmesh.nc 4-km Greenland grid """ @@ -123,7 +120,7 @@ class TestGrids(unittest.TestCase): _DOMAIN_AIS8 = """ 704 576 - antarctica_8km_ESMFmesh.nc + antarctica_8km_ESMFmesh.nc 8-km Antarctica grid """ @@ -131,7 +128,7 @@ class TestGrids(unittest.TestCase): _DOMAIN_LIS12 = """ 123 456 - laurentide_12km_ESMFmesh.nc + laurentide_12km_ESMFmesh.nc 12-km Laurentide grid """ @@ -188,85 +185,93 @@ def setUp(self): def tearDown(self): shutil.rmtree(self._workdir) - def _create_grids_xml(self, model_grid_entries, domain_entries, gridmap_entries, - extra_required_gridmaps=""): - grids_xml = self._CONFIG_GRIDS_TEMPLATE.substitute({'MODEL_GRID_ENTRIES':model_grid_entries, - 'DOMAIN_ENTRIES':domain_entries, - 'EXTRA_REQUIRED_GRIDMAPS':extra_required_gridmaps, - 'GRIDMAP_ENTRIES':gridmap_entries}) - with open(self._xml_filepath, 'w') as xml_file: + def _create_grids_xml( + self, + model_grid_entries, + domain_entries, + gridmap_entries, + extra_required_gridmaps="", + ): + grids_xml = self._CONFIG_GRIDS_TEMPLATE.substitute( + { + "MODEL_GRID_ENTRIES": model_grid_entries, + "DOMAIN_ENTRIES": domain_entries, + "EXTRA_REQUIRED_GRIDMAPS": extra_required_gridmaps, + "GRIDMAP_ENTRIES": gridmap_entries, + } + ) + with open(self._xml_filepath, "w", encoding="UTF-8") as xml_file: xml_file.write(grids_xml) - def assert_grid_info_f09_g17(self, grid_info, nuopc=True): - """Asserts that expected grid info is present and correct when using _MODEL_GRID_F09_G17 - - If nuopc is true (the default), then assumes that we used the nuopc driver. 
- - """ - self.assertEqual(grid_info['ATM_NX'], 288) - self.assertEqual(grid_info['ATM_NY'], 192) - self.assertEqual(grid_info['ATM_GRID'], '0.9x1.25') - self.assertEqual(grid_info['ATM_DOMAIN_FILE'], 'domain.lnd.fv0.9x1.25_gx1v7.nc') - if nuopc: - self.assertEqual(grid_info['ATM_DOMAIN_MESH'], 'fv0.9x1.25_ESMFmesh.nc') - - self.assertEqual(grid_info['LND_NX'], 288) - self.assertEqual(grid_info['LND_NY'], 192) - self.assertEqual(grid_info['LND_GRID'], '0.9x1.25') - self.assertEqual(grid_info['LND_DOMAIN_FILE'], 'domain.lnd.fv0.9x1.25_gx1v7.nc') - if nuopc: - self.assertEqual(grid_info['LND_DOMAIN_MESH'], 'fv0.9x1.25_ESMFmesh.nc') - - self.assertEqual(grid_info['OCN_NX'], 320) - self.assertEqual(grid_info['OCN_NY'], 384) - self.assertEqual(grid_info['OCN_GRID'], 'gx1v7') - self.assertEqual(grid_info['OCN_DOMAIN_FILE'], 'domain.ocn.gx1v7.nc') - if nuopc: - self.assertEqual(grid_info['OCN_DOMAIN_MESH'], 'gx1v7_ESMFmesh.nc') - - self.assertEqual(grid_info['ICE_NX'], 320) - self.assertEqual(grid_info['ICE_NY'], 384) - self.assertEqual(grid_info['ICE_GRID'], 'gx1v7') - self.assertEqual(grid_info['ICE_DOMAIN_FILE'], 'domain.ocn.gx1v7.nc') - if nuopc: - self.assertEqual(grid_info['ICE_DOMAIN_MESH'], 'gx1v7_ESMFmesh.nc') - - self.assertEqual(grid_info['ATM2OCN_FMAPNAME'], 'map_fv0.9x1.25_TO_gx1v7_aave.nc') - self.assertEqual(grid_info['OCN2ATM_FMAPNAME'], 'map_gx1v7_TO_fv0.9x1.25_aave.nc') - self.assertFalse('OCN2ATM_SHOULDBEABSENT' in grid_info) - - def assert_grid_info_f09_g17_3glc(self, grid_info, nuopc=True): - """Asserts that all domain info is present & correct for _MODEL_GRID_F09_G17_3GLC - """ - self.assert_grid_info_f09_g17(grid_info, nuopc=nuopc) + def assert_grid_info_f09_g17(self, grid_info): + """Asserts that expected grid info is present and correct when using _MODEL_GRID_F09_G17""" + self.assertEqual(grid_info["ATM_NX"], 288) + self.assertEqual(grid_info["ATM_NY"], 192) + self.assertEqual(grid_info["ATM_GRID"], "0.9x1.25") + self.assertEqual(grid_info["ATM_DOMAIN_MESH"], "fv0.9x1.25_ESMFmesh.nc") + + self.assertEqual(grid_info["LND_NX"], 288) + self.assertEqual(grid_info["LND_NY"], 192) + self.assertEqual(grid_info["LND_GRID"], "0.9x1.25") + self.assertEqual(grid_info["LND_DOMAIN_MESH"], "fv0.9x1.25_ESMFmesh.nc") + + self.assertEqual(grid_info["OCN_NX"], 320) + self.assertEqual(grid_info["OCN_NY"], 384) + self.assertEqual(grid_info["OCN_GRID"], "gx1v7") + self.assertEqual(grid_info["OCN_DOMAIN_MESH"], "gx1v7_ESMFmesh.nc") + + self.assertEqual(grid_info["ICE_NX"], 320) + self.assertEqual(grid_info["ICE_NY"], 384) + self.assertEqual(grid_info["ICE_GRID"], "gx1v7") + self.assertEqual(grid_info["ICE_DOMAIN_MESH"], "gx1v7_ESMFmesh.nc") + + self.assertEqual( + grid_info["ATM2OCN_FMAPNAME"], "map_fv0.9x1.25_TO_gx1v7_aave.nc" + ) + self.assertEqual( + grid_info["OCN2ATM_FMAPNAME"], "map_gx1v7_TO_fv0.9x1.25_aave.nc" + ) + self.assertFalse("OCN2ATM_SHOULDBEABSENT" in grid_info) + + def assert_grid_info_f09_g17_3glc(self, grid_info): + """Asserts that all domain info is present & correct for _MODEL_GRID_F09_G17_3GLC""" + self.assert_grid_info_f09_g17(grid_info) # Note that we don't assert GLC_NX and GLC_NY here: these are unused for this # multi-grid case, so we don't care what arbitrary values they have. 
- self.assertEqual(grid_info['GLC_GRID'], 'ais8:gris4:lis12') - if nuopc: - self.assertEqual(grid_info['GLC_DOMAIN_MESH'], - 'antarctica_8km_ESMFmesh.nc:greenland_4km_ESMFmesh.nc:laurentide_12km_ESMFmesh.nc') - - self.assertEqual(grid_info['GLC2OCN_LIQ_RMAPNAME'], - 'map_ais8_to_gx1v7_liq.nc:map_gris4_to_gx1v7_liq.nc:map_lis12_to_gx1v7_liq.nc') - self.assertEqual(grid_info['GLC2OCN_ICE_RMAPNAME'], - 'map_ais8_to_gx1v7_ice.nc:map_gris4_to_gx1v7_ice.nc:map_lis12_to_gx1v7_ice.nc') + self.assertEqual(grid_info["GLC_GRID"], "ais8:gris4:lis12") + self.assertEqual( + grid_info["GLC_DOMAIN_MESH"], + "antarctica_8km_ESMFmesh.nc:greenland_4km_ESMFmesh.nc:laurentide_12km_ESMFmesh.nc", + ) + self.assertEqual( + grid_info["GLC2OCN_LIQ_RMAPNAME"], + "map_ais8_to_gx1v7_liq.nc:map_gris4_to_gx1v7_liq.nc:map_lis12_to_gx1v7_liq.nc", + ) + self.assertEqual( + grid_info["GLC2OCN_ICE_RMAPNAME"], + "map_ais8_to_gx1v7_ice.nc:map_gris4_to_gx1v7_ice.nc:map_lis12_to_gx1v7_ice.nc", + ) def test_get_grid_info_basic(self): """Basic test of get_grid_info""" model_grid_entries = self._MODEL_GRID_F09_G17 domain_entries = self._DOMAIN_F09 + self._DOMAIN_G17 gridmap_entries = self._GRIDMAP_F09_G17 - self._create_grids_xml(model_grid_entries=model_grid_entries, - domain_entries=domain_entries, - gridmap_entries=gridmap_entries) + self._create_grids_xml( + model_grid_entries=model_grid_entries, + domain_entries=domain_entries, + gridmap_entries=gridmap_entries, + ) grids = Grids(self._xml_filepath) - grid_info = grids.get_grid_info(name="f09_g17", - compset="NOT_IMPORTANT", - driver="nuopc") + grid_info = grids.get_grid_info( + name="f09_g17", + compset="NOT_IMPORTANT", + driver="nuopc", + ) - self.assert_grid_info_f09_g17(grid_info, nuopc=True) + self.assert_grid_info_f09_g17(grid_info) def test_get_grid_info_extra_required_gridmaps(self): """Test of get_grid_info with some extra required gridmaps""" @@ -278,19 +283,23 @@ def test_get_grid_info_extra_required_gridmaps(self): ATM2OCN_EXTRA OCN2ATM_EXTRA """ - self._create_grids_xml(model_grid_entries=model_grid_entries, - domain_entries=domain_entries, - gridmap_entries=gridmap_entries, - extra_required_gridmaps=extra_required_gridmaps) + self._create_grids_xml( + model_grid_entries=model_grid_entries, + domain_entries=domain_entries, + gridmap_entries=gridmap_entries, + extra_required_gridmaps=extra_required_gridmaps, + ) grids = Grids(self._xml_filepath) - grid_info = grids.get_grid_info(name="f09_g17", - compset="NOT_IMPORTANT", - driver="nuopc") + grid_info = grids.get_grid_info( + name="f09_g17", + compset="NOT_IMPORTANT", + driver="nuopc", + ) - self.assert_grid_info_f09_g17(grid_info, nuopc=True) - self.assertEqual(grid_info['ATM2OCN_EXTRA'], 'unset') - self.assertEqual(grid_info['OCN2ATM_EXTRA'], 'unset') + self.assert_grid_info_f09_g17(grid_info) + self.assertEqual(grid_info["ATM2OCN_EXTRA"], "unset") + self.assertEqual(grid_info["OCN2ATM_EXTRA"], "unset") def test_get_grid_info_extra_gridmaps(self): """Test of get_grid_info with some extra gridmaps""" @@ -304,43 +313,61 @@ def test_get_grid_info_extra_gridmaps(self): map_gx1v7_TO_fv0.9x1.25_extra.nc """ - self._create_grids_xml(model_grid_entries=model_grid_entries, - domain_entries=domain_entries, - gridmap_entries=gridmap_entries) + self._create_grids_xml( + model_grid_entries=model_grid_entries, + domain_entries=domain_entries, + gridmap_entries=gridmap_entries, + ) grids = Grids(self._xml_filepath) - grid_info = grids.get_grid_info(name="f09_g17", - compset="NOT_IMPORTANT", - driver="nuopc") + grid_info = 
grids.get_grid_info( + name="f09_g17", + compset="NOT_IMPORTANT", + driver="nuopc", + ) - self.assert_grid_info_f09_g17(grid_info, nuopc=True) - self.assertEqual(grid_info['ATM2OCN_EXTRA'], 'map_fv0.9x1.25_TO_gx1v7_extra.nc') - self.assertEqual(grid_info['OCN2ATM_EXTRA'], 'map_gx1v7_TO_fv0.9x1.25_extra.nc') + self.assert_grid_info_f09_g17(grid_info) + self.assertEqual(grid_info["ATM2OCN_EXTRA"], "map_fv0.9x1.25_TO_gx1v7_extra.nc") + self.assertEqual(grid_info["OCN2ATM_EXTRA"], "map_gx1v7_TO_fv0.9x1.25_extra.nc") def test_get_grid_info_3glc(self): """Test of get_grid_info with 3 glc grids""" model_grid_entries = self._MODEL_GRID_F09_G17_3GLC - domain_entries = (self._DOMAIN_F09 + self._DOMAIN_G17 + - self._DOMAIN_GRIS4 + self._DOMAIN_AIS8 + self._DOMAIN_LIS12) - gridmap_entries = (self._GRIDMAP_F09_G17 + - self._GRIDMAP_GRIS4_G17 + self._GRIDMAP_AIS8_G17 + self._GRIDMAP_LIS12_G17) + domain_entries = ( + self._DOMAIN_F09 + + self._DOMAIN_G17 + + self._DOMAIN_GRIS4 + + self._DOMAIN_AIS8 + + self._DOMAIN_LIS12 + ) + gridmap_entries = ( + self._GRIDMAP_F09_G17 + + self._GRIDMAP_GRIS4_G17 + + self._GRIDMAP_AIS8_G17 + + self._GRIDMAP_LIS12_G17 + ) # Claim that a glc2atm gridmap is required in order to test the logic that handles # an unset required gridmap for a component with multiple grids. extra_required_gridmaps = """ GLC2ATM_EXTRA """ - self._create_grids_xml(model_grid_entries=model_grid_entries, - domain_entries=domain_entries, - gridmap_entries=gridmap_entries, - extra_required_gridmaps=extra_required_gridmaps) + self._create_grids_xml( + model_grid_entries=model_grid_entries, + domain_entries=domain_entries, + gridmap_entries=gridmap_entries, + extra_required_gridmaps=extra_required_gridmaps, + ) grids = Grids(self._xml_filepath) - grid_info = grids.get_grid_info(name="f09_g17_3glc", - compset="NOT_IMPORTANT", - driver="nuopc") + grid_info = grids.get_grid_info( + name="f09_g17_3glc", + compset="NOT_IMPORTANT", + driver="nuopc", + ) + + self.assert_grid_info_f09_g17_3glc(grid_info) + self.assertEqual(grid_info["GLC2ATM_EXTRA"], "unset") - self.assert_grid_info_f09_g17_3glc(grid_info, nuopc=True) - self.assertEqual(grid_info['GLC2ATM_EXTRA'], 'unset') class TestComponentGrids(unittest.TestCase): """Tests the _ComponentGrids helper class defined in CIME.XML.grids""" @@ -362,7 +389,7 @@ class TestComponentGrids(unittest.TestCase): def test_check_num_elements_right_ndomains(self): """With the right number of domains for a component, check_num_elements should pass""" component_grids = _ComponentGrids(self._GRID_LONGNAME) - gridinfo = {'GLC_DOMAIN_MESH': 'foo:bar:baz'} + gridinfo = {"GLC_DOMAIN_MESH": "foo:bar:baz"} # The test passes as long as the following call doesn't generate any errors component_grids.check_num_elements(gridinfo) @@ -371,15 +398,19 @@ def test_check_num_elements_wrong_ndomains(self): """With the wrong number of domains for a component, check_num_elements should fail""" component_grids = _ComponentGrids(self._GRID_LONGNAME) # In the following, there should be 3 elements, but we only specify 2 - gridinfo = {'GLC_DOMAIN_MESH': 'foo:bar'} + gridinfo = {"GLC_DOMAIN_MESH": "foo:bar"} - self.assertRaisesRegex(CIMEError, "Unexpected number of colon-delimited elements", - component_grids.check_num_elements, gridinfo) + self.assertRaisesRegex( + CIMEError, + "Unexpected number of colon-delimited elements", + component_grids.check_num_elements, + gridinfo, + ) def test_check_num_elements_right_nmaps(self): """With the right number of maps between two components, check_num_elements 
should pass""" component_grids = _ComponentGrids(self._GRID_LONGNAME) - gridinfo = {'GLC2ROF_RMAPNAME': 'map1:map2:map3:map4:map5:map6'} + gridinfo = {"GLC2ROF_RMAPNAME": "map1:map2:map3:map4:map5:map6"} # The test passes as long as the following call doesn't generate any errors component_grids.check_num_elements(gridinfo) @@ -388,10 +419,15 @@ def test_check_num_elements_wrong_nmaps(self): """With the wrong number of maps between two components, check_num_elements should fail""" component_grids = _ComponentGrids(self._GRID_LONGNAME) # In the following, there should be 6 elements, but we only specify 5 - gridinfo = {'GLC2ROF_RMAPNAME': 'map1:map2:map3:map4:map5'} + gridinfo = {"GLC2ROF_RMAPNAME": "map1:map2:map3:map4:map5"} + + self.assertRaisesRegex( + CIMEError, + "Unexpected number of colon-delimited elements", + component_grids.check_num_elements, + gridinfo, + ) - self.assertRaisesRegex(CIMEError, "Unexpected number of colon-delimited elements", - component_grids.check_num_elements, gridinfo) class TestGridsFunctions(unittest.TestCase): """Tests helper functions defined in CIME.XML.grids @@ -400,27 +436,28 @@ class TestGridsFunctions(unittest.TestCase): function of the main test class. """ + # ------------------------------------------------------------------------ # Tests of _add_grid_info # ------------------------------------------------------------------------ def test_add_grid_info_initial(self): """Test of _add_grid_info for the initial add of a given key""" - grid_info = {'foo': 'a'} - _add_grid_info(grid_info, 'bar', 'b') - self.assertEqual(grid_info, {'foo': 'a', 'bar': 'b'}) + grid_info = {"foo": "a"} + _add_grid_info(grid_info, "bar", "b") + self.assertEqual(grid_info, {"foo": "a", "bar": "b"}) def test_add_grid_info_existing(self): """Test of _add_grid_info when the given key already exists""" - grid_info = {'foo': 'bar'} - _add_grid_info(grid_info, 'foo', 'baz') - self.assertEqual(grid_info, {'foo': 'bar:baz'}) + grid_info = {"foo": "bar"} + _add_grid_info(grid_info, "foo", "baz") + self.assertEqual(grid_info, {"foo": "bar:baz"}) def test_add_grid_info_existing_with_value_for_multiple(self): """Test of _add_grid_info when the given key already exists and value_for_multiple is provided""" - grid_info = {'foo': 1} - _add_grid_info(grid_info, 'foo', 2, value_for_multiple=0) - self.assertEqual(grid_info, {'foo': 0}) + grid_info = {"foo": 1} + _add_grid_info(grid_info, "foo", 2, value_for_multiple=0) + self.assertEqual(grid_info, {"foo": 0}) # ------------------------------------------------------------------------ # Tests of strip_grid_from_name @@ -433,8 +470,9 @@ def test_strip_grid_from_name_basic(self): def test_strip_grid_from_name_badname(self): """_strip_grid_from_name should raise an exception for a name not ending with _grid""" - self.assertRaisesRegex(CIMEError, "does not end with _grid", - _strip_grid_from_name, name = "atm") + self.assertRaisesRegex( + CIMEError, "does not end with _grid", _strip_grid_from_name, name="atm" + ) # ------------------------------------------------------------------------ # Tests of _check_grid_info_component_counts diff --git a/CIME/tests/base.py b/CIME/tests/base.py new file mode 100644 index 00000000000..dafd1a581f5 --- /dev/null +++ b/CIME/tests/base.py @@ -0,0 +1,321 @@ +#!/usr/bin/env python3 + +import glob +import os +import tempfile +import time +import signal +import shutil +import stat +import sys +import unittest + +from CIME import utils +from CIME.XML.machines import Machines + + +def typed_os_environ(key, default_value, 
expected_type=None):
+    # Read `key` from the environment and coerce the result to
+    # `expected_type`, defaulting to the type of `default_value`.
+    # Infer type if not explicitly set
+    dst_type = expected_type or type(default_value)
+
+    value = os.environ.get(key, default_value)
+
+    if value is not None and dst_type == bool:
+        # Anything else is treated as False; we might want to be stricter
+        return value.lower() == "true" if isinstance(value, str) else value
+
+    if value is None:
+        return None
+
+    return dst_type(value)
+
+
+class BaseTestCase(unittest.TestCase):
+    # These static values are set when CIME/tests/scripts_regression_tests.py is called.
+    MACHINE = None
+    SCRIPT_DIR = utils.get_scripts_root()
+    TOOLS_DIR = os.path.join(SCRIPT_DIR, "Tools")
+    TEST_ROOT = None
+    TEST_COMPILER = None
+    TEST_MPILIB = None
+    NO_FORTRAN_RUN = None
+    FAST_ONLY = None
+    NO_BATCH = None
+    NO_CMAKE = None
+    NO_TEARDOWN = None
+    GLOBAL_TIMEOUT = None
+
+    def setUp(self):
+        self._thread_error = None
+        self._unset_proxy = self.setup_proxy()
+        self._machine = self.MACHINE.get_machine_name()
+        self._compiler = (
+            self.MACHINE.get_default_compiler()
+            if self.TEST_COMPILER is None
+            else self.TEST_COMPILER
+        )
+        self._baseline_name = "fake_testing_only_%s" % utils.get_timestamp()
+        self._baseline_area = os.path.join(self.TEST_ROOT, "baselines")
+        self._testroot = self.TEST_ROOT
+        self._hasbatch = self.MACHINE.has_batch_system() and not self.NO_BATCH
+        self._do_teardown = not self.NO_TEARDOWN
+        self._root_dir = os.getcwd()
+
+    def tearDown(self):
+        self.kill_subprocesses()
+
+        os.chdir(self._root_dir)
+
+        if self._unset_proxy:
+            del os.environ["http_proxy"]
+
+        files_to_clean = []
+
+        baselines = os.path.join(self._baseline_area, self._baseline_name)
+        if os.path.isdir(baselines):
+            files_to_clean.append(baselines)
+
+        for test_id in ["master", self._baseline_name]:
+            for leftover in glob.glob(os.path.join(self._testroot, "*%s*" % test_id)):
+                files_to_clean.append(leftover)
+
+        do_teardown = self._do_teardown and sys.exc_info() == (None, None, None)
+        if not do_teardown and files_to_clean:
+            print("Detected failed test or user request for no teardown")
+            print("Leaving files:")
+            for file_to_clean in files_to_clean:
+                print(" " + file_to_clean)
+        else:
+            # For batch machines we need to avoid a race condition as the
+            # batch system finishes I/O for the case.
+            if self._hasbatch:
+                time.sleep(5)
+
+            for file_to_clean in files_to_clean:
+                if os.path.isdir(file_to_clean):
+                    shutil.rmtree(file_to_clean)
+                else:
+                    os.remove(file_to_clean)
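A quick sketch of how typed_os_environ above behaves (illustrative only; the environment variable names here are hypothetical and not part of the diff):

    import os
    os.environ["CIME_TEST_FLAG"] = "True"
    typed_os_environ("CIME_TEST_FLAG", False)                    # -> True; bool inferred from the default's type
    typed_os_environ("CIME_TEST_COUNT", "3", expected_type=int)  # -> 3; explicit coercion of the fallback value
    typed_os_environ("CIME_TEST_UNSET", None)                    # -> None; unset key with no default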
+    def assert_test_status(self, test_name, test_status_obj, test_phase, expected_stat):
+        test_status = test_status_obj.get_status(test_phase)
+        self.assertEqual(
+            test_status,
+            expected_stat,
+            msg="Problem with {}: for phase '{}': has status '{}', expected '{}'".format(
+                test_name, test_phase, test_status, expected_stat
+            ),
+        )
+
+    def run_cmd_assert_result(
+        self, cmd, from_dir=None, expected_stat=0, env=None, verbose=False
+    ):
+        from_dir = os.getcwd() if from_dir is None else from_dir
+        stat, output, errput = utils.run_cmd(
+            cmd, from_dir=from_dir, env=env, verbose=verbose
+        )
+        if expected_stat == 0:
+            expectation = "SHOULD HAVE WORKED, INSTEAD GOT STAT %s" % stat
+        else:
+            expectation = "EXPECTED STAT %s, INSTEAD GOT STAT %s" % (
+                expected_stat,
+                stat,
+            )
+        msg = """
+    COMMAND: %s
+    FROM_DIR: %s
+    %s
+    OUTPUT: %s
+    ERRPUT: %s
+    """ % (
+            cmd,
+            from_dir,
+            expectation,
+            output,
+            errput,
+        )
+        self.assertEqual(stat, expected_stat, msg=msg)
+
+        return output
+
+    def setup_proxy(self):
+        if "http_proxy" not in os.environ:
+            proxy = self.MACHINE.get_value("PROXY")
+            if proxy is not None:
+                os.environ["http_proxy"] = proxy
+                return True
+
+        return False
+
+    def assert_dashboard_has_build(self, build_name, expected_count=1):
+        # Only check the E3SM dashboard when the model is E3SM
+        if utils.get_model() == "e3sm":
+            time.sleep(10)  # Give CDash a chance to update
+
+            wget_file = tempfile.mktemp()
+
+            utils.run_cmd_no_fail(
+                "wget https://my.cdash.org/api/v1/index.php?project=ACME_test --no-check-certificate -O %s"
+                % wget_file
+            )
+
+            raw_text = open(wget_file, "r").read()
+            os.remove(wget_file)
+
+            num_found = raw_text.count(build_name)
+            self.assertEqual(
+                num_found,
+                expected_count,
+                msg="Dashboard did not have the expected number of occurrences of build name '%s'. Expected %s, found %s"
+                % (build_name, expected_count, num_found),
+            )
+
+    def kill_subprocesses(
+        self, name=None, sig=signal.SIGKILL, expected_num_killed=None
+    ):
+        # Kill all subprocesses
+        proc_ids = utils.find_proc_id(proc_name=name, children_only=True)
+        if expected_num_killed is not None:
+            self.assertEqual(
+                len(proc_ids),
+                expected_num_killed,
+                msg="Expected to find %d processes to kill, found %d"
+                % (expected_num_killed, len(proc_ids)),
+            )
+        for proc_id in proc_ids:
+            try:
+                os.kill(proc_id, sig)
+            except OSError:
+                pass
+
+    def kill_python_subprocesses(self, sig=signal.SIGKILL, expected_num_killed=None):
+        self.kill_subprocesses("[Pp]ython", sig, expected_num_killed)
+
+    def _create_test(self, extra_args, test_id=None, run_errors=False, env_changes=""):
+        """
+        Convenience wrapper around create_test. Returns a list of full paths to the
+        created cases. If there are multiple cases, the order of the returned list
+        is not guaranteed to match the order of the arguments.
+ """ + # All stub model not supported in nuopc driver + driver = utils.get_cime_default_driver() + if driver == "nuopc" and "cime_developer" in extra_args: + extra_args.append( + " ^SMS_Ln3.T42_T42.S ^PRE.f19_f19.ADESP_TEST ^PRE.f19_f19.ADESP ^DAE.ww3a.ADWAV" + ) + + test_id = ( + "{}-{}".format(self._baseline_name, utils.get_timestamp()) + if test_id is None + else test_id + ) + extra_args.append("-t {}".format(test_id)) + extra_args.append("--baseline-root {}".format(self._baseline_area)) + if self.NO_BATCH: + extra_args.append("--no-batch") + if self.TEST_COMPILER and ( + [extra_arg for extra_arg in extra_args if "--compiler" in extra_arg] == [] + ): + extra_args.append("--compiler={}".format(self.TEST_COMPILER)) + if self.TEST_MPILIB and ( + [extra_arg for extra_arg in extra_args if "--mpilib" in extra_arg] == [] + ): + extra_args.append("--mpilib={}".format(self.TEST_MPILIB)) + extra_args.append("--test-root={0} --output-root={0}".format(self._testroot)) + extra_args.append(f"--machine {self.MACHINE.get_machine_name()}") + + full_run = ( + set(extra_args) + & set(["-n", "--namelist-only", "--no-setup", "--no-build", "--no-run"]) + ) == set() + if full_run and not self.NO_BATCH: + extra_args.append("--wait") + + expected_stat = 0 if not run_errors else utils.TESTS_FAILED_ERR_CODE + + output = self.run_cmd_assert_result( + "{} {}/create_test {}".format( + env_changes, self.SCRIPT_DIR, " ".join(extra_args) + ), + expected_stat=expected_stat, + ) + cases = [] + for line in output.splitlines(): + if "Case dir:" in line: + casedir = line.split()[-1] + self.assertTrue( + os.path.isdir(casedir), msg="Missing casedir {}".format(casedir) + ) + cases.append(casedir) + + self.assertTrue(len(cases) > 0, "create_test made no cases") + + return cases[0] if len(cases) == 1 else cases + + def _wait_for_tests(self, test_id, expect_works=True, always_wait=False): + if self._hasbatch or always_wait: + timeout_arg = ( + "--timeout={}".format(self.GLOBAL_TIMEOUT) + if self.GLOBAL_TIMEOUT is not None + else "" + ) + expected_stat = 0 if expect_works else utils.TESTS_FAILED_ERR_CODE + self.run_cmd_assert_result( + "{}/wait_for_tests {} *{}/TestStatus".format( + self.TOOLS_DIR, timeout_arg, test_id + ), + from_dir=self._testroot, + expected_stat=expected_stat, + ) + + def get_casedir(self, case_fragment, all_cases): + potential_matches = [item for item in all_cases if case_fragment in item] + self.assertTrue( + len(potential_matches) == 1, + "Ambiguous casedir selection for {}, found {} among {}".format( + case_fragment, potential_matches, all_cases + ), + ) + return potential_matches[0] + + def verify_perms(self, root_dir): + for root, dirs, files in os.walk(root_dir): + for filename in files: + full_path = os.path.join(root, filename) + st = os.stat(full_path) + self.assertTrue( + st.st_mode & stat.S_IWGRP, + msg="file {} is not group writeable".format(full_path), + ) + self.assertTrue( + st.st_mode & stat.S_IRGRP, + msg="file {} is not group readable".format(full_path), + ) + self.assertTrue( + st.st_mode & stat.S_IROTH, + msg="file {} is not world readable".format(full_path), + ) + + for dirname in dirs: + full_path = os.path.join(root, dirname) + st = os.stat(full_path) + + self.assertTrue( + st.st_mode & stat.S_IWGRP, + msg="dir {} is not group writable".format(full_path), + ) + self.assertTrue( + st.st_mode & stat.S_IRGRP, + msg="dir {} is not group readable".format(full_path), + ) + self.assertTrue( + st.st_mode & stat.S_IXGRP, + msg="dir {} is not group executable".format(full_path), + ) + 
self.assertTrue( + st.st_mode & stat.S_IROTH, + msg="dir {} is not world readable".format(full_path), + ) + self.assertTrue( + st.st_mode & stat.S_IXOTH, + msg="dir {} is not world executable".format(full_path), + ) diff --git a/CIME/tests/case_fake.py b/CIME/tests/case_fake.py index 295f01f5ce8..2b1117edf63 100644 --- a/CIME/tests/case_fake.py +++ b/CIME/tests/case_fake.py @@ -6,6 +6,7 @@ import os from copy import deepcopy + class CaseFake(object): def __init__(self, case_root, create_case_root=True): """ @@ -18,17 +19,16 @@ def __init__(self, case_root, create_case_root=True): self.vars = dict() if create_case_root: os.makedirs(case_root) - self.set_value('CASEROOT', case_root) + self.set_value("CASEROOT", case_root) casename = os.path.basename(case_root) # Typically, CIME_OUTPUT_ROOT is independent of the case. Here, # we nest it under CASEROOT so that (1) tests don't interfere # with each other; (2) a cleanup that removes CASEROOT will also # remove CIME_OUTPUT_ROOT. - self.set_value('CIME_OUTPUT_ROOT', - os.path.join(case_root, 'CIME_OUTPUT_ROOT')) - self.set_value('CASE', casename) - self.set_value('CASEBASEID', casename) - self.set_value('RUN_TYPE', 'startup') + self.set_value("CIME_OUTPUT_ROOT", os.path.join(case_root, "CIME_OUTPUT_ROOT")) + self.set_value("CASE", casename) + self.set_value("CASEBASEID", casename) + self.set_value("RUN_TYPE", "startup") self.set_exeroot() self.set_rundir() @@ -66,16 +66,24 @@ def copy(self, newcasename, newcaseroot): newcaseroot (str): new value for CASEROOT """ newcase = deepcopy(self) - newcase.set_value('CASE', newcasename) - newcase.set_value('CASEBASEID', newcasename) - newcase.set_value('CASEROOT', newcaseroot) + newcase.set_value("CASE", newcasename) + newcase.set_value("CASEBASEID", newcasename) + newcase.set_value("CASEROOT", newcaseroot) newcase.set_exeroot() newcase.set_rundir() return newcase - def create_clone(self, newcase, keepexe=False, mach_dir=None, project=None, - cime_output_root=None, exeroot=None, rundir=None): + def create_clone( + self, + newcase, + keepexe=False, + mach_dir=None, + project=None, + cime_output_root=None, + exeroot=None, + rundir=None, + ): # Need to disable unused-argument checking: keepexe is needed to match # the interface of Case, but is not used in this fake implementation # @@ -99,13 +107,13 @@ def create_clone(self, newcase, keepexe=False, mach_dir=None, project=None, newcaseroot = os.path.abspath(newcase) newcasename = os.path.basename(newcase) os.makedirs(newcaseroot) - clone = self.copy(newcasename = newcasename, newcaseroot = newcaseroot) + clone = self.copy(newcasename=newcasename, newcaseroot=newcaseroot) if cime_output_root is not None: - clone.set_value('CIME_OUTPUT_ROOT', cime_output_root) + clone.set_value("CIME_OUTPUT_ROOT", cime_output_root) if exeroot is not None: - clone.set_value('EXEROOT', exeroot) + clone.set_value("EXEROOT", exeroot) if rundir is not None: - clone.set_value('RUNDIR', rundir) + clone.set_value("RUNDIR", rundir) return clone @@ -116,21 +124,21 @@ def make_rundir(self): """ Make directory given by RUNDIR """ - os.makedirs(self.get_value('RUNDIR')) + os.makedirs(self.get_value("RUNDIR")) def set_exeroot(self): """ Assumes CASEROOT is already set; sets an appropriate EXEROOT (nested inside CASEROOT) """ - self.set_value('EXEROOT', os.path.join(self.get_value('CASEROOT'), 'bld')) + self.set_value("EXEROOT", os.path.join(self.get_value("CASEROOT"), "bld")) def set_rundir(self): """ Assumes CASEROOT is already set; sets an appropriate RUNDIR (nested inside CASEROOT) """ - 
self.set_value('RUNDIR', os.path.join(self.get_value('CASEROOT'), 'run')) + self.set_value("RUNDIR", os.path.join(self.get_value("CASEROOT"), "run")) def case_setup(self, clean=False, test_mode=False, reset=False): pass diff --git a/CIME/tests/custom_assertions_test_status.py b/CIME/tests/custom_assertions_test_status.py index 2eaed48118e..e62a78c7998 100644 --- a/CIME/tests/custom_assertions_test_status.py +++ b/CIME/tests/custom_assertions_test_status.py @@ -11,8 +11,8 @@ import CIME.six_additions from CIME import test_status -class CustomAssertionsTestStatus(unittest.TestCase): +class CustomAssertionsTestStatus(unittest.TestCase): def assert_status_of_phase(self, output, status, phase, test_name, xfail=None): """Asserts that 'output' contains a line showing the given status for the given phase for the given test_name. @@ -24,16 +24,21 @@ def assert_status_of_phase(self, output, status, phase, test_name, xfail=None): - 'expected': After the phase, the line should contain '(EXPECTED FAILURE)' - 'unexpected': After the phase, the line should contain '(UNEXPECTED' """ - expected = (r'^ *{} +'.format(re.escape(status)) + - self._test_name_and_phase_regex(test_name, phase)) + expected = r"^ *{} +".format( + re.escape(status) + ) + self._test_name_and_phase_regex(test_name, phase) - if xfail == 'no': + if xfail == "no": # There should be no other text after the testname and phase regex - expected += r' *$' - elif xfail == 'expected': - expected += r' *{}'.format(re.escape(test_status.TEST_EXPECTED_FAILURE_COMMENT)) - elif xfail == 'unexpected': - expected += r' *{}'.format(re.escape(test_status.TEST_UNEXPECTED_FAILURE_COMMENT_START)) + expected += r" *$" + elif xfail == "expected": + expected += r" *{}".format( + re.escape(test_status.TEST_EXPECTED_FAILURE_COMMENT) + ) + elif xfail == "unexpected": + expected += r" *{}".format( + re.escape(test_status.TEST_UNEXPECTED_FAILURE_COMMENT_START) + ) else: expect(xfail is None, "Unhandled value of xfail argument") @@ -43,9 +48,10 @@ def assert_status_of_phase(self, output, status, phase, test_name, xfail=None): def assert_phase_absent(self, output, phase, test_name): """Asserts that 'output' does not contain a status line for the given phase and test_name""" - expected = re.compile(r'^.* +' + - self._test_name_and_phase_regex(test_name, phase), - flags=re.MULTILINE) + expected = re.compile( + r"^.* +" + self._test_name_and_phase_regex(test_name, phase), + flags=re.MULTILINE, + ) CIME.six_additions.assertNotRegex(self, output, expected) @@ -59,16 +65,22 @@ def assert_core_phases(self, output, test_name, fails): status = test_status.TEST_FAIL_STATUS else: status = test_status.TEST_PASS_STATUS - self.assert_status_of_phase(output=output, - status=status, - phase=phase, - test_name=test_name) + self.assert_status_of_phase( + output=output, status=status, phase=phase, test_name=test_name + ) - def assert_num_expected_unexpected_fails(self, output, num_expected, num_unexpected): + def assert_num_expected_unexpected_fails( + self, output, num_expected, num_unexpected + ): """Asserts that the number of occurrences of expected and unexpected fails in 'output' matches the given numbers""" - self.assertEqual(output.count(test_status.TEST_EXPECTED_FAILURE_COMMENT), num_expected) - self.assertEqual(output.count(test_status.TEST_UNEXPECTED_FAILURE_COMMENT_START), num_unexpected) + self.assertEqual( + output.count(test_status.TEST_EXPECTED_FAILURE_COMMENT), num_expected + ) + self.assertEqual( + output.count(test_status.TEST_UNEXPECTED_FAILURE_COMMENT_START), + 
num_unexpected, + ) @staticmethod def _test_name_and_phase_regex(test_name, phase): @@ -80,4 +92,4 @@ def _test_name_and_phase_regex(test_name, phase): # changing. By making its regex shared as much as possible with # the regex in assert_status_of_phase, we decrease the chances # of these false passes. - return r'{} +{}'.format(re.escape(test_name), re.escape(phase)) + return r"{} +{}".format(re.escape(test_name), re.escape(phase)) diff --git a/CIME/tests/scripts_regression_tests.py b/CIME/tests/scripts_regression_tests.py new file mode 100755 index 00000000000..df52cf41698 --- /dev/null +++ b/CIME/tests/scripts_regression_tests.py @@ -0,0 +1,278 @@ +#!/usr/bin/env python3 + +""" +Script containing CIME python regression test suite. This suite should be run +to confirm overall CIME correctness. +""" + +import glob, os, re, shutil, signal, sys, tempfile, threading, time, logging, unittest, getpass, filecmp, time, atexit, functools + +CIMEROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")) +sys.path.insert(0, CIMEROOT) + +from xml.etree.ElementTree import ParseError + +import subprocess, argparse + +subprocess.call('/bin/rm -f $(find . -name "*.pyc")', shell=True, cwd=CIMEROOT) +import six +from six import assertRaisesRegex +import stat as osstat + +import collections + +from CIME.utils import ( + run_cmd, + run_cmd_no_fail, + get_lids, + get_current_commit, + safe_copy, + CIMEError, + get_cime_root, + get_src_root, + Timeout, + import_from_file, + get_model, +) +import get_tests +import CIME.test_scheduler, CIME.wait_for_tests +from CIME.test_scheduler import TestScheduler +from CIME.XML.compilers import Compilers +from CIME.XML.env_run import EnvRun +from CIME.XML.machines import Machines +from CIME.XML.files import Files +from CIME.case import Case +from CIME.code_checker import check_code, get_all_checkable_files +from CIME.test_status import * +from CIME.provenance import get_test_success, save_test_success +from CIME import utils +from CIME.tests.base import BaseTestCase + +os.environ["CIME_GLOBAL_WALLTIME"] = "0:05:00" + + +def write_provenance_info(machine, test_compiler, test_mpilib, test_root): + curr_commit = get_current_commit(repo=CIMEROOT) + logging.info("Testing commit %s" % curr_commit) + cime_model = get_model() + logging.info("Using cime_model = %s" % cime_model) + logging.info("Testing machine = %s" % machine.get_machine_name()) + if test_compiler is not None: + logging.info("Testing compiler = %s" % test_compiler) + if test_mpilib is not None: + logging.info("Testing mpilib = %s" % test_mpilib) + logging.info("Test root: %s" % test_root) + logging.info("Test driver: %s" % CIME.utils.get_cime_default_driver()) + logging.info("Python version {}\n".format(sys.version)) + + +def cleanup(test_root): + if os.path.exists(test_root): + testreporter = os.path.join(test_root, "testreporter") + files = os.listdir(test_root) + if len(files) == 1 and os.path.isfile(testreporter): + os.unlink(testreporter) + if not os.listdir(test_root): + print("All pass, removing directory:", test_root) + os.rmdir(test_root) + + +def setup_arguments(parser): + parser.add_argument( + "--fast", + action="store_true", + help="Skip full system tests, which saves a lot of time", + ) + + parser.add_argument( + "--no-batch", + action="store_true", + help="Do not submit jobs to batch system, run locally." + " If false, will default to machine setting.", + ) + + parser.add_argument( + "--no-fortran-run", + action="store_true", + help="Do not run any fortran jobs. 
Implies --fast" " Used for github actions", + ) + + parser.add_argument( + "--no-cmake", action="store_true", help="Do not run cmake tests" + ) + + parser.add_argument( + "--no-teardown", + action="store_true", + help="Do not delete directories left behind by testing", + ) + + parser.add_argument( + "--machine", help="Select a specific machine setting for cime", default=None + ) + + parser.add_argument( + "--compiler", help="Select a specific compiler setting for cime", default=None + ) + + parser.add_argument( + "--mpilib", help="Select a specific compiler setting for cime", default=None + ) + + parser.add_argument( + "--test-root", + help="Select a specific test root for all cases created by the testing", + default=None, + ) + + parser.add_argument( + "--timeout", + type=int, + help="Select a specific timeout for all tests", + default=None, + ) + + +def configure_tests( + timeout, + no_fortran_run, + fast, + no_batch, + no_cmake, + no_teardown, + machine, + compiler, + mpilib, + test_root, + **kwargs +): + config = CIME.utils.get_cime_config() + + if timeout: + BaseTestCase.GLOBAL_TIMEOUT = str(timeout) + + BaseTestCase.NO_FORTRAN_RUN = no_fortran_run or False + BaseTestCase.FAST_ONLY = fast or no_fortran_run + BaseTestCase.NO_BATCH = no_batch or False + BaseTestCase.NO_CMAKE = no_cmake or False + BaseTestCase.NO_TEARDOWN = no_teardown or False + + # make sure we have default values + MACHINE = None + TEST_COMPILER = None + TEST_MPILIB = None + + if machine is not None: + MACHINE = Machines(machine=machine) + os.environ["CIME_MACHINE"] = machine + elif "CIME_MACHINE" in os.environ: + MACHINE = Machines(machine=os.environ["CIME_MACHINE"]) + elif config.has_option("create_test", "MACHINE"): + MACHINE = Machines(machine=config.get("create_test", "MACHINE")) + elif config.has_option("main", "MACHINE"): + MACHINE = Machines(machine=config.get("main", "MACHINE")) + else: + MACHINE = Machines() + + BaseTestCase.MACHINE = MACHINE + + if compiler is not None: + TEST_COMPILER = compiler + elif config.has_option("create_test", "COMPILER"): + TEST_COMPILER = config.get("create_test", "COMPILER") + elif config.has_option("main", "COMPILER"): + TEST_COMPILER = config.get("main", "COMPILER") + + BaseTestCase.TEST_COMPILER = TEST_COMPILER + + if mpilib is not None: + TEST_MPILIB = mpilib + elif config.has_option("create_test", "MPILIB"): + TEST_MPILIB = config.get("create_test", "MPILIB") + elif config.has_option("main", "MPILIB"): + TEST_MPILIB = config.get("main", "MPILIB") + + BaseTestCase.TEST_MPILIB = TEST_MPILIB + + if test_root is not None: + TEST_ROOT = test_root + elif config.has_option("create_test", "TEST_ROOT"): + TEST_ROOT = config.get("create_test", "TEST_ROOT") + else: + TEST_ROOT = os.path.join( + MACHINE.get_value("CIME_OUTPUT_ROOT"), + "scripts_regression_test.%s" % CIME.utils.get_timestamp(), + ) + + BaseTestCase.TEST_ROOT = TEST_ROOT + + write_provenance_info(MACHINE, TEST_COMPILER, TEST_MPILIB, TEST_ROOT) + + atexit.register(functools.partial(cleanup, TEST_ROOT)) + + +def _main_func(description): + help_str = """ +{0} [TEST] [TEST] +OR +{0} --help + +\033[1mEXAMPLES:\033[0m + \033[1;32m# Run the full suite \033[0m + > {0} + + \033[1;32m# Run all code checker tests \033[0m + > {0} B_CheckCode + + \033[1;32m# Run test test_wait_for_test_all_pass from class M_TestWaitForTests \033[0m + > {0} M_TestWaitForTests.test_wait_for_test_all_pass +""".format( + os.path.basename(sys.argv[0]) + ) + + parser = argparse.ArgumentParser( + usage=help_str, + description=description, + 
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+    )
+
+    setup_arguments(parser)
+
+    parser.add_argument("--verbose", action="store_true", help="Enable verbose logging")
+
+    parser.add_argument("--debug", action="store_true", help="Enable debug logging")
+
+    parser.add_argument("--silent", action="store_true", help="Disable all logging")
+
+    parser.add_argument(
+        "tests", nargs="*", help="Specific tests to run e.g. test_unit*"
+    )
+
+    ns, args = parser.parse_known_args()
+
+    # Now set sys.argv to the remaining unit test args (leaving sys.argv[0] alone)
+    sys.argv[1:] = args
+
+    utils.configure_logging(ns.verbose, ns.debug, ns.silent)
+
+    configure_tests(**vars(ns))
+
+    os.chdir(CIMEROOT)
+
+    if len(ns.tests) == 0:
+        test_suite = unittest.defaultTestLoader.discover(CIMEROOT)
+    else:
+        # Try to load tests by name
+        test_suite = unittest.defaultTestLoader.loadTestsFromNames(ns.tests)
+
+    test_runner = unittest.TextTestRunner(verbosity=2)
+
+    TEST_RESULT = test_runner.run(test_suite)
+
+    # Implements the same behavior as unittest.main
+    # https://github.com/python/cpython/blob/b6d68aa08baebb753534a26d537ac3c0d2c21c79/Lib/unittest/main.py#L272-L273
+    sys.exit(not TEST_RESULT.wasSuccessful())
+
+
+if __name__ == "__main__":
+    _main_func(__doc__)
diff --git a/CIME/tests/test_case.py b/CIME/tests/test_case.py
index c88dfc6280c..7fc78b466bf 100755
--- a/CIME/tests/test_case.py
+++ b/CIME/tests/test_case.py
@@ -9,16 +9,17 @@
 from CIME.case import Case
 from CIME import utils as cime_utils
 
+
 def make_valid_case(path):
     """Make the given path look like a valid case to avoid errors"""
     # Case validity is determined by checking for an env_case.xml file. So put one there
     # to suggest that this directory is a valid case directory. Open in append mode in
     # case the file already exists.
-    with open(os.path.join(path, "env_case.xml"), 'a'):
+    with open(os.path.join(path, "env_case.xml"), "a"):
         pass
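A minimal usage sketch of the make_valid_case helper above (illustrative only; the temporary directory is hypothetical, mirroring how test_new_hash below prepares a case directory):

    import tempfile

    with tempfile.TemporaryDirectory() as tempdir:
        make_valid_case(tempdir)  # creates an empty env_case.xml inside tempdir
        # Case(tempdir) will now treat tempdir as a valid case directory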
-class TestCaseSubmit(unittest.TestCase):
+class TestCaseSubmit(unittest.TestCase):
     def test_check_case(self):
         case = mock.MagicMock()
@@ -29,10 +30,12 @@ def test_check_case(self):
     @mock.patch("CIME.case.case_submit.lock_file")
     @mock.patch("CIME.case.case_submit.unlock_file")
     @mock.patch("os.path.basename")
-    def test__submit(self, lock_file, unlock_file, basename): # pylint: disable=unused-argument
+    def test__submit(
+        self, lock_file, unlock_file, basename
+    ):  # pylint: disable=unused-argument
         case = mock.MagicMock()
 
-        case_submit._submit(case, chksum=True) # pylint: disable=protected-access
+        case_submit._submit(case, chksum=True)  # pylint: disable=protected-access
 
         case.check_case.assert_called_with(skip_pnl=False, chksum=True)
 
@@ -40,7 +43,9 @@ def test__submit(self, lock_file, unlock_file, basename): # pylint: disable=unus
     @mock.patch("CIME.case.case.Case.initialize_derived_attributes")
     @mock.patch("CIME.case.case.Case.get_value")
     @mock.patch("CIME.case.case.Case.read_xml")
-    def test_submit(self, read_xml, get_value, init, _submit): # pylint: disable=unused-argument
+    def test_submit(
+        self, read_xml, get_value, init, _submit
+    ):  # pylint: disable=unused-argument
         with tempfile.TemporaryDirectory() as tempdir:
             get_value.side_effect = [
                 tempdir,
@@ -68,50 +73,48 @@ def test_submit(self, read_xml, get_value, init, _submit): # pylint: disable=unu
                 mail_type=None,
                 batch_args=None,
                 workflow=True,
-                chksum=True
+                chksum=True,
             )
 
-class TestCase(unittest.TestCase):
+class TestCase(unittest.TestCase):
     def setUp(self):
         self.srcroot = os.path.abspath(cime_utils.get_cime_root())
         self.tempdir = tempfile.TemporaryDirectory()
 
     @mock.patch("CIME.case.case.Case.read_xml")
-    @mock.patch("sys.argv", [
-        "/src/create_newcase",
-        "--machine",
-        "docker"
-    ])
+    @mock.patch("sys.argv", ["/src/create_newcase", "--machine", "docker"])
     @mock.patch("time.strftime", return_value="00:00:00")
     @mock.patch("socket.getfqdn", return_value="host1")
     @mock.patch("getpass.getuser", side_effect=["root", "root", "johndoe"])
-    def test_new_hash(self, getuser, getfqdn, strftime, read_xml): # pylint: disable=unused-argument
+    def test_new_hash(
+        self, getuser, getfqdn, strftime, read_xml
+    ):  # pylint: disable=unused-argument
         with self.tempdir as tempdir:
             make_valid_case(tempdir)
             with Case(tempdir) as case:
-                expected = "134a939f62115fb44bf08a46bfb2bd13426833b5c8848cf7c4884af7af05b91a"
+                expected = (
+                    "134a939f62115fb44bf08a46bfb2bd13426833b5c8848cf7c4884af7af05b91a"
+                )
 
                 # Check idempotency
                 for _ in range(2):
                     value = case.new_hash()
 
-                    self.assertTrue(value == expected,
-                                    "{} != {}".format(value, expected))
+                    self.assertTrue(
+                        value == expected, "{} != {}".format(value, expected)
+                    )
 
-                expected = "bb59f1c473ac07e9dd30bfab153c0530a777f89280b716cf42e6fe2f49811a6e"
+                expected = (
+                    "bb59f1c473ac07e9dd30bfab153c0530a777f89280b716cf42e6fe2f49811a6e"
+                )
 
                 value = case.new_hash()
 
-                self.assertTrue(value == expected,
-                                "{} != {}".format(value, expected))
+                self.assertTrue(value == expected, "{} != {}".format(value, expected))
 
     @mock.patch("CIME.case.case.Case.read_xml")
-    @mock.patch("sys.argv", [
-        "/src/create_newcase",
-        "--machine",
-        "docker"
-    ])
+    @mock.patch("sys.argv", ["/src/create_newcase", "--machine", "docker"])
     @mock.patch("time.strftime", return_value="00:00:00")
     @mock.patch("CIME.case.case.lock_file")
     @mock.patch("CIME.case.case.Case.set_lookup_value")
@@ -121,57 +124,78 @@ def test_new_hash(self, 
getuser, getfqdn, strftime, read_xml): # pylint: disable @mock.patch("socket.getfqdn", return_value="host1") @mock.patch("getpass.getuser", return_value="root") @mock.patch.dict(os.environ, {"CIME_MODEL": "cesm"}) - def test_copy(self, getuser, getfqdn, configure, create_caseroot, # pylint: disable=unused-argument - apply_user_mods, set_lookup_value, lock_file, strftime, # pylint: disable=unused-argument - read_xml): # pylint: disable=unused-argument - expected_first_hash = "134a939f62115fb44bf08a46bfb2bd13426833b5c8848cf7c4884af7af05b91a" - expected_second_hash = "3561339a49daab999e3c4ea2f03a9c6acc33296a5bc35f1bfb82e7b5e10bdf38" + def test_copy( + self, + getuser, + getfqdn, + configure, + create_caseroot, # pylint: disable=unused-argument + apply_user_mods, + set_lookup_value, + lock_file, + strftime, # pylint: disable=unused-argument + read_xml, + ): # pylint: disable=unused-argument + expected_first_hash = ( + "134a939f62115fb44bf08a46bfb2bd13426833b5c8848cf7c4884af7af05b91a" + ) + expected_second_hash = ( + "3561339a49daab999e3c4ea2f03a9c6acc33296a5bc35f1bfb82e7b5e10bdf38" + ) with self.tempdir as tempdir: caseroot = os.path.join(tempdir, "test1") with Case(caseroot, read_only=False) as case: - srcroot = os.path.abspath(os.path.join( - os.path.dirname(__file__), "../../../../../")) - case.create("test1", srcroot, "A", "f19_g16_rx1", - machine_name="cori-haswell") + srcroot = os.path.abspath( + os.path.join(os.path.dirname(__file__), "../../../../../") + ) + case.create( + "test1", srcroot, "A", "f19_g16_rx1", machine_name="cori-haswell" + ) # Check that they're all called - configure.assert_called_with("A", "f19_g16_rx1", - machine_name="cori-haswell", - project=None, pecount=None, - compiler=None, mpilib=None, - pesfile=None, gridfile=None, - multi_driver=False, ninst=1, - test=False, walltime=None, - queue=None, output_root=None, - run_unsupported=False, answer=None, - input_dir=None, driver=None, - workflowid="default", - non_local=False, - extra_machines_dir=None, - case_group=None, - ngpus_per_node=0) + configure.assert_called_with( + "A", + "f19_g16_rx1", + machine_name="cori-haswell", + project=None, + pecount=None, + compiler=None, + mpilib=None, + pesfile=None, + gridfile=None, + multi_driver=False, + ninst=1, + test=False, + walltime=None, + queue=None, + output_root=None, + run_unsupported=False, + answer=None, + input_dir=None, + driver=None, + workflowid="default", + non_local=False, + extra_machines_dir=None, + case_group=None, + ngpus_per_node=0, + ) create_caseroot.assert_called() apply_user_mods.assert_called() lock_file.assert_called() - set_lookup_value.assert_called_with("CASE_HASH", - expected_first_hash) + set_lookup_value.assert_called_with("CASE_HASH", expected_first_hash) strftime.return_value = "10:00:00" - with mock.patch("CIME.case.case.Case.set_value") as set_value, \ - mock.patch("sys.argv", ["/src/create_clone"]): + with mock.patch( + "CIME.case.case.Case.set_value" + ) as set_value, mock.patch("sys.argv", ["/src/create_clone"]): case.copy("test2", "{}_2".format(tempdir)) - set_value.assert_called_with("CASE_HASH", - expected_second_hash) + set_value.assert_called_with("CASE_HASH", expected_second_hash) @mock.patch("CIME.case.case.Case.read_xml") - @mock.patch("sys.argv", [ - "/src/create_newcase", - "--machine", - "docker" - ]) + @mock.patch("sys.argv", ["/src/create_newcase", "--machine", "docker"]) @mock.patch("time.strftime", return_value="00:00:00") @mock.patch("CIME.case.case.lock_file") @mock.patch("CIME.case.case.Case.set_lookup_value") @@ 
-181,42 +205,66 @@ def test_copy(self, getuser, getfqdn, configure, create_caseroot, # pylint: disa @mock.patch("socket.getfqdn", return_value="host1") @mock.patch("getpass.getuser", return_value="root") @mock.patch.dict(os.environ, {"CIME_MODEL": "cesm"}) - def test_create(self, get_user, getfqdn, configure, create_caseroot, # pylint: disable=unused-argument - apply_user_mods, set_lookup_value, lock_file, strftime, # pylint: disable=unused-argument - read_xml): # pylint: disable=unused-argument + def test_create( + self, + get_user, + getfqdn, + configure, + create_caseroot, # pylint: disable=unused-argument + apply_user_mods, + set_lookup_value, + lock_file, + strftime, # pylint: disable=unused-argument + read_xml, + ): # pylint: disable=unused-argument with self.tempdir as tempdir: caseroot = os.path.join(tempdir, "test1") with Case(caseroot, read_only=False) as case: - srcroot = os.path.abspath(os.path.join( - os.path.dirname(__file__), "../../../../../")) - case.create("test1", srcroot, "A", "f19_g16_rx1", - machine_name="cori-haswell") + srcroot = os.path.abspath( + os.path.join(os.path.dirname(__file__), "../../../../../") + ) + case.create( + "test1", srcroot, "A", "f19_g16_rx1", machine_name="cori-haswell" + ) # Check that they're all called - configure.assert_called_with("A", "f19_g16_rx1", - machine_name="cori-haswell", - project=None, pecount=None, - compiler=None, mpilib=None, - pesfile=None, gridfile=None, - multi_driver=False, ninst=1, - test=False, walltime=None, - queue=None, output_root=None, - run_unsupported=False, answer=None, - input_dir=None, driver=None, - workflowid="default", - non_local=False, - extra_machines_dir=None, - case_group=None, - ngpus_per_node=0) + configure.assert_called_with( + "A", + "f19_g16_rx1", + machine_name="cori-haswell", + project=None, + pecount=None, + compiler=None, + mpilib=None, + pesfile=None, + gridfile=None, + multi_driver=False, + ninst=1, + test=False, + walltime=None, + queue=None, + output_root=None, + run_unsupported=False, + answer=None, + input_dir=None, + driver=None, + workflowid="default", + non_local=False, + extra_machines_dir=None, + case_group=None, + ngpus_per_node=0, + ) create_caseroot.assert_called() apply_user_mods.assert_called() lock_file.assert_called() - set_lookup_value.assert_called_with("CASE_HASH", - "134a939f62115fb44bf08a46bfb2bd13426833b5c8848cf7c4884af7af05b91a") + set_lookup_value.assert_called_with( + "CASE_HASH", + "134a939f62115fb44bf08a46bfb2bd13426833b5c8848cf7c4884af7af05b91a", + ) -class TestCase_RecordCmd(unittest.TestCase): +class TestCase_RecordCmd(unittest.TestCase): def setUp(self): self.srcroot = os.path.abspath(cime_utils.get_cime_root()) self.tempdir = tempfile.TemporaryDirectory() @@ -226,23 +274,25 @@ def assert_calls_match(self, calls, expected): for x, y in zip(calls, expected): self.assertTrue(x == y, calls) + @mock.patch("CIME.case.case.Case.__init__", return_value=None) @mock.patch("CIME.case.case.Case.flush") @mock.patch("CIME.case.case.Case.get_value") @mock.patch("CIME.case.case.open", mock.mock_open()) @mock.patch("time.strftime", return_value="00:00:00") @mock.patch("sys.argv", ["/src/create_newcase"]) - def test_error(self, strftime, get_value, flush, init): # pylint: disable=unused-argument - Case._force_read_only = False # pylint: disable=protected-access - - with self.tempdir as tempdir, mock.patch("CIME.case.case.open", mock.mock_open()) as m: + def test_error( + self, strftime, get_value, flush, init + ): # pylint: disable=unused-argument + Case._force_read_only = 
False # pylint: disable=protected-access + + with self.tempdir as tempdir, mock.patch( + "CIME.case.case.open", mock.mock_open() + ) as m: m.side_effect = PermissionError() with Case(tempdir) as case: - get_value.side_effect = [ - tempdir, - "/src" - ] + get_value.side_effect = [tempdir, "/src"] # We didn't need to make tempdir look like a valid case for the Case # constructor because we mock that constructor, but we *do* need to make @@ -256,18 +306,16 @@ def test_error(self, strftime, get_value, flush, init): # pylint: disable=unused @mock.patch("CIME.case.case.open", mock.mock_open()) @mock.patch("time.strftime", return_value="00:00:00") @mock.patch("sys.argv", ["/src/create_newcase"]) - def test_init(self, strftime, get_value, flush, init): # pylint: disable=unused-argument - Case._force_read_only = False # pylint: disable=protected-access + def test_init( + self, strftime, get_value, flush, init + ): # pylint: disable=unused-argument + Case._force_read_only = False # pylint: disable=protected-access mocked_open = mock.mock_open() - with self.tempdir as tempdir, mock.patch("CIME.case.case.open", - mocked_open): + with self.tempdir as tempdir, mock.patch("CIME.case.case.open", mocked_open): with Case(tempdir) as case: - get_value.side_effect = [ - tempdir, - "/src" - ] + get_value.side_effect = [tempdir, "/src"] case.record_cmd(init=True) @@ -275,14 +323,16 @@ def test_init(self, strftime, get_value, flush, init): # pylint: disable=unused- handle = mocked_open() - handle.writelines.assert_called_with([ - "#!/bin/bash\n\n", - "set -e\n\n", - "# Created 00:00:00\n\n", - "CASEDIR=\"{}\"\n\n".format(tempdir), - "/src/create_newcase\n\n", - "cd \"${CASEDIR}\"\n\n", - ]) + handle.writelines.assert_called_with( + [ + "#!/bin/bash\n\n", + "set -e\n\n", + "# Created 00:00:00\n\n", + 'CASEDIR="{}"\n\n'.format(tempdir), + "/src/create_newcase\n\n", + 'cd "${CASEDIR}"\n\n', + ] + ) @mock.patch("CIME.case.case.Case.__init__", return_value=None) @mock.patch("CIME.case.case.Case.flush") @@ -290,18 +340,16 @@ def test_init(self, strftime, get_value, flush, init): # pylint: disable=unused- @mock.patch("CIME.case.case.open", mock.mock_open()) @mock.patch("time.strftime", return_value="00:00:00") @mock.patch("sys.argv", ["/src/scripts/create_newcase"]) - def test_sub_relative(self, strftime, get_value, flush, init): # pylint: disable=unused-argument - Case._force_read_only = False # pylint: disable=protected-access + def test_sub_relative( + self, strftime, get_value, flush, init + ): # pylint: disable=unused-argument + Case._force_read_only = False # pylint: disable=protected-access mocked_open = mock.mock_open() - with self.tempdir as tempdir, mock.patch("CIME.case.case.open", - mocked_open): + with self.tempdir as tempdir, mock.patch("CIME.case.case.open", mocked_open): with Case(tempdir) as case: - get_value.side_effect = [ - tempdir, - "/src" - ] + get_value.side_effect = [tempdir, "/src"] case.record_cmd(init=True) @@ -309,9 +357,9 @@ def test_sub_relative(self, strftime, get_value, flush, init): # pylint: disable "#!/bin/bash\n\n", "set -e\n\n", "# Created 00:00:00\n\n", - "CASEDIR=\"{}\"\n\n".format(tempdir), + 'CASEDIR="{}"\n\n'.format(tempdir), "/src/scripts/create_newcase\n\n", - "cd \"${CASEDIR}\"\n\n", + 'cd "${CASEDIR}"\n\n', ] handle = mocked_open() @@ -320,13 +368,12 @@ def test_sub_relative(self, strftime, get_value, flush, init): # pylint: disable @mock.patch("CIME.case.case.Case.__init__", return_value=None) @mock.patch("CIME.case.case.Case.flush") 
@mock.patch("CIME.case.case.Case.get_value") - def test_cmd_arg(self, get_value, flush, init): # pylint: disable=unused-argument - Case._force_read_only = False # pylint: disable=protected-access + def test_cmd_arg(self, get_value, flush, init): # pylint: disable=unused-argument + Case._force_read_only = False # pylint: disable=protected-access mocked_open = mock.mock_open() - with self.tempdir as tempdir, mock.patch("CIME.case.case.open", - mocked_open): + with self.tempdir as tempdir, mock.patch("CIME.case.case.open", mocked_open): with Case(tempdir) as case: get_value.side_effect = [ tempdir, @@ -346,5 +393,6 @@ def test_cmd_arg(self, get_value, flush, init): # pylint: disable=unused-argumen handle = mocked_open() handle.writelines.assert_called_with(expected) -if __name__ == '__main__': + +if __name__ == "__main__": unittest.main() diff --git a/CIME/tests/test_case_fake.py b/CIME/tests/test_case_fake.py index fb1ac1ed4dd..448931ecc7c 100755 --- a/CIME/tests/test_case_fake.py +++ b/CIME/tests/test_case_fake.py @@ -11,8 +11,8 @@ from CIME.tests.case_fake import CaseFake -class TestCaseFake(unittest.TestCase): +class TestCaseFake(unittest.TestCase): def setUp(self): self.tempdir = tempfile.mkdtemp() @@ -21,21 +21,21 @@ def tearDown(self): def test_create_clone(self): # Setup - old_caseroot = os.path.join(self.tempdir, 'oldcase') + old_caseroot = os.path.join(self.tempdir, "oldcase") oldcase = CaseFake(old_caseroot) - oldcase.set_value('foo', 'bar') + oldcase.set_value("foo", "bar") # Exercise - new_caseroot = os.path.join(self.tempdir, 'newcase') + new_caseroot = os.path.join(self.tempdir, "newcase") clone = oldcase.create_clone(new_caseroot) # Verify - self.assertEqual('bar', clone.get_value('foo')) - self.assertEqual('newcase', clone.get_value('CASE')) - self.assertEqual('newcase', clone.get_value('CASEBASEID')) - self.assertEqual(new_caseroot, clone.get_value('CASEROOT')) - self.assertEqual(os.path.join(new_caseroot, 'run'), - clone.get_value('RUNDIR')) - -if __name__ == '__main__': + self.assertEqual("bar", clone.get_value("foo")) + self.assertEqual("newcase", clone.get_value("CASE")) + self.assertEqual("newcase", clone.get_value("CASEBASEID")) + self.assertEqual(new_caseroot, clone.get_value("CASEROOT")) + self.assertEqual(os.path.join(new_caseroot, "run"), clone.get_value("RUNDIR")) + + +if __name__ == "__main__": unittest.main() diff --git a/CIME/tests/test_case_setup.py b/CIME/tests/test_case_setup.py index f51529173b3..fe5fa7308c1 100644 --- a/CIME/tests/test_case_setup.py +++ b/CIME/tests/test_case_setup.py @@ -9,9 +9,10 @@ from CIME.case import case_setup + @contextlib.contextmanager def create_machines_dir(): - """ Creates temp machines directory with fake content """ + """Creates temp machines directory with fake content""" with tempfile.TemporaryDirectory() as temp_path: machines_path = os.path.join(temp_path, "machines") cmake_path = os.path.join(machines_path, "cmake_macros") @@ -21,6 +22,7 @@ def create_machines_dir(): yield temp_path + @contextlib.contextmanager def chdir(path): old_path = os.getcwd() @@ -31,9 +33,9 @@ def chdir(path): finally: os.chdir(old_path) + # pylint: disable=protected-access class TestCaseSetup(unittest.TestCase): - @mock.patch("CIME.case.case_setup.copy_depends_files") def test_create_macros_cmake(self, copy_depends_files): machine_mock = mock.MagicMock() @@ -45,22 +47,28 @@ def test_create_macros_cmake(self, copy_depends_files): case_path = stack.enter_context(tempfile.TemporaryDirectory()) machines_path = os.path.join(root_path, "machines") - 
type(machine_mock).machines_dir = mock.PropertyMock(return_value=machines_path) + type(machine_mock).machines_dir = mock.PropertyMock( + return_value=machines_path + ) # make sure we're calling everything from within the case root stack.enter_context(chdir(case_path)) - case_setup._create_macros_cmake(case_path, - os.path.join(machines_path, "cmake_macros"), - machine_mock, - "gnu-test", - os.path.join(case_path, "cmake_macros")) + case_setup._create_macros_cmake( + case_path, + os.path.join(machines_path, "cmake_macros"), + machine_mock, + "gnu-test", + os.path.join(case_path, "cmake_macros"), + ) assert os.path.exists(os.path.join(case_path, "Macros.cmake")) assert os.path.exists(os.path.join(case_path, "cmake_macros", "test.cmake")) - copy_depends_files.assert_called_with("test", machines_path, case_path, "gnu-test") - + copy_depends_files.assert_called_with( + "test", machines_path, case_path, "gnu-test" + ) + @mock.patch("CIME.case.case_setup._create_macros_cmake") def test_create_macros(self, _create_macros_cmake): case_mock = mock.MagicMock() @@ -77,24 +85,36 @@ def test_create_macros(self, _create_macros_cmake): case_mock.get_value.return_value = cmake_macros_path machines_path = os.path.join(root_path, "machines") - type(machine_mock).machines_dir = mock.PropertyMock(return_value=machines_path) + type(machine_mock).machines_dir = mock.PropertyMock( + return_value=machines_path + ) # do not generate env_mach_specific.xml Path(os.path.join(case_path, "env_mach_specific.xml")).touch() - case_setup._create_macros(case_mock, machine_mock, case_path, "gnu-test", - "openmpi", False, "mct", "LINUX") + case_setup._create_macros( + case_mock, + machine_mock, + case_path, + "gnu-test", + "openmpi", + False, + "mct", + "LINUX", + ) case_mock.get_value.assert_any_call("CMAKE_MACROS_DIR") # make sure we're calling everything from within the case root stack.enter_context(chdir(case_path)) - _create_macros_cmake.assert_called_with(case_path, - cmake_macros_path, - machine_mock, - "gnu-test", - os.path.join(case_path, "cmake_macros")) + _create_macros_cmake.assert_called_with( + case_path, + cmake_macros_path, + machine_mock, + "gnu-test", + os.path.join(case_path, "cmake_macros"), + ) def test_create_macros_copy_user(self): case_mock = mock.MagicMock() @@ -117,7 +137,9 @@ def test_create_macros_copy_user(self): case_mock.get_value.return_value = cmake_macros_path machines_path = os.path.join(root_path, "machines") - type(machine_mock).machines_dir = mock.PropertyMock(return_value=machines_path) + type(machine_mock).machines_dir = mock.PropertyMock( + return_value=machines_path + ) # do not generate env_mach_specific.xml Path(os.path.join(case_path, "env_mach_specific.xml")).touch() @@ -127,8 +149,16 @@ def test_create_macros_copy_user(self): # make sure we're calling everything from within the case root stack.enter_context(chdir(case_path)) - case_setup._create_macros(case_mock, machine_mock, case_path, "gnu-test", - "openmpi", False, "mct", "LINUX") + case_setup._create_macros( + case_mock, + machine_mock, + case_path, + "gnu-test", + "openmpi", + False, + "mct", + "LINUX", + ) case_mock.get_value.assert_any_call("CMAKE_MACROS_DIR") @@ -156,7 +186,9 @@ def test_create_macros_copy_extra(self): case_mock.get_value.side_effect = [cmake_macros_path, extra_path] machines_path = os.path.join(root_path, "machines") - type(machine_mock).machines_dir = mock.PropertyMock(return_value=machines_path) + type(machine_mock).machines_dir = mock.PropertyMock( + return_value=machines_path + ) # do not generate 
env_mach_specific.xml Path(os.path.join(case_path, "env_mach_specific.xml")).touch() @@ -164,9 +196,19 @@ def test_create_macros_copy_extra(self): # make sure we're calling everything from within the case root stack.enter_context(chdir(case_path)) - case_setup._create_macros(case_mock, machine_mock, case_path, "gnu-test", - "openmpi", False, "mct", "LINUX") + case_setup._create_macros( + case_mock, + machine_mock, + case_path, + "gnu-test", + "openmpi", + False, + "mct", + "LINUX", + ) case_mock.get_value.assert_any_call("EXTRA_MACHDIR") - assert os.path.exists(os.path.join(case_path, "cmake_macros", "extra.cmake")) + assert os.path.exists( + os.path.join(case_path, "cmake_macros", "extra.cmake") + ) diff --git a/CIME/tests/test_compare_test_results.py b/CIME/tests/test_compare_test_results.py index ed705624ad6..bc298aaf9f9 100755 --- a/CIME/tests/test_compare_test_results.py +++ b/CIME/tests/test_compare_test_results.py @@ -16,7 +16,6 @@ class TestCaseFake(unittest.TestCase): - def setUp(self): self.tempdir = tempfile.mkdtemp() self.test_root = os.path.join(self.tempdir, "tests") @@ -27,7 +26,9 @@ def setUp(self): utils.time.strftime = lambda *args: "2021-02-20" self._old_init = CaseFake.__init__ - CaseFake.__init__ = lambda x, y, *args: self._old_init(x, y, create_case_root=False) + CaseFake.__init__ = lambda x, y, *args: self._old_init( + x, y, create_case_root=False + ) self._old_case = compare_test_results.Case compare_test_results.Case = CaseFake @@ -47,10 +48,13 @@ def _compare_test_results(self, baseline, test_id, phases, **kwargs): for x in phases: status.set_status(x[0], x[1]) - compare_test_results.compare_test_results(baseline, \ - self.baseline_root, self.test_root, "gnu", test_id, **kwargs) + compare_test_results.compare_test_results( + baseline, self.baseline_root, self.test_root, "gnu", test_id, **kwargs + ) - compare_log = os.path.join(test_status_root, "compare.log.{}.2021-02-20".format(baseline)) + compare_log = os.path.join( + test_status_root, "compare.log.{}.2021-02-20".format(baseline) + ) self.assertTrue(os.path.exists(compare_log)) @@ -63,7 +67,9 @@ def test_namelists_only(self): (RUN_PHASE, "PASS"), ] - self._compare_test_results("test1", "test-baseline", phases, namelists_only=True) + self._compare_test_results( + "test1", "test-baseline", phases, namelists_only=True + ) def test_hist_only(self): compare_test_results.compare_namelists = lambda *args: True @@ -97,5 +103,6 @@ def test_baseline(self): self._compare_test_results("test1", "test-baseline", phases) -if __name__ == '__main__': + +if __name__ == "__main__": unittest.main() diff --git a/CIME/tests/test_cs_status.py b/CIME/tests/test_cs_status.py index 467c9b72f12..38668ded879 100755 --- a/CIME/tests/test_cs_status.py +++ b/CIME/tests/test_cs_status.py @@ -11,6 +11,7 @@ from CIME import test_status from CIME.tests.custom_assertions_test_status import CustomAssertionsTestStatus + class TestCsStatus(CustomAssertionsTestStatus): # ------------------------------------------------------------------------ @@ -44,8 +45,7 @@ def create_test_dir(self, test_dir): def create_test_status_core_passes(test_dir_path, test_name): """Creates a TestStatus file in the given path, with PASS status for all core phases""" - with test_status.TestStatus(test_dir=test_dir_path, - test_name=test_name) as ts: + with test_status.TestStatus(test_dir=test_dir_path, test_name=test_name) as ts: for phase in test_status.CORE_PHASES: ts.set_status(phase, test_status.TEST_PASS_STATUS) @@ -54,17 +54,18 @@ def set_last_core_phase_to_fail(self, 
test_dir_path, test_name): Returns the name of this phase""" fail_phase = test_status.CORE_PHASES[-1] - self.set_phase_to_status(test_dir_path=test_dir_path, - test_name=test_name, - phase=fail_phase, - status=test_status.TEST_FAIL_STATUS) + self.set_phase_to_status( + test_dir_path=test_dir_path, + test_name=test_name, + phase=fail_phase, + status=test_status.TEST_FAIL_STATUS, + ) return fail_phase @staticmethod def set_phase_to_status(test_dir_path, test_name, phase, status): """Sets the given phase to the given status for this test""" - with test_status.TestStatus(test_dir=test_dir_path, - test_name=test_name) as ts: + with test_status.TestStatus(test_dir=test_dir_path, test_name=test_name) as ts: ts.set_status(phase, status) # ------------------------------------------------------------------------ @@ -73,59 +74,80 @@ def set_phase_to_status(test_dir_path, test_name, phase, status): def test_single_test(self): """cs_status for a single test should include some minimal expected output""" - test_name = 'my.test.name' - test_dir = 'my.test.name.testid' + test_name = "my.test.name" + test_dir = "my.test.name.testid" test_dir_path = self.create_test_dir(test_dir) self.create_test_status_core_passes(test_dir_path, test_name) - cs_status([os.path.join(test_dir_path, 'TestStatus')], - out=self._output) + cs_status([os.path.join(test_dir_path, "TestStatus")], out=self._output) self.assert_core_phases(self._output.getvalue(), test_name, fails=[]) def test_two_tests(self): """cs_status for two tests (one with a FAIL) should include some minimal expected output""" - test_name1 = 'my.test.name1' - test_name2 = 'my.test.name2' - test_dir1 = test_name1 + '.testid' - test_dir2 = test_name2 + '.testid' + test_name1 = "my.test.name1" + test_name2 = "my.test.name2" + test_dir1 = test_name1 + ".testid" + test_dir2 = test_name2 + ".testid" test_dir_path1 = self.create_test_dir(test_dir1) test_dir_path2 = self.create_test_dir(test_dir2) self.create_test_status_core_passes(test_dir_path1, test_name1) self.create_test_status_core_passes(test_dir_path2, test_name2) test2_fail_phase = self.set_last_core_phase_to_fail(test_dir_path2, test_name2) - cs_status([os.path.join(test_dir_path1, 'TestStatus'), - os.path.join(test_dir_path2, 'TestStatus')], - out=self._output) + cs_status( + [ + os.path.join(test_dir_path1, "TestStatus"), + os.path.join(test_dir_path2, "TestStatus"), + ], + out=self._output, + ) self.assert_core_phases(self._output.getvalue(), test_name1, fails=[]) - self.assert_core_phases(self._output.getvalue(), test_name2, fails=[test2_fail_phase]) + self.assert_core_phases( + self._output.getvalue(), test_name2, fails=[test2_fail_phase] + ) def test_fails_only(self): """With fails_only flag, only fails and pends should appear in the output""" - test_name = 'my.test.name' - test_dir = 'my.test.name.testid' + test_name = "my.test.name" + test_dir = "my.test.name.testid" test_dir_path = self.create_test_dir(test_dir) self.create_test_status_core_passes(test_dir_path, test_name) fail_phase = self.set_last_core_phase_to_fail(test_dir_path, test_name) pend_phase = self._NON_CORE_PHASE - self.set_phase_to_status(test_dir_path, test_name, - phase=pend_phase, - status=test_status.TEST_PEND_STATUS) - cs_status([os.path.join(test_dir_path, 'TestStatus')], - fails_only=True, - out=self._output) - self.assert_status_of_phase(output=self._output.getvalue(), - status=test_status.TEST_FAIL_STATUS, - phase=fail_phase, - test_name=test_name) - self.assert_status_of_phase(output=self._output.getvalue(), - 
status=test_status.TEST_PEND_STATUS, - phase=pend_phase, - test_name=test_name) + self.set_phase_to_status( + test_dir_path, + test_name, + phase=pend_phase, + status=test_status.TEST_PEND_STATUS, + ) + cs_status( + [os.path.join(test_dir_path, "TestStatus")], + fails_only=True, + out=self._output, + ) + self.assert_status_of_phase( + output=self._output.getvalue(), + status=test_status.TEST_FAIL_STATUS, + phase=fail_phase, + test_name=test_name, + ) + self.assert_status_of_phase( + output=self._output.getvalue(), + status=test_status.TEST_PEND_STATUS, + phase=pend_phase, + test_name=test_name, + ) for phase in test_status.CORE_PHASES: if phase != fail_phase: - self.assert_phase_absent(output=self._output.getvalue(), - phase=phase, - test_name=test_name) - CIME.six_additions.assertNotRegex(self, self._output.getvalue(), r'Overall:') + self.assert_phase_absent( + output=self._output.getvalue(), phase=phase, test_name=test_name + ) + six_additions.assertNotRegex(self, self._output.getvalue(), r"Overall:") def test_count_fails(self): """Test the count of fails with three tests @@ -145,33 +167,46 @@ def test_count_fails(self): # wouldn't cover any additional code/logic phase_of_interest1 = self._NON_CORE_PHASE phase_of_interest2 = self._NON_CORE_PHASE2 - statuses1 = [test_status.TEST_FAIL_STATUS, - test_status.TEST_PASS_STATUS, - test_status.TEST_FAIL_STATUS] - statuses2 = [test_status.TEST_PASS_STATUS, - test_status.TEST_PASS_STATUS, - test_status.TEST_FAIL_STATUS] + statuses1 = [ + test_status.TEST_FAIL_STATUS, + test_status.TEST_PASS_STATUS, + test_status.TEST_FAIL_STATUS, + ] + statuses2 = [ + test_status.TEST_PASS_STATUS, + test_status.TEST_PASS_STATUS, + test_status.TEST_FAIL_STATUS, + ] test_paths = [] test_names = [] for testnum in range(3): - test_name = 'my.test.name' + str(testnum) + test_name = "my.test.name" + str(testnum) test_names.append(test_name) - test_dir = test_name + '.testid' + test_dir = test_name + ".testid" test_dir_path = self.create_test_dir(test_dir) self.create_test_status_core_passes(test_dir_path, test_name) - self.set_phase_to_status(test_dir_path, test_name, - phase=phase_of_interest1, - status=statuses1[testnum]) - self.set_phase_to_status(test_dir_path, test_name, - phase=phase_of_interest2, - status=statuses2[testnum]) - test_paths.append(os.path.join(test_dir_path, 'TestStatus')) + self.set_phase_to_status( + test_dir_path, + test_name, + phase=phase_of_interest1, + status=statuses1[testnum], + ) + self.set_phase_to_status( + test_dir_path, + test_name, + phase=phase_of_interest2, + status=statuses2[testnum], + ) + test_paths.append(os.path.join(test_dir_path, "TestStatus")) - cs_status(test_paths, - count_fails_phase_list=[phase_of_interest1, phase_of_interest2], - out=self._output) + cs_status( + test_paths, + count_fails_phase_list=[phase_of_interest1, phase_of_interest2], + out=self._output, + ) for testnum in range(3): - self.assert_phase_absent(output=self._output.getvalue(), - phase=phase_of_interest1, - test_name=test_names[testnum]) @@ -182,13 +217,29 @@ def test_count_fails(self): - CIME.six.assertRegex(self, self._output.getvalue(), count_regex1) - count_regex2 = r'{} +non-passes: +1'.format(re.escape(phase_of_interest2)) - CIME.six.assertRegex(self, self._output.getvalue(), count_regex2) + self.assert_phase_absent( + output=self._output.getvalue(), + phase=phase_of_interest1, + test_name=test_names[testnum], + )
+ self.assert_phase_absent( + output=self._output.getvalue(), + phase=phase_of_interest2, + test_name=test_names[testnum], + ) + count_regex1 = r"{} +non-passes: +2".format(re.escape(phase_of_interest1)) + six.assertRegex(self, self._output.getvalue(), count_regex1) + count_regex2 = r"{} +non-passes: +1".format(re.escape(phase_of_interest2)) + six.assertRegex(self, self._output.getvalue(), count_regex2) def test_expected_fails(self): """With the expected_fails_file flag, expected failures should be flagged as such""" - test_name1 = 'my.test.name1' - test_name2 = 'my.test.name2' - test_dir1 = test_name1 + '.testid' - test_dir2 = test_name2 + '.testid' + test_name1 = "my.test.name1" + test_name2 = "my.test.name2" + test_dir1 = test_name1 + ".testid" + test_dir2 = test_name2 + ".testid" test_dir_path1 = self.create_test_dir(test_dir1) test_dir_path2 = self.create_test_dir(test_dir2) self.create_test_status_core_passes(test_dir_path1, test_name1) @@ -205,36 +256,51 @@ def test_expected_fails(self):
-""".format(test_name1=test_name1, - test1_fail_phase=test1_fail_phase, - fail_status=test_status.TEST_FAIL_STATUS) - expected_fails_filepath = os.path.join(self._testroot, 'ExpectedFails.xml') - with open(expected_fails_filepath, 'w') as expected_fails_file: +""".format( + test_name1=test_name1, + test1_fail_phase=test1_fail_phase, + fail_status=test_status.TEST_FAIL_STATUS, + ) + expected_fails_filepath = os.path.join(self._testroot, "ExpectedFails.xml") + with open(expected_fails_filepath, "w") as expected_fails_file: expected_fails_file.write(expected_fails_contents) - cs_status([os.path.join(test_dir_path1, 'TestStatus'), - os.path.join(test_dir_path2, 'TestStatus')], - expected_fails_filepath=expected_fails_filepath, - out=self._output) + cs_status( + [ + os.path.join(test_dir_path1, "TestStatus"), + os.path.join(test_dir_path2, "TestStatus"), + ], + expected_fails_filepath=expected_fails_filepath, + out=self._output, + ) # Both test1 and test2 should have a failure for one phase, but this should be # marked as expected only for test1. - self.assert_core_phases(self._output.getvalue(), test_name1, fails=[test1_fail_phase]) - self.assert_status_of_phase(self._output.getvalue(), - test_status.TEST_FAIL_STATUS, - test1_fail_phase, - test_name1, - xfail='expected') - self.assert_core_phases(self._output.getvalue(), test_name2, fails=[test2_fail_phase]) - self.assert_status_of_phase(self._output.getvalue(), - test_status.TEST_FAIL_STATUS, - test2_fail_phase, - test_name2, - xfail='no') + self.assert_core_phases( + self._output.getvalue(), test_name1, fails=[test1_fail_phase] + ) + self.assert_status_of_phase( + self._output.getvalue(), + test_status.TEST_FAIL_STATUS, + test1_fail_phase, + test_name1, + xfail="expected", + ) + self.assert_core_phases( + self._output.getvalue(), test_name2, fails=[test2_fail_phase] + ) + self.assert_status_of_phase( + self._output.getvalue(), + test_status.TEST_FAIL_STATUS, + test2_fail_phase, + test_name2, + xfail="no", + ) # Make sure that no other phases are mistakenly labeled as expected failures: - self.assert_num_expected_unexpected_fails(self._output.getvalue(), - num_expected=1, - num_unexpected=0) + self.assert_num_expected_unexpected_fails( + self._output.getvalue(), num_expected=1, num_unexpected=0 + ) + -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/CIME/tests/test_nmlgen.py b/CIME/tests/test_nmlgen.py index 00345fa0854..52e04d28856 100644 --- a/CIME/tests/test_nmlgen.py +++ b/CIME/tests/test_nmlgen.py @@ -7,9 +7,8 @@ # pylint: disable=protected-access class TestNamelistGenerator(unittest.TestCase): - def test_init_defaults(self): - test_nml_infile = b"""&test + test_nml_infile = b"""&test test1 = 'test1_updated' /""" @@ -36,8 +35,7 @@ def test_init_defaults(self): """ - with tempfile.NamedTemporaryFile() as temp, \ - tempfile.NamedTemporaryFile() as temp2: + with tempfile.NamedTemporaryFile() as temp, tempfile.NamedTemporaryFile() as temp2: temp.write(test_data.encode()) temp.flush() @@ -50,14 +48,12 @@ def test_init_defaults(self): nmlgen.init_defaults([temp2.name], None) - expected_groups = OrderedDict({ - "test_nml": { - "test1": ["'test1_updated'"], - "test2": ['"test2_value"'] - } - }) + expected_groups = OrderedDict( + {"test_nml": {"test1": ["'test1_updated'"], "test2": ['"test2_value"']}} + ) assert nmlgen._namelist._groups == expected_groups + if __name__ == "__main__": unittest.main() diff --git a/CIME/tests/test_provenance.py b/CIME/tests/test_provenance.py index 22351a123c0..21038a8b48d 100755 
--- a/CIME/tests/test_provenance.py +++ b/CIME/tests/test_provenance.py @@ -55,7 +55,9 @@ def test_run_git_cmd_recursively(self, run_cmd): run_cmd.return_value = (0, "data", None) with mock.patch("CIME.provenance.open", mock.mock_open()) as m: - provenance._run_git_cmd_recursively("status", "/srcroot", "/output.txt") # pylint: disable=protected-access + provenance._run_git_cmd_recursively( + "status", "/srcroot", "/output.txt" + ) # pylint: disable=protected-access m.assert_called_with("/output.txt", "w") @@ -66,15 +68,17 @@ def test_run_git_cmd_recursively(self, run_cmd): run_cmd.assert_any_call("git status", from_dir="/srcroot") run_cmd.assert_any_call( - "git submodule foreach --recursive \"git status; echo\"", - from_dir="/srcroot") + 'git submodule foreach --recursive "git status; echo"', from_dir="/srcroot" + ) @mock.patch("CIME.provenance.run_cmd") def test_run_git_cmd_recursively_error(self, run_cmd): run_cmd.return_value = (1, "data", "error") with mock.patch("CIME.provenance.open", mock.mock_open()) as m: - provenance._run_git_cmd_recursively("status", "/srcroot", "/output.txt") # pylint: disable=protected-access + provenance._run_git_cmd_recursively( + "status", "/srcroot", "/output.txt" + ) # pylint: disable=protected-access m.assert_called_with("/output.txt", "w") @@ -85,8 +89,8 @@ def test_run_git_cmd_recursively_error(self, run_cmd): run_cmd.assert_any_call("git status", from_dir="/srcroot") run_cmd.assert_any_call( - "git submodule foreach --recursive \"git status; echo\"", - from_dir="/srcroot") + 'git submodule foreach --recursive "git status; echo"', from_dir="/srcroot" + ) @mock.patch("CIME.provenance.safe_copy") @mock.patch("CIME.provenance.run_cmd") @@ -97,7 +101,9 @@ def test_record_git_provenance(self, run_cmd, safe_copy): with tempfile.TemporaryDirectory() as tempdir: os.makedirs(os.path.join(tempdir, ".git")) - provenance._record_git_provenance(tempdir, "/output", "5") # pylint: disable=protected-access + provenance._record_git_provenance( + tempdir, "/output", "5" + ) # pylint: disable=protected-access m.assert_any_call("/output/GIT_STATUS.5", "w") m.assert_any_call("/output/GIT_DIFF.5", "w") @@ -111,32 +117,30 @@ def test_record_git_provenance(self, run_cmd, safe_copy): run_cmd.assert_any_call("git status", from_dir=tempdir) run_cmd.assert_any_call( - "git submodule foreach --recursive \"git status; echo\"", - from_dir=tempdir) + 'git submodule foreach --recursive "git status; echo"', from_dir=tempdir + ) + run_cmd.assert_any_call("git diff", from_dir=tempdir) run_cmd.assert_any_call( - "git diff", - from_dir=tempdir) + 'git submodule foreach --recursive "git diff; echo"', from_dir=tempdir + ) run_cmd.assert_any_call( - "git submodule foreach --recursive \"git diff; echo\"", - from_dir=tempdir) + "git log --first-parent --pretty=oneline -n 5", from_dir=tempdir + ) run_cmd.assert_any_call( - "git log --first-parent --pretty=oneline -n 5", - from_dir=tempdir) + 'git submodule foreach --recursive "git log --first-parent' + ' --pretty=oneline -n 5; echo"', + from_dir=tempdir, + ) + run_cmd.assert_any_call("git remote -v", from_dir=tempdir) run_cmd.assert_any_call( - "git submodule foreach --recursive \"git log --first-parent" - " --pretty=oneline -n 5; echo\"", - from_dir=tempdir) - run_cmd.assert_any_call( - "git remote -v", - from_dir=tempdir) - run_cmd.assert_any_call( - "git submodule foreach --recursive \"git remote -v; echo\"", - from_dir=tempdir) + 'git submodule foreach --recursive "git remote -v; echo"', from_dir=tempdir + ) + + safe_copy.assert_any_call( + 
f"{tempdir}/.git/config", "/output/GIT_CONFIG.5", preserve_meta=False + ) - safe_copy.assert_any_call(f"{tempdir}/.git/config", - "/output/GIT_CONFIG.5", - preserve_meta=False) -if __name__ == '__main__': - sys.path.insert(0, os.path.abspath(os.path.join('.', '..', '..', 'lib'))) +if __name__ == "__main__": + sys.path.insert(0, os.path.abspath(os.path.join(".", "..", "..", "lib"))) unittest.main() diff --git a/CIME/tests/test_sys_bless_tests_results.py b/CIME/tests/test_sys_bless_tests_results.py new file mode 100644 index 00000000000..72029d6c7d7 --- /dev/null +++ b/CIME/tests/test_sys_bless_tests_results.py @@ -0,0 +1,214 @@ +#!/usr/bin/env python3 + +import glob +import re +import os +import stat + +from CIME import utils +from CIME.tests import base + + +class TestBlessTestResults(base.BaseTestCase): + def setUp(self): + super().setUp() + + # Set a restrictive umask so we can test that SharedAreas used for + # recording baselines are working + restrictive_mask = 0o027 + self._orig_umask = os.umask(restrictive_mask) + + def tearDown(self): + super().tearDown() + + if "TESTRUNDIFF_ALTERNATE" in os.environ: + del os.environ["TESTRUNDIFF_ALTERNATE"] + + os.umask(self._orig_umask) + + def test_bless_test_results(self): + if self.NO_FORTRAN_RUN: + self.skipTest("Skipping fortran test") + # Test resubmit scenario if Machine has a batch system + if self.MACHINE.has_batch_system(): + test_names = [ + "TESTRUNDIFFRESUBMIT_Mmpi-serial.f19_g16_rx1.A", + "TESTRUNDIFF_Mmpi-serial.f19_g16_rx1.A", + ] + else: + test_names = ["TESTRUNDIFF_P1.f19_g16_rx1.A"] + + # Generate some baselines + for test_name in test_names: + if utils.get_model() == "e3sm": + genargs = ["-g", "-o", "-b", self._baseline_name, test_name] + compargs = ["-c", "-b", self._baseline_name, test_name] + else: + genargs = [ + "-g", + self._baseline_name, + "-o", + test_name, + "--baseline-root ", + self._baseline_area, + ] + compargs = [ + "-c", + self._baseline_name, + test_name, + "--baseline-root ", + self._baseline_area, + ] + + self._create_test(genargs) + # Hist compare should pass + self._create_test(compargs) + # Change behavior + os.environ["TESTRUNDIFF_ALTERNATE"] = "True" + + # Hist compare should now fail + test_id = "%s-%s" % (self._baseline_name, utils.get_timestamp()) + self._create_test(compargs, test_id=test_id, run_errors=True) + + # compare_test_results should detect the fail + cpr_cmd = "{}/compare_test_results --test-root {} -t {} 2>&1".format( + self.TOOLS_DIR, self._testroot, test_id + ) + output = self.run_cmd_assert_result( + cpr_cmd, expected_stat=utils.TESTS_FAILED_ERR_CODE + ) + + # use regex + expected_pattern = re.compile(r"FAIL %s[^\s]* BASELINE" % test_name) + the_match = expected_pattern.search(output) + self.assertNotEqual( + the_match, + None, + msg="Cmd '%s' failed to display failed test %s in output:\n%s" + % (cpr_cmd, test_name, output), + ) + # Bless + utils.run_cmd_no_fail( + "{}/bless_test_results --test-root {} --hist-only --force -t {}".format( + self.TOOLS_DIR, self._testroot, test_id + ) + ) + # Hist compare should now pass again + self._create_test(compargs) + self.verify_perms(self._baseline_area) + if "TESTRUNDIFF_ALTERNATE" in os.environ: + del os.environ["TESTRUNDIFF_ALTERNATE"] + + def test_rebless_namelist(self): + # Generate some namelist baselines + if self.NO_FORTRAN_RUN: + self.skipTest("Skipping fortran test") + test_to_change = "TESTRUNPASS_P1.f19_g16_rx1.A" + if utils.get_model() == "e3sm": + genargs = ["-g", "-o", "-b", self._baseline_name, "cime_test_only_pass"] + compargs = 
["-c", "-b", self._baseline_name, "cime_test_only_pass"] + else: + genargs = ["-g", self._baseline_name, "-o", "cime_test_only_pass"] + compargs = ["-c", self._baseline_name, "cime_test_only_pass"] + + self._create_test(genargs) + + # Basic namelist compare + test_id = "%s-%s" % (self._baseline_name, utils.get_timestamp()) + cases = self._create_test(compargs, test_id=test_id) + casedir = self.get_casedir(test_to_change, cases) + + # Check standalone case.cmpgen_namelists + self.run_cmd_assert_result("./case.cmpgen_namelists", from_dir=casedir) + + # compare_test_results should pass + cpr_cmd = "{}/compare_test_results --test-root {} -n -t {} 2>&1".format( + self.TOOLS_DIR, self._testroot, test_id + ) + output = self.run_cmd_assert_result(cpr_cmd) + + # use regex + expected_pattern = re.compile(r"PASS %s[^\s]* NLCOMP" % test_to_change) + the_match = expected_pattern.search(output) + self.assertNotEqual( + the_match, + None, + msg="Cmd '%s' failed to display passed test in output:\n%s" + % (cpr_cmd, output), + ) + + # Modify namelist + fake_nl = """ + &fake_nml + fake_item = 'fake' + fake = .true. +/""" + baseline_area = self._baseline_area + baseline_glob = glob.glob( + os.path.join(baseline_area, self._baseline_name, "TEST*") + ) + self.assertEqual( + len(baseline_glob), + 3, + msg="Expected three matches, got:\n%s" % "\n".join(baseline_glob), + ) + + for baseline_dir in baseline_glob: + nl_path = os.path.join(baseline_dir, "CaseDocs", "datm_in") + self.assertTrue(os.path.isfile(nl_path), msg="Missing file %s" % nl_path) + + os.chmod(nl_path, stat.S_IRUSR | stat.S_IWUSR) + with open(nl_path, "a") as nl_file: + nl_file.write(fake_nl) + + # Basic namelist compare should now fail + test_id = "%s-%s" % (self._baseline_name, utils.get_timestamp()) + self._create_test(compargs, test_id=test_id, run_errors=True) + casedir = self.get_casedir(test_to_change, cases) + + # Unless namelists are explicitly ignored + test_id2 = "%s-%s" % (self._baseline_name, utils.get_timestamp()) + self._create_test(compargs + ["--ignore-namelists"], test_id=test_id2) + + self.run_cmd_assert_result( + "./case.cmpgen_namelists", from_dir=casedir, expected_stat=100 + ) + + # preview namelists should work + self.run_cmd_assert_result("./preview_namelists", from_dir=casedir) + + # This should still fail + self.run_cmd_assert_result( + "./case.cmpgen_namelists", from_dir=casedir, expected_stat=100 + ) + + # compare_test_results should fail + cpr_cmd = "{}/compare_test_results --test-root {} -n -t {} 2>&1".format( + self.TOOLS_DIR, self._testroot, test_id + ) + output = self.run_cmd_assert_result( + cpr_cmd, expected_stat=utils.TESTS_FAILED_ERR_CODE + ) + + # use regex + expected_pattern = re.compile(r"FAIL %s[^\s]* NLCOMP" % test_to_change) + the_match = expected_pattern.search(output) + self.assertNotEqual( + the_match, + None, + msg="Cmd '%s' failed to display passed test in output:\n%s" + % (cpr_cmd, output), + ) + + # Bless + new_test_id = "%s-%s" % (self._baseline_name, utils.get_timestamp()) + utils.run_cmd_no_fail( + "{}/bless_test_results --test-root {} -n --force -t {} --new-test-root={} --new-test-id={}".format( + self.TOOLS_DIR, self._testroot, test_id, self._testroot, new_test_id + ) + ) + + # Basic namelist compare should now pass again + self._create_test(compargs) + + self.verify_perms(self._baseline_area) diff --git a/CIME/tests/test_sys_build_system.py b/CIME/tests/test_sys_build_system.py new file mode 100644 index 00000000000..e76d7524e08 --- /dev/null +++ b/CIME/tests/test_sys_build_system.py @@ -0,0 
diff --git a/CIME/tests/test_sys_build_system.py b/CIME/tests/test_sys_build_system.py new file mode 100644 index 00000000000..e76d7524e08 --- /dev/null +++ b/CIME/tests/test_sys_build_system.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python3 + +from CIME.tests import base + + +class TestBuildSystem(base.BaseTestCase): + def test_clean_rebuild(self): + casedir = self._create_test( + ["--no-run", "SMS.f19_g16_rx1.A"], test_id=self._baseline_name + ) + + # Clean a component and a sharedlib + self.run_cmd_assert_result("./case.build --clean atm", from_dir=casedir) + self.run_cmd_assert_result("./case.build --clean gptl", from_dir=casedir) + + # Repeating should not be an error + self.run_cmd_assert_result("./case.build --clean atm", from_dir=casedir) + self.run_cmd_assert_result("./case.build --clean gptl", from_dir=casedir) + + self.run_cmd_assert_result("./case.build", from_dir=casedir) diff --git a/CIME/tests/test_sys_cime_case.py b/CIME/tests/test_sys_cime_case.py new file mode 100644 index 00000000000..63e3f37ae45 --- /dev/null +++ b/CIME/tests/test_sys_cime_case.py @@ -0,0 +1,731 @@ +#!/usr/bin/env python3 + +import collections +import os +import re +import shutil +import sys +import time + +import six +from CIME import utils +from CIME.tests import base +from CIME.case.case import Case +from CIME.XML.env_run import EnvRun + + +class TestCimeCase(base.BaseTestCase): + def test_cime_case(self): + casedir = self._create_test( + ["--no-build", "TESTRUNPASS_P1.f19_g16_rx1.A"], test_id=self._baseline_name + ) + + self.assertEqual(type(self.MACHINE.get_value("MAX_TASKS_PER_NODE")), int) + self.assertTrue( + type(self.MACHINE.get_value("PROJECT_REQUIRED")) in [type(None), bool] + ) + + with Case(casedir, read_only=False) as case: + build_complete = case.get_value("BUILD_COMPLETE") + self.assertFalse( + build_complete, + msg="Build complete had wrong value '%s'" % build_complete, + ) + + case.set_value("BUILD_COMPLETE", True) + build_complete = case.get_value("BUILD_COMPLETE") + self.assertTrue( + build_complete, + msg="Build complete had wrong value '%s'" % build_complete, + ) + + case.flush() + + build_complete = utils.run_cmd_no_fail( + "./xmlquery BUILD_COMPLETE --value", from_dir=casedir + ) + self.assertEqual( + build_complete, + "TRUE", + msg="Build complete had wrong value '%s'" % build_complete, + ) + + # Test some test properties + self.assertEqual(case.get_value("TESTCASE"), "TESTRUNPASS") + + def _batch_test_fixture(self, testcase_name): + if not self.MACHINE.has_batch_system() or self.NO_BATCH: + self.skipTest("Skipping testing user prerequisites without batch systems") + testdir = os.path.join(self._testroot, testcase_name) + if os.path.exists(testdir): + shutil.rmtree(testdir) + args = "--case {name} --script-root {testdir} --compset X --res f19_g16 --handle-preexisting-dirs=r --output-root {testdir}".format( + name=testcase_name, testdir=testdir + ) + if utils.get_model() == "cesm": + args += " --run-unsupported" + + self.run_cmd_assert_result( + "{}/create_newcase {}".format(self.SCRIPT_DIR, args), + from_dir=self.SCRIPT_DIR, + ) + self.run_cmd_assert_result("./case.setup", from_dir=testdir) + + return testdir + + def test_cime_case_prereq(self): + testcase_name = "prereq_test" + testdir = self._batch_test_fixture(testcase_name) + with Case(testdir, read_only=False) as case: + if case.get_value("depend_string") is None: + self.skipTest( + "Skipping prereq test, depend_string was not provided for this batch system" + ) + job_name = "case.run" + prereq_name = "prereq_test" + batch_commands = case.submit_jobs( + prereq=prereq_name, job=job_name, skip_pnl=True, dry_run=True + ) + self.assertTrue( + isinstance(batch_commands, collections.Sequence), + "case.submit_jobs did not return a
sequence for a dry run", + ) + self.assertTrue( + len(batch_commands) > 0, + "case.submit_jobs did not return any job submission string", + ) + # The first element in the internal sequence should just be the job name + # The second one (batch_cmd_index) should be the actual batch submission command + batch_cmd_index = 1 + # The prerequisite should be applied to all jobs, though we're only expecting one + for batch_cmd in batch_commands: + self.assertTrue( + isinstance(batch_cmd, collections.Sequence), + "case.submit_jobs did not return a sequence of sequences", + ) + self.assertTrue( + len(batch_cmd) > batch_cmd_index, + "case.submit_jobs returned internal sequences with length <= {}".format( + batch_cmd_index + ), + ) + self.assertTrue( + isinstance(batch_cmd[1], six.string_types), + "case.submit_jobs returned internal sequences without the batch command string as the second parameter: {}".format( + batch_cmd[1] + ), + ) + batch_cmd_args = batch_cmd[1] + + jobid_ident = "jobid" + dep_str_fmt = case.get_env("batch").get_value( + "depend_string", subgroup=None + ) + self.assertTrue( + jobid_ident in dep_str_fmt, + "dependency string doesn't include the jobid identifier {}".format( + jobid_ident + ), + ) + dep_str = dep_str_fmt[: dep_str_fmt.index(jobid_ident)] + + prereq_substr = None + while dep_str in batch_cmd_args: + dep_id_pos = batch_cmd_args.find(dep_str) + len(dep_str) + batch_cmd_args = batch_cmd_args[dep_id_pos:] + prereq_substr = batch_cmd_args[: len(prereq_name)] + if prereq_substr == prereq_name: + break + + self.assertTrue( + prereq_name in prereq_substr, + "Dependencies added, but not the user specified one", + ) + + def test_cime_case_allow_failed_prereq(self): + testcase_name = "allow_failed_prereq_test" + testdir = self._batch_test_fixture(testcase_name) + with Case(testdir, read_only=False) as case: + depend_allow = case.get_value("depend_allow_string") + if depend_allow is None: + self.skipTest( + "Skipping allow_failed_prereq test, depend_allow_string was not provided for this batch system" + ) + job_name = "case.run" + prereq_name = "prereq_allow_fail_test" + depend_allow = depend_allow.replace("jobid", prereq_name) + batch_commands = case.submit_jobs( + prereq=prereq_name, + allow_fail=True, + job=job_name, + skip_pnl=True, + dry_run=True, + ) + self.assertTrue( + isinstance(batch_commands, collections.Sequence), + "case.submit_jobs did not return a sequence for a dry run", + ) + num_submissions = 1 + if case.get_value("DOUT_S"): + num_submissions = 2 + self.assertTrue( + len(batch_commands) == num_submissions, + "case.submit_jobs did not return any job submission strings", + ) + self.assertTrue(depend_allow in batch_commands[0][1]) + + def test_cime_case_resubmit_immediate(self): + testcase_name = "resubmit_immediate_test" + testdir = self._batch_test_fixture(testcase_name) + with Case(testdir, read_only=False) as case: + depend_string = case.get_value("depend_string") + if depend_string is None: + self.skipTest( + "Skipping resubmit_immediate test, depend_string was not provided for this batch system" + ) + depend_string = re.sub("jobid.*$", "", depend_string) + job_name = "case.run" + num_submissions = 6 + case.set_value("RESUBMIT", num_submissions - 1) + batch_commands = case.submit_jobs( + job=job_name, skip_pnl=True, dry_run=True, resubmit_immediate=True + ) + self.assertTrue( + isinstance(batch_commands, collections.Sequence), + "case.submit_jobs did not return a sequence for a dry run", + ) + if case.get_value("DOUT_S"): + num_submissions = 12 + 
self.assertTrue( + len(batch_commands) == num_submissions, + "case.submit_jobs did not return {} submitted jobs".format( + num_submissions + ), + ) + for i, cmd in enumerate(batch_commands): + if i > 0: + self.assertTrue(depend_string in cmd[1]) + + def test_cime_case_st_archive_resubmit(self): + testcase_name = "st_archive_resubmit_test" + testdir = self._batch_test_fixture(testcase_name) + with Case(testdir, read_only=False) as case: + case.case_setup(clean=False, test_mode=False, reset=True) + orig_resubmit = 2 + case.set_value("RESUBMIT", orig_resubmit) + case.case_st_archive(resubmit=False) + new_resubmit = case.get_value("RESUBMIT") + self.assertTrue( + orig_resubmit == new_resubmit, "st_archive resubmitted when told not to" + ) + case.case_st_archive(resubmit=True) + new_resubmit = case.get_value("RESUBMIT") + self.assertTrue( + (orig_resubmit - 1) == new_resubmit, + "st_archive did not resubmit when told to", + ) + + def test_cime_case_build_threaded_1(self): + casedir = self._create_test( + ["--no-build", "TESTRUNPASS_P1x1.f19_g16_rx1.A"], + test_id=self._baseline_name, + ) + + with Case(casedir, read_only=False) as case: + build_threaded = case.get_value("SMP_PRESENT") + self.assertFalse(build_threaded) + + build_threaded = case.get_build_threaded() + self.assertFalse(build_threaded) + + case.set_value("FORCE_BUILD_SMP", True) + + build_threaded = case.get_build_threaded() + self.assertTrue(build_threaded) + + def test_cime_case_build_threaded_2(self): + casedir = self._create_test( + ["--no-build", "TESTRUNPASS_P1x2.f19_g16_rx1.A"], + test_id=self._baseline_name, + ) + + with Case(casedir, read_only=False) as case: + build_threaded = case.get_value("SMP_PRESENT") + self.assertTrue(build_threaded) + + build_threaded = case.get_build_threaded() + self.assertTrue(build_threaded) + + def test_cime_case_mpi_serial(self): + casedir = self._create_test( + ["--no-build", "TESTRUNPASS_Mmpi-serial_P10.f19_g16_rx1.A"], + test_id=self._baseline_name, + ) + + with Case(casedir, read_only=True) as case: + + # Serial cases should not be using pnetcdf + self.assertEqual(case.get_value("CPL_PIO_TYPENAME"), "netcdf") + + # Serial cases should be using 1 task + self.assertEqual(case.get_value("TOTALPES"), 1) + + self.assertEqual(case.get_value("NTASKS_CPL"), 1) + + def test_cime_case_force_pecount(self): + casedir = self._create_test( + [ + "--no-build", + "--force-procs=16", + "--force-threads=8", + "TESTRUNPASS.f19_g16_rx1.A", + ], + test_id=self._baseline_name, + ) + + with Case(casedir, read_only=True) as case: + self.assertEqual(case.get_value("NTASKS_CPL"), 16) + + self.assertEqual(case.get_value("NTHRDS_CPL"), 8) + + def test_cime_case_xmlchange_append(self): + casedir = self._create_test( + ["--no-build", "TESTRUNPASS_P1x1.f19_g16_rx1.A"], + test_id=self._baseline_name, + ) + + self.run_cmd_assert_result( + "./xmlchange --id PIO_CONFIG_OPTS --val='-opt1'", from_dir=casedir + ) + result = self.run_cmd_assert_result( + "./xmlquery --value PIO_CONFIG_OPTS", from_dir=casedir + ) + self.assertEqual(result, "-opt1") + + self.run_cmd_assert_result( + "./xmlchange --id PIO_CONFIG_OPTS --val='-opt2' --append", from_dir=casedir + ) + result = self.run_cmd_assert_result( + "./xmlquery --value PIO_CONFIG_OPTS", from_dir=casedir + ) + self.assertEqual(result, "-opt1 -opt2") + + def test_cime_case_test_walltime_mgmt_1(self): + if utils.get_model() != "e3sm": + self.skipTest("Skipping walltime test. 
Depends on E3SM batch settings") + + test_name = "ERS.f19_g16_rx1.A" + casedir = self._create_test( + ["--no-setup", "--machine=blues", test_name], + test_id=self._baseline_name, + env_changes="unset CIME_GLOBAL_WALLTIME &&", + ) + + result = self.run_cmd_assert_result( + "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", + from_dir=casedir, + ) + self.assertEqual(result, "00:10:00") + + result = self.run_cmd_assert_result( + "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir + ) + self.assertEqual(result, "biggpu") + + def test_cime_case_test_walltime_mgmt_2(self): + if utils.get_model() != "e3sm": + self.skipTest("Skipping walltime test. Depends on E3SM batch settings") + + test_name = "ERS_P64.f19_g16_rx1.A" + casedir = self._create_test( + ["--no-setup", "--machine=blues", test_name], + test_id=self._baseline_name, + env_changes="unset CIME_GLOBAL_WALLTIME &&", + ) + + result = self.run_cmd_assert_result( + "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", + from_dir=casedir, + ) + self.assertEqual(result, "01:00:00") + + result = self.run_cmd_assert_result( + "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir + ) + self.assertEqual(result, "biggpu") + + def test_cime_case_test_walltime_mgmt_3(self): + if utils.get_model() != "e3sm": + self.skipTest("Skipping walltime test. Depends on E3SM batch settings") + + test_name = "ERS_P64.f19_g16_rx1.A" + casedir = self._create_test( + ["--no-setup", "--machine=blues", "--walltime=0:10:00", test_name], + test_id=self._baseline_name, + env_changes="unset CIME_GLOBAL_WALLTIME &&", + ) + + result = self.run_cmd_assert_result( + "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", + from_dir=casedir, + ) + self.assertEqual(result, "00:10:00") + + result = self.run_cmd_assert_result( + "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir + ) + self.assertEqual(result, "biggpu") # Not smart enough to select faster queue + + def test_cime_case_test_walltime_mgmt_4(self): + if utils.get_model() != "e3sm": + self.skipTest("Skipping walltime test. Depends on E3SM batch settings") + + test_name = "ERS_P1.f19_g16_rx1.A" + casedir = self._create_test( + ["--no-setup", "--machine=blues", "--walltime=2:00:00", test_name], + test_id=self._baseline_name, + env_changes="unset CIME_GLOBAL_WALLTIME &&", + ) + + result = self.run_cmd_assert_result( + "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", + from_dir=casedir, + ) + self.assertEqual(result, "01:00:00") + + result = self.run_cmd_assert_result( + "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir + ) + self.assertEqual(result, "biggpu") + + def test_cime_case_test_walltime_mgmt_5(self): + if utils.get_model() != "e3sm": + self.skipTest("Skipping walltime test. 
Depends on E3SM batch settings") + + test_name = "ERS_P1.f19_g16_rx1.A" + casedir = self._create_test( + ["--no-setup", "--machine=blues", test_name], + test_id=self._baseline_name, + env_changes="unset CIME_GLOBAL_WALLTIME &&", + ) + + self.run_cmd_assert_result( + "./xmlchange JOB_QUEUE=slartibartfast --subgroup=case.test", + from_dir=casedir, + expected_stat=1, + ) + + self.run_cmd_assert_result( + "./xmlchange JOB_QUEUE=slartibartfast --force --subgroup=case.test", + from_dir=casedir, + ) + + result = self.run_cmd_assert_result( + "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", + from_dir=casedir, + ) + self.assertEqual(result, "01:00:00") + + result = self.run_cmd_assert_result( + "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir + ) + self.assertEqual(result, "slartibartfast") + + def test_cime_case_test_walltime_mgmt_6(self): + if not self._hasbatch: + self.skipTest("Skipping walltime test. Depends on batch system") + + test_name = "ERS_P1.f19_g16_rx1.A" + casedir = self._create_test( + ["--no-build", test_name], + test_id=self._baseline_name, + env_changes="unset CIME_GLOBAL_WALLTIME &&", + ) + + self.run_cmd_assert_result( + "./xmlchange JOB_WALLCLOCK_TIME=421:32:11 --subgroup=case.test", + from_dir=casedir, + ) + + self.run_cmd_assert_result("./case.setup --reset", from_dir=casedir) + + result = self.run_cmd_assert_result( + "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", + from_dir=casedir, + ) + with Case(casedir) as case: + walltime_format = case.get_value("walltime_format", subgroup=None) + if walltime_format is not None and walltime_format.count(":") == 1: + self.assertEqual(result, "421:32") + else: + self.assertEqual(result, "421:32:11") + + def test_cime_case_test_walltime_mgmt_7(self): + if not self._hasbatch: + self.skipTest("Skipping walltime test. Depends on batch system") + + test_name = "ERS_P1.f19_g16_rx1.A" + casedir = self._create_test( + ["--no-build", "--walltime=01:00:00", test_name], + test_id=self._baseline_name, + env_changes="unset CIME_GLOBAL_WALLTIME &&", + ) + + self.run_cmd_assert_result( + "./xmlchange JOB_WALLCLOCK_TIME=421:32:11 --subgroup=case.test", + from_dir=casedir, + ) + + self.run_cmd_assert_result("./case.setup --reset", from_dir=casedir) + + result = self.run_cmd_assert_result( + "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", + from_dir=casedir, + ) + with Case(casedir) as case: + walltime_format = case.get_value("walltime_format", subgroup=None) + if walltime_format is not None and walltime_format.count(":") == 1: + self.assertEqual(result, "421:32") + else: + self.assertEqual(result, "421:32:11") + + def test_cime_case_test_walltime_mgmt_8(self): + if utils.get_model() != "e3sm": + self.skipTest("Skipping walltime test. 
Depends on E3SM batch settings") + + test_name = "SMS_P25600.f19_g16_rx1.A" + machine, compiler = "theta", "gnu" + casedir = self._create_test( + [ + "--no-setup", + "--machine={}".format(machine), + "--compiler={}".format(compiler), + "--project e3sm", + test_name, + ], + test_id=self._baseline_name, + env_changes="unset CIME_GLOBAL_WALLTIME &&", + ) + + result = self.run_cmd_assert_result( + "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", + from_dir=casedir, + ) + self.assertEqual(result, "09:00:00") + + result = self.run_cmd_assert_result( + "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir + ) + self.assertEqual(result, "default") + + def test_cime_case_test_custom_project(self): + test_name = "ERS_P1.f19_g16_rx1.A" + # have to use a machine both models know and one that doesn't put PROJECT in any key paths + if utils.get_model() == "e3sm": + machine = "mappy" + else: + machine = "melvin" + compiler = "gnu" + casedir = self._create_test( + [ + "--no-setup", + "--machine={}".format(machine), + "--compiler={}".format(compiler), + "--project=testproj", + test_name, + "--mpilib=mpi-serial", + ], + test_id=self._baseline_name, + env_changes="unset CIME_GLOBAL_WALLTIME &&", + ) + + result = self.run_cmd_assert_result( + "./xmlquery --value PROJECT --subgroup=case.test", from_dir=casedir + ) + self.assertEqual(result, "testproj") + + def test_create_test_longname(self): + self._create_test( + ["SMS.f19_g16.2000_SATM_XLND_SICE_SOCN_XROF_XGLC_SWAV", "--no-build"] + ) + + def test_env_loading(self): + if self._machine != "mappy": + self.skipTest("Skipping env load test - Only works on mappy") + + casedir = self._create_test( + ["--no-build", "TESTRUNPASS.f19_g16_rx1.A"], test_id=self._baseline_name + ) + + with Case(casedir, read_only=True) as case: + env_mach = case.get_env("mach_specific") + orig_env = dict(os.environ) + + env_mach.load_env(case) + module_env = dict(os.environ) + + os.environ.clear() + os.environ.update(orig_env) + + env_mach.load_env(case, force_method="generic") + generic_env = dict(os.environ) + + os.environ.clear() + os.environ.update(orig_env) + + problems = "" + for mkey, mval in module_env.items(): + if mkey not in generic_env: + if not mkey.startswith("PS") and mkey != "OLDPWD": + problems += "Generic missing key: {}\n".format(mkey) + elif ( + mval != generic_env[mkey] + and mkey not in ["_", "SHLVL", "PWD"] + and not mkey.endswith("()") + ): + problems += "Value mismatch for key {}: {} != {}\n".format( + mkey, repr(mval), repr(generic_env[mkey]) + ) + + for gkey in generic_env.keys(): + if gkey not in module_env: + problems += "Modules missing key: {}\n".format(gkey) + + self.assertEqual(problems, "", msg=problems) + + def test_case_submit_interface(self): + # the current directory may not exist, so make sure we are in a real directory + os.chdir(os.getenv("HOME")) + sys.path.append(self.TOOLS_DIR) + case_submit_path = os.path.join(self.TOOLS_DIR, "case.submit") + + module = utils.import_from_file("case.submit", case_submit_path) + + sys.argv = [ + "case.submit", + "--batch-args", + "'random_arguments_here.%j'", + "--mail-type", + "fail", + "--mail-user", + "'random_arguments_here.%j'", + ] + module._main_func(None, True) + + def test_xml_caching(self): + casedir = self._create_test( + ["--no-build", "TESTRUNPASS.f19_g16_rx1.A"], test_id=self._baseline_name + ) + + active = os.path.join(casedir, "env_run.xml") + backup = os.path.join(casedir, "env_run.xml.bak") + + utils.safe_copy(active, backup) + + with Case(casedir, 
read_only=False) as case: + env_run = EnvRun(casedir, read_only=True) + self.assertEqual(case.get_value("RUN_TYPE"), "startup") + case.set_value("RUN_TYPE", "branch") + self.assertEqual(case.get_value("RUN_TYPE"), "branch") + self.assertEqual(env_run.get_value("RUN_TYPE"), "branch") + + with Case(casedir) as case: + self.assertEqual(case.get_value("RUN_TYPE"), "branch") + + time.sleep(0.2) + utils.safe_copy(backup, active) + + with Case(casedir, read_only=False) as case: + self.assertEqual(case.get_value("RUN_TYPE"), "startup") + case.set_value("RUN_TYPE", "branch") + + with Case(casedir, read_only=False) as case: + self.assertEqual(case.get_value("RUN_TYPE"), "branch") + time.sleep(0.2) + utils.safe_copy(backup, active) + case.read_xml() # Manual re-sync + self.assertEqual(case.get_value("RUN_TYPE"), "startup") + case.set_value("RUN_TYPE", "branch") + self.assertEqual(case.get_value("RUN_TYPE"), "branch") + + with Case(casedir) as case: + self.assertEqual(case.get_value("RUN_TYPE"), "branch") + time.sleep(0.2) + utils.safe_copy(backup, active) + env_run = EnvRun(casedir, read_only=True) + self.assertEqual(env_run.get_value("RUN_TYPE"), "startup") + + with Case(casedir, read_only=False) as case: + self.assertEqual(case.get_value("RUN_TYPE"), "startup") + case.set_value("RUN_TYPE", "branch") + + # Behind-the-back modification detection. + with self.assertRaises(utils.CIMEError): + with Case(casedir, read_only=False) as case: + case.set_value("RUN_TYPE", "startup") + time.sleep(0.2) + utils.safe_copy(backup, active) + + with Case(casedir, read_only=False) as case: + case.set_value("RUN_TYPE", "branch") + + # If there are no modifications within CIME, the files should not be written + # and therefore no timestamp check + with Case(casedir) as case: + time.sleep(0.2) + utils.safe_copy(backup, active) + + def test_configure(self): + testname = "SMS.f09_g16.X" + casedir = self._create_test( + [testname, "--no-build"], test_id=self._baseline_name + ) + + manual_config_dir = os.path.join(casedir, "manual_config") + os.mkdir(manual_config_dir) + + utils.run_cmd_no_fail( + "{} --machine={} --compiler={}".format( + os.path.join(utils.get_cime_root(), "tools", "configure"), + self._machine, + self._compiler, + ), + from_dir=manual_config_dir, + ) + + with open(os.path.join(casedir, "env_mach_specific.xml"), "r") as fd: + case_env_contents = fd.read() + + with open(os.path.join(manual_config_dir, "env_mach_specific.xml"), "r") as fd: + man_env_contents = fd.read() + + self.assertEqual(case_env_contents, man_env_contents) + + def test_self_build_cprnc(self): + if self.NO_FORTRAN_RUN: + self.skipTest("Skipping fortran test") + if self.TEST_COMPILER and "gpu" in self.TEST_COMPILER: + self.skipTest("Skipping cprnc test for gpu compiler") + + testname = "ERS_Ln7.f19_g16_rx1.A" + casedir = self._create_test( + [testname, "--no-build"], test_id=self._baseline_name + ) + + self.run_cmd_assert_result( + "./xmlchange CCSM_CPRNC=this_is_a_broken_cprnc", from_dir=casedir + ) + self.run_cmd_assert_result("./case.build", from_dir=casedir) + self.run_cmd_assert_result("./case.submit", from_dir=casedir) + + self._wait_for_tests(self._baseline_name, always_wait=True) + + def test_case_clean(self): + testname = "ERS_Ln7.f19_g16_rx1.A" + casedir = self._create_test( + [testname, "--no-build"], test_id=self._baseline_name + ) + + self.run_cmd_assert_result("./case.setup --clean", from_dir=casedir) + self.run_cmd_assert_result("./case.setup --clean", from_dir=casedir) + self.run_cmd_assert_result("./case.setup", from_dir=casedir)
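The XML caching contract that test_xml_caching above exercises is worth stating plainly. The following is an illustrative sketch, not part of the patch: the casedir path is hypothetical, and only behaviors asserted by the test are assumed (values are cached at open, read_xml() re-syncs from disk, and a file changed behind the back of a writable Case with pending modifications raises utils.CIMEError at flush).

```python
from CIME import utils
from CIME.case.case import Case

casedir = "/path/to/case"  # hypothetical: any existing case directory

# A Case caches values read from the env_*.xml files at open, so an
# external edit to env_run.xml is not visible until read_xml() is called.
with Case(casedir, read_only=False) as case:
    run_type = case.get_value("RUN_TYPE")  # served from the in-memory cache
    # ... env_run.xml is modified externally here ...
    case.read_xml()                        # manual re-sync picks up the edit

# An external edit made while a writable Case has pending, unflushed
# changes is detected when the Case flushes on context exit.
try:
    with Case(casedir, read_only=False) as case:
        case.set_value("RUN_TYPE", "branch")
        # ... env_run.xml is modified externally before the flush ...
except utils.CIMEError:
    pass  # expected: behind-the-back modification detected
```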
diff --git a/CIME/tests/test_sys_cime_performance.py b/CIME/tests/test_sys_cime_performance.py new file mode 100644 index 00000000000..a0282c7759a --- /dev/null +++ b/CIME/tests/test_sys_cime_performance.py @@ -0,0 +1,19 @@ +#!/usr/bin/env python3 + +import time + +from CIME.tests import base + + +class TestCimePerformance(base.BaseTestCase): + def test_cime_case_ctrl_performance(self): + + ts = time.time() + + num_repeat = 5 + for _ in range(num_repeat): + self._create_test(["cime_tiny", "--no-build"]) + + elapsed = time.time() - ts + + print("Perf test result: {:0.2f}".format(elapsed)) diff --git a/CIME/tests/test_sys_cmake_macros.py b/CIME/tests/test_sys_cmake_macros.py new file mode 100644 index 00000000000..83c2734e56b --- /dev/null +++ b/CIME/tests/test_sys_cmake_macros.py @@ -0,0 +1,624 @@ +#!/usr/bin/env python3 + +from CIME import utils +from CIME.tests import base +from CIME.tests import utils as test_utils +from CIME.XML.compilers import Compilers + + +class TestCMakeMacros(base.BaseTestCase): + """CMake macros tests. + + This class contains tests of the CMake output of Build. + + This class simply inherits all of the methods of TestMakeOutput, but changes + the definition of xml_to_tester to create a CMakeTester instead. + """ + + def xml_to_tester(self, xml_string): + """Helper that directly converts an XML string to a CMakeTester.""" + test_xml = test_utils._wrap_config_compilers_xml(xml_string) + if self.NO_CMAKE: + self.skipTest("Skipping cmake test") + else: + return test_utils.CMakeTester( + self, test_utils.get_macros(self._maker, test_xml, "CMake") + ) + + def setUp(self): + super().setUp() + + self.test_os = "SomeOS" + self.test_machine = "mymachine" + self.test_compiler = ( + self.MACHINE.get_default_compiler() + if self.TEST_COMPILER is None + else self.TEST_COMPILER + ) + self.test_mpilib = ( + self.MACHINE.get_default_MPIlib(attributes={"compiler": self.test_compiler}) + if self.TEST_MPILIB is None + else self.TEST_MPILIB + ) + + self._maker = Compilers( + test_utils.MockMachines(self.test_machine, self.test_os), version=2.0 + ) + + def test_generic_item(self): + """The macro writer can write out a single generic item.""" + xml_string = "FALSE" + tester = self.xml_to_tester(xml_string) + tester.assert_variable_equals("SUPPORTS_CXX", "FALSE") + + def test_machine_specific_item(self): + """The macro writer can pick out a machine-specific item.""" + xml1 = """TRUE""".format( + self.test_machine + ) + xml2 = """FALSE""" + tester = self.xml_to_tester(xml1 + xml2) + tester.assert_variable_equals("SUPPORTS_CXX", "TRUE") + # Do this a second time, but with elements in the reverse order, to + # ensure that the code is not "cheating" by taking the first match. + tester = self.xml_to_tester(xml2 + xml1) + tester.assert_variable_equals("SUPPORTS_CXX", "TRUE") + + def test_ignore_non_match(self): + """The macro writer ignores an entry with the wrong machine name.""" + xml1 = """TRUE""" + xml2 = """FALSE""" + tester = self.xml_to_tester(xml1 + xml2) + tester.assert_variable_equals("SUPPORTS_CXX", "FALSE") + # Again, double-check that we don't just get lucky with the order.
+ tester = self.xml_to_tester(xml2 + xml1) + tester.assert_variable_equals("SUPPORTS_CXX", "FALSE") + + def test_os_specific_item(self): + """The macro writer can pick out an OS-specific item.""" + xml1 = ( + """TRUE""".format( + self.test_os + ) + ) + xml2 = """FALSE""" + tester = self.xml_to_tester(xml1 + xml2) + tester.assert_variable_equals("SUPPORTS_CXX", "TRUE") + tester = self.xml_to_tester(xml2 + xml1) + tester.assert_variable_equals("SUPPORTS_CXX", "TRUE") + + def test_mach_other_compiler(self): + """The macro writer compiler-specific logic works as expected.""" + xml1 = """a b c""".format( + self.test_compiler + ) + xml2 = """x y z""".format( + self.test_machine + ) + xml3 = """x y z""".format( + self.test_machine, self.test_compiler + ) + xml4 = """x y z""".format( + self.test_machine, self.test_compiler + ) + tester = self.xml_to_tester(xml1) + tester.assert_variable_equals( + "CFLAGS", "a b c", var={"COMPILER": self.test_compiler} + ) + tester = self.xml_to_tester(xml1 + xml2) + tester.assert_variable_equals( + "CFLAGS", "a b c", var={"COMPILER": self.test_compiler} + ) + tester = self.xml_to_tester(xml2 + xml1) + tester.assert_variable_equals( + "CFLAGS", "a b c", var={"COMPILER": self.test_compiler} + ) + tester = self.xml_to_tester(xml1 + xml3) + tester.assert_variable_equals( + "CFLAGS", "a b c x y z", var={"COMPILER": self.test_compiler} + ) + tester = self.xml_to_tester(xml1 + xml4) + tester.assert_variable_equals( + "CFLAGS", "x y z", var={"COMPILER": self.test_compiler} + ) + tester = self.xml_to_tester(xml4 + xml1) + tester.assert_variable_equals( + "CFLAGS", "x y z", var={"COMPILER": self.test_compiler} + ) + + def test_mach_beats_os(self): + """The macro writer chooses machine-specific over os-specific matches.""" + xml1 = """FALSE""".format( + self.test_os + ) + xml2 = """TRUE""".format( + self.test_machine + ) + tester = self.xml_to_tester(xml1 + xml2) + tester.assert_variable_equals("SUPPORTS_CXX", "TRUE") + tester = self.xml_to_tester(xml2 + xml1) + tester.assert_variable_equals("SUPPORTS_CXX", "TRUE") + + def test_mach_and_os_beats_mach(self): + """The macro writer chooses the most-specific match possible.""" + xml1 = """FALSE""".format( + self.test_machine + ) + xml2 = """TRUE""" + xml2 = xml2.format(self.test_machine, self.test_os) + tester = self.xml_to_tester(xml1 + xml2) + tester.assert_variable_equals("SUPPORTS_CXX", "TRUE") + tester = self.xml_to_tester(xml2 + xml1) + tester.assert_variable_equals("SUPPORTS_CXX", "TRUE") + + def test_build_time_attribute(self): + """The macro writer writes conditionals for build-time choices.""" + xml1 = """/path/to/mpich""" + xml2 = """/path/to/openmpi""" + xml3 = """/path/to/default""" + tester = self.xml_to_tester(xml1 + xml2 + xml3) + tester.assert_variable_equals("MPI_PATH", "/path/to/default") + tester.assert_variable_equals( + "MPI_PATH", "/path/to/mpich", var={"MPILIB": "mpich"} + ) + tester.assert_variable_equals( + "MPI_PATH", "/path/to/openmpi", var={"MPILIB": "openmpi"} + ) + tester = self.xml_to_tester(xml3 + xml2 + xml1) + tester.assert_variable_equals("MPI_PATH", "/path/to/default") + tester.assert_variable_equals( + "MPI_PATH", "/path/to/mpich", var={"MPILIB": "mpich"} + ) + tester.assert_variable_equals( + "MPI_PATH", "/path/to/openmpi", var={"MPILIB": "openmpi"} + ) + + def test_reject_duplicate_defaults(self): + """The macro writer dies if given many defaults.""" + xml1 = """/path/to/default""" + xml2 = """/path/to/other_default""" + with self.assertRaisesRegex( + utils.CIMEError, + "Variable MPI_PATH 
is set ambiguously in config_compilers.xml.", + ): + self.xml_to_tester(xml1 + xml2) + + def test_reject_duplicates(self): + """The macro writer dies if given many matches for a given configuration.""" + xml1 = """/path/to/mpich""" + xml2 = """/path/to/mpich2""" + with self.assertRaisesRegex( + utils.CIMEError, + "Variable MPI_PATH is set ambiguously in config_compilers.xml.", + ): + self.xml_to_tester(xml1 + xml2) + + def test_reject_ambiguous(self): + """The macro writer dies if given an ambiguous set of matches.""" + xml1 = """/path/to/mpich""" + xml2 = """/path/to/mpi-debug""" + with self.assertRaisesRegex( + utils.CIMEError, + "Variable MPI_PATH is set ambiguously in config_compilers.xml.", + ): + self.xml_to_tester(xml1 + xml2) + + def test_compiler_changeable_at_build_time(self): + """The macro writer writes information for multiple compilers.""" + xml1 = """FALSE""" + xml2 = ( + """TRUE""" + ) + tester = self.xml_to_tester(xml1 + xml2) + tester.assert_variable_equals("SUPPORTS_CXX", "TRUE", var={"COMPILER": "gnu"}) + tester.assert_variable_equals("SUPPORTS_CXX", "FALSE") + + def test_base_flags(self): + """Test that we get "base" compiler flags.""" + xml1 = """-O2""" + tester = self.xml_to_tester(xml1) + tester.assert_variable_equals("FFLAGS", "-O2") + + def test_machine_specific_base_flags(self): + """Test selection among base compiler flag sets based on machine.""" + xml1 = """-O2""" + xml2 = """-O3""".format( + self.test_machine + ) + tester = self.xml_to_tester(xml1 + xml2) + tester.assert_variable_equals("FFLAGS", "-O3") + + def test_build_time_base_flags(self): + """Test selection of base flags based on build-time attributes.""" + xml1 = """-O2""" + xml2 = """-O3""" + tester = self.xml_to_tester(xml1 + xml2) + tester.assert_variable_equals("FFLAGS", "-O2") + tester.assert_variable_equals("FFLAGS", "-O3", var={"DEBUG": "TRUE"}) + + def test_build_time_base_flags_same_parent(self): + """Test selection of base flags in the same parent element.""" + xml1 = """-O2""" + xml2 = """-O3""" + tester = self.xml_to_tester( + "" + xml1 + xml2 + "" + ) + tester.assert_variable_equals("FFLAGS", "-O2") + tester.assert_variable_equals("FFLAGS", "-O3", var={"DEBUG": "TRUE"}) + # Check for order independence here, too. + tester = self.xml_to_tester( + "" + xml2 + xml1 + "" + ) + tester.assert_variable_equals("FFLAGS", "-O2") + tester.assert_variable_equals("FFLAGS", "-O3", var={"DEBUG": "TRUE"}) + + def test_append_flags(self): + """Test appending flags to a list.""" + xml1 = """-delicious""" + xml2 = """-cake""" + tester = self.xml_to_tester(xml1 + xml2) + tester.assert_variable_equals("FFLAGS", "-delicious -cake") + # Order independence, as usual. 
+ tester = self.xml_to_tester(xml2 + xml1) + tester.assert_variable_equals("FFLAGS", "-delicious -cake") + + def test_machine_specific_append_flags(self): + """Test appending flags that are either more or less machine-specific.""" + xml1 = """-delicious""" + xml2 = """-cake""".format( + self.test_machine + ) + tester = self.xml_to_tester(xml1 + xml2) + tester.assert_variable_matches( + "FFLAGS", "^(-delicious -cake|-cake -delicious)$" + ) + + def test_machine_specific_base_keeps_append_flags(self): + """Test that machine-specific base flags don't override default append flags.""" + xml1 = """-delicious""" + xml2 = """-cake""".format( + self.test_machine + ) + tester = self.xml_to_tester(xml1 + xml2) + tester.assert_variable_equals("FFLAGS", "-cake -delicious") + tester = self.xml_to_tester(xml2 + xml1) + tester.assert_variable_equals("FFLAGS", "-cake -delicious") + + def test_machine_specific_base_and_append_flags(self): + """Test that machine-specific base flags coexist with machine-specific append flags.""" + xml1 = """-delicious""".format( + self.test_machine + ) + xml2 = """-cake""".format( + self.test_machine + ) + tester = self.xml_to_tester(xml1 + xml2) + tester.assert_variable_equals("FFLAGS", "-cake -delicious") + tester = self.xml_to_tester(xml2 + xml1) + tester.assert_variable_equals("FFLAGS", "-cake -delicious") + + def test_append_flags_without_base(self): + """Test appending flags to a value set before Macros is included.""" + xml1 = """-cake""" + tester = self.xml_to_tester(xml1) + tester.assert_variable_equals( + "FFLAGS", "-delicious -cake", var={"FFLAGS": "-delicious"} + ) + + def test_build_time_append_flags(self): + """Test build_time selection of compiler flags.""" + xml1 = """-cake""" + xml2 = """-and-pie""" + tester = self.xml_to_tester(xml1 + xml2) + tester.assert_variable_equals("FFLAGS", "-cake") + tester.assert_variable_matches( + "FFLAGS", "^(-cake -and-pie|-and-pie -cake)$", var={"DEBUG": "TRUE"} + ) + + def test_environment_variable_insertion(self): + """Test that ENV{..} inserts environment variables.""" + # DO it again with $ENV{} style + xml1 = """-L$ENV{NETCDF} -lnetcdf""" + tester = self.xml_to_tester(xml1) + tester.assert_variable_equals( + "LDFLAGS", "-L/path/to/netcdf -lnetcdf", env={"NETCDF": "/path/to/netcdf"} + ) + + def test_shell_command_insertion(self): + """Test that $SHELL insert shell command output.""" + xml1 = """-O$SHELL{echo 2} -fast""" + tester = self.xml_to_tester(xml1) + tester.assert_variable_equals("FFLAGS", "-O2 -fast") + + def test_multiple_shell_commands(self): + """Test that more than one $SHELL element can be used.""" + xml1 = """-O$SHELL{echo 2} -$SHELL{echo fast}""" + tester = self.xml_to_tester(xml1) + tester.assert_variable_equals("FFLAGS", "-O2 -fast") + + def test_env_and_shell_command(self): + """Test that $ENV works inside $SHELL elements.""" + xml1 = """-O$SHELL{echo $ENV{OPT_LEVEL}} -fast""" + tester = self.xml_to_tester(xml1) + tester.assert_variable_equals("FFLAGS", "-O2 -fast", env={"OPT_LEVEL": "2"}) + + def test_config_variable_insertion(self): + """Test that $VAR insert variables from config_compilers.""" + # Construct an absurd chain of references just to sure that we don't + # pass by accident, e.g. outputting things in the right order just due + # to good luck in a hash somewhere. 
+ xml1 = """stuff-${MPI_PATH}-stuff""" + xml2 = """${MPICC}""" + xml3 = """${MPICXX}""" + xml4 = """${MPIFC}""" + xml5 = """mpicc""" + tester = self.xml_to_tester( + "" + xml1 + xml2 + xml3 + xml4 + xml5 + "" + ) + tester.assert_variable_equals("MPI_LIB_NAME", "stuff-mpicc-stuff") + + def test_config_reject_self_references(self): + """Test that $VAR self-references are rejected.""" + # This is a special case of the next test, which also checks circular + # references. + xml1 = """${MPI_LIB_NAME}""" + err_msg = r".* has bad \$VAR references. Check for circular references or variables that are used in a \$VAR but not actually defined." + with self.assertRaisesRegex(utils.CIMEError, err_msg): + self.xml_to_tester("" + xml1 + "") + + def test_config_reject_cyclical_references(self): + """Test that cyclical $VAR references are rejected.""" + xml1 = """${MPI_PATH}""" + xml2 = """${MPI_LIB_NAME}""" + err_msg = r".* has bad \$VAR references. Check for circular references or variables that are used in a \$VAR but not actually defined." + with self.assertRaisesRegex(utils.CIMEError, err_msg): + self.xml_to_tester("" + xml1 + xml2 + "") + + def test_variable_insertion_with_machine_specific_setting(self): + """Test that machine-specific $VAR dependencies are correct.""" + xml1 = """something""" + xml2 = """$MPI_PATH""".format( + self.test_machine + ) + xml3 = """${MPI_LIB_NAME}""" + err_msg = r".* has bad \$VAR references. Check for circular references or variables that are used in a \$VAR but not actually defined." + with self.assertRaisesRegex(utils.CIMEError, err_msg): + self.xml_to_tester(xml1 + xml2 + xml3) + + def test_override_with_machine_and_new_attributes(self): + """Test that overrides with machine-specific settings with added attributes work correctly.""" + xml1 = """ + + icc + mpicxx + mpif90 + mpicc +""".format( + self.test_compiler + ) + xml2 = """ + + mpifoo + mpiffoo + mpifouc + +""".format( + self.test_compiler, self.test_machine, self.test_mpilib + ) + + tester = self.xml_to_tester(xml1 + xml2) + + tester.assert_variable_equals( + "SCC", + "icc", + var={"COMPILER": self.test_compiler, "MPILIB": self.test_mpilib}, + ) + tester.assert_variable_equals( + "MPICXX", + "mpifoo", + var={"COMPILER": self.test_compiler, "MPILIB": self.test_mpilib}, + ) + tester.assert_variable_equals( + "MPIFC", + "mpiffoo", + var={"COMPILER": self.test_compiler, "MPILIB": self.test_mpilib}, + ) + tester.assert_variable_equals( + "MPICC", + "mpicc", + var={"COMPILER": self.test_compiler, "MPILIB": self.test_mpilib}, + ) + + tester = self.xml_to_tester(xml2 + xml1) + + tester.assert_variable_equals( + "SCC", + "icc", + var={"COMPILER": self.test_compiler, "MPILIB": self.test_mpilib}, + ) + tester.assert_variable_equals( + "MPICXX", + "mpifoo", + var={"COMPILER": self.test_compiler, "MPILIB": self.test_mpilib}, + ) + tester.assert_variable_equals( + "MPIFC", + "mpiffoo", + var={"COMPILER": self.test_compiler, "MPILIB": self.test_mpilib}, + ) + tester.assert_variable_equals( + "MPICC", + "mpicc", + var={"COMPILER": self.test_compiler, "MPILIB": self.test_mpilib}, + ) + + def test_override_with_machine_and_same_attributes(self): + """Test that machine-specific conditional overrides with the same attribute work correctly.""" + xml1 = """ + + mpifc +""".format( + self.test_compiler, self.test_mpilib + ) + xml2 = """ + + mpif90 + +""".format( + self.test_machine, self.test_compiler, self.test_mpilib + ) + + tester = self.xml_to_tester(xml1 + xml2) + + tester.assert_variable_equals( + "MPIFC", + "mpif90", + 
var={"COMPILER": self.test_compiler, "MPILIB": self.test_mpilib}, + ) + + tester = self.xml_to_tester(xml2 + xml1) + + tester.assert_variable_equals( + "MPIFC", + "mpif90", + var={"COMPILER": self.test_compiler, "MPILIB": self.test_mpilib}, + ) + + def test_appends_not_overriden(self): + """Test that machine-specific base value changes don't interfere with appends.""" + xml1 = """ + + + -base1 + -debug1 + +""".format( + self.test_compiler + ) + + xml2 = """ + + + -base2 + -debug2 + +""".format( + self.test_machine, self.test_compiler + ) + + tester = self.xml_to_tester(xml1 + xml2) + + tester.assert_variable_equals( + "FFLAGS", "-base2", var={"COMPILER": self.test_compiler} + ) + tester.assert_variable_equals( + "FFLAGS", + "-base2 -debug2", + var={"COMPILER": self.test_compiler, "DEBUG": "TRUE"}, + ) + tester.assert_variable_equals( + "FFLAGS", + "-base2 -debug1", + var={"COMPILER": self.test_compiler, "DEBUG": "FALSE"}, + ) + + tester = self.xml_to_tester(xml2 + xml1) + + tester.assert_variable_equals( + "FFLAGS", "-base2", var={"COMPILER": self.test_compiler} + ) + tester.assert_variable_equals( + "FFLAGS", + "-base2 -debug2", + var={"COMPILER": self.test_compiler, "DEBUG": "TRUE"}, + ) + tester.assert_variable_equals( + "FFLAGS", + "-base2 -debug1", + var={"COMPILER": self.test_compiler, "DEBUG": "FALSE"}, + ) + + def test_multilevel_specificity(self): + """Check that settings with multiple levels of machine-specificity can be resolved.""" + xml1 = """ + + mpifc +""" + + xml2 = """ + + mpif03 +""".format( + self.test_os, self.test_mpilib + ) + + xml3 = """ + + mpif90 +""".format( + self.test_machine + ) + + # To verify order-independence, test every possible ordering of blocks. + testers = [] + testers.append(self.xml_to_tester(xml1 + xml2 + xml3)) + testers.append(self.xml_to_tester(xml1 + xml3 + xml2)) + testers.append(self.xml_to_tester(xml2 + xml1 + xml3)) + testers.append(self.xml_to_tester(xml2 + xml3 + xml1)) + testers.append(self.xml_to_tester(xml3 + xml1 + xml2)) + testers.append(self.xml_to_tester(xml3 + xml2 + xml1)) + + for tester in testers: + tester.assert_variable_equals( + "MPIFC", + "mpif90", + var={ + "COMPILER": self.test_compiler, + "MPILIB": self.test_mpilib, + "DEBUG": "TRUE", + }, + ) + tester.assert_variable_equals( + "MPIFC", + "mpif03", + var={ + "COMPILER": self.test_compiler, + "MPILIB": self.test_mpilib, + "DEBUG": "FALSE", + }, + ) + tester.assert_variable_equals( + "MPIFC", + "mpifc", + var={ + "COMPILER": self.test_compiler, + "MPILIB": "NON_MATCHING_MPI", + "DEBUG": "FALSE", + }, + ) + + def test_remove_dependency_issues(self): + """Check that overridden settings don't cause inter-variable dependencies.""" + xml1 = """ + + ${SFC} +""" + + xml2 = ( + """ +""".format( + self.test_machine + ) + + """ + ${MPIFC} + mpif90 +""" + ) + + tester = self.xml_to_tester(xml1 + xml2) + tester.assert_variable_equals("SFC", "mpif90") + tester.assert_variable_equals("MPIFC", "mpif90") + + tester = self.xml_to_tester(xml2 + xml1) + tester.assert_variable_equals("SFC", "mpif90") + tester.assert_variable_equals("MPIFC", "mpif90") diff --git a/CIME/tests/test_sys_create_newcase.py b/CIME/tests/test_sys_create_newcase.py new file mode 100644 index 00000000000..1d42fa620a6 --- /dev/null +++ b/CIME/tests/test_sys_create_newcase.py @@ -0,0 +1,828 @@ +#!/usr/bin/env python3 + +import filecmp +import os +import re +import shutil +import sys + +from CIME import utils +from CIME.tests import base +from CIME.case.case import Case + + +class TestCreateNewcase(base.BaseTestCase): + 
@classmethod + def setUpClass(cls): + cls._testdirs = [] + cls._do_teardown = [] + cls._testroot = os.path.join(cls.TEST_ROOT, "TestCreateNewcase") + cls._root_dir = os.getcwd() + + def tearDown(self): + cls = self.__class__ + os.chdir(cls._root_dir) + + def test_a_createnewcase(self): + cls = self.__class__ + + testdir = os.path.join(cls._testroot, "testcreatenewcase") + if os.path.exists(testdir): + shutil.rmtree(testdir) + args = " --case %s --compset X --output-root %s --handle-preexisting-dirs=r" % ( + testdir, + cls._testroot, + ) + if utils.get_model() == "cesm": + args += " --run-unsupported" + if self.TEST_COMPILER is not None: + args = args + " --compiler %s" % self.TEST_COMPILER + if self.TEST_MPILIB is not None: + args = args + " --mpilib %s" % self.TEST_MPILIB + if utils.get_cime_default_driver() == "nuopc": + args += " --res f19_g17 " + else: + args += " --res f19_g16 " + + args += f" --machine {self.MACHINE.get_machine_name()}" + + cls._testdirs.append(testdir) + self.run_cmd_assert_result( + "./create_newcase %s" % (args), from_dir=self.SCRIPT_DIR + ) + self.assertTrue(os.path.exists(testdir)) + self.assertTrue(os.path.exists(os.path.join(testdir, "case.setup"))) + + self.run_cmd_assert_result("./case.setup", from_dir=testdir) + self.run_cmd_assert_result("./case.build", from_dir=testdir) + + with Case(testdir, read_only=False) as case: + ntasks = case.get_value("NTASKS_ATM") + case.set_value("NTASKS_ATM", ntasks + 1) + + # this should fail with a locked file issue + self.run_cmd_assert_result("./case.build", from_dir=testdir, expected_stat=1) + + self.run_cmd_assert_result("./case.setup --reset", from_dir=testdir) + self.run_cmd_assert_result("./case.build", from_dir=testdir) + with Case(testdir, read_only=False) as case: + case.set_value("CHARGE_ACCOUNT", "fred") + + # this should not fail with a locked file issue + self.run_cmd_assert_result("./case.build", from_dir=testdir) + + self.run_cmd_assert_result("./case.st_archive --test-all", from_dir=testdir) + + # Trying to set values outside of context manager should fail + case = Case(testdir, read_only=False) + with self.assertRaises(utils.CIMEError): + case.set_value("NTASKS_ATM", 42) + + # Trying to read_xml with pending changes should fail + with self.assertRaises(utils.CIMEError): + with Case(testdir, read_only=False) as case: + case.set_value("CHARGE_ACCOUNT", "fouc") + case.read_xml() + + cls._do_teardown.append(testdir) + + def test_aa_no_flush_on_instantiate(self): + testdir = os.path.join(self.__class__._testroot, "testcreatenewcase") + with Case(testdir, read_only=False) as case: + for env_file in case._files: + self.assertFalse( + env_file.needsrewrite, + msg="Instantiating a case should not trigger a flush call", + ) + + with Case(testdir, read_only=False) as case: + case.set_value("HIST_OPTION", "nyears") + runfile = case.get_env("run") + self.assertTrue( + runfile.needsrewrite, msg="Expected flush call not triggered" + ) + for env_file in case._files: + if env_file != runfile: + self.assertFalse( + env_file.needsrewrite, + msg="Unexpected flush triggered for file {}".format( + env_file.filename + ), + ) + # Flush the file + runfile.write() + # set it again to the same value + case.set_value("HIST_OPTION", "nyears") + # now the file should not need to be flushed + for env_file in case._files: + self.assertFalse( + env_file.needsrewrite, + msg="Unexpected flush triggered for file {}".format( + env_file.filename + ), + ) + + # Check once more with a new instance + with Case(testdir, read_only=False) as case: + 
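+            # A fresh Case re-reads the XML from disk, so setting the same
+            # value again must not mark any env file as needing a rewrite.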
case.set_value("HIST_OPTION", "nyears") + for env_file in case._files: + self.assertFalse( + env_file.needsrewrite, + msg="Unexpected flush triggered for file {}".format( + env_file.filename + ), + ) + + def test_b_user_mods(self): + cls = self.__class__ + + testdir = os.path.join(cls._testroot, "testusermods") + if os.path.exists(testdir): + shutil.rmtree(testdir) + + cls._testdirs.append(testdir) + + user_mods_dir = os.path.join( + utils.get_python_libs_root(), "..", "tests", "user_mods_test1" + ) + args = ( + " --case %s --compset X --user-mods-dir %s --output-root %s --handle-preexisting-dirs=r" + % (testdir, user_mods_dir, cls._testroot) + ) + if utils.get_model() == "cesm": + args += " --run-unsupported" + if self.TEST_COMPILER is not None: + args = args + " --compiler %s" % self.TEST_COMPILER + if self.TEST_MPILIB is not None: + args = args + " --mpilib %s" % self.TEST_MPILIB + if utils.get_cime_default_driver() == "nuopc": + args += " --res f19_g17 " + else: + args += " --res f19_g16 " + + args += f" --machine {self.MACHINE.get_machine_name()}" + + self.run_cmd_assert_result( + "%s/create_newcase %s " % (self.SCRIPT_DIR, args), from_dir=self.SCRIPT_DIR + ) + + self.assertTrue( + os.path.isfile( + os.path.join(testdir, "SourceMods", "src.drv", "somefile.F90") + ), + msg="User_mods SourceMod missing", + ) + + with open(os.path.join(testdir, "user_nl_cpl"), "r") as fd: + contents = fd.read() + self.assertTrue( + "a different cpl test option" in contents, + msg="User_mods contents of user_nl_cpl missing", + ) + self.assertTrue( + "a cpl namelist option" in contents, + msg="User_mods contents of user_nl_cpl missing", + ) + cls._do_teardown.append(testdir) + + def test_c_create_clone_keepexe(self): + cls = self.__class__ + + testdir = os.path.join(cls._testroot, "test_create_clone_keepexe") + if os.path.exists(testdir): + shutil.rmtree(testdir) + prevtestdir = cls._testdirs[0] + user_mods_dir = os.path.join( + utils.get_python_libs_root(), "..", "tests", "user_mods_test3" + ) + + cmd = "%s/create_clone --clone %s --case %s --keepexe --user-mods-dir %s" % ( + self.SCRIPT_DIR, + prevtestdir, + testdir, + user_mods_dir, + ) + self.run_cmd_assert_result(cmd, from_dir=self.SCRIPT_DIR, expected_stat=1) + cls._do_teardown.append(testdir) + + def test_d_create_clone_new_user(self): + cls = self.__class__ + + testdir = os.path.join(cls._testroot, "test_create_clone_new_user") + if os.path.exists(testdir): + shutil.rmtree(testdir) + prevtestdir = cls._testdirs[0] + cls._testdirs.append(testdir) + # change the USER and CIME_OUTPUT_ROOT to nonsense values + # this is intended as a test of whether create_clone is independent of user + self.run_cmd_assert_result( + "./xmlchange USER=this_is_not_a_user", from_dir=prevtestdir + ) + + fakeoutputroot = cls._testroot.replace( + os.environ.get("USER"), "this_is_not_a_user" + ) + self.run_cmd_assert_result( + "./xmlchange CIME_OUTPUT_ROOT=%s" % fakeoutputroot, from_dir=prevtestdir + ) + + # this test should pass (user name is replaced) + self.run_cmd_assert_result( + "%s/create_clone --clone %s --case %s " + % (self.SCRIPT_DIR, prevtestdir, testdir), + from_dir=self.SCRIPT_DIR, + ) + + shutil.rmtree(testdir) + # this test should pass + self.run_cmd_assert_result( + "%s/create_clone --clone %s --case %s --cime-output-root %s" + % (self.SCRIPT_DIR, prevtestdir, testdir, cls._testroot), + from_dir=self.SCRIPT_DIR, + ) + + cls._do_teardown.append(testdir) + + def test_dd_create_clone_not_writable(self): + cls = self.__class__ + + testdir = 
os.path.join(cls._testroot, "test_create_clone_not_writable") + if os.path.exists(testdir): + shutil.rmtree(testdir) + prevtestdir = cls._testdirs[0] + cls._testdirs.append(testdir) + + with Case(prevtestdir, read_only=False) as case1: + case2 = case1.create_clone(testdir) + with self.assertRaises(utils.CIMEError): + case2.set_value("CHARGE_ACCOUNT", "fouc") + cls._do_teardown.append(testdir) + + def test_e_xmlquery(self): + # Set script and script path + xmlquery = "./xmlquery" + cls = self.__class__ + casedir = cls._testdirs[0] + + # Check for environment + self.assertTrue(os.path.isdir(self.SCRIPT_DIR)) + self.assertTrue(os.path.isdir(self.TOOLS_DIR)) + self.assertTrue(os.path.isfile(os.path.join(casedir, xmlquery))) + + # Test command line options + with Case(casedir, read_only=True) as case: + STOP_N = case.get_value("STOP_N") + COMP_CLASSES = case.get_values("COMP_CLASSES") + BUILD_COMPLETE = case.get_value("BUILD_COMPLETE") + cmd = xmlquery + " STOP_N --value" + output = utils.run_cmd_no_fail(cmd, from_dir=casedir) + self.assertTrue(output == str(STOP_N), msg="%s != %s" % (output, STOP_N)) + cmd = xmlquery + " BUILD_COMPLETE --value" + output = utils.run_cmd_no_fail(cmd, from_dir=casedir) + self.assertTrue(output == "TRUE", msg="%s != %s" % (output, BUILD_COMPLETE)) + # we expect DOCN_MODE to be undefined in this X compset + # this test assures that we do not try to resolve this as a compvar + cmd = xmlquery + " DOCN_MODE --value" + _, output, error = utils.run_cmd(cmd, from_dir=casedir) + self.assertTrue( + error == "ERROR: No results found for variable DOCN_MODE", + msg="unexpected result for DOCN_MODE, output {}, error {}".format( + output, error + ), + ) + + for comp in COMP_CLASSES: + caseresult = case.get_value("NTASKS_%s" % comp) + cmd = xmlquery + " NTASKS_%s --value" % comp + output = utils.run_cmd_no_fail(cmd, from_dir=casedir) + self.assertTrue( + output == str(caseresult), msg="%s != %s" % (output, caseresult) + ) + cmd = xmlquery + " NTASKS --subgroup %s --value" % comp + output = utils.run_cmd_no_fail(cmd, from_dir=casedir) + self.assertTrue( + output == str(caseresult), msg="%s != %s" % (output, caseresult) + ) + if self.MACHINE.has_batch_system(): + JOB_QUEUE = case.get_value("JOB_QUEUE", subgroup="case.run") + cmd = xmlquery + " JOB_QUEUE --subgroup case.run --value" + output = utils.run_cmd_no_fail(cmd, from_dir=casedir) + self.assertTrue( + output == JOB_QUEUE, msg="%s != %s" % (output, JOB_QUEUE) + ) + + cmd = xmlquery + " --listall" + utils.run_cmd_no_fail(cmd, from_dir=casedir) + + cls._do_teardown.append(cls._testroot) + + def test_f_createnewcase_with_user_compset(self): + cls = self.__class__ + + testdir = os.path.join(cls._testroot, "testcreatenewcase_with_user_compset") + if os.path.exists(testdir): + shutil.rmtree(testdir) + + cls._testdirs.append(testdir) + + if utils.get_model() == "cesm": + if utils.get_cime_default_driver() == "nuopc": + pesfile = os.path.join( + utils.get_src_root(), + "components", + "cmeps", + "cime_config", + "config_pes.xml", + ) + else: + pesfile = os.path.join( + utils.get_src_root(), + "components", + "cpl7", + "driver", + "cime_config", + "config_pes.xml", + ) + else: + pesfile = os.path.join( + utils.get_src_root(), "driver-mct", "cime_config", "config_pes.xml" + ) + + args = ( + "--case %s --compset 2000_SATM_XLND_SICE_SOCN_XROF_XGLC_SWAV --pesfile %s --res f19_g16 --output-root %s --handle-preexisting-dirs=r" + % (testdir, pesfile, cls._testroot) + ) + if utils.get_model() == "cesm": + args += " --run-unsupported" + if 
self.TEST_COMPILER is not None: + args += " --compiler %s" % self.TEST_COMPILER + if self.TEST_MPILIB is not None: + args = args + " --mpilib %s" % self.TEST_MPILIB + + args += f" --machine {self.MACHINE.get_machine_name()}" + + self.run_cmd_assert_result( + "%s/create_newcase %s" % (self.SCRIPT_DIR, args), from_dir=self.SCRIPT_DIR + ) + self.run_cmd_assert_result("./case.setup", from_dir=testdir) + self.run_cmd_assert_result("./case.build", from_dir=testdir) + + cls._do_teardown.append(testdir) + + def test_g_createnewcase_with_user_compset_and_env_mach_pes(self): + cls = self.__class__ + + testdir = os.path.join( + cls._testroot, "testcreatenewcase_with_user_compset_and_env_mach_pes" + ) + if os.path.exists(testdir): + shutil.rmtree(testdir) + previous_testdir = cls._testdirs[-1] + cls._testdirs.append(testdir) + + pesfile = os.path.join(previous_testdir, "env_mach_pes.xml") + args = ( + "--case %s --compset 2000_SATM_XLND_SICE_SOCN_XROF_XGLC_SWAV --pesfile %s --res f19_g16 --output-root %s --handle-preexisting-dirs=r" + % (testdir, pesfile, cls._testroot) + ) + if utils.get_model() == "cesm": + args += " --run-unsupported" + if self.TEST_COMPILER is not None: + args += " --compiler %s" % self.TEST_COMPILER + if self.TEST_MPILIB is not None: + args += " --mpilib %s" % self.TEST_MPILIB + + args += f" --machine {self.MACHINE.get_machine_name()}" + + self.run_cmd_assert_result( + "%s/create_newcase %s" % (self.SCRIPT_DIR, args), from_dir=self.SCRIPT_DIR + ) + self.run_cmd_assert_result( + "diff env_mach_pes.xml %s" % (previous_testdir), from_dir=testdir + ) + # this line should cause the diff to fail (I assume no machine is going to default to 17 tasks) + self.run_cmd_assert_result("./xmlchange NTASKS=17", from_dir=testdir) + self.run_cmd_assert_result( + "diff env_mach_pes.xml %s" % (previous_testdir), + from_dir=testdir, + expected_stat=1, + ) + + cls._do_teardown.append(testdir) + + def test_h_primary_component(self): + cls = self.__class__ + + testdir = os.path.join(cls._testroot, "testprimarycomponent") + if os.path.exists(testdir): + shutil.rmtree(testdir) + + cls._testdirs.append(testdir) + args = ( + " --case CreateNewcaseTest --script-root %s --compset X --output-root %s --handle-preexisting-dirs u" + % (testdir, cls._testroot) + ) + if utils.get_model() == "cesm": + args += " --run-unsupported" + if self.TEST_COMPILER is not None: + args += " --compiler %s" % self.TEST_COMPILER + if self.TEST_MPILIB is not None: + args += " --mpilib %s" % self.TEST_MPILIB + if utils.get_cime_default_driver() == "nuopc": + args += " --res f19_g17 " + else: + args += " --res f19_g16 " + + args += f" --machine {self.MACHINE.get_machine_name()}" + + self.run_cmd_assert_result( + "%s/create_newcase %s" % (self.SCRIPT_DIR, args), from_dir=self.SCRIPT_DIR + ) + self.assertTrue(os.path.exists(testdir)) + self.assertTrue(os.path.exists(os.path.join(testdir, "case.setup"))) + + with Case(testdir, read_only=False) as case: + case._compsetname = case.get_value("COMPSET") + case.set_comp_classes(case.get_values("COMP_CLASSES")) + primary = case._find_primary_component() + self.assertEqual( + primary, + "drv", + msg="primary component test expected drv but got %s" % primary, + ) + # now we are going to corrupt the case so that we can do more primary_component testing + case.set_valid_values("COMP_GLC", "%s,fred" % case.get_value("COMP_GLC")) + case.set_value("COMP_GLC", "fred") + primary = case._find_primary_component() + self.assertEqual( + primary, + "fred", + msg="primary component test expected fred but got 
%s" % primary, + ) + case.set_valid_values("COMP_ICE", "%s,wilma" % case.get_value("COMP_ICE")) + case.set_value("COMP_ICE", "wilma") + primary = case._find_primary_component() + self.assertEqual( + primary, + "wilma", + msg="primary component test expected wilma but got %s" % primary, + ) + + case.set_valid_values( + "COMP_OCN", "%s,bambam,docn" % case.get_value("COMP_OCN") + ) + case.set_value("COMP_OCN", "bambam") + primary = case._find_primary_component() + self.assertEqual( + primary, + "bambam", + msg="primary component test expected bambam but got %s" % primary, + ) + + case.set_valid_values("COMP_LND", "%s,barney" % case.get_value("COMP_LND")) + case.set_value("COMP_LND", "barney") + primary = case._find_primary_component() + # This is a "J" compset + self.assertEqual( + primary, + "allactive", + msg="primary component test expected allactive but got %s" % primary, + ) + case.set_value("COMP_OCN", "docn") + case.set_valid_values("COMP_LND", "%s,barney" % case.get_value("COMP_LND")) + case.set_value("COMP_LND", "barney") + primary = case._find_primary_component() + self.assertEqual( + primary, + "barney", + msg="primary component test expected barney but got %s" % primary, + ) + case.set_valid_values("COMP_ATM", "%s,wilma" % case.get_value("COMP_ATM")) + case.set_value("COMP_ATM", "wilma") + primary = case._find_primary_component() + self.assertEqual( + primary, + "wilma", + msg="primary component test expected wilma but got %s" % primary, + ) + # this is a "E" compset + case._compsetname = case._compsetname.replace("XOCN", "DOCN%SOM") + primary = case._find_primary_component() + self.assertEqual( + primary, + "allactive", + msg="primary component test expected allactive but got %s" % primary, + ) + # finally a "B" compset + case.set_value("COMP_OCN", "bambam") + primary = case._find_primary_component() + self.assertEqual( + primary, + "allactive", + msg="primary component test expected allactive but got %s" % primary, + ) + + cls._do_teardown.append(testdir) + + def test_j_createnewcase_user_compset_vs_alias(self): + """ + Create a compset using the alias and another compset using the full compset name + and make sure they are the same by comparing the namelist files in CaseDocs. + Ignore the modelio files and clean the directory names out first. 
+ """ + cls = self.__class__ + + testdir1 = os.path.join(cls._testroot, "testcreatenewcase_user_compset") + if os.path.exists(testdir1): + shutil.rmtree(testdir1) + cls._testdirs.append(testdir1) + + args = " --case CreateNewcaseTest --script-root {} --compset 2000_DATM%NYF_SLND_SICE_DOCN%SOMAQP_SROF_SGLC_SWAV --output-root {} --handle-preexisting-dirs u".format( + testdir1, cls._testroot + ) + if utils.get_cime_default_driver() == "nuopc": + args += " --res f19_g17 " + else: + args += " --res f19_g16 " + if utils.get_model() == "cesm": + args += " --run-unsupported" + if self.TEST_COMPILER is not None: + args += " --compiler %s" % self.TEST_COMPILER + if self.TEST_MPILIB is not None: + args += " --mpilib %s" % self.TEST_MPILIB + + args += f" --machine {self.MACHINE.get_machine_name()}" + + self.run_cmd_assert_result( + "{}/create_newcase {}".format(self.SCRIPT_DIR, args), + from_dir=self.SCRIPT_DIR, + ) + self.run_cmd_assert_result("./case.setup ", from_dir=testdir1) + self.run_cmd_assert_result("./preview_namelists ", from_dir=testdir1) + + dir1 = os.path.join(testdir1, "CaseDocs") + dir2 = os.path.join(testdir1, "CleanCaseDocs") + os.mkdir(dir2) + for _file in os.listdir(dir1): + if "modelio" in _file: + continue + with open(os.path.join(dir1, _file), "r") as fi: + file_text = fi.read() + file_text = file_text.replace(os.path.basename(testdir1), "PATH") + file_text = re.sub(r"logfile =.*", "", file_text) + with open(os.path.join(dir2, _file), "w") as fo: + fo.write(file_text) + cleancasedocs1 = dir2 + + testdir2 = os.path.join(cls._testroot, "testcreatenewcase_alias_compset") + if os.path.exists(testdir2): + shutil.rmtree(testdir2) + cls._testdirs.append(testdir2) + args = " --case CreateNewcaseTest --script-root {} --compset ADSOMAQP --output-root {} --handle-preexisting-dirs u".format( + testdir2, cls._testroot + ) + if utils.get_cime_default_driver() == "nuopc": + args += " --res f19_g17 " + else: + args += " --res f19_g16 " + if utils.get_model() == "cesm": + args += " --run-unsupported" + if self.TEST_COMPILER is not None: + args += " --compiler %s" % self.TEST_COMPILER + if self.TEST_MPILIB is not None: + args += " --mpilib %s" % self.TEST_MPILIB + + args += f" --machine {self.MACHINE.get_machine_name()}" + + self.run_cmd_assert_result( + "{}/create_newcase {}".format(self.SCRIPT_DIR, args), + from_dir=self.SCRIPT_DIR, + ) + self.run_cmd_assert_result("./case.setup ", from_dir=testdir2) + self.run_cmd_assert_result("./preview_namelists ", from_dir=testdir2) + + dir1 = os.path.join(testdir2, "CaseDocs") + dir2 = os.path.join(testdir2, "CleanCaseDocs") + os.mkdir(dir2) + for _file in os.listdir(dir1): + if "modelio" in _file: + continue + with open(os.path.join(dir1, _file), "r") as fi: + file_text = fi.read() + file_text = file_text.replace(os.path.basename(testdir2), "PATH") + file_text = re.sub(r"logfile =.*", "", file_text) + with open(os.path.join(dir2, _file), "w") as fo: + fo.write(file_text) + + cleancasedocs2 = dir2 + dcmp = filecmp.dircmp(cleancasedocs1, cleancasedocs2) + self.assertTrue( + len(dcmp.diff_files) == 0, "CaseDocs differ {}".format(dcmp.diff_files) + ) + + cls._do_teardown.append(testdir1) + cls._do_teardown.append(testdir2) + + def test_k_append_config(self): + machlist_before = self.MACHINE.list_available_machines() + self.assertEqual( + len(machlist_before) > 1, True, msg="Problem reading machine list" + ) + + newmachfile = os.path.join( + utils.get_cime_root(), + "config", + "xml_schemas", + "config_machines_template.xml", + ) + 
self.MACHINE.read(newmachfile) + machlist_after = self.MACHINE.list_available_machines() + + self.assertEqual( + len(machlist_after) - len(machlist_before), + 1, + msg="Not able to append config_machines.xml {} {}".format( + len(machlist_after), len(machlist_before) + ), + ) + self.assertEqual( + "mymachine" in machlist_after, + True, + msg="Not able to append config_machines.xml", + ) + + def test_ka_createnewcase_extra_machines_dir(self): + # Test that we pick up changes in both config_machines.xml and + # config_compilers.xml in a directory specified with the --extra-machines-dir + # argument to create_newcase. + cls = self.__class__ + casename = "testcreatenewcase_extra_machines_dir" + + # Setup: stage some xml files in a temporary directory + extra_machines_dir = os.path.join( + cls._testroot, "{}_machine_config".format(casename) + ) + os.makedirs(extra_machines_dir) + cls._do_teardown.append(extra_machines_dir) + newmachfile = os.path.join( + utils.get_cime_root(), + "config", + "xml_schemas", + "config_machines_template.xml", + ) + utils.safe_copy( + newmachfile, os.path.join(extra_machines_dir, "config_machines.xml") + ) + os.environ["CIME_NO_CMAKE_MACRO"] = "ON" + config_compilers_text = """\ + + + + /my/netcdf/path + + +""" + config_compilers_path = os.path.join(extra_machines_dir, "config_compilers.xml") + with open(config_compilers_path, "w") as config_compilers: + config_compilers.write(config_compilers_text) + + # Create the case + testdir = os.path.join(cls._testroot, casename) + if os.path.exists(testdir): + shutil.rmtree(testdir) + # In the following, note that 'mymachine' is the machine name defined in + # config_machines_template.xml + args = ( + " --case {testdir} --compset X --mach mymachine" + " --output-root {testroot} --non-local" + " --extra-machines-dir {extra_machines_dir}".format( + testdir=testdir, + testroot=cls._testroot, + extra_machines_dir=extra_machines_dir, + ) + ) + if utils.get_model() == "cesm": + args += " --run-unsupported" + + if utils.get_cime_default_driver() == "nuopc": + args += " --res f19_g17 " + else: + args += " --res f19_g16 " + self.run_cmd_assert_result( + "./create_newcase {}".format(args), from_dir=self.SCRIPT_DIR + ) + + args += f" --machine {self.MACHINE.get_machine_name()}" + + cls._do_teardown.append(testdir) + + # Run case.setup + self.run_cmd_assert_result("./case.setup", from_dir=testdir) + + # Make sure Macros file contains expected text + if utils.get_model() != "e3sm": + macros_file_name = os.path.join(testdir, "Macros.make") + self.assertTrue(os.path.isfile(macros_file_name)) + with open(macros_file_name) as macros_file: + macros_contents = macros_file.read() + expected_re = re.compile("NETCDF_PATH.*/my/netcdf/path") + self.assertTrue(expected_re.search(macros_contents)) + del os.environ["CIME_NO_CMAKE_MACRO"] + + def test_m_createnewcase_alternate_drivers(self): + # Test that case.setup runs for nuopc and moab drivers + cls = self.__class__ + model = utils.get_model() + for driver in ("nuopc", "moab"): + if not os.path.exists( + os.path.join(utils.get_cime_root(), "src", "drivers", driver) + ): + self.skipTest( + "Skipping driver test for {}, driver not found".format(driver) + ) + if (model == "cesm" and driver == "moab") or ( + model == "e3sm" and driver == "nuopc" + ): + continue + + testdir = os.path.join(cls._testroot, "testcreatenewcase.{}".format(driver)) + if os.path.exists(testdir): + shutil.rmtree(testdir) + args = " --driver {} --case {} --compset X --res f19_g16 --output-root {} 
--handle-preexisting-dirs=r".format( + driver, testdir, cls._testroot + ) + if model == "cesm": + args += " --run-unsupported" + if self.TEST_COMPILER is not None: + args = args + " --compiler %s" % self.TEST_COMPILER + if self.TEST_MPILIB is not None: + args = args + " --mpilib %s" % self.TEST_MPILIB + + args += f" --machine {self.MACHINE.get_machine_name()}" + + cls._testdirs.append(testdir) + self.run_cmd_assert_result( + "./create_newcase %s" % (args), from_dir=self.SCRIPT_DIR + ) + self.assertTrue(os.path.exists(testdir)) + self.assertTrue(os.path.exists(os.path.join(testdir, "case.setup"))) + + self.run_cmd_assert_result("./case.setup", from_dir=testdir) + with Case(testdir, read_only=False) as case: + comp_interface = case.get_value("COMP_INTERFACE") + self.assertTrue( + driver == comp_interface, msg="%s != %s" % (driver, comp_interface) + ) + + cls._do_teardown.append(testdir) + + def test_n_createnewcase_bad_compset(self): + cls = self.__class__ + model = utils.get_model() + + testdir = os.path.join(cls._testroot, "testcreatenewcase_bad_compset") + if os.path.exists(testdir): + shutil.rmtree(testdir) + args = ( + " --case %s --compset InvalidCompsetName --output-root %s --handle-preexisting-dirs=r " + % (testdir, cls._testroot) + ) + if model == "cesm": + args += " --run-unsupported" + if self.TEST_COMPILER is not None: + args = args + " --compiler %s" % self.TEST_COMPILER + if self.TEST_MPILIB is not None: + args = args + " --mpilib %s" % self.TEST_MPILIB + if utils.get_cime_default_driver() == "nuopc": + args += " --res f19_g17 " + else: + args += " --res f19_g16 " + + args += f" --machine {self.MACHINE.get_machine_name()}" + + self.run_cmd_assert_result( + "./create_newcase %s" % (args), from_dir=self.SCRIPT_DIR, expected_stat=1 + ) + self.assertFalse(os.path.exists(testdir)) + + @classmethod + def tearDownClass(cls): + do_teardown = ( + len(cls._do_teardown) > 0 + and sys.exc_info() == (None, None, None) + and not cls.NO_TEARDOWN + ) + rmtestroot = True + for tfile in cls._testdirs: + if tfile not in cls._do_teardown: + print("Detected failed test or user request no teardown") + print("Leaving case directory : %s" % tfile) + rmtestroot = False + elif do_teardown: + try: + print("Attempt to remove directory {}".format(tfile)) + shutil.rmtree(tfile) + except BaseException: + print("Could not remove directory {}".format(tfile)) + if rmtestroot and do_teardown: + shutil.rmtree(cls._testroot) diff --git a/CIME/tests/test_sys_full_system.py b/CIME/tests/test_sys_full_system.py new file mode 100644 index 00000000000..cde4d625f07 --- /dev/null +++ b/CIME/tests/test_sys_full_system.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python3 + +import os + +import get_tests +from CIME import test_status +from CIME import utils +from CIME import wait_for_tests +from CIME.tests import base + + +class TestFullSystem(base.BaseTestCase): + def test_full_system(self): + # Put this inside any test that's slow + if self.FAST_ONLY: + self.skipTest("Skipping slow test") + + driver = utils.get_cime_default_driver() + if driver == "mct": + cases = self._create_test( + ["--walltime=0:15:00", "cime_developer"], test_id=self._baseline_name + ) + else: + cases = self._create_test( + ["--walltime=0:30:00", "cime_developer"], test_id=self._baseline_name + ) + + self.run_cmd_assert_result( + "%s/cs.status.%s" % (self._testroot, self._baseline_name), + from_dir=self._testroot, + ) + + # Ensure that we can get test times + for case_dir in cases: + tstatus = os.path.join(case_dir, "TestStatus") + test_time = 
wait_for_tests.get_test_time(os.path.dirname(tstatus)) + self.assertIs( + type(test_time), int, msg="get time did not return int for %s" % tstatus + ) + self.assertTrue(test_time > 0, msg="test time was zero for %s" % tstatus) + + # Test that re-running works + skip_tests = None + if utils.get_cime_default_driver() == "nuopc": + skip_tests = [ + "SMS_Ln3.T42_T42.S", + "PRE.f19_f19.ADESP_TEST", + "PRE.f19_f19.ADESP", + "DAE.ww3a.ADWAV", + ] + tests = get_tests.get_test_suite( + "cime_developer", + machine=self._machine, + compiler=self._compiler, + skip_tests=skip_tests, + ) + + for test in tests: + casedir = self.get_casedir(test, cases) + + # Subtle issue: The run phases of these tests will be in the PASS state until + # the submitted case.test script is run, which could take a while if the system is + # busy. This potentially leaves a window where the wait_for_tests command below will + # not wait for the re-submitted jobs to run because it sees the original PASS. + # The code below forces things back to PEND to avoid this race condition. Note + # that we must use the MEMLEAK phase, not the RUN phase, because RUN being in a non-PEND + # state is how system tests know they are being re-run and must reset certain + # case settings. + if self._hasbatch: + with test_status.TestStatus(test_dir=casedir) as ts: + ts.set_status( + test_status.MEMLEAK_PHASE, test_status.TEST_PEND_STATUS + ) + + self.run_cmd_assert_result( + "./case.submit --skip-preview-namelist", from_dir=casedir + ) + + self._wait_for_tests(self._baseline_name) diff --git a/CIME/tests/test_sys_grid_generation.py b/CIME/tests/test_sys_grid_generation.py new file mode 100644 index 00000000000..25e49a281a5 --- /dev/null +++ b/CIME/tests/test_sys_grid_generation.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python3 + +import os +import shutil +import sys + +from CIME import utils +from CIME.tests import base + + +class TestGridGeneration(base.BaseTestCase): + @classmethod + def setUpClass(cls): + cls._do_teardown = [] + cls._testroot = os.path.join(cls.TEST_ROOT, "TestGridGeneration") + cls._testdirs = [] + + def test_gen_domain(self): + if utils.get_model() != "e3sm": + self.skipTest("Skipping gen_domain test. 
Depends on E3SM tools") + cime_root = utils.get_cime_root() + inputdata = self.MACHINE.get_value("DIN_LOC_ROOT") + + tool_name = "test_gen_domain" + tool_location = os.path.join( + cime_root, "tools", "mapping", "gen_domain_files", "test_gen_domain.sh" + ) + args = "--cime_root={} --inputdata_root={}".format(cime_root, inputdata) + + cls = self.__class__ + test_dir = os.path.join(cls._testroot, tool_name) + cls._testdirs.append(test_dir) + os.makedirs(test_dir) + self.run_cmd_assert_result( + self, "{} {}".format(tool_location, args), from_dir=test_dir + ) + cls._do_teardown.append(test_dir) + + @classmethod + def tearDownClass(cls): + do_teardown = ( + len(cls._do_teardown) > 0 + and sys.exc_info() == (None, None, None) + and not cls.NO_TEARDOWN + ) + teardown_root = True + for tfile in cls._testdirs: + if tfile not in cls._do_teardown: + print("Detected failed test or user request no teardown") + print("Leaving case directory : %s" % tfile) + teardown_root = False + elif do_teardown: + shutil.rmtree(tfile) + + if teardown_root and do_teardown: + shutil.rmtree(cls._testroot) diff --git a/CIME/tests/test_sys_jenkins_generic_job.py b/CIME/tests/test_sys_jenkins_generic_job.py new file mode 100644 index 00000000000..7649f8cb4b8 --- /dev/null +++ b/CIME/tests/test_sys_jenkins_generic_job.py @@ -0,0 +1,166 @@ +#!/usr/bin/env python3 + +import glob +import os +import signal +import stat +import threading +import time + +import get_tests +from CIME import utils +from CIME.tests import base + + +class TestJenkinsGenericJob(base.BaseTestCase): + def setUp(self): + if utils.get_model() != "e3sm": + self.skipTest("Skipping Jenkins tests. E3SM feature") + super().setUp() + + # Need to run in a subdir in order to not have CTest clash. Name it + # such that it should be cleaned up by the parent tearDown + self._testdir = os.path.join( + self._testroot, "jenkins_test_%s" % self._baseline_name + ) + os.makedirs(self._testdir) + + # Change root to avoid clashing with other jenkins_generic_jobs + self._jenkins_root = os.path.join(self._testdir, "J") + + def tearDown(self): + super().tearDown() + + if "TESTRUNDIFF_ALTERNATE" in os.environ: + del os.environ["TESTRUNDIFF_ALTERNATE"] + + def simple_test(self, expect_works, extra_args, build_name=None): + if self.NO_BATCH: + extra_args += " --no-batch" + + # Need these flags to test dashboard if e3sm + if utils.get_model() == "e3sm" and build_name is not None: + extra_args += ( + " -p ACME_test --submit-to-cdash --cdash-build-group=Nightly -c %s" + % build_name + ) + + self.run_cmd_assert_result( + self, + "%s/jenkins_generic_job -r %s %s -B %s" + % (self.TOOLS_DIR, self._testdir, extra_args, self._baseline_area), + from_dir=self._testdir, + expected_stat=(0 if expect_works else utils.TESTS_FAILED_ERR_CODE), + ) + + def threaded_test(self, expect_works, extra_args, build_name=None): + try: + self.simple_test(expect_works, extra_args, build_name) + except AssertionError as e: + self._thread_error = str(e) + + def assert_num_leftovers(self, suite): + num_tests_in_tiny = len(get_tests.get_test_suite(suite)) + + jenkins_dirs = glob.glob( + "%s/*%s*/" % (self._jenkins_root, self._baseline_name.capitalize()) + ) # case dirs + # scratch_dirs = glob.glob("%s/*%s*/" % (self._testroot, test_id)) # blr/run dirs + + self.assertEqual( + num_tests_in_tiny, + len(jenkins_dirs), + msg="Wrong number of leftover directories in %s, expected %d, see %s" + % (self._jenkins_root, num_tests_in_tiny, jenkins_dirs), + ) + + # JGF: Can't test this at the moment due to root change flag 
given to jenkins_generic_job + # self.assertEqual(num_tests_in_tiny + 1, len(scratch_dirs), + # msg="Wrong number of leftover directories in %s, expected %d, see %s" % \ + # (self._testroot, num_tests_in_tiny, scratch_dirs)) + + def test_jenkins_generic_job(self): + # Generate fresh baselines so that this test is not impacted by + # unresolved diffs + self.simple_test(True, "-t cime_test_only_pass -g -b %s" % self._baseline_name) + self.assert_num_leftovers("cime_test_only_pass") + + build_name = "jenkins_generic_job_pass_%s" % utils.get_timestamp() + self.simple_test( + True, + "-t cime_test_only_pass -b %s" % self._baseline_name, + build_name=build_name, + ) + self.assert_num_leftovers( + "cime_test_only_pass" + ) # jenkins_generic_job should have automatically cleaned up leftovers from prior run + self.assert_dashboard_has_build(build_name) + + def test_jenkins_generic_job_kill(self): + build_name = "jenkins_generic_job_kill_%s" % utils.get_timestamp() + run_thread = threading.Thread( + target=self.threaded_test, + args=(False, " -t cime_test_only_slow_pass -b master", build_name), + ) + run_thread.daemon = True + run_thread.start() + + time.sleep(120) + + self.kill_subprocesses(sig=signal.SIGTERM) + + run_thread.join(timeout=30) + + self.assertFalse( + run_thread.is_alive(), msg="jenkins_generic_job should have finished" + ) + self.assertTrue( + self._thread_error is None, + msg="Thread had failure: %s" % self._thread_error, + ) + self.assert_dashboard_has_build(build_name) + + def test_jenkins_generic_job_realistic_dash(self): + # The actual quality of the cdash results for this test can only + # be inspected manually + + # Generate fresh baselines so that this test is not impacted by + # unresolved diffs + self.simple_test(False, "-t cime_test_all -g -b %s" % self._baseline_name) + self.assert_num_leftovers("cime_test_all") + + # Should create a diff + os.environ["TESTRUNDIFF_ALTERNATE"] = "True" + + # Should create a nml diff + # Modify namelist + fake_nl = """ + &fake_nml + fake_item = 'fake' + fake = .true. +/""" + baseline_glob = glob.glob( + os.path.join(self._baseline_area, self._baseline_name, "TESTRUNPASS*") + ) + self.assertEqual( + len(baseline_glob), + 1, + msg="Expected one match, got:\n%s" % "\n".join(baseline_glob), + ) + + for baseline_dir in baseline_glob: + nl_path = os.path.join(baseline_dir, "CaseDocs", "datm_in") + self.assertTrue(os.path.isfile(nl_path), msg="Missing file %s" % nl_path) + + os.chmod(nl_path, stat.S_IRUSR | stat.S_IWUSR) + with open(nl_path, "a") as nl_file: + nl_file.write(fake_nl) + + build_name = "jenkins_generic_job_mixed_%s" % utils.get_timestamp() + self.simple_test( + False, "-t cime_test_all -b %s" % self._baseline_name, build_name=build_name + ) + self.assert_num_leftovers( + "cime_test_all" + ) # jenkins_generic_job should have automatically cleaned up leftovers from prior run + self.assert_dashboard_has_build(build_name) diff --git a/CIME/tests/test_sys_macro_basic.py b/CIME/tests/test_sys_macro_basic.py new file mode 100644 index 00000000000..3e2cdd52a2f --- /dev/null +++ b/CIME/tests/test_sys_macro_basic.py @@ -0,0 +1,42 @@ +#!/usr/bin/env python3 + +from xml.etree.ElementTree import ParseError + +from CIME import utils +from CIME.tests import base +from CIME.tests import utils as test_utils +from CIME.XML.compilers import Compilers + + +class TestMacrosBasic(base.BaseTestCase): + """Basic infrastructure tests. + + This class contains tests that do not actually depend on the output of the + macro file conversion. 
This includes basic smoke testing and tests of + error-handling in the routine. + """ + + def test_script_is_callable(self): + """The test script can be called on valid output without dying.""" + # This is really more a smoke test of this script than anything else. + maker = Compilers(test_utils.MockMachines("mymachine", "SomeOS"), version=2.0) + test_xml = test_utils._wrap_config_compilers_xml( + "FALSE" + ) + test_utils.get_macros(maker, test_xml, "Makefile") + + def test_script_rejects_bad_xml(self): + """The macro writer rejects input that's not valid XML.""" + maker = Compilers(test_utils.MockMachines("mymachine", "SomeOS"), version=2.0) + with self.assertRaises(ParseError): + test_utils.get_macros(maker, "This is not valid XML.", "Makefile") + + def test_script_rejects_bad_build_system(self): + """The macro writer rejects a bad build system string.""" + maker = Compilers(test_utils.MockMachines("mymachine", "SomeOS"), version=2.0) + bad_string = "argle-bargle." + with self.assertRaisesRegex( + utils.CIMEError, + "Unrecognized build system provided to write_macros: " + bad_string, + ): + test_utils.get_macros(maker, "This string is irrelevant.", bad_string) diff --git a/CIME/tests/test_sys_make_macros.py b/CIME/tests/test_sys_make_macros.py new file mode 100644 index 00000000000..18d7eb7b8ee --- /dev/null +++ b/CIME/tests/test_sys_make_macros.py @@ -0,0 +1,622 @@ +#!/usr/bin/env python3 + + +from CIME import utils +from CIME.tests import utils as test_utils +from CIME.tests import base +from CIME.XML.compilers import Compilers + + +class TestMakeMacros(base.BaseTestCase): + """Makefile macros tests. + + This class contains tests of the Makefile output of Build. + + Aside from the usual setUp and test methods, this class has a utility method + (xml_to_tester) that converts XML input directly to a MakefileTester object. + """ + + def setUp(self): + self.test_os = "SomeOS" + self.test_machine = "mymachine" + self.test_compiler = ( + self.MACHINE.get_default_compiler() + if self.TEST_COMPILER is None + else self.TEST_COMPILER + ) + self.test_mpilib = ( + self.MACHINE.get_default_MPIlib(attributes={"compiler": self.test_compiler}) + if self.TEST_MPILIB is None + else self.TEST_MPILIB + ) + + self._maker = Compilers( + test_utils.MockMachines(self.test_machine, self.test_os), version=2.0 + ) + + super().setUp() + + def xml_to_tester(self, xml_string): + """Helper that directly converts an XML string to a MakefileTester.""" + test_xml = test_utils._wrap_config_compilers_xml(xml_string) + return test_utils.MakefileTester( + self, test_utils.get_macros(self._maker, test_xml, "Makefile") + ) + + def test_generic_item(self): + """The macro writer can write out a single generic item.""" + xml_string = "FALSE" + tester = self.xml_to_tester(xml_string) + tester.assert_variable_equals("SUPPORTS_CXX", "FALSE") + + def test_machine_specific_item(self): + """The macro writer can pick out a machine-specific item.""" + xml1 = """TRUE""".format( + self.test_machine + ) + xml2 = """FALSE""" + tester = self.xml_to_tester(xml1 + xml2) + tester.assert_variable_equals("SUPPORTS_CXX", "TRUE") + # Do this a second time, but with elements in the reverse order, to + # ensure that the code is not "cheating" by taking the first match. 
+ tester = self.xml_to_tester(xml2 + xml1) + tester.assert_variable_equals("SUPPORTS_CXX", "TRUE") + + def test_ignore_non_match(self): + """The macro writer ignores an entry with the wrong machine name.""" + xml1 = """TRUE""" + xml2 = """FALSE""" + tester = self.xml_to_tester(xml1 + xml2) + tester.assert_variable_equals("SUPPORTS_CXX", "FALSE") + # Again, double-check that we don't just get lucky with the order. + tester = self.xml_to_tester(xml2 + xml1) + tester.assert_variable_equals("SUPPORTS_CXX", "FALSE") + + def test_os_specific_item(self): + """The macro writer can pick out an OS-specific item.""" + xml1 = ( + """TRUE""".format( + self.test_os + ) + ) + xml2 = """FALSE""" + tester = self.xml_to_tester(xml1 + xml2) + tester.assert_variable_equals("SUPPORTS_CXX", "TRUE") + tester = self.xml_to_tester(xml2 + xml1) + tester.assert_variable_equals("SUPPORTS_CXX", "TRUE") + + def test_mach_other_compiler(self): + """The macro writer compiler-specific logic works as expected.""" + xml1 = """a b c""".format( + self.test_compiler + ) + xml2 = """x y z""".format( + self.test_machine + ) + xml3 = """x y z""".format( + self.test_machine, self.test_compiler + ) + xml4 = """x y z""".format( + self.test_machine, self.test_compiler + ) + tester = self.xml_to_tester(xml1) + tester.assert_variable_equals( + "CFLAGS", "a b c", var={"COMPILER": self.test_compiler} + ) + tester = self.xml_to_tester(xml1 + xml2) + tester.assert_variable_equals( + "CFLAGS", "a b c", var={"COMPILER": self.test_compiler} + ) + tester = self.xml_to_tester(xml2 + xml1) + tester.assert_variable_equals( + "CFLAGS", "a b c", var={"COMPILER": self.test_compiler} + ) + tester = self.xml_to_tester(xml1 + xml3) + tester.assert_variable_equals( + "CFLAGS", "a b c x y z", var={"COMPILER": self.test_compiler} + ) + tester = self.xml_to_tester(xml1 + xml4) + tester.assert_variable_equals( + "CFLAGS", "x y z", var={"COMPILER": self.test_compiler} + ) + tester = self.xml_to_tester(xml4 + xml1) + tester.assert_variable_equals( + "CFLAGS", "x y z", var={"COMPILER": self.test_compiler} + ) + + def test_mach_beats_os(self): + """The macro writer chooses machine-specific over os-specific matches.""" + xml1 = """FALSE""".format( + self.test_os + ) + xml2 = """TRUE""".format( + self.test_machine + ) + tester = self.xml_to_tester(xml1 + xml2) + tester.assert_variable_equals("SUPPORTS_CXX", "TRUE") + tester = self.xml_to_tester(xml2 + xml1) + tester.assert_variable_equals("SUPPORTS_CXX", "TRUE") + + def test_mach_and_os_beats_mach(self): + """The macro writer chooses the most-specific match possible.""" + xml1 = """FALSE""".format( + self.test_machine + ) + xml2 = """TRUE""" + xml2 = xml2.format(self.test_machine, self.test_os) + tester = self.xml_to_tester(xml1 + xml2) + tester.assert_variable_equals("SUPPORTS_CXX", "TRUE") + tester = self.xml_to_tester(xml2 + xml1) + tester.assert_variable_equals("SUPPORTS_CXX", "TRUE") + + def test_build_time_attribute(self): + """The macro writer writes conditionals for build-time choices.""" + xml1 = """/path/to/mpich""" + xml2 = """/path/to/openmpi""" + xml3 = """/path/to/default""" + tester = self.xml_to_tester(xml1 + xml2 + xml3) + tester.assert_variable_equals("MPI_PATH", "/path/to/default") + tester.assert_variable_equals( + "MPI_PATH", "/path/to/mpich", var={"MPILIB": "mpich"} + ) + tester.assert_variable_equals( + "MPI_PATH", "/path/to/openmpi", var={"MPILIB": "openmpi"} + ) + tester = self.xml_to_tester(xml3 + xml2 + xml1) + tester.assert_variable_equals("MPI_PATH", "/path/to/default") + 
tester.assert_variable_equals( + "MPI_PATH", "/path/to/mpich", var={"MPILIB": "mpich"} + ) + tester.assert_variable_equals( + "MPI_PATH", "/path/to/openmpi", var={"MPILIB": "openmpi"} + ) + + def test_reject_duplicate_defaults(self): + """The macro writer dies if given many defaults.""" + xml1 = """/path/to/default""" + xml2 = """/path/to/other_default""" + with self.assertRaisesRegex( + utils.CIMEError, + "Variable MPI_PATH is set ambiguously in config_compilers.xml.", + ): + self.xml_to_tester(xml1 + xml2) + + def test_reject_duplicates(self): + """The macro writer dies if given many matches for a given configuration.""" + xml1 = """/path/to/mpich""" + xml2 = """/path/to/mpich2""" + with self.assertRaisesRegex( + utils.CIMEError, + "Variable MPI_PATH is set ambiguously in config_compilers.xml.", + ): + self.xml_to_tester(xml1 + xml2) + + def test_reject_ambiguous(self): + """The macro writer dies if given an ambiguous set of matches.""" + xml1 = """/path/to/mpich""" + xml2 = """/path/to/mpi-debug""" + with self.assertRaisesRegex( + utils.CIMEError, + "Variable MPI_PATH is set ambiguously in config_compilers.xml.", + ): + self.xml_to_tester(xml1 + xml2) + + def test_compiler_changeable_at_build_time(self): + """The macro writer writes information for multiple compilers.""" + xml1 = """FALSE""" + xml2 = ( + """TRUE""" + ) + tester = self.xml_to_tester(xml1 + xml2) + tester.assert_variable_equals("SUPPORTS_CXX", "TRUE", var={"COMPILER": "gnu"}) + tester.assert_variable_equals("SUPPORTS_CXX", "FALSE") + + def test_base_flags(self): + """Test that we get "base" compiler flags.""" + xml1 = """-O2""" + tester = self.xml_to_tester(xml1) + tester.assert_variable_equals("FFLAGS", "-O2") + + def test_machine_specific_base_flags(self): + """Test selection among base compiler flag sets based on machine.""" + xml1 = """-O2""" + xml2 = """-O3""".format( + self.test_machine + ) + tester = self.xml_to_tester(xml1 + xml2) + tester.assert_variable_equals("FFLAGS", "-O3") + + def test_build_time_base_flags(self): + """Test selection of base flags based on build-time attributes.""" + xml1 = """-O2""" + xml2 = """-O3""" + tester = self.xml_to_tester(xml1 + xml2) + tester.assert_variable_equals("FFLAGS", "-O2") + tester.assert_variable_equals("FFLAGS", "-O3", var={"DEBUG": "TRUE"}) + + def test_build_time_base_flags_same_parent(self): + """Test selection of base flags in the same parent element.""" + xml1 = """-O2""" + xml2 = """-O3""" + tester = self.xml_to_tester( + "" + xml1 + xml2 + "" + ) + tester.assert_variable_equals("FFLAGS", "-O2") + tester.assert_variable_equals("FFLAGS", "-O3", var={"DEBUG": "TRUE"}) + # Check for order independence here, too. + tester = self.xml_to_tester( + "" + xml2 + xml1 + "" + ) + tester.assert_variable_equals("FFLAGS", "-O2") + tester.assert_variable_equals("FFLAGS", "-O3", var={"DEBUG": "TRUE"}) + + def test_append_flags(self): + """Test appending flags to a list.""" + xml1 = """-delicious""" + xml2 = """-cake""" + tester = self.xml_to_tester(xml1 + xml2) + tester.assert_variable_equals("FFLAGS", "-delicious -cake") + # Order independence, as usual. 
+        tester = self.xml_to_tester(xml2 + xml1)
+        tester.assert_variable_equals("FFLAGS", "-delicious -cake")
+
+    def test_machine_specific_append_flags(self):
+        """Test appending flags that are either more or less machine-specific."""
+        xml1 = """-delicious"""
+        xml2 = """-cake""".format(
+            self.test_machine
+        )
+        tester = self.xml_to_tester(xml1 + xml2)
+        tester.assert_variable_matches(
+            "FFLAGS", "^(-delicious -cake|-cake -delicious)$"
+        )
+
+    def test_machine_specific_base_keeps_append_flags(self):
+        """Test that machine-specific base flags don't override default append flags."""
+        xml1 = """-delicious"""
+        xml2 = """-cake""".format(
+            self.test_machine
+        )
+        tester = self.xml_to_tester(xml1 + xml2)
+        tester.assert_variable_equals("FFLAGS", "-cake -delicious")
+        tester = self.xml_to_tester(xml2 + xml1)
+        tester.assert_variable_equals("FFLAGS", "-cake -delicious")
+
+    def test_machine_specific_base_and_append_flags(self):
+        """Test that machine-specific base flags coexist with machine-specific append flags."""
+        xml1 = """-delicious""".format(
+            self.test_machine
+        )
+        xml2 = """-cake""".format(
+            self.test_machine
+        )
+        tester = self.xml_to_tester(xml1 + xml2)
+        tester.assert_variable_equals("FFLAGS", "-cake -delicious")
+        tester = self.xml_to_tester(xml2 + xml1)
+        tester.assert_variable_equals("FFLAGS", "-cake -delicious")
+
+    def test_append_flags_without_base(self):
+        """Test appending flags to a value set before Macros is included."""
+        xml1 = """-cake"""
+        tester = self.xml_to_tester(xml1)
+        tester.assert_variable_equals(
+            "FFLAGS", "-delicious -cake", var={"FFLAGS": "-delicious"}
+        )
+
+    def test_build_time_append_flags(self):
+        """Test build_time selection of compiler flags."""
+        xml1 = """-cake"""
+        xml2 = """-and-pie"""
+        tester = self.xml_to_tester(xml1 + xml2)
+        tester.assert_variable_equals("FFLAGS", "-cake")
+        tester.assert_variable_matches(
+            "FFLAGS", "^(-cake -and-pie|-and-pie -cake)$", var={"DEBUG": "TRUE"}
+        )
+
+    def test_environment_variable_insertion(self):
+        """Test that $ENV{..} inserts environment variables."""
+        # Exercise the $ENV{} reference style.
+        xml1 = """-L$ENV{NETCDF} -lnetcdf"""
+        tester = self.xml_to_tester(xml1)
+        tester.assert_variable_equals(
+            "LDFLAGS", "-L/path/to/netcdf -lnetcdf", env={"NETCDF": "/path/to/netcdf"}
+        )
+
+    def test_shell_command_insertion(self):
+        """Test that $SHELL inserts shell command output."""
+        xml1 = """-O$SHELL{echo 2} -fast"""
+        tester = self.xml_to_tester(xml1)
+        tester.assert_variable_equals("FFLAGS", "-O2 -fast")
+
+    def test_multiple_shell_commands(self):
+        """Test that more than one $SHELL element can be used."""
+        xml1 = """-O$SHELL{echo 2} -$SHELL{echo fast}"""
+        tester = self.xml_to_tester(xml1)
+        tester.assert_variable_equals("FFLAGS", "-O2 -fast")
+
+    def test_env_and_shell_command(self):
+        """Test that $ENV works inside $SHELL elements."""
+        xml1 = """-O$SHELL{echo $ENV{OPT_LEVEL}} -fast"""
+        tester = self.xml_to_tester(xml1)
+        tester.assert_variable_equals("FFLAGS", "-O2 -fast", env={"OPT_LEVEL": "2"})
+
+    def test_config_variable_insertion(self):
+        """Test that $VAR inserts variables from config_compilers."""
+        # Construct an absurd chain of references just to make sure that we
+        # don't pass by accident, e.g. outputting things in the right order
+        # just due to good luck in a hash somewhere.
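+        # The writer must fully expand this reference chain; the assertion
+        # below fails if any ${...} placeholder survives in MPI_LIB_NAME.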
+ xml1 = """stuff-${MPI_PATH}-stuff""" + xml2 = """${MPICC}""" + xml3 = """${MPICXX}""" + xml4 = """${MPIFC}""" + xml5 = """mpicc""" + tester = self.xml_to_tester( + "" + xml1 + xml2 + xml3 + xml4 + xml5 + "" + ) + tester.assert_variable_equals("MPI_LIB_NAME", "stuff-mpicc-stuff") + + def test_config_reject_self_references(self): + """Test that $VAR self-references are rejected.""" + # This is a special case of the next test, which also checks circular + # references. + xml1 = """${MPI_LIB_NAME}""" + err_msg = r".* has bad \$VAR references. Check for circular references or variables that are used in a \$VAR but not actually defined." + with self.assertRaisesRegex(utils.CIMEError, err_msg): + self.xml_to_tester("" + xml1 + "") + + def test_config_reject_cyclical_references(self): + """Test that cyclical $VAR references are rejected.""" + xml1 = """${MPI_PATH}""" + xml2 = """${MPI_LIB_NAME}""" + err_msg = r".* has bad \$VAR references. Check for circular references or variables that are used in a \$VAR but not actually defined." + with self.assertRaisesRegex(utils.CIMEError, err_msg): + self.xml_to_tester("" + xml1 + xml2 + "") + + def test_variable_insertion_with_machine_specific_setting(self): + """Test that machine-specific $VAR dependencies are correct.""" + xml1 = """something""" + xml2 = """$MPI_PATH""".format( + self.test_machine + ) + xml3 = """${MPI_LIB_NAME}""" + err_msg = r".* has bad \$VAR references. Check for circular references or variables that are used in a \$VAR but not actually defined." + with self.assertRaisesRegex(utils.CIMEError, err_msg): + self.xml_to_tester(xml1 + xml2 + xml3) + + def test_override_with_machine_and_new_attributes(self): + """Test that overrides with machine-specific settings with added attributes work correctly.""" + xml1 = """ + + icc + mpicxx + mpif90 + mpicc +""".format( + self.test_compiler + ) + xml2 = """ + + mpifoo + mpiffoo + mpifouc + +""".format( + self.test_compiler, self.test_machine, self.test_mpilib + ) + + tester = self.xml_to_tester(xml1 + xml2) + + tester.assert_variable_equals( + "SCC", + "icc", + var={"COMPILER": self.test_compiler, "MPILIB": self.test_mpilib}, + ) + tester.assert_variable_equals( + "MPICXX", + "mpifoo", + var={"COMPILER": self.test_compiler, "MPILIB": self.test_mpilib}, + ) + tester.assert_variable_equals( + "MPIFC", + "mpiffoo", + var={"COMPILER": self.test_compiler, "MPILIB": self.test_mpilib}, + ) + tester.assert_variable_equals( + "MPICC", + "mpicc", + var={"COMPILER": self.test_compiler, "MPILIB": self.test_mpilib}, + ) + + tester = self.xml_to_tester(xml2 + xml1) + + tester.assert_variable_equals( + "SCC", + "icc", + var={"COMPILER": self.test_compiler, "MPILIB": self.test_mpilib}, + ) + tester.assert_variable_equals( + "MPICXX", + "mpifoo", + var={"COMPILER": self.test_compiler, "MPILIB": self.test_mpilib}, + ) + tester.assert_variable_equals( + "MPIFC", + "mpiffoo", + var={"COMPILER": self.test_compiler, "MPILIB": self.test_mpilib}, + ) + tester.assert_variable_equals( + "MPICC", + "mpicc", + var={"COMPILER": self.test_compiler, "MPILIB": self.test_mpilib}, + ) + + def test_override_with_machine_and_same_attributes(self): + """Test that machine-specific conditional overrides with the same attribute work correctly.""" + xml1 = """ + + mpifc +""".format( + self.test_compiler, self.test_mpilib + ) + xml2 = """ + + mpif90 + +""".format( + self.test_machine, self.test_compiler, self.test_mpilib + ) + + tester = self.xml_to_tester(xml1 + xml2) + + tester.assert_variable_equals( + "MPIFC", + "mpif90", + 
var={"COMPILER": self.test_compiler, "MPILIB": self.test_mpilib}, + ) + + tester = self.xml_to_tester(xml2 + xml1) + + tester.assert_variable_equals( + "MPIFC", + "mpif90", + var={"COMPILER": self.test_compiler, "MPILIB": self.test_mpilib}, + ) + + def test_appends_not_overriden(self): + """Test that machine-specific base value changes don't interfere with appends.""" + xml1 = """ + + + -base1 + -debug1 + +""".format( + self.test_compiler + ) + + xml2 = """ + + + -base2 + -debug2 + +""".format( + self.test_machine, self.test_compiler + ) + + tester = self.xml_to_tester(xml1 + xml2) + + tester.assert_variable_equals( + "FFLAGS", "-base2", var={"COMPILER": self.test_compiler} + ) + tester.assert_variable_equals( + "FFLAGS", + "-base2 -debug2", + var={"COMPILER": self.test_compiler, "DEBUG": "TRUE"}, + ) + tester.assert_variable_equals( + "FFLAGS", + "-base2 -debug1", + var={"COMPILER": self.test_compiler, "DEBUG": "FALSE"}, + ) + + tester = self.xml_to_tester(xml2 + xml1) + + tester.assert_variable_equals( + "FFLAGS", "-base2", var={"COMPILER": self.test_compiler} + ) + tester.assert_variable_equals( + "FFLAGS", + "-base2 -debug2", + var={"COMPILER": self.test_compiler, "DEBUG": "TRUE"}, + ) + tester.assert_variable_equals( + "FFLAGS", + "-base2 -debug1", + var={"COMPILER": self.test_compiler, "DEBUG": "FALSE"}, + ) + + def test_multilevel_specificity(self): + """Check that settings with multiple levels of machine-specificity can be resolved.""" + xml1 = """ + + mpifc +""" + + xml2 = """ + + mpif03 +""".format( + self.test_os, self.test_mpilib + ) + + xml3 = """ + + mpif90 +""".format( + self.test_machine + ) + + # To verify order-independence, test every possible ordering of blocks. + testers = [] + testers.append(self.xml_to_tester(xml1 + xml2 + xml3)) + testers.append(self.xml_to_tester(xml1 + xml3 + xml2)) + testers.append(self.xml_to_tester(xml2 + xml1 + xml3)) + testers.append(self.xml_to_tester(xml2 + xml3 + xml1)) + testers.append(self.xml_to_tester(xml3 + xml1 + xml2)) + testers.append(self.xml_to_tester(xml3 + xml2 + xml1)) + + for tester in testers: + tester.assert_variable_equals( + "MPIFC", + "mpif90", + var={ + "COMPILER": self.test_compiler, + "MPILIB": self.test_mpilib, + "DEBUG": "TRUE", + }, + ) + tester.assert_variable_equals( + "MPIFC", + "mpif03", + var={ + "COMPILER": self.test_compiler, + "MPILIB": self.test_mpilib, + "DEBUG": "FALSE", + }, + ) + tester.assert_variable_equals( + "MPIFC", + "mpifc", + var={ + "COMPILER": self.test_compiler, + "MPILIB": "NON_MATCHING_MPI", + "DEBUG": "FALSE", + }, + ) + + def test_remove_dependency_issues(self): + """Check that overridden settings don't cause inter-variable dependencies.""" + xml1 = """ + + ${SFC} +""" + + xml2 = ( + """ +""".format( + self.test_machine + ) + + """ + ${MPIFC} + mpif90 +""" + ) + + tester = self.xml_to_tester(xml1 + xml2) + tester.assert_variable_equals("SFC", "mpif90") + tester.assert_variable_equals("MPIFC", "mpif90") + + tester = self.xml_to_tester(xml2 + xml1) + tester.assert_variable_equals("SFC", "mpif90") + tester.assert_variable_equals("MPIFC", "mpif90") diff --git a/CIME/tests/test_sys_manage_and_query.py b/CIME/tests/test_sys_manage_and_query.py new file mode 100644 index 00000000000..2d0350e571c --- /dev/null +++ b/CIME/tests/test_sys_manage_and_query.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python3 + +from CIME import utils +from CIME.tests import base +from CIME.XML.files import Files + + +class TestManageAndQuery(base.BaseTestCase): + """Tests various scripts to manage and query xml files""" + + 
def setUp(self): + if utils.get_model() == "e3sm": + self.skipTest("Skipping XML test management tests. E3SM does not use this.") + + super().setUp() + + def _run_and_assert_query_testlist(self, extra_args=""): + """Ensure that query_testlist runs successfully with the given extra arguments""" + files = Files() + testlist_drv = files.get_value("TESTS_SPEC_FILE", {"component": "drv"}) + + self.run_cmd_assert_result( + "{}/query_testlists --xml-testlist {} {}".format( + self.SCRIPT_DIR, testlist_drv, extra_args + ) + ) + + def test_query_testlists_runs(self): + """Make sure that query_testlists runs successfully + + This simply makes sure that query_testlists doesn't generate any errors + when it runs. This helps ensure that changes in other utilities don't + break query_testlists. + """ + self._run_and_assert_query_testlist(extra_args="--show-options") + + def test_query_testlists_define_testtypes_runs(self): + """Make sure that query_testlists runs successfully with the --define-testtypes argument""" + self._run_and_assert_query_testlist(extra_args="--define-testtypes") + + def test_query_testlists_count_runs(self): + """Make sure that query_testlists runs successfully with the --count argument""" + self._run_and_assert_query_testlist(extra_args="--count") + + def test_query_testlists_list_runs(self): + """Make sure that query_testlists runs successfully with the --list argument""" + self._run_and_assert_query_testlist(extra_args="--list categories") diff --git a/CIME/tests/test_sys_query_config.py b/CIME/tests/test_sys_query_config.py new file mode 100644 index 00000000000..1aee428cc16 --- /dev/null +++ b/CIME/tests/test_sys_query_config.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python3 + +from CIME import utils +from CIME.tests import base + + +class TestQueryConfig(base.BaseTestCase): + def setUp(self): + super().setUp() + + def test_query_compsets(self): + utils.run_cmd_no_fail("{}/query_config --compsets".format(self.SCRIPT_DIR)) + + def test_query_components(self): + utils.run_cmd_no_fail("{}/query_config --components".format(self.SCRIPT_DIR)) + + def test_query_grids(self): + utils.run_cmd_no_fail("{}/query_config --grids".format(self.SCRIPT_DIR)) + + def test_query_machines(self): + utils.run_cmd_no_fail("{}/query_config --machines".format(self.SCRIPT_DIR)) diff --git a/CIME/tests/test_sys_run_restart.py b/CIME/tests/test_sys_run_restart.py new file mode 100644 index 00000000000..4a7fb9e2391 --- /dev/null +++ b/CIME/tests/test_sys_run_restart.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python3 + +import os + +from CIME import utils +from CIME.tests import base + + +class TestRunRestart(base.BaseTestCase): + def test_run_restart(self): + if self.NO_FORTRAN_RUN: + self.skipTest("Skipping fortran test") + driver = utils.get_cime_default_driver() + if driver == "mct": + walltime = "00:15:00" + else: + walltime = "00:30:00" + + casedir = self._create_test( + ["--walltime " + walltime, "NODEFAIL_P1.f09_g16.X"], + test_id=self._baseline_name, + ) + rundir = utils.run_cmd_no_fail("./xmlquery RUNDIR --value", from_dir=casedir) + fail_sentinel = os.path.join(rundir, "FAIL_SENTINEL") + self.assertTrue(os.path.exists(fail_sentinel), msg="Missing %s" % fail_sentinel) + + self.assertEqual(open(fail_sentinel, "r").read().count("FAIL"), 3) + + def test_run_restart_too_many_fails(self): + if self.NO_FORTRAN_RUN: + self.skipTest("Skipping fortran test") + driver = utils.get_cime_default_driver() + if driver == "mct": + walltime = "00:15:00" + else: + walltime = "00:30:00" + + casedir = self._create_test( + 
["--walltime " + walltime, "NODEFAIL_P1.f09_g16.X"], + test_id=self._baseline_name, + env_changes="NODEFAIL_NUM_FAILS=5", + run_errors=True, + ) + rundir = utils.run_cmd_no_fail("./xmlquery RUNDIR --value", from_dir=casedir) + fail_sentinel = os.path.join(rundir, "FAIL_SENTINEL") + self.assertTrue(os.path.exists(fail_sentinel), msg="Missing %s" % fail_sentinel) + + self.assertEqual(open(fail_sentinel, "r").read().count("FAIL"), 4) diff --git a/CIME/tests/test_sys_save_timings.py b/CIME/tests/test_sys_save_timings.py new file mode 100644 index 00000000000..d571974c050 --- /dev/null +++ b/CIME/tests/test_sys_save_timings.py @@ -0,0 +1,150 @@ +#!/usr/bin/env python3 + +import getpass +import glob +import os + +from CIME import provenance +from CIME import utils +from CIME.tests import base +from CIME.case.case import Case + + +class TestSaveTimings(base.BaseTestCase): + def simple_test(self, manual_timing=False): + if self.NO_FORTRAN_RUN: + self.skipTest("Skipping fortran test") + timing_flag = "" if manual_timing else "--save-timing" + driver = utils.get_cime_default_driver() + if driver == "mct": + walltime = "00:15:00" + else: + walltime = "00:30:00" + self._create_test( + ["SMS_Ln9_P1.f19_g16_rx1.A", timing_flag, "--walltime=" + walltime], + test_id=self._baseline_name, + ) + + statuses = glob.glob( + "%s/*%s/TestStatus" % (self._testroot, self._baseline_name) + ) + self.assertEqual( + len(statuses), + 1, + msg="Should have had exactly one match, found %s" % statuses, + ) + casedir = os.path.dirname(statuses[0]) + + with Case(casedir, read_only=True) as case: + lids = utils.get_lids(case) + timing_dir = case.get_value("SAVE_TIMING_DIR") + casename = case.get_value("CASE") + + self.assertEqual(len(lids), 1, msg="Expected one LID, found %s" % lids) + + if manual_timing: + self.run_cmd_assert_result( + "cd %s && %s/save_provenance postrun" % (casedir, self.TOOLS_DIR) + ) + if utils.get_model() == "e3sm": + provenance_glob = os.path.join( + timing_dir, + "performance_archive", + getpass.getuser(), + casename, + lids[0] + "*", + ) + provenance_dirs = glob.glob(provenance_glob) + self.assertEqual( + len(provenance_dirs), + 1, + msg="wrong number of provenance dirs, expected 1, got {}, looked for {}".format( + provenance_dirs, provenance_glob + ), + ) + self.verify_perms("".join(provenance_dirs)) + + def test_save_timings(self): + self.simple_test() + + def test_save_timings_manual(self): + self.simple_test(manual_timing=True) + + def _record_success( + self, + test_name, + test_success, + commit, + exp_last_pass, + exp_trans_fail, + baseline_dir, + ): + provenance.save_test_success( + baseline_dir, None, test_name, test_success, force_commit_test=commit + ) + was_success, last_pass, trans_fail = provenance.get_test_success( + baseline_dir, None, test_name, testing=True + ) + self.assertEqual( + test_success, + was_success, + msg="Broken was_success {} {}".format(test_name, commit), + ) + self.assertEqual( + last_pass, + exp_last_pass, + msg="Broken last_pass {} {}".format(test_name, commit), + ) + self.assertEqual( + trans_fail, + exp_trans_fail, + msg="Broken trans_fail {} {}".format(test_name, commit), + ) + if test_success: + self.assertEqual(exp_last_pass, commit, msg="Should never") + + def test_success_recording(self): + if utils.get_model() != "e3sm": + self.skipTest("Skipping success recording tests. 
E3SM feature") + + fake_test1 = "faketest1" + fake_test2 = "faketest2" + baseline_dir = os.path.join(self._baseline_area, self._baseline_name) + + # Test initial state + was_success, last_pass, trans_fail = provenance.get_test_success( + baseline_dir, None, fake_test1, testing=True + ) + self.assertFalse(was_success, msg="Broken initial was_success") + self.assertEqual(last_pass, None, msg="Broken initial last_pass") + self.assertEqual(trans_fail, None, msg="Broken initial trans_fail") + + # Test first result (test1 fails, test2 passes) + # test_name , success, commit , expP , expTF, baseline) + self._record_success(fake_test1, False, "AAA", None, "AAA", baseline_dir) + self._record_success(fake_test2, True, "AAA", "AAA", None, baseline_dir) + + # Test second result matches first (no transition) (test1 fails, test2 passes) + # test_name , success, commit , expP , expTF, baseline) + self._record_success(fake_test1, False, "BBB", None, "AAA", baseline_dir) + self._record_success(fake_test2, True, "BBB", "BBB", None, baseline_dir) + + # Test transition to new state (first real transition) (test1 passes, test2 fails) + # test_name , success, commit , expP , expTF, baseline) + self._record_success(fake_test1, True, "CCC", "CCC", "AAA", baseline_dir) + self._record_success(fake_test2, False, "CCC", "BBB", "CCC", baseline_dir) + + # Test transition to new state (second real transition) (test1 fails, test2 passes) + # test_name , success, commit , expP , expTF, baseline) + self._record_success(fake_test1, False, "DDD", "CCC", "DDD", baseline_dir) + self._record_success(fake_test2, True, "DDD", "DDD", "CCC", baseline_dir) + + # Test final repeat (test1 fails, test2 passes) + # test_name , success, commit , expP , expTF, baseline) + self._record_success(fake_test1, False, "EEE", "CCC", "DDD", baseline_dir) + self._record_success(fake_test2, True, "EEE", "EEE", "CCC", baseline_dir) + + # Test final transition (test1 passes, test2 fails) + # test_name , success, commit , expP , expTF, baseline) + self._record_success(fake_test1, True, "FFF", "FFF", "DDD", baseline_dir) + self._record_success(fake_test2, False, "FFF", "EEE", "FFF", baseline_dir) diff --git a/CIME/tests/test_sys_single_submit.py b/CIME/tests/test_sys_single_submit.py new file mode 100644 index 00000000000..0909e7d83eb --- /dev/null +++ b/CIME/tests/test_sys_single_submit.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python3 + +from CIME import utils +from CIME.tests import base + + +class TestSingleSubmit(base.BaseTestCase): + def test_single_submit(self): + # Skip unless on a batch system and users did not select no-batch + if not self._hasbatch: + self.skipTest("Skipping single submit. Not valid without batch") + if utils.get_model() != "e3sm": + self.skipTest("Skipping single submit. E3SM experimental feature") + if self._machine not in ["sandiatoss3"]: + self.skipTest("Skipping single submit. 
Only works on sandiatoss3") + + # Keep small enough for now that we don't have to worry about load balancing + self._create_test( + ["--single-submit", "SMS_Ln9_P8.f45_g37_rx1.A", "SMS_Ln9_P8.f19_g16_rx1.A"], + env_changes="unset CIME_GLOBAL_WALLTIME &&", + ) diff --git a/CIME/tests/test_sys_test_scheduler.py b/CIME/tests/test_sys_test_scheduler.py new file mode 100755 index 00000000000..977cce26f20 --- /dev/null +++ b/CIME/tests/test_sys_test_scheduler.py @@ -0,0 +1,452 @@ +#!/usr/bin/env python3 + +import glob +import logging +import os +import unittest +from unittest import mock + +import get_tests +from CIME import utils +from CIME import test_status +from CIME import test_scheduler +from CIME.tests import base + + +class TestTestScheduler(base.BaseTestCase): + @mock.patch.dict(os.environ, {"CIME_MODEL": "cesm"}) + @mock.patch("time.strftime", return_value="00:00:00") + def test_chksum(self, strftime): # pylint: disable=unused-argument + ts = test_scheduler.TestScheduler( + ["SEQ_Ln9.f19_g16_rx1.A.cori-haswell_gnu"], + machine_name="cori-haswell", + chksum=True, + test_root="/tests", + ) + + with mock.patch.object(ts, "_shell_cmd_for_phase") as _shell_cmd_for_phase: + ts._run_phase( + "SEQ_Ln9.f19_g16_rx1.A.cori-haswell_gnu" + ) # pylint: disable=protected-access + + _shell_cmd_for_phase.assert_called_with( + "SEQ_Ln9.f19_g16_rx1.A.cori-haswell_gnu", + "./case.submit --skip-preview-namelist --chksum", + "RUN", + from_dir="/tests/SEQ_Ln9.f19_g16_rx1.A.cori-haswell_gnu.00:00:00", + ) + + def test_a_phases(self): + # exclude the MEMLEAK tests here. + tests = get_tests.get_full_test_names( + [ + "cime_test_only", + "^TESTMEMLEAKFAIL_P1.f09_g16.X", + "^TESTMEMLEAKPASS_P1.f09_g16.X", + "^TESTRUNSTARCFAIL_P1.f19_g16_rx1.A", + "^TESTTESTDIFF_P1.f19_g16_rx1.A", + "^TESTBUILDFAILEXC_P1.f19_g16_rx1.A", + "^TESTRUNFAILEXC_P1.f19_g16_rx1.A", + ], + self._machine, + self._compiler, + ) + self.assertEqual(len(tests), 3) + ct = test_scheduler.TestScheduler( + tests, + test_root=self._testroot, + output_root=self._testroot, + compiler=self._compiler, + mpilib=self.TEST_MPILIB, + machine_name=self.MACHINE.get_machine_name(), + ) + + build_fail_test = [item for item in tests if "TESTBUILDFAIL" in item][0] + run_fail_test = [item for item in tests if "TESTRUNFAIL" in item][0] + pass_test = [item for item in tests if "TESTRUNPASS" in item][0] + + self.assertTrue( + "BUILDFAIL" in build_fail_test, msg="Wrong test '%s'" % build_fail_test + ) + self.assertTrue( + "RUNFAIL" in run_fail_test, msg="Wrong test '%s'" % run_fail_test + ) + self.assertTrue("RUNPASS" in pass_test, msg="Wrong test '%s'" % pass_test) + + for idx, phase in enumerate(ct._phases): + for test in ct._tests: + if phase == test_scheduler.TEST_START: + continue + elif phase == test_status.MODEL_BUILD_PHASE: + ct._update_test_status(test, phase, test_status.TEST_PEND_STATUS) + + if test == build_fail_test: + ct._update_test_status( + test, phase, test_status.TEST_FAIL_STATUS + ) + self.assertTrue(ct._is_broken(test)) + self.assertFalse(ct._work_remains(test)) + else: + ct._update_test_status( + test, phase, test_status.TEST_PASS_STATUS + ) + self.assertFalse(ct._is_broken(test)) + self.assertTrue(ct._work_remains(test)) + + elif phase == test_status.RUN_PHASE: + if test == build_fail_test: + with self.assertRaises(utils.CIMEError): + ct._update_test_status( + test, phase, test_status.TEST_PEND_STATUS + ) + else: + ct._update_test_status( + test, phase, test_status.TEST_PEND_STATUS + ) + self.assertFalse(ct._work_remains(test)) + + if test == 
run_fail_test: + ct._update_test_status( + test, phase, test_status.TEST_FAIL_STATUS + ) + self.assertTrue(ct._is_broken(test)) + else: + ct._update_test_status( + test, phase, test_status.TEST_PASS_STATUS + ) + self.assertFalse(ct._is_broken(test)) + + self.assertFalse(ct._work_remains(test)) + + else: + with self.assertRaises(utils.CIMEError): + ct._update_test_status( + test, ct._phases[idx + 1], test_status.TEST_PEND_STATUS + ) + + with self.assertRaises(utils.CIMEError): + ct._update_test_status( + test, phase, test_status.TEST_PASS_STATUS + ) + + ct._update_test_status(test, phase, test_status.TEST_PEND_STATUS) + self.assertFalse(ct._is_broken(test)) + self.assertTrue(ct._work_remains(test)) + + with self.assertRaises(utils.CIMEError): + ct._update_test_status( + test, phase, test_status.TEST_PEND_STATUS + ) + + ct._update_test_status(test, phase, test_status.TEST_PASS_STATUS) + + with self.assertRaises(utils.CIMEError): + ct._update_test_status( + test, phase, test_status.TEST_FAIL_STATUS + ) + + self.assertFalse(ct._is_broken(test)) + self.assertTrue(ct._work_remains(test)) + + def test_b_full(self): + tests = get_tests.get_full_test_names( + ["cime_test_only"], self._machine, self._compiler + ) + test_id = "%s-%s" % (self._baseline_name, utils.get_timestamp()) + ct = test_scheduler.TestScheduler( + tests, + test_id=test_id, + no_batch=self.NO_BATCH, + test_root=self._testroot, + output_root=self._testroot, + compiler=self._compiler, + mpilib=self.TEST_MPILIB, + machine_name=self.MACHINE.get_machine_name(), + ) + + build_fail_test = [item for item in tests if "TESTBUILDFAIL_" in item][0] + build_fail_exc_test = [item for item in tests if "TESTBUILDFAILEXC" in item][0] + run_fail_test = [item for item in tests if "TESTRUNFAIL_" in item][0] + run_fail_exc_test = [item for item in tests if "TESTRUNFAILEXC" in item][0] + pass_test = [item for item in tests if "TESTRUNPASS" in item][0] + test_diff_test = [item for item in tests if "TESTTESTDIFF" in item][0] + mem_fail_test = [item for item in tests if "TESTMEMLEAKFAIL" in item][0] + mem_pass_test = [item for item in tests if "TESTMEMLEAKPASS" in item][0] + st_arch_fail_test = [item for item in tests if "TESTRUNSTARCFAIL" in item][0] + + log_lvl = logging.getLogger().getEffectiveLevel() + logging.disable(logging.CRITICAL) + try: + ct.run_tests() + finally: + logging.getLogger().setLevel(log_lvl) + + self._wait_for_tests(test_id, expect_works=False) + + test_statuses = glob.glob("%s/*%s/TestStatus" % (self._testroot, test_id)) + self.assertEqual(len(tests), len(test_statuses)) + + for x in test_statuses: + ts = test_status.TestStatus(test_dir=os.path.dirname(x)) + test_name = ts.get_name() + log_files = glob.glob( + "%s/%s*%s/TestStatus.log" % (self._testroot, test_name, test_id) + ) + self.assertEqual( + len(log_files), + 1, + "Expected exactly one test_status.TestStatus.log file, found %d" + % len(log_files), + ) + log_file = log_files[0] + if test_name == build_fail_test: + + self.assert_test_status( + test_name, + ts, + test_status.MODEL_BUILD_PHASE, + test_status.TEST_FAIL_STATUS, + ) + data = open(log_file, "r").read() + self.assertTrue( + "Intentional fail for testing infrastructure" in data, + "Broken test did not report build error:\n%s" % data, + ) + elif test_name == build_fail_exc_test: + data = open(log_file, "r").read() + self.assert_test_status( + test_name, + ts, + test_status.SHAREDLIB_BUILD_PHASE, + test_status.TEST_FAIL_STATUS, + ) + self.assertTrue( + "Exception from init" in data, + "Broken test did not report build 
error:\n%s" % data, + ) + elif test_name == run_fail_test: + self.assert_test_status( + test_name, ts, test_status.RUN_PHASE, test_status.TEST_FAIL_STATUS + ) + elif test_name == run_fail_exc_test: + self.assert_test_status( + test_name, ts, test_status.RUN_PHASE, test_status.TEST_FAIL_STATUS + ) + data = open(log_file, "r").read() + self.assertTrue( + "Exception from run_phase" in data, + "Broken test did not report run error:\n%s" % data, + ) + elif test_name == mem_fail_test: + self.assert_test_status( + test_name, + ts, + test_status.MEMLEAK_PHASE, + test_status.TEST_FAIL_STATUS, + ) + self.assert_test_status( + test_name, ts, test_status.RUN_PHASE, test_status.TEST_PASS_STATUS + ) + elif test_name == test_diff_test: + self.assert_test_status( + test_name, ts, "COMPARE_base_rest", test_status.TEST_FAIL_STATUS + ) + self.assert_test_status( + test_name, ts, test_status.RUN_PHASE, test_status.TEST_PASS_STATUS + ) + elif test_name == st_arch_fail_test: + self.assert_test_status( + test_name, ts, test_status.RUN_PHASE, test_status.TEST_PASS_STATUS + ) + self.assert_test_status( + test_name, + ts, + test_status.STARCHIVE_PHASE, + test_status.TEST_FAIL_STATUS, + ) + else: + self.assertTrue(test_name in [pass_test, mem_pass_test]) + self.assert_test_status( + test_name, ts, test_status.RUN_PHASE, test_status.TEST_PASS_STATUS + ) + if test_name == mem_pass_test: + self.assert_test_status( + test_name, + ts, + test_status.MEMLEAK_PHASE, + test_status.TEST_PASS_STATUS, + ) + + def test_c_use_existing(self): + tests = get_tests.get_full_test_names( + [ + "TESTBUILDFAIL_P1.f19_g16_rx1.A", + "TESTRUNFAIL_P1.f19_g16_rx1.A", + "TESTRUNPASS_P1.f19_g16_rx1.A", + ], + self._machine, + self._compiler, + ) + test_id = "%s-%s" % (self._baseline_name, utils.get_timestamp()) + ct = test_scheduler.TestScheduler( + tests, + test_id=test_id, + no_batch=self.NO_BATCH, + test_root=self._testroot, + output_root=self._testroot, + compiler=self._compiler, + mpilib=self.TEST_MPILIB, + machine_name=self.MACHINE.get_machine_name(), + ) + + build_fail_test = [item for item in tests if "TESTBUILDFAIL" in item][0] + run_fail_test = [item for item in tests if "TESTRUNFAIL" in item][0] + pass_test = [item for item in tests if "TESTRUNPASS" in item][0] + + log_lvl = logging.getLogger().getEffectiveLevel() + logging.disable(logging.CRITICAL) + try: + ct.run_tests() + finally: + logging.getLogger().setLevel(log_lvl) + + test_statuses = glob.glob("%s/*%s/TestStatus" % (self._testroot, test_id)) + self.assertEqual(len(tests), len(test_statuses)) + + self._wait_for_tests(test_id, expect_works=False) + + for x in test_statuses: + casedir = os.path.dirname(x) + ts = test_status.TestStatus(test_dir=casedir) + test_name = ts.get_name() + if test_name == build_fail_test: + self.assert_test_status( + test_name, + ts, + test_status.MODEL_BUILD_PHASE, + test_status.TEST_FAIL_STATUS, + ) + with test_status.TestStatus(test_dir=casedir) as ts: + ts.set_status( + test_status.MODEL_BUILD_PHASE, test_status.TEST_PEND_STATUS + ) + elif test_name == run_fail_test: + self.assert_test_status( + test_name, ts, test_status.RUN_PHASE, test_status.TEST_FAIL_STATUS + ) + with test_status.TestStatus(test_dir=casedir) as ts: + ts.set_status( + test_status.SUBMIT_PHASE, test_status.TEST_PEND_STATUS + ) + else: + self.assertTrue(test_name == pass_test) + self.assert_test_status( + test_name, + ts, + test_status.MODEL_BUILD_PHASE, + test_status.TEST_PASS_STATUS, + ) + self.assert_test_status( + test_name, + ts, + test_status.SUBMIT_PHASE, + 
test_status.TEST_PASS_STATUS, + ) + self.assert_test_status( + test_name, ts, test_status.RUN_PHASE, test_status.TEST_PASS_STATUS + ) + + os.environ["TESTBUILDFAIL_PASS"] = "True" + os.environ["TESTRUNFAIL_PASS"] = "True" + ct2 = test_scheduler.TestScheduler( + tests, + test_id=test_id, + no_batch=self.NO_BATCH, + use_existing=True, + test_root=self._testroot, + output_root=self._testroot, + compiler=self._compiler, + mpilib=self.TEST_MPILIB, + machine_name=self.MACHINE.get_machine_name(), + ) + + log_lvl = logging.getLogger().getEffectiveLevel() + logging.disable(logging.CRITICAL) + try: + ct2.run_tests() + finally: + logging.getLogger().setLevel(log_lvl) + + self._wait_for_tests(test_id) + + for x in test_statuses: + ts = test_status.TestStatus(test_dir=os.path.dirname(x)) + test_name = ts.get_name() + self.assert_test_status( + test_name, + ts, + test_status.MODEL_BUILD_PHASE, + test_status.TEST_PASS_STATUS, + ) + self.assert_test_status( + test_name, ts, test_status.SUBMIT_PHASE, test_status.TEST_PASS_STATUS + ) + self.assert_test_status( + test_name, ts, test_status.RUN_PHASE, test_status.TEST_PASS_STATUS + ) + + del os.environ["TESTBUILDFAIL_PASS"] + del os.environ["TESTRUNFAIL_PASS"] + + # test that passed tests are not re-run + + ct2 = test_scheduler.TestScheduler( + tests, + test_id=test_id, + no_batch=self.NO_BATCH, + use_existing=True, + test_root=self._testroot, + output_root=self._testroot, + compiler=self._compiler, + mpilib=self.TEST_MPILIB, + machine_name=self.MACHINE.get_machine_name(), + ) + + log_lvl = logging.getLogger().getEffectiveLevel() + logging.disable(logging.CRITICAL) + try: + ct2.run_tests() + finally: + logging.getLogger().setLevel(log_lvl) + + self._wait_for_tests(test_id) + + for x in test_statuses: + ts = test_status.TestStatus(test_dir=os.path.dirname(x)) + test_name = ts.get_name() + self.assert_test_status( + test_name, + ts, + test_status.MODEL_BUILD_PHASE, + test_status.TEST_PASS_STATUS, + ) + self.assert_test_status( + test_name, ts, test_status.SUBMIT_PHASE, test_status.TEST_PASS_STATUS + ) + self.assert_test_status( + test_name, ts, test_status.RUN_PHASE, test_status.TEST_PASS_STATUS + ) + + def test_d_retry(self): + args = [ + "TESTBUILDFAIL_P1.f19_g16_rx1.A", + "TESTRUNFAILRESET_P1.f19_g16_rx1.A", + "TESTRUNPASS_P1.f19_g16_rx1.A", + "--retry=1", + ] + + self._create_test(args) + + +if __name__ == "__main__": + unittest.main() diff --git a/CIME/tests/test_sys_unittest.py b/CIME/tests/test_sys_unittest.py new file mode 100644 index 00000000000..e95695093b8 --- /dev/null +++ b/CIME/tests/test_sys_unittest.py @@ -0,0 +1,100 @@ +#!/usr/bin/env python3 + +import os +import shutil +import sys + +from CIME import utils +from CIME.tests import base +from CIME.XML.compilers import Compilers + + +class TestUnitTest(base.BaseTestCase): + @classmethod + def setUpClass(cls): + cls._do_teardown = [] + cls._testroot = os.path.join(cls.TEST_ROOT, "TestUnitTests") + cls._testdirs = [] + os.environ["CIME_NO_CMAKE_MACRO"] = "ON" + + def _has_unit_test_support(self): + if self.TEST_COMPILER is None: + default_compiler = self.MACHINE.get_default_compiler() + compiler = Compilers(self.MACHINE, compiler=default_compiler) + else: + compiler = Compilers(self.MACHINE, compiler=self.TEST_COMPILER) + attrs = {"MPILIB": "mpi-serial", "compile_threaded": "FALSE"} + pfunit_path = compiler.get_optional_compiler_node( + "PFUNIT_PATH", attributes=attrs + ) + if pfunit_path is None: + return False + else: + return True + + def test_a_unit_test(self): + cls = self.__class__ + if not 
self._has_unit_test_support(): + self.skipTest( + "Skipping TestUnitTest - PFUNIT_PATH not found for the default compiler on this machine" + ) + test_dir = os.path.join(cls._testroot, "unit_tester_test") + cls._testdirs.append(test_dir) + os.makedirs(test_dir) + unit_test_tool = os.path.abspath( + os.path.join( + utils.get_cime_root(), "scripts", "fortran_unit_testing", "run_tests.py" + ) + ) + test_spec_dir = os.path.join( + os.path.dirname(unit_test_tool), "Examples", "interpolate_1d", "tests" + ) + args = "--build-dir {} --test-spec-dir {}".format(test_dir, test_spec_dir) + args += " --machine {}".format(self.MACHINE.get_machine_name()) + utils.run_cmd_no_fail("{} {}".format(unit_test_tool, args)) + cls._do_teardown.append(test_dir) + + def test_b_cime_f90_unit_tests(self): + cls = self.__class__ + if self.FAST_ONLY: + self.skipTest("Skipping slow test") + + if not self._has_unit_test_support(): + self.skipTest( + "Skipping TestUnitTest - PFUNIT_PATH not found for the default compiler on this machine" + ) + + test_dir = os.path.join(cls._testroot, "driver_f90_tests") + cls._testdirs.append(test_dir) + os.makedirs(test_dir) + test_spec_dir = utils.get_cime_root() + unit_test_tool = os.path.abspath( + os.path.join( + test_spec_dir, "scripts", "fortran_unit_testing", "run_tests.py" + ) + ) + args = "--build-dir {} --test-spec-dir {}".format(test_dir, test_spec_dir) + args += " --machine {}".format(self.MACHINE.get_machine_name()) + utils.run_cmd_no_fail("{} {}".format(unit_test_tool, args)) + cls._do_teardown.append(test_dir) + + @classmethod + def tearDownClass(cls): + do_teardown = ( + len(cls._do_teardown) > 0 + and sys.exc_info() == (None, None, None) + and not cls.NO_TEARDOWN + ) + del os.environ["CIME_NO_CMAKE_MACRO"] + + teardown_root = True + for tfile in cls._testdirs: + if tfile not in cls._do_teardown: + print("Detected failed test or user request no teardown") + print("Leaving case directory : %s" % tfile) + teardown_root = False + elif do_teardown: + shutil.rmtree(tfile) + + if teardown_root and do_teardown: + shutil.rmtree(cls._testroot) diff --git a/CIME/tests/test_sys_user_concurrent_mods.py b/CIME/tests/test_sys_user_concurrent_mods.py new file mode 100644 index 00000000000..c8173b34215 --- /dev/null +++ b/CIME/tests/test_sys_user_concurrent_mods.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python3 + +import os +import time + +from CIME import utils +from CIME.tests import base + + +class TestUserConcurrentMods(base.BaseTestCase): + def test_user_concurrent_mods(self): + # Put this inside any test that's slow + if self.FAST_ONLY: + self.skipTest("Skipping slow test") + + casedir = self._create_test( + ["--walltime=0:30:00", "TESTRUNUSERXMLCHANGE_Mmpi-serial.f19_g16.X"], + test_id=self._baseline_name, + ) + + with utils.Timeout(3000): + while True: + with open(os.path.join(casedir, "CaseStatus"), "r") as fd: + self._wait_for_tests(self._baseline_name) + contents = fd.read() + if contents.count("model execution success") == 2: + break + + time.sleep(5) + + rundir = utils.run_cmd_no_fail("./xmlquery RUNDIR --value", from_dir=casedir) + if utils.get_cime_default_driver() == "nuopc": + chk_file = "nuopc.runconfig" + else: + chk_file = "drv_in" + with open(os.path.join(rundir, chk_file), "r") as fd: + contents = fd.read() + self.assertTrue("stop_n = 6" in contents) diff --git a/CIME/tests/test_sys_wait_for_tests.py b/CIME/tests/test_sys_wait_for_tests.py new file mode 100644 index 00000000000..d6647571d78 --- /dev/null +++ b/CIME/tests/test_sys_wait_for_tests.py @@ -0,0 +1,381 @@ 
+#!/usr/bin/env python3 + +import os +import signal +import shutil +import sys +import time +import threading + +from CIME import utils +from CIME import test_status +from CIME.tests import base +from CIME.tests import utils as test_utils + + +class TestWaitForTests(base.BaseTestCase): + def setUp(self): + self._testroot = os.path.join(self.TEST_ROOT, "TestWaitForTests") + self._timestamp = utils.get_timestamp() + + # basic tests + self._testdir_all_pass = os.path.join( + self._testroot, "scripts_regression_tests.testdir_all_pass" + ) + self._testdir_with_fail = os.path.join( + self._testroot, "scripts_regression_tests.testdir_with_fail" + ) + self._testdir_unfinished = os.path.join( + self._testroot, "scripts_regression_tests.testdir_unfinished" + ) + self._testdir_unfinished2 = os.path.join( + self._testroot, "scripts_regression_tests.testdir_unfinished2" + ) + + # live tests + self._testdir_teststatus1 = os.path.join( + self._testroot, "scripts_regression_tests.testdir_teststatus1" + ) + self._testdir_teststatus2 = os.path.join( + self._testroot, "scripts_regression_tests.testdir_teststatus2" + ) + + self._testdirs = [ + self._testdir_all_pass, + self._testdir_with_fail, + self._testdir_unfinished, + self._testdir_unfinished2, + self._testdir_teststatus1, + self._testdir_teststatus2, + ] + basic_tests = self._testdirs[: self._testdirs.index(self._testdir_teststatus1)] + + for testdir in self._testdirs: + if os.path.exists(testdir): + shutil.rmtree(testdir) + os.makedirs(testdir) + + for r in range(10): + for testdir in basic_tests: + os.makedirs(os.path.join(testdir, str(r))) + test_utils.make_fake_teststatus( + os.path.join(testdir, str(r)), + "Test_%d" % r, + test_status.TEST_PASS_STATUS, + test_status.RUN_PHASE, + ) + + test_utils.make_fake_teststatus( + os.path.join(self._testdir_with_fail, "5"), + "Test_5", + test_status.TEST_FAIL_STATUS, + test_status.RUN_PHASE, + ) + test_utils.make_fake_teststatus( + os.path.join(self._testdir_unfinished, "5"), + "Test_5", + test_status.TEST_PEND_STATUS, + test_status.RUN_PHASE, + ) + test_utils.make_fake_teststatus( + os.path.join(self._testdir_unfinished2, "5"), + "Test_5", + test_status.TEST_PASS_STATUS, + test_status.SUBMIT_PHASE, + ) + + integration_tests = self._testdirs[len(basic_tests) :] + for integration_test in integration_tests: + os.makedirs(os.path.join(integration_test, "0")) + test_utils.make_fake_teststatus( + os.path.join(integration_test, "0"), + "Test_0", + test_status.TEST_PASS_STATUS, + test_status.CORE_PHASES[0], + ) + + # Set up proxy if possible + self._unset_proxy = self.setup_proxy() + + self._thread_error = None + + def tearDown(self): + do_teardown = sys.exc_info() == (None, None, None) and not self.NO_TEARDOWN + + if do_teardown: + for testdir in self._testdirs: + shutil.rmtree(testdir) + + self.kill_subprocesses() + + if self._unset_proxy: + del os.environ["http_proxy"] + + def simple_test(self, testdir, expected_results, extra_args="", build_name=None): + # Need these flags to test dashboard if e3sm + if utils.get_model() == "e3sm" and build_name is not None: + extra_args += " -b %s" % build_name + + expected_stat = 0 + for expected_result in expected_results: + if not ( + expected_result == "PASS" + or (expected_result == "PEND" and "-n" in extra_args) + ): + expected_stat = utils.TESTS_FAILED_ERR_CODE + + output = self.run_cmd_assert_result( + "%s/wait_for_tests -p ACME_test */TestStatus %s" + % (self.TOOLS_DIR, extra_args), + from_dir=testdir, + expected_stat=expected_stat, + ) + + lines = [ + line + for line 
in output.splitlines() + if ( + line.startswith("PASS") + or line.startswith("FAIL") + or line.startswith("PEND") + ) + ] + self.assertEqual(len(lines), len(expected_results)) + for idx, line in enumerate(lines): + testname, status = test_utils.parse_test_status(line) + self.assertEqual(status, expected_results[idx]) + self.assertEqual(testname, "Test_%d" % idx) + + def threaded_test(self, testdir, expected_results, extra_args="", build_name=None): + try: + self.simple_test(testdir, expected_results, extra_args, build_name) + except AssertionError as e: + self._thread_error = str(e) + + def test_wait_for_test_all_pass(self): + self.simple_test(self._testdir_all_pass, ["PASS"] * 10) + + def test_wait_for_test_with_fail(self): + expected_results = ["FAIL" if item == 5 else "PASS" for item in range(10)] + self.simple_test(self._testdir_with_fail, expected_results) + + def test_wait_for_test_no_wait(self): + expected_results = ["PEND" if item == 5 else "PASS" for item in range(10)] + self.simple_test(self._testdir_unfinished, expected_results, "-n") + + def test_wait_for_test_timeout(self): + expected_results = ["PEND" if item == 5 else "PASS" for item in range(10)] + self.simple_test(self._testdir_unfinished, expected_results, "--timeout=3") + + def test_wait_for_test_wait_for_pend(self): + run_thread = threading.Thread( + target=self.threaded_test, args=(self._testdir_unfinished, ["PASS"] * 10) + ) + run_thread.daemon = True + run_thread.start() + + time.sleep(5) # Kinda hacky + + self.assertTrue(run_thread.is_alive(), msg="wait_for_tests should have waited") + + with test_status.TestStatus( + test_dir=os.path.join(self._testdir_unfinished, "5") + ) as ts: + ts.set_status(test_status.RUN_PHASE, test_status.TEST_PASS_STATUS) + + run_thread.join(timeout=10) + + self.assertFalse( + run_thread.is_alive(), msg="wait_for_tests should have finished" + ) + + self.assertTrue( + self._thread_error is None, + msg="Thread had failure: %s" % self._thread_error, + ) + + def test_wait_for_test_wait_for_missing_run_phase(self): + run_thread = threading.Thread( + target=self.threaded_test, args=(self._testdir_unfinished2, ["PASS"] * 10) + ) + run_thread.daemon = True + run_thread.start() + + time.sleep(5) # Kinda hacky + + self.assertTrue(run_thread.is_alive(), msg="wait_for_tests should have waited") + + with test_status.TestStatus( + test_dir=os.path.join(self._testdir_unfinished2, "5") + ) as ts: + ts.set_status(test_status.RUN_PHASE, test_status.TEST_PASS_STATUS) + + run_thread.join(timeout=10) + + self.assertFalse( + run_thread.is_alive(), msg="wait_for_tests should have finished" + ) + + self.assertTrue( + self._thread_error is None, + msg="Thread had failure: %s" % self._thread_error, + ) + + def test_wait_for_test_wait_kill(self): + expected_results = ["PEND" if item == 5 else "PASS" for item in range(10)] + run_thread = threading.Thread( + target=self.threaded_test, args=(self._testdir_unfinished, expected_results) + ) + run_thread.daemon = True + run_thread.start() + + time.sleep(5) + + self.assertTrue(run_thread.is_alive(), msg="wait_for_tests should have waited") + + self.kill_python_subprocesses(signal.SIGTERM, expected_num_killed=1) + + run_thread.join(timeout=10) + + self.assertFalse( + run_thread.is_alive(), msg="wait_for_tests should have finished" + ) + + self.assertTrue( + self._thread_error is None, + msg="Thread had failure: %s" % self._thread_error, + ) + + def test_wait_for_test_cdash_pass(self): + expected_results = ["PASS"] * 10 + build_name = "regression_test_pass_" + 
self._timestamp + run_thread = threading.Thread( + target=self.threaded_test, + args=(self._testdir_all_pass, expected_results, "", build_name), + ) + run_thread.daemon = True + run_thread.start() + + run_thread.join(timeout=10) + + self.assertFalse( + run_thread.is_alive(), msg="wait_for_tests should have finished" + ) + + self.assertTrue( + self._thread_error is None, + msg="Thread had failure: %s" % self._thread_error, + ) + + self.assert_dashboard_has_build(build_name) + + def test_wait_for_test_cdash_kill(self): + expected_results = ["PEND" if item == 5 else "PASS" for item in range(10)] + build_name = "regression_test_kill_" + self._timestamp + run_thread = threading.Thread( + target=self.threaded_test, + args=(self._testdir_unfinished, expected_results, "", build_name), + ) + run_thread.daemon = True + run_thread.start() + + time.sleep(5) + + self.assertTrue(run_thread.is_alive(), msg="wait_for_tests should have waited") + + self.kill_python_subprocesses(signal.SIGTERM, expected_num_killed=1) + + run_thread.join(timeout=10) + + self.assertFalse( + run_thread.is_alive(), msg="wait_for_tests should have finished" + ) + self.assertTrue( + self._thread_error is None, + msg="Thread had failure: %s" % self._thread_error, + ) + + self.assert_dashboard_has_build(build_name) + + if utils.get_model() == "e3sm": + cdash_result_dir = os.path.join(self._testdir_unfinished, "Testing") + tag_file = os.path.join(cdash_result_dir, "TAG") + self.assertTrue(os.path.isdir(cdash_result_dir)) + self.assertTrue(os.path.isfile(tag_file)) + + tag = open(tag_file, "r").readlines()[0].strip() + xml_file = os.path.join(cdash_result_dir, tag, "Test.xml") + self.assertTrue(os.path.isfile(xml_file)) + + xml_contents = open(xml_file, "r").read() + self.assertTrue( + r"<TestList><Test>Test_0</Test><Test>Test_1</Test><Test>Test_2</Test><Test>Test_3</Test><Test>Test_4</Test><Test>Test_5</Test><Test>Test_6</Test><Test>Test_7</Test><Test>Test_8</Test><Test>Test_9</Test></TestList>" + in xml_contents + ) + self.assertTrue( + r'<Test Status="notrun"><Name>Test_5</Name>' in xml_contents + ) + + # TODO: Any further checking of xml output worth doing?
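+ # Hedged editorial sketch, reusing only the test_status API already imported
+ # in this file: live_test_impl below unblocks a polling wait_for_tests by
+ # rewriting one phase of a TestStatus file, e.g.
+ #
+ #   with test_status.TestStatus(test_dir=os.path.join(testdir, "0")) as ts:
+ #       ts.set_status(test_status.RUN_PHASE, test_status.TEST_PASS_STATUS)
+ #
+ # Exiting the context manager persists the new status, which the waiting
+ # process observes on its next poll.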
+ + def live_test_impl(self, testdir, expected_results, last_phase, last_status): + run_thread = threading.Thread( + target=self.threaded_test, args=(testdir, expected_results) + ) + run_thread.daemon = True + run_thread.start() + + time.sleep(5) + + self.assertTrue(run_thread.is_alive(), msg="wait_for_tests should have waited") + + for core_phase in test_status.CORE_PHASES[1:]: + with test_status.TestStatus( + test_dir=os.path.join(self._testdir_teststatus1, "0") + ) as ts: + ts.set_status( + core_phase, + last_status + if core_phase == last_phase + else test_status.TEST_PASS_STATUS, + ) + + time.sleep(5) + + if core_phase != last_phase: + self.assertTrue( + run_thread.is_alive(), + msg="wait_for_tests should have waited after passing phase {}".format( + core_phase + ), + ) + else: + run_thread.join(timeout=10) + self.assertFalse( + run_thread.is_alive(), + msg="wait_for_tests should have finished after phase {}".format( + core_phase + ), + ) + break + + self.assertTrue( + self._thread_error is None, + msg="Thread had failure: %s" % self._thread_error, + ) + + def test_wait_for_test_test_status_integration_pass(self): + self.live_test_impl( + self._testdir_teststatus1, + ["PASS"], + test_status.RUN_PHASE, + test_status.TEST_PASS_STATUS, + ) + + def test_wait_for_test_test_status_integration_submit_fail(self): + self.live_test_impl( + self._testdir_teststatus1, + ["FAIL"], + test_status.SUBMIT_PHASE, + test_status.TEST_FAIL_STATUS, + ) diff --git a/CIME/tests/test_test_status.py b/CIME/tests/test_test_status.py index 0a5617e3113..0b79c8bac6a 100755 --- a/CIME/tests/test_test_status.py +++ b/CIME/tests/test_test_status.py @@ -6,17 +6,20 @@ from CIME import expected_fails from CIME.tests.custom_assertions_test_status import CustomAssertionsTestStatus + class TestTestStatus(CustomAssertionsTestStatus): - _TESTNAME = 'fake_test' + _TESTNAME = "fake_test" # An arbitrary phase we can use when we want to work with a non-core phase _NON_CORE_PHASE = test_status.MEMLEAK_PHASE def setUp(self): - self._ts = test_status.TestStatus(test_dir=os.path.join('nonexistent', 'path'), - test_name=self._TESTNAME, - no_io=True) + self._ts = test_status.TestStatus( + test_dir=os.path.join("nonexistent", "path"), + test_name=self._TESTNAME, + no_io=True, + ) self._set_core_phases_to_pass() def _set_core_phases_to_pass(self): @@ -45,75 +48,78 @@ def _set_phase_to_status(self, phase, status): def test_psdump_corePhasesPass(self): output = self._ts.phase_statuses_dump() self.assert_core_phases(output, self._TESTNAME, fails=[]) - self.assert_num_expected_unexpected_fails(output, - num_expected=0, - num_unexpected=0) + self.assert_num_expected_unexpected_fails( + output, num_expected=0, num_unexpected=0 + ) def test_psdump_oneCorePhaseFails(self): fail_phase = self._set_last_core_phase_to_fail() output = self._ts.phase_statuses_dump() self.assert_core_phases(output, self._TESTNAME, fails=[fail_phase]) - self.assert_num_expected_unexpected_fails(output, - num_expected=0, - num_unexpected=0) + self.assert_num_expected_unexpected_fails( + output, num_expected=0, num_unexpected=0 + ) def test_psdump_oneCorePhaseFailsAbsentFromXFails(self): """One phase fails. 
There is an expected fails list, but that phase is not in it.""" fail_phase = self._set_last_core_phase_to_fail() xfails = expected_fails.ExpectedFails() - xfails.add_failure(phase=self._NON_CORE_PHASE, - expected_status=test_status.TEST_FAIL_STATUS) + xfails.add_failure( + phase=self._NON_CORE_PHASE, expected_status=test_status.TEST_FAIL_STATUS + ) output = self._ts.phase_statuses_dump(xfails=xfails) - self.assert_status_of_phase(output, - test_status.TEST_FAIL_STATUS, - fail_phase, - self._TESTNAME, - xfail='no') - self.assert_num_expected_unexpected_fails(output, - num_expected=0, - num_unexpected=0) + self.assert_status_of_phase( + output, test_status.TEST_FAIL_STATUS, fail_phase, self._TESTNAME, xfail="no" + ) + self.assert_num_expected_unexpected_fails( + output, num_expected=0, num_unexpected=0 + ) def test_psdump_oneCorePhaseFailsInXFails(self): """One phase fails. That phase is in the expected fails list.""" fail_phase = self._set_last_core_phase_to_fail() xfails = expected_fails.ExpectedFails() - xfails.add_failure(phase=fail_phase, - expected_status=test_status.TEST_FAIL_STATUS) + xfails.add_failure( + phase=fail_phase, expected_status=test_status.TEST_FAIL_STATUS + ) output = self._ts.phase_statuses_dump(xfails=xfails) - self.assert_status_of_phase(output, - test_status.TEST_FAIL_STATUS, - fail_phase, - self._TESTNAME, - xfail='expected') - self.assert_num_expected_unexpected_fails(output, - num_expected=1, - num_unexpected=0) + self.assert_status_of_phase( + output, + test_status.TEST_FAIL_STATUS, + fail_phase, + self._TESTNAME, + xfail="expected", + ) + self.assert_num_expected_unexpected_fails( + output, num_expected=1, num_unexpected=0 + ) def test_psdump_oneCorePhasePassesInXFails(self): """One phase passes despite being in the expected fails list.""" xfail_phase = test_status.CORE_PHASES[-1] xfails = expected_fails.ExpectedFails() - xfails.add_failure(phase=xfail_phase, - expected_status=test_status.TEST_FAIL_STATUS) + xfails.add_failure( + phase=xfail_phase, expected_status=test_status.TEST_FAIL_STATUS + ) output = self._ts.phase_statuses_dump(xfails=xfails) - self.assert_status_of_phase(output, - test_status.TEST_PASS_STATUS, - xfail_phase, - self._TESTNAME, - xfail='unexpected') - self.assert_num_expected_unexpected_fails(output, - num_expected=0, - num_unexpected=1) + self.assert_status_of_phase( + output, + test_status.TEST_PASS_STATUS, + xfail_phase, + self._TESTNAME, + xfail="unexpected", + ) + self.assert_num_expected_unexpected_fails( + output, num_expected=0, num_unexpected=1 + ) def test_psdump_skipPasses(self): """With the skip_passes argument, only non-passes should appear""" fail_phase = self._set_last_core_phase_to_fail() output = self._ts.phase_statuses_dump(skip_passes=True) - self.assert_status_of_phase(output, - test_status.TEST_FAIL_STATUS, - fail_phase, - self._TESTNAME, - xfail='no') + self.assert_status_of_phase( + output, test_status.TEST_FAIL_STATUS, fail_phase, self._TESTNAME, xfail="no" + ) for phase in test_status.CORE_PHASES: if phase != fail_phase: self.assert_phase_absent(output, phase, self._TESTNAME) @@ -122,17 +128,21 @@ def test_psdump_unexpectedPass_shouldBePresent(self): """Even with the skip_passes argument, an unexpected PASS should be present""" xfail_phase = test_status.CORE_PHASES[-1] xfails = expected_fails.ExpectedFails() - xfails.add_failure(phase=xfail_phase, - expected_status=test_status.TEST_FAIL_STATUS) + xfails.add_failure( + phase=xfail_phase, expected_status=test_status.TEST_FAIL_STATUS + ) output = 
self._ts.phase_statuses_dump(skip_passes=True, xfails=xfails) - self.assert_status_of_phase(output, - test_status.TEST_PASS_STATUS, - xfail_phase, - self._TESTNAME, - xfail='unexpected') + self.assert_status_of_phase( + output, + test_status.TEST_PASS_STATUS, + xfail_phase, + self._TESTNAME, + xfail="unexpected", + ) for phase in test_status.CORE_PHASES: if phase != xfail_phase: self.assert_phase_absent(output, phase, self._TESTNAME) -if __name__ == '__main__': + +if __name__ == "__main__": unittest.main() diff --git a/CIME/tests/test_unit_case.py b/CIME/tests/test_unit_case.py new file mode 100755 index 00000000000..7fc78b466bf --- /dev/null +++ b/CIME/tests/test_unit_case.py @@ -0,0 +1,398 @@ +#!/usr/bin/env python3 + +import os +import unittest +from unittest import mock +import tempfile + +from CIME.case import case_submit +from CIME.case import Case +from CIME import utils as cime_utils + + +def make_valid_case(path): + """Make the given path look like a valid case to avoid errors""" + # Case validity is determined by checking for an env_case.xml file. So put one there + # to suggest that this directory is a valid case directory. Open in append mode in + # case the file already exists. + with open(os.path.join(path, "env_case.xml"), "a"): + pass + + +class TestCaseSubmit(unittest.TestCase): + def test_check_case(self): + case = mock.MagicMock() + + case_submit.check_case(case, chksum=True) + + case.check_all_input_data.assert_called_with(chksum=True) + + @mock.patch("CIME.case.case_submit.lock_file") + @mock.patch("CIME.case.case_submit.unlock_file") + @mock.patch("os.path.basename") + def test__submit( + self, lock_file, unlock_file, basename + ): # pylint: disable=unused-argument + case = mock.MagicMock() + + case_submit._submit(case, chksum=True) # pylint: disable=protected-access + + case.check_case.assert_called_with(skip_pnl=False, chksum=True) + + @mock.patch("CIME.case.case_submit._submit") + @mock.patch("CIME.case.case.Case.initialize_derived_attributes") + @mock.patch("CIME.case.case.Case.get_value") + @mock.patch("CIME.case.case.Case.read_xml") + def test_submit( + self, read_xml, get_value, init, _submit + ): # pylint: disable=unused-argument + with tempfile.TemporaryDirectory() as tempdir: + get_value.side_effect = [ + tempdir, + tempdir, + True, + "baseid", + None, + True, + ] + + make_valid_case(tempdir) + with Case(tempdir) as case: + case.submit(chksum=True) + + _submit.assert_called_with( + case, + job=None, + no_batch=False, + prereq=None, + allow_fail=False, + resubmit=False, + resubmit_immediate=False, + skip_pnl=False, + mail_user=None, + mail_type=None, + batch_args=None, + workflow=True, + chksum=True, + ) + + +class TestCase(unittest.TestCase): + def setUp(self): + self.srcroot = os.path.abspath(cime_utils.get_cime_root()) + self.tempdir = tempfile.TemporaryDirectory() + + @mock.patch("CIME.case.case.Case.read_xml") + @mock.patch("sys.argv", ["/src/create_newcase", "--machine", "docker"]) + @mock.patch("time.strftime", return_value="00:00:00") + @mock.patch("socket.getfqdn", return_value="host1") + @mock.patch("getpass.getuser", side_effect=["root", "root", "johndoe"]) + def test_new_hash( + self, getuser, getfqdn, strftime, read_xml + ): # pylint: disable=unused-argument + with self.tempdir as tempdir: + make_valid_case(tempdir) + with Case(tempdir) as case: + expected = ( + "134a939f62115fb44bf08a46bfb2bd13426833b5c8848cf7c4884af7af05b91a" + ) + + # Check idempotency + for _ in range(2): + value = case.new_hash() + + self.assertTrue( + value == expected, "{} 
!= {}".format(value, expected) + ) + + expected = ( + "bb59f1c473ac07e9dd30bfab153c0530a777f89280b716cf42e6fe2f49811a6e" + ) + + value = case.new_hash() + + self.assertTrue(value == expected, "{} != {}".format(value, expected)) + + @mock.patch("CIME.case.case.Case.read_xml") + @mock.patch("sys.argv", ["/src/create_newcase", "--machine", "docker"]) + @mock.patch("time.strftime", return_value="00:00:00") + @mock.patch("CIME.case.case.lock_file") + @mock.patch("CIME.case.case.Case.set_lookup_value") + @mock.patch("CIME.case.case.Case.apply_user_mods") + @mock.patch("CIME.case.case.Case.create_caseroot") + @mock.patch("CIME.case.case.Case.configure") + @mock.patch("socket.getfqdn", return_value="host1") + @mock.patch("getpass.getuser", return_value="root") + @mock.patch.dict(os.environ, {"CIME_MODEL": "cesm"}) + def test_copy( + self, + getuser, + getfqdn, + configure, + create_caseroot, # pylint: disable=unused-argument + apply_user_mods, + set_lookup_value, + lock_file, + strftime, # pylint: disable=unused-argument + read_xml, + ): # pylint: disable=unused-argument + expected_first_hash = ( + "134a939f62115fb44bf08a46bfb2bd13426833b5c8848cf7c4884af7af05b91a" + ) + expected_second_hash = ( + "3561339a49daab999e3c4ea2f03a9c6acc33296a5bc35f1bfb82e7b5e10bdf38" + ) + + with self.tempdir as tempdir: + caseroot = os.path.join(tempdir, "test1") + with Case(caseroot, read_only=False) as case: + srcroot = os.path.abspath( + os.path.join(os.path.dirname(__file__), "../../../../../") + ) + case.create( + "test1", srcroot, "A", "f19_g16_rx1", machine_name="cori-haswell" + ) + + # Check that they're all called + configure.assert_called_with( + "A", + "f19_g16_rx1", + machine_name="cori-haswell", + project=None, + pecount=None, + compiler=None, + mpilib=None, + pesfile=None, + gridfile=None, + multi_driver=False, + ninst=1, + test=False, + walltime=None, + queue=None, + output_root=None, + run_unsupported=False, + answer=None, + input_dir=None, + driver=None, + workflowid="default", + non_local=False, + extra_machines_dir=None, + case_group=None, + ngpus_per_node=0, + ) + create_caseroot.assert_called() + apply_user_mods.assert_called() + lock_file.assert_called() + + set_lookup_value.assert_called_with("CASE_HASH", expected_first_hash) + + strftime.return_value = "10:00:00" + with mock.patch( + "CIME.case.case.Case.set_value" + ) as set_value, mock.patch("sys.argv", ["/src/create_clone"]): + case.copy("test2", "{}_2".format(tempdir)) + + set_value.assert_called_with("CASE_HASH", expected_second_hash) + + @mock.patch("CIME.case.case.Case.read_xml") + @mock.patch("sys.argv", ["/src/create_newcase", "--machine", "docker"]) + @mock.patch("time.strftime", return_value="00:00:00") + @mock.patch("CIME.case.case.lock_file") + @mock.patch("CIME.case.case.Case.set_lookup_value") + @mock.patch("CIME.case.case.Case.apply_user_mods") + @mock.patch("CIME.case.case.Case.create_caseroot") + @mock.patch("CIME.case.case.Case.configure") + @mock.patch("socket.getfqdn", return_value="host1") + @mock.patch("getpass.getuser", return_value="root") + @mock.patch.dict(os.environ, {"CIME_MODEL": "cesm"}) + def test_create( + self, + get_user, + getfqdn, + configure, + create_caseroot, # pylint: disable=unused-argument + apply_user_mods, + set_lookup_value, + lock_file, + strftime, # pylint: disable=unused-argument + read_xml, + ): # pylint: disable=unused-argument + with self.tempdir as tempdir: + caseroot = os.path.join(tempdir, "test1") + with Case(caseroot, read_only=False) as case: + srcroot = os.path.abspath( + 
os.path.join(os.path.dirname(__file__), "../../../../../") + ) + case.create( + "test1", srcroot, "A", "f19_g16_rx1", machine_name="cori-haswell" + ) + + # Check that they're all called + configure.assert_called_with( + "A", + "f19_g16_rx1", + machine_name="cori-haswell", + project=None, + pecount=None, + compiler=None, + mpilib=None, + pesfile=None, + gridfile=None, + multi_driver=False, + ninst=1, + test=False, + walltime=None, + queue=None, + output_root=None, + run_unsupported=False, + answer=None, + input_dir=None, + driver=None, + workflowid="default", + non_local=False, + extra_machines_dir=None, + case_group=None, + ngpus_per_node=0, + ) + create_caseroot.assert_called() + apply_user_mods.assert_called() + lock_file.assert_called() + + set_lookup_value.assert_called_with( + "CASE_HASH", + "134a939f62115fb44bf08a46bfb2bd13426833b5c8848cf7c4884af7af05b91a", + ) + + +class TestCase_RecordCmd(unittest.TestCase): + def setUp(self): + self.srcroot = os.path.abspath(cime_utils.get_cime_root()) + self.tempdir = tempfile.TemporaryDirectory() + + def assert_calls_match(self, calls, expected): + self.assertTrue(len(calls) == len(expected), calls) + + for x, y in zip(calls, expected): + self.assertTrue(x == y, calls) + + @mock.patch("CIME.case.case.Case.__init__", return_value=None) + @mock.patch("CIME.case.case.Case.flush") + @mock.patch("CIME.case.case.Case.get_value") + @mock.patch("CIME.case.case.open", mock.mock_open()) + @mock.patch("time.strftime", return_value="00:00:00") + @mock.patch("sys.argv", ["/src/create_newcase"]) + def test_error( + self, strftime, get_value, flush, init + ): # pylint: disable=unused-argument + Case._force_read_only = False # pylint: disable=protected-access + + with self.tempdir as tempdir, mock.patch( + "CIME.case.case.open", mock.mock_open() + ) as m: + m.side_effect = PermissionError() + + with Case(tempdir) as case: + get_value.side_effect = [tempdir, "/src"] + + # We didn't need to make tempdir look like a valid case for the Case + # constructor because we mock that constructor, but we *do* need to make + # it look like a valid case for record_cmd. 
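+ # The mocked open raises PermissionError, so this call passes only if
+ # record_cmd swallows the error instead of propagating it.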
+ make_valid_case(tempdir) + case.record_cmd() + + @mock.patch("CIME.case.case.Case.__init__", return_value=None) + @mock.patch("CIME.case.case.Case.flush") + @mock.patch("CIME.case.case.Case.get_value") + @mock.patch("CIME.case.case.open", mock.mock_open()) + @mock.patch("time.strftime", return_value="00:00:00") + @mock.patch("sys.argv", ["/src/create_newcase"]) + def test_init( + self, strftime, get_value, flush, init + ): # pylint: disable=unused-argument + Case._force_read_only = False # pylint: disable=protected-access + + mocked_open = mock.mock_open() + + with self.tempdir as tempdir, mock.patch("CIME.case.case.open", mocked_open): + with Case(tempdir) as case: + get_value.side_effect = [tempdir, "/src"] + + case.record_cmd(init=True) + + mocked_open.assert_called_with(f"{tempdir}/replay.sh", "a") + + handle = mocked_open() + + handle.writelines.assert_called_with( + [ + "#!/bin/bash\n\n", + "set -e\n\n", + "# Created 00:00:00\n\n", + 'CASEDIR="{}"\n\n'.format(tempdir), + "/src/create_newcase\n\n", + 'cd "${CASEDIR}"\n\n', + ] + ) + + @mock.patch("CIME.case.case.Case.__init__", return_value=None) + @mock.patch("CIME.case.case.Case.flush") + @mock.patch("CIME.case.case.Case.get_value") + @mock.patch("CIME.case.case.open", mock.mock_open()) + @mock.patch("time.strftime", return_value="00:00:00") + @mock.patch("sys.argv", ["/src/scripts/create_newcase"]) + def test_sub_relative( + self, strftime, get_value, flush, init + ): # pylint: disable=unused-argument + Case._force_read_only = False # pylint: disable=protected-access + + mocked_open = mock.mock_open() + + with self.tempdir as tempdir, mock.patch("CIME.case.case.open", mocked_open): + with Case(tempdir) as case: + get_value.side_effect = [tempdir, "/src"] + + case.record_cmd(init=True) + + expected = [ + "#!/bin/bash\n\n", + "set -e\n\n", + "# Created 00:00:00\n\n", + 'CASEDIR="{}"\n\n'.format(tempdir), + "/src/scripts/create_newcase\n\n", + 'cd "${CASEDIR}"\n\n', + ] + + handle = mocked_open() + handle.writelines.assert_called_with(expected) + + @mock.patch("CIME.case.case.Case.__init__", return_value=None) + @mock.patch("CIME.case.case.Case.flush") + @mock.patch("CIME.case.case.Case.get_value") + def test_cmd_arg(self, get_value, flush, init): # pylint: disable=unused-argument + Case._force_read_only = False # pylint: disable=protected-access + + mocked_open = mock.mock_open() + + with self.tempdir as tempdir, mock.patch("CIME.case.case.open", mocked_open): + with Case(tempdir) as case: + get_value.side_effect = [ + tempdir, + "/src", + ] + + # We didn't need to make tempdir look like a valid case for the Case + # constructor because we mock that constructor, but we *do* need to make + # it look like a valid case for record_cmd. 
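+ # Passing an explicit argv here means record_cmd should write that command
+ # verbatim (space-joined) rather than rebuilding it from sys.argv.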
+ make_valid_case(tempdir) + case.record_cmd(["/some/custom/command", "arg1"]) + + expected = [ + "/some/custom/command arg1\n\n", + ] + + handle = mocked_open() + handle.writelines.assert_called_with(expected) + + +if __name__ == "__main__": + unittest.main() diff --git a/CIME/tests/test_unit_case_fake.py b/CIME/tests/test_unit_case_fake.py new file mode 100755 index 00000000000..448931ecc7c --- /dev/null +++ b/CIME/tests/test_unit_case_fake.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python3 + +""" +This module contains unit tests of CaseFake +""" + +import unittest +import tempfile +import os +import shutil + +from CIME.tests.case_fake import CaseFake + + +class TestCaseFake(unittest.TestCase): + def setUp(self): + self.tempdir = tempfile.mkdtemp() + + def tearDown(self): + shutil.rmtree(self.tempdir, ignore_errors=True) + + def test_create_clone(self): + # Setup + old_caseroot = os.path.join(self.tempdir, "oldcase") + oldcase = CaseFake(old_caseroot) + oldcase.set_value("foo", "bar") + + # Exercise + new_caseroot = os.path.join(self.tempdir, "newcase") + clone = oldcase.create_clone(new_caseroot) + + # Verify + self.assertEqual("bar", clone.get_value("foo")) + self.assertEqual("newcase", clone.get_value("CASE")) + self.assertEqual("newcase", clone.get_value("CASEBASEID")) + self.assertEqual(new_caseroot, clone.get_value("CASEROOT")) + self.assertEqual(os.path.join(new_caseroot, "run"), clone.get_value("RUNDIR")) + + +if __name__ == "__main__": + unittest.main() diff --git a/CIME/tests/test_unit_case_setup.py b/CIME/tests/test_unit_case_setup.py new file mode 100644 index 00000000000..fe5fa7308c1 --- /dev/null +++ b/CIME/tests/test_unit_case_setup.py @@ -0,0 +1,214 @@ +#!/usr/bin/env python3 + +import os +import unittest +import tempfile +import contextlib +from pathlib import Path +from unittest import mock + +from CIME.case import case_setup + + +@contextlib.contextmanager +def create_machines_dir(): + """Creates temp machines directory with fake content""" + with tempfile.TemporaryDirectory() as temp_path: + machines_path = os.path.join(temp_path, "machines") + cmake_path = os.path.join(machines_path, "cmake_macros") + Path(cmake_path).mkdir(parents=True) + Path(os.path.join(cmake_path, "Macros.cmake")).touch() + Path(os.path.join(cmake_path, "test.cmake")).touch() + + yield temp_path + + +@contextlib.contextmanager +def chdir(path): + old_path = os.getcwd() + os.chdir(path) + + try: + yield + finally: + os.chdir(old_path) + + +# pylint: disable=protected-access +class TestCaseSetup(unittest.TestCase): + @mock.patch("CIME.case.case_setup.copy_depends_files") + def test_create_macros_cmake(self, copy_depends_files): + machine_mock = mock.MagicMock() + machine_mock.get_machine_name.return_value = "test" + + # create context stack to cleanup after test + with contextlib.ExitStack() as stack: + root_path = stack.enter_context(create_machines_dir()) + case_path = stack.enter_context(tempfile.TemporaryDirectory()) + + machines_path = os.path.join(root_path, "machines") + type(machine_mock).machines_dir = mock.PropertyMock( + return_value=machines_path + ) + + # make sure we're calling everything from within the case root + stack.enter_context(chdir(case_path)) + + case_setup._create_macros_cmake( + case_path, + os.path.join(machines_path, "cmake_macros"), + machine_mock, + "gnu-test", + os.path.join(case_path, "cmake_macros"), + ) + + assert os.path.exists(os.path.join(case_path, "Macros.cmake")) + assert os.path.exists(os.path.join(case_path, "cmake_macros", "test.cmake")) + + 
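+ # Beyond copying macros into the case, _create_macros_cmake is expected to
+ # delegate Depends-file handling to copy_depends_files: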
copy_depends_files.assert_called_with( + "test", machines_path, case_path, "gnu-test" + ) + + @mock.patch("CIME.case.case_setup._create_macros_cmake") + def test_create_macros(self, _create_macros_cmake): + case_mock = mock.MagicMock() + + machine_mock = mock.MagicMock() + machine_mock.get_machine_name.return_value = "test" + + # create context stack to cleanup after test + with contextlib.ExitStack() as stack: + root_path = stack.enter_context(create_machines_dir()) + case_path = stack.enter_context(tempfile.TemporaryDirectory()) + + cmake_macros_path = os.path.join(root_path, "machines", "cmake_macros") + case_mock.get_value.return_value = cmake_macros_path + + machines_path = os.path.join(root_path, "machines") + type(machine_mock).machines_dir = mock.PropertyMock( + return_value=machines_path + ) + + # do not generate env_mach_specific.xml + Path(os.path.join(case_path, "env_mach_specific.xml")).touch() + + case_setup._create_macros( + case_mock, + machine_mock, + case_path, + "gnu-test", + "openmpi", + False, + "mct", + "LINUX", + ) + + case_mock.get_value.assert_any_call("CMAKE_MACROS_DIR") + + # make sure we're calling everything from within the case root + stack.enter_context(chdir(case_path)) + + _create_macros_cmake.assert_called_with( + case_path, + cmake_macros_path, + machine_mock, + "gnu-test", + os.path.join(case_path, "cmake_macros"), + ) + + def test_create_macros_copy_user(self): + case_mock = mock.MagicMock() + + machine_mock = mock.MagicMock() + machine_mock.get_machine_name.return_value = "test" + + # create context stack to cleanup after test + with contextlib.ExitStack() as stack: + root_path = stack.enter_context(create_machines_dir()) + case_path = stack.enter_context(tempfile.TemporaryDirectory()) + user_path = stack.enter_context(tempfile.TemporaryDirectory()) + + user_cime_path = Path(os.path.join(user_path, ".cime")) + user_cime_path.mkdir() + user_cmake = user_cime_path / "user.cmake" + user_cmake.touch() + + cmake_macros_path = os.path.join(root_path, "machines", "cmake_macros") + case_mock.get_value.return_value = cmake_macros_path + + machines_path = os.path.join(root_path, "machines") + type(machine_mock).machines_dir = mock.PropertyMock( + return_value=machines_path + ) + + # do not generate env_mach_specific.xml + Path(os.path.join(case_path, "env_mach_specific.xml")).touch() + + stack.enter_context(mock.patch.dict(os.environ, {"HOME": user_path})) + + # make sure we're calling everything from within the case root + stack.enter_context(chdir(case_path)) + + case_setup._create_macros( + case_mock, + machine_mock, + case_path, + "gnu-test", + "openmpi", + False, + "mct", + "LINUX", + ) + + case_mock.get_value.assert_any_call("CMAKE_MACROS_DIR") + + assert os.path.exists(os.path.join(case_path, "cmake_macros", "user.cmake")) + + def test_create_macros_copy_extra(self): + case_mock = mock.MagicMock() + + machine_mock = mock.MagicMock() + machine_mock.get_machine_name.return_value = "test" + + # create context stack to cleanup after test + with contextlib.ExitStack() as stack: + root_path = stack.enter_context(create_machines_dir()) + case_path = stack.enter_context(tempfile.TemporaryDirectory()) + extra_path = stack.enter_context(tempfile.TemporaryDirectory()) + + extra_cmake_path = Path(extra_path, "cmake_macros") + extra_cmake_path.mkdir() + + extra_macros_path = extra_cmake_path / "extra.cmake" + extra_macros_path.touch() + + cmake_macros_path = os.path.join(root_path, "machines", "cmake_macros") + case_mock.get_value.side_effect = [cmake_macros_path, 
extra_path] + + machines_path = os.path.join(root_path, "machines") + type(machine_mock).machines_dir = mock.PropertyMock( + return_value=machines_path + ) + + # do not generate env_mach_specific.xml + Path(os.path.join(case_path, "env_mach_specific.xml")).touch() + + # make sure we're calling everything from within the case root + stack.enter_context(chdir(case_path)) + + case_setup._create_macros( + case_mock, + machine_mock, + case_path, + "gnu-test", + "openmpi", + False, + "mct", + "LINUX", + ) + + case_mock.get_value.assert_any_call("EXTRA_MACHDIR") + + assert os.path.exists( + os.path.join(case_path, "cmake_macros", "extra.cmake") + ) diff --git a/CIME/tests/test_unit_compare_test_results.py b/CIME/tests/test_unit_compare_test_results.py new file mode 100755 index 00000000000..bc298aaf9f9 --- /dev/null +++ b/CIME/tests/test_unit_compare_test_results.py @@ -0,0 +1,108 @@ +#!/usr/bin/env python3 + +""" +This module contains unit tests for compare_test_results +""" + +import unittest +import tempfile +import os +import shutil + +from CIME import utils +from CIME import compare_test_results +from CIME.test_status import * +from CIME.tests.case_fake import CaseFake + + +class TestCaseFake(unittest.TestCase): + def setUp(self): + self.tempdir = tempfile.mkdtemp() + self.test_root = os.path.join(self.tempdir, "tests") + self.baseline_root = os.path.join(self.test_root, "baselines") + + # TODO switch to unittest.mock + self._old_strftime = utils.time.strftime + utils.time.strftime = lambda *args: "2021-02-20" + + self._old_init = CaseFake.__init__ + CaseFake.__init__ = lambda x, y, *args: self._old_init( + x, y, create_case_root=False + ) + + self._old_case = compare_test_results.Case + compare_test_results.Case = CaseFake + + def tearDown(self): + utils.time.strftime = self._old_strftime + CaseFake.__init__ = self._old_init + compare_test_results.Case = self._old_case + + shutil.rmtree(self.tempdir, ignore_errors=True) + + def _compare_test_results(self, baseline, test_id, phases, **kwargs): + test_status_root = os.path.join(self.test_root, test_id) + os.makedirs(test_status_root) + + with TestStatus(test_status_root, "test") as status: + for x in phases: + status.set_status(x[0], x[1]) + + compare_test_results.compare_test_results( + baseline, self.baseline_root, self.test_root, "gnu", test_id, **kwargs + ) + + compare_log = os.path.join( + test_status_root, "compare.log.{}.2021-02-20".format(baseline) + ) + + self.assertTrue(os.path.exists(compare_log)) + + def test_namelists_only(self): + compare_test_results.compare_namelists = lambda *args: True + compare_test_results.compare_history = lambda *args: (True, "Detail comments") + + phases = [ + (SETUP_PHASE, "PASS"), + (RUN_PHASE, "PASS"), + ] + + self._compare_test_results( + "test1", "test-baseline", phases, namelists_only=True + ) + + def test_hist_only(self): + compare_test_results.compare_namelists = lambda *args: True + compare_test_results.compare_history = lambda *args: (True, "Detail comments") + + phases = [ + (SETUP_PHASE, "PASS"), + (RUN_PHASE, "PASS"), + ] + + self._compare_test_results("test1", "test-baseline", phases, hist_only=True) + + def test_failed_early(self): + compare_test_results.compare_namelists = lambda *args: True + compare_test_results.compare_history = lambda *args: (True, "Detail comments") + + phases = [ + (CREATE_NEWCASE_PHASE, "PASS"), + ] + + self._compare_test_results("test1", "test-baseline", phases) + + def test_baseline(self): + compare_test_results.compare_namelists = lambda *args: True + 
compare_test_results.compare_history = lambda *args: (True, "Detail comments") + + phases = [ + (SETUP_PHASE, "PASS"), + (RUN_PHASE, "PASS"), + ] + + self._compare_test_results("test1", "test-baseline", phases) + + +if __name__ == "__main__": + unittest.main() diff --git a/CIME/tests/test_unit_compare_two.py b/CIME/tests/test_unit_compare_two.py new file mode 100755 index 00000000000..d455fd8deb4 --- /dev/null +++ b/CIME/tests/test_unit_compare_two.py @@ -0,0 +1,626 @@ +#!/usr/bin/env python3 + +""" +This module contains unit tests of the core logic in SystemTestsCompareTwo. +""" + +# Ignore privacy concerns for unit tests, so that unit tests can access +# protected members of the system under test +# +# pylint:disable=protected-access + +import unittest +from collections import namedtuple +import functools +import os +import shutil +import tempfile + +from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo +import CIME.test_status as test_status +from CIME.tests.case_fake import CaseFake + +# ======================================================================== +# Structure for storing information about calls made to methods +# ======================================================================== + +# You can create a Call object to record a single call made to a method: +# +# Call(method, arguments) +# method (str): name of method +# arguments (dict): dictionary mapping argument names to values +# +# Example: +# If you want to record a call to foo(bar = 1, baz = 2): +# somecall = Call(method = 'foo', arguments = {'bar': 1, 'baz': 2}) +# Or simply: +# somecall = Call('foo', {'bar': 1, 'baz': 2}) +Call = namedtuple("Call", ["method", "arguments"]) + +# ======================================================================== +# Names of methods for which we want to record calls +# ======================================================================== + +# We use constants for these method names because, in some cases, a typo in a +# hard-coded string could cause a test to always pass, which would be a Bad +# Thing. +# +# For now the names of the constants match the strings they equate to, which +# match the actual method names. But it's fine if this doesn't remain the case +# moving forward (which is another reason to use constants rather than +# hard-coded strings in the tests). + +METHOD_case_one_custom_prerun_action = "_case_one_custom_prerun_action" +METHOD_case_one_custom_postrun_action = "_case_one_custom_postrun_action" +METHOD_case_two_custom_prerun_action = "_case_two_custom_prerun_action" +METHOD_case_two_custom_postrun_action = "_case_two_custom_postrun_action" +METHOD_link_to_case2_output = "_link_to_case2_output" +METHOD_run_indv = "_run_indv" + +# ======================================================================== +# Fake version of SystemTestsCompareTwo that overrides some functionality for +# the sake of unit testing +# ======================================================================== + +# A SystemTestsCompareTwoFake object can be controlled to fail at a given +# point. See the documentation in its __init__ method for details. +# +# It logs what stubbed-out methods have been called in its log attribute; this +# is a list of Call objects (see above for their definition). 
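The log-of-Calls pattern the fake relies on can be exercised on its own; below is a minimal sketch using only the standard library (the `Recorder` class and its `greet` method are hypothetical stand-ins, not part of CIME):

    from collections import namedtuple

    # Same shape as the Call namedtuple used by the fake test class.
    Call = namedtuple("Call", ["method", "arguments"])


    class Recorder:
        """Hypothetical class whose stubbed-out methods append to a log."""

        def __init__(self):
            self.log = []

        def greet(self, name):
            # Record the call instead of doing real work, mirroring how the
            # fake's stubbed-out methods behave.
            self.log.append(Call("greet", {"name": name}))


    recorder = Recorder()
    recorder.greet(name="world")
    # namedtuples and dicts compare by value, so an entire call log can be
    # checked against an expected list with a single equality assertion.
    assert recorder.log == [Call("greet", {"name": "world"})]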
+ + +class SystemTestsCompareTwoFake(SystemTestsCompareTwo): + def __init__( + self, + case1, + run_one_suffix="base", + run_two_suffix="test", + separate_builds=False, + multisubmit=False, + case2setup_raises_exception=False, + run_one_should_pass=True, + run_two_should_pass=True, + compare_should_pass=True, + ): + """ + Initialize a SystemTestsCompareTwoFake object + + The core test phases prior to RUN_PHASE are set to TEST_PASS_STATUS; + RUN_PHASE is left unset (as is any later phase) + + Args: + case1 (CaseFake): existing case + run_one_suffix (str, optional): Suffix used for first run. Defaults + to 'base'. Currently MUST be 'base'. + run_two_suffix (str, optional): Suffix used for the second run. Defaults to 'test'. + separate_builds (bool, optional): Passed to SystemTestsCompareTwo.__init__ + multisubmit (bool, optional): Passed to SystemTestsCompareTwo.__init__ + case2setup_raises_exception (bool, optional): If True, then the call + to _case_two_setup will raise an exception. Default is False. + run_one_should_pass (bool, optional): Whether the run_indv method should + pass for the first run. Default is True, meaning it will pass. + run_two_should_pass (bool, optional): Whether the run_indv method should + pass for the second run. Default is True, meaning it will pass. + compare_should_pass (bool, optional): Whether the comparison between the two + cases should pass. Default is True, meaning it will pass. + """ + + self._case2setup_raises_exception = case2setup_raises_exception + + # NOTE(wjs, 2016-08-03) Currently, due to limitations in the test + # infrastructure, run_one_suffix MUST be 'base'. However, I'm keeping it + # as an explicit argument to the constructor so that it's easy to relax + # this requirement later: To relax this assumption, remove the following + # assertion and add run_one_suffix as an argument to + # SystemTestsCompareTwo.__init__ + assert run_one_suffix == "base" + + SystemTestsCompareTwo.__init__( + self, + case1, + separate_builds=separate_builds, + run_two_suffix=run_two_suffix, + multisubmit=multisubmit, + ) + + # Need to tell test status that all phases prior to the run phase have + # passed, since this is checked in the run call (at least for the build + # phase status) + with self._test_status: + for phase in test_status.CORE_PHASES: + if phase == test_status.RUN_PHASE: + break + self._test_status.set_status(phase, test_status.TEST_PASS_STATUS) + + self.run_pass_caseroot = [] + if run_one_should_pass: + self.run_pass_caseroot.append(self._case1.get_value("CASEROOT")) + if run_two_should_pass: + self.run_pass_caseroot.append(self._case2.get_value("CASEROOT")) + + self.compare_should_pass = compare_should_pass + + self.log = [] + + # ------------------------------------------------------------------------ + # Stubs of methods called by SystemTestsCommon.__init__ that interact with + # the system or case object in ways we want to avoid here + # ------------------------------------------------------------------------ + + def _init_environment(self, caseroot): + pass + + def _init_locked_files(self, caseroot, expected): + pass + + def _init_case_setup(self): + pass + + # ------------------------------------------------------------------------ + # Fake implementations of methods that are typically provided by + # SystemTestsCommon + # ------------------------------------------------------------------------ + + def run_indv(self, suffix="base", st_archive=False, submit_resubmits=None): + """ + This fake implementation appends to the log and raises an 
exception if + it's supposed to + + Note that the Call object appended to the log has the current CASE name + in addition to the method arguments. (This is mainly to ensure that the + proper suffix is used for the proper case, but this extra check can be + removed if it's a maintenance problem.) + """ + caseroot = self._case.get_value("CASEROOT") + self.log.append(Call(METHOD_run_indv, {"suffix": suffix, "CASEROOT": caseroot})) + + # Determine whether we should raise an exception + # + # It's important that this check be based on some attribute of the + # self._case object, to ensure that the right case has been activated + # for this call to run_indv (e.g., to catch if we forgot to activate + # case2 before the second call to run_indv). + if caseroot not in self.run_pass_caseroot: + raise RuntimeError("caseroot not in run_pass_caseroot") + + def _do_compare_test(self, suffix1, suffix2, ignore_fieldlist_diffs=False): + """ + This fake implementation allows controlling whether compare_test + passes or fails + """ + return (self.compare_should_pass, "no comment") + + def _check_for_memleak(self): + pass + + def _st_archive_case_test(self): + pass + + # ------------------------------------------------------------------------ + # Fake implementations of methods that are typically provided by + # SystemTestsCompareTwo + # + # Since we're overriding these, their functionality is untested here! + # (Though note that _link_to_case2_output is tested elsewhere.) + # ------------------------------------------------------------------------ + + def _case_from_existing_caseroot(self, caseroot): + """ + Returns a CaseFake object instead of a Case object + """ + return CaseFake(caseroot, create_case_root=False) + + def _link_to_case2_output(self): + self.log.append(Call(METHOD_link_to_case2_output, {})) + + # ------------------------------------------------------------------------ + # Fake implementations of methods that are typically provided by the + # individual test + # + # The values set here are asserted against in some unit tests + # ------------------------------------------------------------------------ + + def _common_setup(self): + self._case.set_value("var_set_in_common_setup", "common_val") + + def _case_one_setup(self): + self._case.set_value("var_set_in_setup", "case1val") + + def _case_two_setup(self): + self._case.set_value("var_set_in_setup", "case2val") + if self._case2setup_raises_exception: + raise RuntimeError + + def _case_one_custom_prerun_action(self): + self.log.append(Call(METHOD_case_one_custom_prerun_action, {})) + + def _case_one_custom_postrun_action(self): + self.log.append(Call(METHOD_case_one_custom_postrun_action, {})) + + def _case_two_custom_prerun_action(self): + self.log.append(Call(METHOD_case_two_custom_prerun_action, {})) + + def _case_two_custom_postrun_action(self): + self.log.append(Call(METHOD_case_two_custom_postrun_action, {})) + + +# ======================================================================== +# Test class itself +# ======================================================================== + + +class TestSystemTestsCompareTwo(unittest.TestCase): + def setUp(self): + self.original_wd = os.getcwd() + # create a sandbox in which case directories can be created + self.tempdir = tempfile.mkdtemp() + + def tearDown(self): + # Some tests trigger a chdir call in the SUT; make sure we return to the + # original directory at the end of the test + os.chdir(self.original_wd) + + shutil.rmtree(self.tempdir, ignore_errors=True) + + def get_caseroots(self, 
casename="mytest"): + """ + Returns a tuple (case1root, case2root) + """ + case1root = os.path.join(self.tempdir, casename) + case2root = os.path.join(case1root, "case2", casename) + return case1root, case2root + + def get_compare_phase_name(self, mytest): + """ + Returns a string giving the compare phase name for this test + """ + run_one_suffix = mytest._run_one_suffix + run_two_suffix = mytest._run_two_suffix + compare_phase_name = "{}_{}_{}".format( + test_status.COMPARE_PHASE, run_one_suffix, run_two_suffix + ) + return compare_phase_name + + def test_setup(self): + # Ensure that test setup properly sets up case 1 and case 2 + + # Setup + case1root = os.path.join(self.tempdir, "case1") + case1 = CaseFake(case1root) + case1.set_value("var_preset", "preset_value") + + # Exercise + mytest = SystemTestsCompareTwoFake(case1) + + # Verify + # Make sure that pre-existing values in case1 are copied to case2 (via + # clone) + self.assertEqual("preset_value", mytest._case2.get_value("var_preset")) + + # Make sure that _common_setup is called for both + self.assertEqual( + "common_val", mytest._case1.get_value("var_set_in_common_setup") + ) + self.assertEqual( + "common_val", mytest._case2.get_value("var_set_in_common_setup") + ) + + # Make sure that _case_one_setup and _case_two_setup are called + # appropriately + self.assertEqual("case1val", mytest._case1.get_value("var_set_in_setup")) + self.assertEqual("case2val", mytest._case2.get_value("var_set_in_setup")) + + def test_setup_separate_builds_sharedlibroot(self): + # If we're using separate_builds, the two cases should still use + # the same sharedlibroot + + # Setup + case1root, _ = self.get_caseroots() + case1 = CaseFake(case1root) + case1.set_value("SHAREDLIBROOT", os.path.join(case1root, "sharedlibroot")) + + # Exercise + mytest = SystemTestsCompareTwoFake(case1, separate_builds=True) + + # Verify + self.assertEqual( + case1.get_value("SHAREDLIBROOT"), mytest._case2.get_value("SHAREDLIBROOT") + ) + + def test_setup_case2_exists(self): + # If case2 already exists, then setup code should not be called + + # Setup + case1root = os.path.join(self.tempdir, "case1") + case1 = CaseFake(case1root) + os.makedirs(os.path.join(case1root, "case2", "case1")) + + # Exercise + mytest = SystemTestsCompareTwoFake(case1, run_two_suffix="test") + + # Verify: + + # Make sure that case2 object is set (i.e., that it doesn't remain None) + self.assertEqual("case1", mytest._case2.get_value("CASE")) + + # Variables set in various setup methods should not be set + # (In the real world - i.e., outside of this unit testing fakery - these + # values would be set when the Case objects are created.) + self.assertIsNone(mytest._case1.get_value("var_set_in_common_setup")) + self.assertIsNone(mytest._case2.get_value("var_set_in_common_setup")) + self.assertIsNone(mytest._case1.get_value("var_set_in_setup")) + self.assertIsNone(mytest._case2.get_value("var_set_in_setup")) + + def test_setup_error(self): + # If there is an error in setup, an exception should be raised and the + # case2 directory should be removed + + # Setup + case1root = os.path.join(self.tempdir, "case1") + case1 = CaseFake(case1root) + + # Exercise + with self.assertRaises(Exception): + SystemTestsCompareTwoFake( + case1, run_two_suffix="test", case2setup_raises_exception=True + ) + + # Verify + self.assertFalse(os.path.exists(os.path.join(case1root, "case1.test"))) + + def test_run_phase_passes(self): + # Make sure the run phase behaves properly when all runs succeed. 
+ + # Setup + case1root = os.path.join(self.tempdir, "case1") + case1 = CaseFake(case1root) + mytest = SystemTestsCompareTwoFake(case1) + + # Exercise + mytest.run() + + # Verify + self.assertEqual( + test_status.TEST_PASS_STATUS, + mytest._test_status.get_status(test_status.RUN_PHASE), + ) + + def test_run_phase_internal_calls(self): + # Make sure that the correct calls are made to methods stubbed out by + # SystemTestsCompareTwoFake (when runs succeed) + # + # The point of this is: A number of methods called from the run_phase + # method are stubbed out in the Fake test implementation, because their + # actions are awkward in these unit tests. But we still want to make + # sure that those methods actually got called correctly. + + # Setup + run_one_suffix = "base" + run_two_suffix = "run2" + case1root, case2root = self.get_caseroots() + case1 = CaseFake(case1root) + mytest = SystemTestsCompareTwoFake( + case1, run_one_suffix=run_one_suffix, run_two_suffix=run_two_suffix + ) + + # Exercise + mytest.run() + + # Verify + expected_calls = [ + Call(METHOD_case_one_custom_prerun_action, {}), + Call(METHOD_run_indv, {"suffix": run_one_suffix, "CASEROOT": case1root}), + Call(METHOD_case_one_custom_postrun_action, {}), + Call(METHOD_case_two_custom_prerun_action, {}), + Call(METHOD_run_indv, {"suffix": run_two_suffix, "CASEROOT": case2root}), + Call(METHOD_case_two_custom_postrun_action, {}), + Call(METHOD_link_to_case2_output, {}), + ] + self.assertEqual(expected_calls, mytest.log) + + def test_run_phase_internal_calls_multisubmit_phase1(self): + # Make sure that the correct calls are made to methods stubbed out by + # SystemTestsCompareTwoFake (when runs succeed), when we have a + # multi-submit test, in the first phase + + # Setup + run_one_suffix = "base" + run_two_suffix = "run2" + case1root, _ = self.get_caseroots() + case1 = CaseFake(case1root) + mytest = SystemTestsCompareTwoFake( + case1=case1, + run_one_suffix=run_one_suffix, + run_two_suffix=run_two_suffix, + multisubmit=True, + ) + # RESUBMIT=1 signals first phase + case1.set_value("RESUBMIT", 1) + + # Exercise + mytest.run() + + # Verify + expected_calls = [ + Call(METHOD_case_one_custom_prerun_action, {}), + Call(METHOD_run_indv, {"suffix": run_one_suffix, "CASEROOT": case1root}), + Call(METHOD_case_one_custom_postrun_action, {}), + ] + self.assertEqual(expected_calls, mytest.log) + + # Also verify that comparison is NOT called: + compare_phase_name = self.get_compare_phase_name(mytest) + self.assertEqual( + test_status.TEST_PEND_STATUS, + mytest._test_status.get_status(compare_phase_name), + ) + + def test_run_phase_internal_calls_multisubmit_phase2(self): + # Make sure that the correct calls are made to methods stubbed out by + # SystemTestsCompareTwoFake (when runs succeed), when we have a + # multi-submit test, in the second phase + + # Setup + run_one_suffix = "base" + run_two_suffix = "run2" + case1root, case2root = self.get_caseroots() + case1 = CaseFake(case1root) + mytest = SystemTestsCompareTwoFake( + case1=case1, + run_one_suffix=run_one_suffix, + run_two_suffix=run_two_suffix, + multisubmit=True, + compare_should_pass=True, + ) + # RESUBMIT=0 signals second phase + case1.set_value("RESUBMIT", 0) + + # Exercise + mytest.run() + + # Verify + expected_calls = [ + Call(METHOD_case_two_custom_prerun_action, {}), + Call(METHOD_run_indv, {"suffix": run_two_suffix, "CASEROOT": case2root}), + Call(METHOD_case_two_custom_postrun_action, {}), + Call(METHOD_link_to_case2_output, {}), + ] + self.assertEqual(expected_calls, 
mytest.log) + + # Also verify that comparison is called: + compare_phase_name = self.get_compare_phase_name(mytest) + self.assertEqual( + test_status.TEST_PASS_STATUS, + mytest._test_status.get_status(compare_phase_name), + ) + + def test_internal_calls_multisubmit_failed_state(self): + run_one_suffix = "base" + run_two_suffix = "run2" + case1root, _ = self.get_caseroots() + case1 = CaseFake(case1root) + + def _set_initial_test_values(x): + x.set_value("RESUBMIT", 1) + + case1.set_initial_test_values = functools.partial( + _set_initial_test_values, case1 + ) + + # Standard first phase + case1.set_value("IS_FIRST_RUN", True) + case1.set_value("RESUBMIT", 1) + + mytest = SystemTestsCompareTwoFake( + case1=case1, + run_one_suffix=run_one_suffix, + run_two_suffix=run_two_suffix, + multisubmit=True, + ) + + mytest.run() + + expected_calls = [ + Call(METHOD_case_one_custom_prerun_action, {}), + Call(METHOD_run_indv, {"CASEROOT": case1root, "suffix": "base"}), + Call(METHOD_case_one_custom_postrun_action, {}), + ] + + self.assertEqual(expected_calls, mytest.log) + + # Emulate a rerun and ensure that phase 1 still runs + case1.set_value("IS_FIRST_RUN", True) + case1.set_value("RESUBMIT", 0) + + # Reset the log + mytest.log = [] + + mytest.run() + + expected_calls = [ + Call(METHOD_case_one_custom_prerun_action, {}), + Call(METHOD_run_indv, {"CASEROOT": case1root, "suffix": "base"}), + Call(METHOD_case_one_custom_postrun_action, {}), + ] + + self.assertEqual(expected_calls, mytest.log) + + def test_run1_fails(self): + # Make sure that a failure in run1 is reported correctly + + # Setup + case1root = os.path.join(self.tempdir, "case1") + case1 = CaseFake(case1root) + mytest = SystemTestsCompareTwoFake(case1, run_one_should_pass=False) + + # Exercise + try: + mytest.run() + except Exception: + pass + + # Verify + self.assertEqual( + test_status.TEST_FAIL_STATUS, + mytest._test_status.get_status(test_status.RUN_PHASE), + ) + + def test_run2_fails(self): + # Make sure that a failure in run2 is reported correctly + + # Setup + case1root = os.path.join(self.tempdir, "case1") + case1 = CaseFake(case1root) + mytest = SystemTestsCompareTwoFake(case1, run_two_should_pass=False) + + # Exercise + try: + mytest.run() + except Exception: + pass + + # Verify + self.assertEqual( + test_status.TEST_FAIL_STATUS, + mytest._test_status.get_status(test_status.RUN_PHASE), + ) + + def test_compare_passes(self): + # Make sure that a pass in the comparison is reported correctly + + # Setup + case1root = os.path.join(self.tempdir, "case1") + case1 = CaseFake(case1root) + mytest = SystemTestsCompareTwoFake(case1, compare_should_pass=True) + + # Exercise + mytest.run() + + # Verify + compare_phase_name = self.get_compare_phase_name(mytest) + self.assertEqual( + test_status.TEST_PASS_STATUS, + mytest._test_status.get_status(compare_phase_name), + ) + + def test_compare_fails(self): + # Make sure that a failure in the comparison is reported correctly + + # Setup + case1root = os.path.join(self.tempdir, "case1") + case1 = CaseFake(case1root) + mytest = SystemTestsCompareTwoFake(case1, compare_should_pass=False) + + # Exercise + mytest.run() + + # Verify + compare_phase_name = self.get_compare_phase_name(mytest) + self.assertEqual( + test_status.TEST_FAIL_STATUS, + mytest._test_status.get_status(compare_phase_name), + ) + + +if __name__ == "__main__": + unittest.main(verbosity=2, catchbreak=True) diff --git a/CIME/tests/test_unit_cs_status.py b/CIME/tests/test_unit_cs_status.py new file mode 100755 index 00000000000..38668ded879 ---
/dev/null +++ b/CIME/tests/test_unit_cs_status.py @@ -0,0 +1,306 @@ +#!/usr/bin/env python3 + +import unittest +import shutil +import os +import tempfile +import re +import CIME.six +import CIME.six_additions +from CIME.cs_status import cs_status +from CIME import test_status +from CIME.tests.custom_assertions_test_status import CustomAssertionsTestStatus + + +class TestCsStatus(CustomAssertionsTestStatus): + + # ------------------------------------------------------------------------ + # Test helper functions + # ------------------------------------------------------------------------ + + # An arbitrary phase we can use when we want to work with a non-core phase + _NON_CORE_PHASE = test_status.MEMLEAK_PHASE + + # Another arbitrary phase if we need two different non-core phases + _NON_CORE_PHASE2 = test_status.BASELINE_PHASE + + def setUp(self): + self._testroot = tempfile.mkdtemp() + self._output = CIME.six.StringIO() + + def tearDown(self): + self._output.close() + shutil.rmtree(self._testroot, ignore_errors=True) + + def create_test_dir(self, test_dir): + """Creates the given test directory under testroot. + + Returns the full path to the created test directory. + """ + fullpath = os.path.join(self._testroot, test_dir) + os.makedirs(fullpath) + return fullpath + + @staticmethod + def create_test_status_core_passes(test_dir_path, test_name): + """Creates a TestStatus file in the given path, with PASS status + for all core phases""" + with test_status.TestStatus(test_dir=test_dir_path, test_name=test_name) as ts: + for phase in test_status.CORE_PHASES: + ts.set_status(phase, test_status.TEST_PASS_STATUS) + + def set_last_core_phase_to_fail(self, test_dir_path, test_name): + """Sets the last core phase to FAIL + + Returns the name of this phase""" + fail_phase = test_status.CORE_PHASES[-1] + self.set_phase_to_status( + test_dir_path=test_dir_path, + test_name=test_name, + phase=fail_phase, + status=test_status.TEST_FAIL_STATUS, + ) + return fail_phase + + @staticmethod + def set_phase_to_status(test_dir_path, test_name, phase, status): + """Sets the given phase to the given status for this test""" + with test_status.TestStatus(test_dir=test_dir_path, test_name=test_name) as ts: + ts.set_status(phase, status) + + # ------------------------------------------------------------------------ + # Begin actual tests + # ------------------------------------------------------------------------ + + def test_single_test(self): + """cs_status for a single test should include some minimal expected output""" + test_name = "my.test.name" + test_dir = "my.test.name.testid" + test_dir_path = self.create_test_dir(test_dir) + self.create_test_status_core_passes(test_dir_path, test_name) + cs_status([os.path.join(test_dir_path, "TestStatus")], out=self._output) + self.assert_core_phases(self._output.getvalue(), test_name, fails=[]) + + def test_two_tests(self): + """cs_status for two tests (one with a FAIL) should include some minimal expected output""" + test_name1 = "my.test.name1" + test_name2 = "my.test.name2" + test_dir1 = test_name1 + ".testid" + test_dir2 = test_name2 + ".testid" + test_dir_path1 = self.create_test_dir(test_dir1) + test_dir_path2 = self.create_test_dir(test_dir2) + self.create_test_status_core_passes(test_dir_path1, test_name1) + self.create_test_status_core_passes(test_dir_path2, test_name2) + test2_fail_phase = self.set_last_core_phase_to_fail(test_dir_path2, test_name2) + cs_status( + [ + os.path.join(test_dir_path1, "TestStatus"), + os.path.join(test_dir_path2, "TestStatus"), + ], + 
out=self._output, + ) + self.assert_core_phases(self._output.getvalue(), test_name1, fails=[]) + self.assert_core_phases( + self._output.getvalue(), test_name2, fails=[test2_fail_phase] + ) + + def test_fails_only(self): + """With fails_only flag, only fails and pends should appear in the output""" + test_name = "my.test.name" + test_dir = "my.test.name.testid" + test_dir_path = self.create_test_dir(test_dir) + self.create_test_status_core_passes(test_dir_path, test_name) + fail_phase = self.set_last_core_phase_to_fail(test_dir_path, test_name) + pend_phase = self._NON_CORE_PHASE + self.set_phase_to_status( + test_dir_path, + test_name, + phase=pend_phase, + status=test_status.TEST_PEND_STATUS, + ) + cs_status( + [os.path.join(test_dir_path, "TestStatus")], + fails_only=True, + out=self._output, + ) + self.assert_status_of_phase( + output=self._output.getvalue(), + status=test_status.TEST_FAIL_STATUS, + phase=fail_phase, + test_name=test_name, + ) + self.assert_status_of_phase( + output=self._output.getvalue(), + status=test_status.TEST_PEND_STATUS, + phase=pend_phase, + test_name=test_name, + ) + for phase in test_status.CORE_PHASES: + if phase != fail_phase: + self.assert_phase_absent( + output=self._output.getvalue(), phase=phase, test_name=test_name + ) + CIME.six_additions.assertNotRegex(self, self._output.getvalue(), r"Overall:") + + def test_count_fails(self): + """Test the count of fails with three tests + + For the first phase of interest: the first test FAILs, the second PASSes, + the third FAILs; the count should be 2, and this phase should not appear + individually for each test. + + For the second phase of interest: the first test PASSes, the second PASSes, + the third FAILs; the count should be 1, and this phase should not appear + individually for each test.
+ """ + # Note that this test does NOT cover: + # - combining count_fails_phase_list with fails_only: currently, + # this wouldn't cover any additional code/logic + # - ensuring that PENDs are also counted: currently, this + # wouldn't cover any additional code/logic + phase_of_interest1 = self._NON_CORE_PHASE + phase_of_interest2 = self._NON_CORE_PHASE2 + statuses1 = [ + test_status.TEST_FAIL_STATUS, + test_status.TEST_PASS_STATUS, + test_status.TEST_FAIL_STATUS, + ] + statuses2 = [ + test_status.TEST_PASS_STATUS, + test_status.TEST_PASS_STATUS, + test_status.TEST_FAIL_STATUS, + ] + test_paths = [] + test_names = [] + for testnum in range(3): + test_name = "my.test.name" + str(testnum) + test_names.append(test_name) + test_dir = test_name + ".testid" + test_dir_path = self.create_test_dir(test_dir) + self.create_test_status_core_passes(test_dir_path, test_name) + self.set_phase_to_status( + test_dir_path, + test_name, + phase=phase_of_interest1, + status=statuses1[testnum], + ) + self.set_phase_to_status( + test_dir_path, + test_name, + phase=phase_of_interest2, + status=statuses2[testnum], + ) + test_paths.append(os.path.join(test_dir_path, "TestStatus")) + + cs_status( + test_paths, + count_fails_phase_list=[phase_of_interest1, phase_of_interest2], + out=self._output, + ) + + for testnum in range(3): +<<<<<<< HEAD:CIME/tests/test_cs_status.py + self.assert_phase_absent(output=self._output.getvalue(), + phase=phase_of_interest1, + test_name=test_names[testnum]) + self.assert_phase_absent(output=self._output.getvalue(), + phase=phase_of_interest2, + test_name=test_names[testnum]) + count_regex1 = r'{} +non-passes: +2'.format(re.escape(phase_of_interest1)) + CIME.six.assertRegex(self, self._output.getvalue(), count_regex1) + count_regex2 = r'{} +non-passes: +1'.format(re.escape(phase_of_interest2)) + CIME.six.assertRegex(self, self._output.getvalue(), count_regex2) +======= + self.assert_phase_absent( + output=self._output.getvalue(), + phase=phase_of_interest1, + test_name=test_names[testnum], + ) + self.assert_phase_absent( + output=self._output.getvalue(), + phase=phase_of_interest2, + test_name=test_names[testnum], + ) + count_regex1 = r"{} +non-passes: +2".format(re.escape(phase_of_interest1)) + six.assertRegex(self, self._output.getvalue(), count_regex1) + count_regex2 = r"{} +non-passes: +1".format(re.escape(phase_of_interest2)) + six.assertRegex(self, self._output.getvalue(), count_regex2) +>>>>>>> master:CIME/tests/test_unit_cs_status.py + + def test_expected_fails(self): + """With the expected_fails_file flag, expected failures should be flagged as such""" + test_name1 = "my.test.name1" + test_name2 = "my.test.name2" + test_dir1 = test_name1 + ".testid" + test_dir2 = test_name2 + ".testid" + test_dir_path1 = self.create_test_dir(test_dir1) + test_dir_path2 = self.create_test_dir(test_dir2) + self.create_test_status_core_passes(test_dir_path1, test_name1) + self.create_test_status_core_passes(test_dir_path2, test_name2) + test1_fail_phase = self.set_last_core_phase_to_fail(test_dir_path1, test_name1) + test2_fail_phase = self.set_last_core_phase_to_fail(test_dir_path2, test_name2) + + # One phase is labeled as an expected failure for test1, nothing for test2: + expected_fails_contents = """ + + + + {fail_status} + + + +""".format( + test_name1=test_name1, + test1_fail_phase=test1_fail_phase, + fail_status=test_status.TEST_FAIL_STATUS, + ) + expected_fails_filepath = os.path.join(self._testroot, "ExpectedFails.xml") + with open(expected_fails_filepath, "w") as expected_fails_file: 
+ expected_fails_file.write(expected_fails_contents) + + cs_status( + [ + os.path.join(test_dir_path1, "TestStatus"), + os.path.join(test_dir_path2, "TestStatus"), + ], + expected_fails_filepath=expected_fails_filepath, + out=self._output, + ) + + # Both test1 and test2 should have a failure for one phase, but this should be + # marked as expected only for test1. + self.assert_core_phases( + self._output.getvalue(), test_name1, fails=[test1_fail_phase] + ) + self.assert_status_of_phase( + self._output.getvalue(), + test_status.TEST_FAIL_STATUS, + test1_fail_phase, + test_name1, + xfail="expected", + ) + self.assert_core_phases( + self._output.getvalue(), test_name2, fails=[test2_fail_phase] + ) + self.assert_status_of_phase( + self._output.getvalue(), + test_status.TEST_FAIL_STATUS, + test2_fail_phase, + test_name2, + xfail="no", + ) + # Make sure that no other phases are mistakenly labeled as expected failures: + self.assert_num_expected_unexpected_fails( + self._output.getvalue(), num_expected=1, num_unexpected=0 + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/CIME/tests/test_unit_custom_assertions_test_status.py b/CIME/tests/test_unit_custom_assertions_test_status.py new file mode 100755 index 00000000000..99e6cb05d04 --- /dev/null +++ b/CIME/tests/test_unit_custom_assertions_test_status.py @@ -0,0 +1,260 @@ +#!/usr/bin/env python3 + +""" +This module contains unit tests of CustomAssertionsTestStatus +""" + +import unittest +from CIME import test_status +from CIME.tests.custom_assertions_test_status import CustomAssertionsTestStatus + + +class TestCustomAssertions(CustomAssertionsTestStatus): + + _UNEXPECTED_COMMENT = test_status.TEST_UNEXPECTED_FAILURE_COMMENT_START + " blah)" + + @staticmethod + def output_line(status, test_name, phase, extra=""): + output = status + " " + test_name + " " + phase + if extra: + output += " " + extra + output += "\n" + return output + + def test_assertPhaseAbsent_passes(self): + """assert_phase_absent should pass when the phase is absent for + the given test_name""" + test_name1 = "my.test.name1" + test_name2 = "my.test.name2" + output = self.output_line("PASS", test_name1, "PHASE1") + output += self.output_line("PASS", test_name2, "PHASE2") + + self.assert_phase_absent(output, "PHASE2", test_name1) + self.assert_phase_absent(output, "PHASE1", test_name2) + + def test_assertPhaseAbsent_fails(self): + """assert_phase_absent should fail when the phase is present for + the given test_name""" + test_name = "my.test.name" + output = self.output_line("PASS", test_name, "PHASE1") + + with self.assertRaises(AssertionError): + self.assert_phase_absent(output, "PHASE1", test_name) + + def test_assertCorePhases_passes(self): + """assert_core_phases passes when it should""" + output = "" + fails = [test_status.CORE_PHASES[1]] + test_name = "my.test.name" + for phase in test_status.CORE_PHASES: + if phase in fails: + status = test_status.TEST_FAIL_STATUS + else: + status = test_status.TEST_PASS_STATUS + output = output + self.output_line(status, test_name, phase) + + self.assert_core_phases(output, test_name, fails) + + def test_assertCorePhases_missingPhase_fails(self): + """assert_core_phases fails if there is a missing phase""" + output = "" + test_name = "my.test.name" + for phase in test_status.CORE_PHASES: + if phase != test_status.CORE_PHASES[1]: + output = output + self.output_line( + test_status.TEST_PASS_STATUS, test_name, phase + ) + + with self.assertRaises(AssertionError): + self.assert_core_phases(output, test_name, fails=[]) + + def 
test_assertCorePhases_wrongStatus_fails(self): + """assert_core_phases fails if a phase has the wrong status""" + output = "" + test_name = "my.test.name" + for phase in test_status.CORE_PHASES: + output = output + self.output_line( + test_status.TEST_PASS_STATUS, test_name, phase + ) + + with self.assertRaises(AssertionError): + self.assert_core_phases( + output, test_name, fails=[test_status.CORE_PHASES[1]] + ) + + def test_assertCorePhases_wrongName_fails(self): + """assert_core_phases fails if the test name is wrong""" + output = "" + test_name = "my.test.name" + for phase in test_status.CORE_PHASES: + output = output + self.output_line( + test_status.TEST_PASS_STATUS, test_name, phase + ) + + with self.assertRaises(AssertionError): + self.assert_core_phases(output, "my.test", fails=[]) + + # Note: Basic functionality of assert_status_of_phase is covered sufficiently via + # tests of assert_core_phases. Below we just cover some other aspects that aren't + # already covered. + + def test_assertStatusOfPhase_withExtra_passes(self): + """Make sure assert_status_of_phase passes when there is some extra text at the + end of the line""" + test_name = "my.test.name" + output = self.output_line( + test_status.TEST_FAIL_STATUS, + test_name, + test_status.CORE_PHASES[0], + extra=test_status.TEST_EXPECTED_FAILURE_COMMENT, + ) + self.assert_status_of_phase( + output, test_status.TEST_FAIL_STATUS, test_status.CORE_PHASES[0], test_name + ) + + def test_assertStatusOfPhase_xfailNo_passes(self): + """assert_status_of_phase should pass when xfail='no' and there is no + EXPECTED/UNEXPECTED on the line""" + test_name = "my.test.name" + output = self.output_line( + test_status.TEST_FAIL_STATUS, test_name, test_status.CORE_PHASES[0] + ) + self.assert_status_of_phase( + output, + test_status.TEST_FAIL_STATUS, + test_status.CORE_PHASES[0], + test_name, + xfail="no", + ) + # While we're at it, also test assert_num_expected_unexpected_fails + self.assert_num_expected_unexpected_fails( + output, num_expected=0, num_unexpected=0 + ) + + def test_assertStatusOfPhase_xfailNo_fails(self): + """assert_status_of_phase should fail when xfail='no' but the line contains the + EXPECTED comment""" + test_name = "my.test.name" + output = self.output_line( + test_status.TEST_FAIL_STATUS, + test_name, + test_status.CORE_PHASES[0], + extra=test_status.TEST_EXPECTED_FAILURE_COMMENT, + ) + + with self.assertRaises(AssertionError): + self.assert_status_of_phase( + output, + test_status.TEST_FAIL_STATUS, + test_status.CORE_PHASES[0], + test_name, + xfail="no", + ) + # While we're at it, also test assert_num_expected_unexpected_fails + self.assert_num_expected_unexpected_fails( + output, num_expected=1, num_unexpected=0 + ) + + def test_assertStatusOfPhase_xfailExpected_passes(self): + """assert_status_of_phase should pass when xfail='expected' and the line contains + the EXPECTED comment""" + test_name = "my.test.name" + output = self.output_line( + test_status.TEST_FAIL_STATUS, + test_name, + test_status.CORE_PHASES[0], + extra=test_status.TEST_EXPECTED_FAILURE_COMMENT, + ) + self.assert_status_of_phase( + output, + test_status.TEST_FAIL_STATUS, + test_status.CORE_PHASES[0], + test_name, + xfail="expected", + ) + # While we're at it, also test assert_num_expected_unexpected_fails + self.assert_num_expected_unexpected_fails( + output, num_expected=1, num_unexpected=0 + ) + + def test_assertStatusOfPhase_xfailExpected_fails(self): + """assert_status_of_phase should fail when xfail='expected' but the line does NOT contain + the 
EXPECTED comment""" + test_name = "my.test.name" + # Note that the line contains the UNEXPECTED comment, but not the EXPECTED comment + # (we assume that if the assertion correctly fails in this case, then it will also + # correctly handle the case where neither the EXPECTED nor UNEXPECTED comment is + # present). + output = self.output_line( + test_status.TEST_FAIL_STATUS, + test_name, + test_status.CORE_PHASES[0], + extra=self._UNEXPECTED_COMMENT, + ) + + with self.assertRaises(AssertionError): + self.assert_status_of_phase( + output, + test_status.TEST_FAIL_STATUS, + test_status.CORE_PHASES[0], + test_name, + xfail="expected", + ) + # While we're at it, also test assert_num_expected_unexpected_fails + self.assert_num_expected_unexpected_fails( + output, num_expected=0, num_unexpected=1 + ) + + def test_assertStatusOfPhase_xfailUnexpected_passes(self): + """assert_status_of_phase should pass when xfail='unexpected' and the line contains + the UNEXPECTED comment""" + test_name = "my.test.name" + output = self.output_line( + test_status.TEST_FAIL_STATUS, + test_name, + test_status.CORE_PHASES[0], + extra=self._UNEXPECTED_COMMENT, + ) + self.assert_status_of_phase( + output, + test_status.TEST_FAIL_STATUS, + test_status.CORE_PHASES[0], + test_name, + xfail="unexpected", + ) + # While we're at it, also test assert_num_expected_unexpected_fails + self.assert_num_expected_unexpected_fails( + output, num_expected=0, num_unexpected=1 + ) + + def test_assertStatusOfPhase_xfailUnexpected_fails(self): + """assert_status_of_phase should fail when xfail='unexpected' but the line does NOT + contain the UNEXPECTED comment""" + test_name = "my.test.name" + # Note that the line contains the EXPECTED comment, but not the UNEXPECTED comment + # (we assume that if the assertion correctly fails in this case, then it will also + # correctly handle the case where neither the EXPECTED nor UNEXPECTED comment is + # present). 
+ output = self.output_line( + test_status.TEST_FAIL_STATUS, + test_name, + test_status.CORE_PHASES[0], + extra=test_status.TEST_EXPECTED_FAILURE_COMMENT, + ) + + with self.assertRaises(AssertionError): + self.assert_status_of_phase( + output, + test_status.TEST_FAIL_STATUS, + test_status.CORE_PHASES[0], + test_name, + xfail="unexpected", + ) + # While we're at it, also test assert_num_expected_unexpected_fails + self.assert_num_expected_unexpected_fails( + output, num_expected=1, num_unexpected=0 + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/CIME/tests/test_unit_expected_fails_file.py b/CIME/tests/test_unit_expected_fails_file.py new file mode 100755 index 00000000000..e1094ea7d25 --- /dev/null +++ b/CIME/tests/test_unit_expected_fails_file.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python3 + +import unittest +import os +import shutil +import tempfile +import CIME.six +from CIME.XML.expected_fails_file import ExpectedFailsFile +from CIME.utils import CIMEError +from CIME.expected_fails import ExpectedFails + + +class TestExpectedFailsFile(unittest.TestCase): + def setUp(self): + self._workdir = tempfile.mkdtemp() + self._xml_filepath = os.path.join(self._workdir, "expected_fails.xml") + + def tearDown(self): + shutil.rmtree(self._workdir) + + def test_basic(self): + """Basic test of the parsing of an expected fails file""" + contents = """<?xml version="1.0"?> +<expectedFails version="1.1"> + <test name="my.test.1"> + <phase name="RUN"> + <status>FAIL</status> + <issue>#404</issue> + </phase> + <phase name="COMPARE_base_rest"> + <status>PEND</status> + <issue>#404</issue> + <comment>Because of the RUN failure, this phase is listed as PEND</comment> + </phase> + </test> + <test name="my.test.2"> + <phase name="GENERATE"> + <status>FAIL</status> + <issue>ESMCI/cime#2917</issue> + </phase> + <phase name="BASELINE"> + <status>FAIL</status> + <issue>ESMCI/cime#2917</issue> + </phase> + </test> +</expectedFails> +""" + with open(self._xml_filepath, "w") as xml_file: + xml_file.write(contents) + expected_fails_file = ExpectedFailsFile(self._xml_filepath) + xfails = expected_fails_file.get_expected_fails() + + expected_test1 = ExpectedFails() + expected_test1.add_failure("RUN", "FAIL") + expected_test1.add_failure("COMPARE_base_rest", "PEND") + expected_test2 = ExpectedFails() + expected_test2.add_failure("GENERATE", "FAIL") + expected_test2.add_failure("BASELINE", "FAIL") + expected = {"my.test.1": expected_test1, "my.test.2": expected_test2} + + self.assertEqual(xfails, expected) + + def test_same_test_appears_twice(self): + """If the same test appears twice, its information should be appended. + + This is not the typical, expected layout of the file, but it should be handled + correctly in case the file is written this way. + """ + contents = """<?xml version="1.0"?> +<expectedFails version="1.1"> + <test name="my.test.1"> + <phase name="RUN"> + <status>FAIL</status> + <issue>#404</issue> + </phase> + </test> + <test name="my.test.1"> + <phase name="COMPARE_base_rest"> + <status>PEND</status> + <issue>#404</issue> + <comment>Because of the RUN failure, this phase is listed as PEND</comment> + </phase> + </test> +</expectedFails> +""" + with open(self._xml_filepath, "w") as xml_file: + xml_file.write(contents) + expected_fails_file = ExpectedFailsFile(self._xml_filepath) + xfails = expected_fails_file.get_expected_fails() + + expected_test1 = ExpectedFails() + expected_test1.add_failure("RUN", "FAIL") + expected_test1.add_failure("COMPARE_base_rest", "PEND") + expected = {"my.test.1": expected_test1} + + self.assertEqual(xfails, expected) + + def test_invalid_file(self): + """Given an invalid file, an exception should be raised in schema validation""" + + # This file is missing a <status> element in the <phase> block. + # + # It's important to have the expectedFails version number be greater than 1, + # because schema validation isn't done in cime for files with a version of 1.
+ contents = """ + + + + + + +""" + with open(self._xml_filepath, "w") as xml_file: + xml_file.write(contents) + + with CIME.six.assertRaisesRegex(self, CIMEError, "Schemas validity error"): + _ = ExpectedFailsFile(self._xml_filepath) + + +if __name__ == "__main__": + unittest.main() diff --git a/CIME/tests/test_unit_grids.py b/CIME/tests/test_unit_grids.py new file mode 100755 index 00000000000..8417177e83e --- /dev/null +++ b/CIME/tests/test_unit_grids.py @@ -0,0 +1,483 @@ +#!/usr/bin/env python3 + +""" +This module tests *some* functionality of CIME.XML.grids +""" + +# Ignore privacy concerns for unit tests, so that unit tests can access +# protected members of the system under test +# +# pylint:disable=protected-access + +# Also ignore too-long lines, since these are common in unit tests +# +# pylint:disable=line-too-long + +import unittest +import os +import shutil +import string +import tempfile +from CIME.XML.grids import Grids, _ComponentGrids, _add_grid_info, _strip_grid_from_name +from CIME.utils import CIMEError + + +class TestGrids(unittest.TestCase): + """Tests some functionality of CIME.XML.grids + + Note that much of the functionality of CIME.XML.grids is NOT covered here + """ + + _CONFIG_GRIDS_TEMPLATE = string.Template( + """ + + + + + + + + atm_default_grid + lnd_default_grid + ocnice_default_grid + rof_default_grid + glc_default_grid + wav_default_grid + null + + +$MODEL_GRID_ENTRIES + + + + + + 0 0 + unset + null is no grid: + + +$DOMAIN_ENTRIES + + + + ATM2OCN_FMAPNAME + OCN2ATM_FMAPNAME +$EXTRA_REQUIRED_GRIDMAPS + + + +$GRIDMAP_ENTRIES + + +""" + ) + + _MODEL_GRID_F09_G17 = """ + + 0.9x1.25 + 0.9x1.25 + gx1v7 + gx1v7 + +""" + + # For testing multiple GLC grids + _MODEL_GRID_F09_G17_3GLC = """ + + 0.9x1.25 + 0.9x1.25 + gx1v7 + ais8:gris4:lis12 + gx1v7 + +""" + + _DOMAIN_F09 = """ + + 288 192 + fv0.9x1.25_ESMFmesh.nc + 0.9x1.25 is FV 1-deg grid: + +""" + + _DOMAIN_G17 = """ + + 320 384 + gx1v7_ESMFmesh.nc + gx1v7 is displaced Greenland pole 1-deg grid with Caspian as a land feature: + +""" + + _DOMAIN_GRIS4 = """ + + 416 704 + greenland_4km_ESMFmesh.nc + 4-km Greenland grid + +""" + + _DOMAIN_AIS8 = """ + + 704 576 + antarctica_8km_ESMFmesh.nc + 8-km Antarctica grid + +""" + + _DOMAIN_LIS12 = """ + + 123 456 + laurentide_12km_ESMFmesh.nc + 12-km Laurentide grid + +""" + + _GRIDMAP_F09_G17 = """ + + + map_foo_TO_gx1v7_aave.nc + map_gx1v7_TO_foo_aave.nc + map_gx1v7_TO_foo_xxx.nc + + + + + map_fv0.9x1.25_TO_gx1v7_aave.nc + map_gx1v7_TO_fv0.9x1.25_aave.nc + + + + + map_fv0.9x1.25_TO_foo_aave.nc + map_foo_TO_fv0.9x1.25_aave.nc + map_foo_TO_fv0.9x1.25_xxx.nc + +""" + + _GRIDMAP_GRIS4_G17 = """ + + map_gris4_to_gx1v7_liq.nc + map_gris4_to_gx1v7_ice.nc + +""" + + _GRIDMAP_AIS8_G17 = """ + + map_ais8_to_gx1v7_liq.nc + map_ais8_to_gx1v7_ice.nc + +""" + + _GRIDMAP_LIS12_G17 = """ + + map_lis12_to_gx1v7_liq.nc + map_lis12_to_gx1v7_ice.nc + +""" + + def setUp(self): + self._workdir = tempfile.mkdtemp() + self._xml_filepath = os.path.join(self._workdir, "config_grids.xml") + + def tearDown(self): + shutil.rmtree(self._workdir) + + def _create_grids_xml( + self, + model_grid_entries, + domain_entries, + gridmap_entries, + extra_required_gridmaps="", + ): + grids_xml = self._CONFIG_GRIDS_TEMPLATE.substitute( + { + "MODEL_GRID_ENTRIES": model_grid_entries, + "DOMAIN_ENTRIES": domain_entries, + "EXTRA_REQUIRED_GRIDMAPS": extra_required_gridmaps, + "GRIDMAP_ENTRIES": gridmap_entries, + } + ) + with open(self._xml_filepath, "w", encoding="UTF-8") as xml_file: + xml_file.write(grids_xml) + 
+ def assert_grid_info_f09_g17(self, grid_info): + """Asserts that expected grid info is present and correct when using _MODEL_GRID_F09_G17""" + self.assertEqual(grid_info["ATM_NX"], 288) + self.assertEqual(grid_info["ATM_NY"], 192) + self.assertEqual(grid_info["ATM_GRID"], "0.9x1.25") + self.assertEqual(grid_info["ATM_DOMAIN_MESH"], "fv0.9x1.25_ESMFmesh.nc") + + self.assertEqual(grid_info["LND_NX"], 288) + self.assertEqual(grid_info["LND_NY"], 192) + self.assertEqual(grid_info["LND_GRID"], "0.9x1.25") + self.assertEqual(grid_info["LND_DOMAIN_MESH"], "fv0.9x1.25_ESMFmesh.nc") + + self.assertEqual(grid_info["OCN_NX"], 320) + self.assertEqual(grid_info["OCN_NY"], 384) + self.assertEqual(grid_info["OCN_GRID"], "gx1v7") + self.assertEqual(grid_info["OCN_DOMAIN_MESH"], "gx1v7_ESMFmesh.nc") + + self.assertEqual(grid_info["ICE_NX"], 320) + self.assertEqual(grid_info["ICE_NY"], 384) + self.assertEqual(grid_info["ICE_GRID"], "gx1v7") + self.assertEqual(grid_info["ICE_DOMAIN_MESH"], "gx1v7_ESMFmesh.nc") + + self.assertEqual( + grid_info["ATM2OCN_FMAPNAME"], "map_fv0.9x1.25_TO_gx1v7_aave.nc" + ) + self.assertEqual( + grid_info["OCN2ATM_FMAPNAME"], "map_gx1v7_TO_fv0.9x1.25_aave.nc" + ) + self.assertFalse("OCN2ATM_SHOULDBEABSENT" in grid_info) + + def assert_grid_info_f09_g17_3glc(self, grid_info): + """Asserts that all domain info is present & correct for _MODEL_GRID_F09_G17_3GLC""" + self.assert_grid_info_f09_g17(grid_info) + + # Note that we don't assert GLC_NX and GLC_NY here: these are unused for this + # multi-grid case, so we don't care what arbitrary values they have. + self.assertEqual(grid_info["GLC_GRID"], "ais8:gris4:lis12") + self.assertEqual( + grid_info["GLC_DOMAIN_MESH"], + "antarctica_8km_ESMFmesh.nc:greenland_4km_ESMFmesh.nc:laurentide_12km_ESMFmesh.nc", + ) + self.assertEqual( + grid_info["GLC2OCN_LIQ_RMAPNAME"], + "map_ais8_to_gx1v7_liq.nc:map_gris4_to_gx1v7_liq.nc:map_lis12_to_gx1v7_liq.nc", + ) + self.assertEqual( + grid_info["GLC2OCN_ICE_RMAPNAME"], + "map_ais8_to_gx1v7_ice.nc:map_gris4_to_gx1v7_ice.nc:map_lis12_to_gx1v7_ice.nc", + ) + + def test_get_grid_info_basic(self): + """Basic test of get_grid_info""" + model_grid_entries = self._MODEL_GRID_F09_G17 + domain_entries = self._DOMAIN_F09 + self._DOMAIN_G17 + gridmap_entries = self._GRIDMAP_F09_G17 + self._create_grids_xml( + model_grid_entries=model_grid_entries, + domain_entries=domain_entries, + gridmap_entries=gridmap_entries, + ) + + grids = Grids(self._xml_filepath) + grid_info = grids.get_grid_info( + name="f09_g17", + compset="NOT_IMPORTANT", + driver="nuopc", + ) + + self.assert_grid_info_f09_g17(grid_info) + + def test_get_grid_info_extra_required_gridmaps(self): + """Test of get_grid_info with some extra required gridmaps""" + model_grid_entries = self._MODEL_GRID_F09_G17 + domain_entries = self._DOMAIN_F09 + self._DOMAIN_G17 + gridmap_entries = self._GRIDMAP_F09_G17 + # These are some extra required gridmaps that aren't explicitly specified + extra_required_gridmaps = """ + ATM2OCN_EXTRA + OCN2ATM_EXTRA +""" + self._create_grids_xml( + model_grid_entries=model_grid_entries, + domain_entries=domain_entries, + gridmap_entries=gridmap_entries, + extra_required_gridmaps=extra_required_gridmaps, + ) + + grids = Grids(self._xml_filepath) + grid_info = grids.get_grid_info( + name="f09_g17", + compset="NOT_IMPORTANT", + driver="nuopc", + ) + + self.assert_grid_info_f09_g17(grid_info) + self.assertEqual(grid_info["ATM2OCN_EXTRA"], "unset") + self.assertEqual(grid_info["OCN2ATM_EXTRA"], "unset") + + def 
test_get_grid_info_extra_gridmaps(self): + """Test of get_grid_info with some extra gridmaps""" + model_grid_entries = self._MODEL_GRID_F09_G17 + domain_entries = self._DOMAIN_F09 + self._DOMAIN_G17 + gridmap_entries = self._GRIDMAP_F09_G17 + # These are some extra gridmaps that aren't in the required list + gridmap_entries += """ + + map_fv0.9x1.25_TO_gx1v7_extra.nc + map_gx1v7_TO_fv0.9x1.25_extra.nc + +""" + self._create_grids_xml( + model_grid_entries=model_grid_entries, + domain_entries=domain_entries, + gridmap_entries=gridmap_entries, + ) + + grids = Grids(self._xml_filepath) + grid_info = grids.get_grid_info( + name="f09_g17", + compset="NOT_IMPORTANT", + driver="nuopc", + ) + + self.assert_grid_info_f09_g17(grid_info) + self.assertEqual(grid_info["ATM2OCN_EXTRA"], "map_fv0.9x1.25_TO_gx1v7_extra.nc") + self.assertEqual(grid_info["OCN2ATM_EXTRA"], "map_gx1v7_TO_fv0.9x1.25_extra.nc") + + def test_get_grid_info_3glc(self): + """Test of get_grid_info with 3 glc grids""" + model_grid_entries = self._MODEL_GRID_F09_G17_3GLC + domain_entries = ( + self._DOMAIN_F09 + + self._DOMAIN_G17 + + self._DOMAIN_GRIS4 + + self._DOMAIN_AIS8 + + self._DOMAIN_LIS12 + ) + gridmap_entries = ( + self._GRIDMAP_F09_G17 + + self._GRIDMAP_GRIS4_G17 + + self._GRIDMAP_AIS8_G17 + + self._GRIDMAP_LIS12_G17 + ) + # Claim that a glc2atm gridmap is required in order to test the logic that handles + # an unset required gridmap for a component with multiple grids. + extra_required_gridmaps = """ + GLC2ATM_EXTRA +""" + self._create_grids_xml( + model_grid_entries=model_grid_entries, + domain_entries=domain_entries, + gridmap_entries=gridmap_entries, + extra_required_gridmaps=extra_required_gridmaps, + ) + + grids = Grids(self._xml_filepath) + grid_info = grids.get_grid_info( + name="f09_g17_3glc", + compset="NOT_IMPORTANT", + driver="nuopc", + ) + + self.assert_grid_info_f09_g17_3glc(grid_info) + self.assertEqual(grid_info["GLC2ATM_EXTRA"], "unset") + + +class TestComponentGrids(unittest.TestCase): + """Tests the _ComponentGrids helper class defined in CIME.XML.grids""" + + # A valid grid long name used in a lot of these tests; there are two rof grids and + # three glc grids, and one grid for each other component + _GRID_LONGNAME = "a%0.9x1.25_l%0.9x1.25_oi%gx1v7_r%r05:r01_g%ais8:gris4:lis12_w%ww3a_z%null_m%gx1v7" + + # ------------------------------------------------------------------------ + # Tests of check_num_elements + # + # These tests cover a lot of the code in _ComponentGrids + # + # We don't cover all of the branches in check_num_elements because many of the + # branches that lead to a successful pass are already covered by unit tests in the + # TestGrids class. 
+ # ------------------------------------------------------------------------ + + def test_check_num_elements_right_ndomains(self): + """With the right number of domains for a component, check_num_elements should pass""" + component_grids = _ComponentGrids(self._GRID_LONGNAME) + gridinfo = {"GLC_DOMAIN_MESH": "foo:bar:baz"} + + # The test passes as long as the following call doesn't generate any errors + component_grids.check_num_elements(gridinfo) + + def test_check_num_elements_wrong_ndomains(self): + """With the wrong number of domains for a component, check_num_elements should fail""" + component_grids = _ComponentGrids(self._GRID_LONGNAME) + # In the following, there should be 3 elements, but we only specify 2 + gridinfo = {"GLC_DOMAIN_MESH": "foo:bar"} + + self.assertRaisesRegex( + CIMEError, + "Unexpected number of colon-delimited elements", + component_grids.check_num_elements, + gridinfo, + ) + + def test_check_num_elements_right_nmaps(self): + """With the right number of maps between two components, check_num_elements should pass""" + component_grids = _ComponentGrids(self._GRID_LONGNAME) + gridinfo = {"GLC2ROF_RMAPNAME": "map1:map2:map3:map4:map5:map6"} + + # The test passes as long as the following call doesn't generate any errors + component_grids.check_num_elements(gridinfo) + + def test_check_num_elements_wrong_nmaps(self): + """With the wrong number of maps between two components, check_num_elements should fail""" + component_grids = _ComponentGrids(self._GRID_LONGNAME) + # In the following, there should be 6 elements, but we only specify 5 + gridinfo = {"GLC2ROF_RMAPNAME": "map1:map2:map3:map4:map5"} + + self.assertRaisesRegex( + CIMEError, + "Unexpected number of colon-delimited elements", + component_grids.check_num_elements, + gridinfo, + ) + + +class TestGridsFunctions(unittest.TestCase): + """Tests helper functions defined in CIME.XML.grids + + These tests are in a separate class to avoid the unnecessary setUp and tearDown + function of the main test class. 
+ + """ + + # ------------------------------------------------------------------------ + # Tests of _add_grid_info + # ------------------------------------------------------------------------ + + def test_add_grid_info_initial(self): + """Test of _add_grid_info for the initial add of a given key""" + grid_info = {"foo": "a"} + _add_grid_info(grid_info, "bar", "b") + self.assertEqual(grid_info, {"foo": "a", "bar": "b"}) + + def test_add_grid_info_existing(self): + """Test of _add_grid_info when the given key already exists""" + grid_info = {"foo": "bar"} + _add_grid_info(grid_info, "foo", "baz") + self.assertEqual(grid_info, {"foo": "bar:baz"}) + + def test_add_grid_info_existing_with_value_for_multiple(self): + """Test of _add_grid_info when the given key already exists and value_for_multiple is provided""" + grid_info = {"foo": 1} + _add_grid_info(grid_info, "foo", 2, value_for_multiple=0) + self.assertEqual(grid_info, {"foo": 0}) + + # ------------------------------------------------------------------------ + # Tests of strip_grid_from_name + # ------------------------------------------------------------------------ + + def test_strip_grid_from_name_basic(self): + """Basic test of _strip_grid_from_name""" + result = _strip_grid_from_name("atm_grid") + self.assertEqual(result, "atm") + + def test_strip_grid_from_name_badname(self): + """_strip_grid_from_name should raise an exception for a name not ending with _grid""" + self.assertRaisesRegex( + CIMEError, "does not end with _grid", _strip_grid_from_name, name="atm" + ) + + # ------------------------------------------------------------------------ + # Tests of _check_grid_info_component_counts + # ------------------------------------------------------------------------ + + +if __name__ == "__main__": + unittest.main() diff --git a/CIME/tests/test_unit_nmlgen.py b/CIME/tests/test_unit_nmlgen.py new file mode 100644 index 00000000000..52e04d28856 --- /dev/null +++ b/CIME/tests/test_unit_nmlgen.py @@ -0,0 +1,59 @@ +from collections import OrderedDict +import tempfile +import unittest +from unittest import mock + +from CIME.nmlgen import NamelistGenerator + +# pylint: disable=protected-access +class TestNamelistGenerator(unittest.TestCase): + def test_init_defaults(self): + test_nml_infile = b"""&test +test1 = 'test1_updated' +/""" + + test_data = """ + + + + + char + test + test_nml + test1_value,test1_updated + + test1_value + + + + char + test + test_nml + + test2_value + + +""" + + with tempfile.NamedTemporaryFile() as temp, tempfile.NamedTemporaryFile() as temp2: + temp.write(test_data.encode()) + temp.flush() + + temp2.write(test_nml_infile) + temp2.flush() + + case = mock.MagicMock() + + nmlgen = NamelistGenerator(case, [temp.name]) + + nmlgen.init_defaults([temp2.name], None) + + expected_groups = OrderedDict( + {"test_nml": {"test1": ["'test1_updated'"], "test2": ['"test2_value"']}} + ) + + assert nmlgen._namelist._groups == expected_groups + + +if __name__ == "__main__": + unittest.main() diff --git a/CIME/tests/test_unit_provenance.py b/CIME/tests/test_unit_provenance.py new file mode 100755 index 00000000000..21038a8b48d --- /dev/null +++ b/CIME/tests/test_unit_provenance.py @@ -0,0 +1,146 @@ +#!/usr/bin/env python3 + +import os +import sys +import tempfile +import unittest +from unittest import mock + +from CIME import provenance +from CIME import utils + +# pylint: disable=protected-access +class TestProvenance(unittest.TestCase): + def test_parse_dot_git_path_error(self): + with self.assertRaises(utils.CIMEError): + 
provenance._parse_dot_git_path("/src/CIME") + + def test_parse_dot_git_path(self): + value = provenance._parse_dot_git_path("/src/CIME/.git/worktrees/test") + + assert value == "/src/CIME/.git" + + def test_find_git_root(self): + with tempfile.TemporaryDirectory() as tempdir: + os.makedirs(os.path.join(tempdir, ".git")) + + value = provenance._find_git_root(tempdir) + + assert value == f"{tempdir}/.git" + + def test_find_git_root_worktree(self): + with tempfile.TemporaryDirectory() as tempdir: + with open(os.path.join(tempdir, ".git"), "w") as fd: + fd.write("gitdir: /src/CIME/.git/worktrees/test") + + value = provenance._find_git_root(tempdir) + + assert value == "/src/CIME/.git/worktrees/test" + + def test_find_git_root_worktree_malformed(self): + with tempfile.TemporaryDirectory() as tempdir: + with open(os.path.join(tempdir, ".git"), "w") as fd: + fd.write("some value: /src/CIME/.git/worktrees/test") + + with self.assertRaises(utils.CIMEError): + provenance._find_git_root(tempdir) + + def test_find_git_root_error(self): + with tempfile.TemporaryDirectory() as tempdir: + with self.assertRaises(utils.CIMEError): + provenance._find_git_root(tempdir) + + @mock.patch("CIME.provenance.run_cmd") + def test_run_git_cmd_recursively(self, run_cmd): + run_cmd.return_value = (0, "data", None) + + with mock.patch("CIME.provenance.open", mock.mock_open()) as m: + provenance._run_git_cmd_recursively( + "status", "/srcroot", "/output.txt" + ) # pylint: disable=protected-access + + m.assert_called_with("/output.txt", "w") + + write = m.return_value.__enter__.return_value.write + + write.assert_any_call("data\n\n") + write.assert_any_call("data\n") + + run_cmd.assert_any_call("git status", from_dir="/srcroot") + run_cmd.assert_any_call( + 'git submodule foreach --recursive "git status; echo"', from_dir="/srcroot" + ) + + @mock.patch("CIME.provenance.run_cmd") + def test_run_git_cmd_recursively_error(self, run_cmd): + run_cmd.return_value = (1, "data", "error") + + with mock.patch("CIME.provenance.open", mock.mock_open()) as m: + provenance._run_git_cmd_recursively( + "status", "/srcroot", "/output.txt" + ) # pylint: disable=protected-access + + m.assert_called_with("/output.txt", "w") + + write = m.return_value.__enter__.return_value.write + + write.assert_any_call("error\n\n") + write.assert_any_call("error\n") + + run_cmd.assert_any_call("git status", from_dir="/srcroot") + run_cmd.assert_any_call( + 'git submodule foreach --recursive "git status; echo"', from_dir="/srcroot" + ) + + @mock.patch("CIME.provenance.safe_copy") + @mock.patch("CIME.provenance.run_cmd") + def test_record_git_provenance(self, run_cmd, safe_copy): + run_cmd.return_value = (0, "data", None) + + with mock.patch("CIME.provenance.open", mock.mock_open()) as m: + with tempfile.TemporaryDirectory() as tempdir: + os.makedirs(os.path.join(tempdir, ".git")) + + provenance._record_git_provenance( + tempdir, "/output", "5" + ) # pylint: disable=protected-access + + m.assert_any_call("/output/GIT_STATUS.5", "w") + m.assert_any_call("/output/GIT_DIFF.5", "w") + m.assert_any_call("/output/GIT_LOG.5", "w") + m.assert_any_call("/output/GIT_REMOTE.5", "w") + + write = m.return_value.__enter__.return_value.write + + write.assert_any_call("data\n\n") + write.assert_any_call("data\n") + + run_cmd.assert_any_call("git status", from_dir=tempdir) + run_cmd.assert_any_call( + 'git submodule foreach --recursive "git status; echo"', from_dir=tempdir + ) + run_cmd.assert_any_call("git diff", from_dir=tempdir) + run_cmd.assert_any_call( + 'git submodule 
foreach --recursive "git diff; echo"', from_dir=tempdir + ) + run_cmd.assert_any_call( + "git log --first-parent --pretty=oneline -n 5", from_dir=tempdir + ) + run_cmd.assert_any_call( + 'git submodule foreach --recursive "git log --first-parent' + ' --pretty=oneline -n 5; echo"', + from_dir=tempdir, + ) + run_cmd.assert_any_call("git remote -v", from_dir=tempdir) + run_cmd.assert_any_call( + 'git submodule foreach --recursive "git remote -v; echo"', from_dir=tempdir + ) + + safe_copy.assert_any_call( + f"{tempdir}/.git/config", "/output/GIT_CONFIG.5", preserve_meta=False + ) + + +if __name__ == "__main__": + sys.path.insert(0, os.path.abspath(os.path.join(".", "..", "..", "lib"))) + unittest.main() diff --git a/CIME/tests/test_unit_test_status.py b/CIME/tests/test_unit_test_status.py new file mode 100755 index 00000000000..0b79c8bac6a --- /dev/null +++ b/CIME/tests/test_unit_test_status.py @@ -0,0 +1,148 @@ +#!/usr/bin/env python3 + +import unittest +import os +from CIME import test_status +from CIME import expected_fails +from CIME.tests.custom_assertions_test_status import CustomAssertionsTestStatus + + +class TestTestStatus(CustomAssertionsTestStatus): + + _TESTNAME = "fake_test" + + # An arbitrary phase we can use when we want to work with a non-core phase + _NON_CORE_PHASE = test_status.MEMLEAK_PHASE + + def setUp(self): + self._ts = test_status.TestStatus( + test_dir=os.path.join("nonexistent", "path"), + test_name=self._TESTNAME, + no_io=True, + ) + self._set_core_phases_to_pass() + + def _set_core_phases_to_pass(self): + """Set all core phases of self._ts to pass status""" + with self._ts: + for phase in test_status.CORE_PHASES: + self._ts.set_status(phase, test_status.TEST_PASS_STATUS) + + def _set_last_core_phase_to_fail(self): + """Sets the last core phase to FAIL + + Returns the name of this phase""" + fail_phase = test_status.CORE_PHASES[-1] + self._set_phase_to_status(fail_phase, test_status.TEST_FAIL_STATUS) + return fail_phase + + def _set_phase_to_status(self, phase, status): + """Set given phase to given status""" + with self._ts: + self._ts.set_status(phase, status) + + # ------------------------------------------------------------------------ + # Tests of TestStatus.phase_statuses_dump + # ------------------------------------------------------------------------ + + def test_psdump_corePhasesPass(self): + output = self._ts.phase_statuses_dump() + self.assert_core_phases(output, self._TESTNAME, fails=[]) + self.assert_num_expected_unexpected_fails( + output, num_expected=0, num_unexpected=0 + ) + + def test_psdump_oneCorePhaseFails(self): + fail_phase = self._set_last_core_phase_to_fail() + output = self._ts.phase_statuses_dump() + self.assert_core_phases(output, self._TESTNAME, fails=[fail_phase]) + self.assert_num_expected_unexpected_fails( + output, num_expected=0, num_unexpected=0 + ) + + def test_psdump_oneCorePhaseFailsAbsentFromXFails(self): + """One phase fails. There is an expected fails list, but that phase is not in it.""" + fail_phase = self._set_last_core_phase_to_fail() + xfails = expected_fails.ExpectedFails() + xfails.add_failure( + phase=self._NON_CORE_PHASE, expected_status=test_status.TEST_FAIL_STATUS + ) + output = self._ts.phase_statuses_dump(xfails=xfails) + self.assert_status_of_phase( + output, test_status.TEST_FAIL_STATUS, fail_phase, self._TESTNAME, xfail="no" + ) + self.assert_num_expected_unexpected_fails( + output, num_expected=0, num_unexpected=0 + ) + + def test_psdump_oneCorePhaseFailsInXFails(self): + """One phase fails. 
That phase is in the expected fails list.""" + fail_phase = self._set_last_core_phase_to_fail() + xfails = expected_fails.ExpectedFails() + xfails.add_failure( + phase=fail_phase, expected_status=test_status.TEST_FAIL_STATUS + ) + output = self._ts.phase_statuses_dump(xfails=xfails) + self.assert_status_of_phase( + output, + test_status.TEST_FAIL_STATUS, + fail_phase, + self._TESTNAME, + xfail="expected", + ) + self.assert_num_expected_unexpected_fails( + output, num_expected=1, num_unexpected=0 + ) + + def test_psdump_oneCorePhasePassesInXFails(self): + """One phase passes despite being in the expected fails list.""" + xfail_phase = test_status.CORE_PHASES[-1] + xfails = expected_fails.ExpectedFails() + xfails.add_failure( + phase=xfail_phase, expected_status=test_status.TEST_FAIL_STATUS + ) + output = self._ts.phase_statuses_dump(xfails=xfails) + self.assert_status_of_phase( + output, + test_status.TEST_PASS_STATUS, + xfail_phase, + self._TESTNAME, + xfail="unexpected", + ) + self.assert_num_expected_unexpected_fails( + output, num_expected=0, num_unexpected=1 + ) + + def test_psdump_skipPasses(self): + """With the skip_passes argument, only non-passes should appear""" + fail_phase = self._set_last_core_phase_to_fail() + output = self._ts.phase_statuses_dump(skip_passes=True) + self.assert_status_of_phase( + output, test_status.TEST_FAIL_STATUS, fail_phase, self._TESTNAME, xfail="no" + ) + for phase in test_status.CORE_PHASES: + if phase != fail_phase: + self.assert_phase_absent(output, phase, self._TESTNAME) + + def test_psdump_unexpectedPass_shouldBePresent(self): + """Even with the skip_passes argument, an unexpected PASS should be present""" + xfail_phase = test_status.CORE_PHASES[-1] + xfails = expected_fails.ExpectedFails() + xfails.add_failure( + phase=xfail_phase, expected_status=test_status.TEST_FAIL_STATUS + ) + output = self._ts.phase_statuses_dump(skip_passes=True, xfails=xfails) + self.assert_status_of_phase( + output, + test_status.TEST_PASS_STATUS, + xfail_phase, + self._TESTNAME, + xfail="unexpected", + ) + for phase in test_status.CORE_PHASES: + if phase != xfail_phase: + self.assert_phase_absent(output, phase, self._TESTNAME) + + +if __name__ == "__main__": + unittest.main() diff --git a/CIME/tests/test_unit_two_link_to_case2_output.py b/CIME/tests/test_unit_two_link_to_case2_output.py new file mode 100755 index 00000000000..2984fa4c802 --- /dev/null +++ b/CIME/tests/test_unit_two_link_to_case2_output.py @@ -0,0 +1,158 @@ +#!/usr/bin/env python3 + +""" +This module contains unit tests of the method +SystemTestsCompareTwo._link_to_case2_output +""" + +# Ignore privacy concerns for unit tests, so that unit tests can access +# protected members of the system under test +# +# pylint:disable=protected-access + +import unittest +import os +import shutil +import tempfile +from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo +from CIME.tests.case_fake import CaseFake + +# ======================================================================== +# Fake version of SystemTestsCompareTwo that overrides some functionality for +# the sake of unit testing +# ======================================================================== + + +class SystemTestsCompareTwoFake(SystemTestsCompareTwo): + def __init__(self, case1, run_two_suffix="test"): + + SystemTestsCompareTwo.__init__( + self, case1, separate_builds=False, run_two_suffix=run_two_suffix + ) + + # ------------------------------------------------------------------------ + # Stubs of methods called by 
SystemTestsCommon.__init__ that interact with + # the system or case object in ways we want to avoid here + # ------------------------------------------------------------------------ + + def _init_environment(self, caseroot): + pass + + def _init_locked_files(self, caseroot, expected): + pass + + def _init_case_setup(self): + pass + + # ------------------------------------------------------------------------ + # Stubs of methods that are typically provided by the individual test + # ------------------------------------------------------------------------ + + def _case_one_setup(self): + pass + + def _case_two_setup(self): + pass + + +# ======================================================================== +# Test class itself +# ======================================================================== + + +class TestLinkToCase2Output(unittest.TestCase): + + # ======================================================================== + # Test helper functions + # ======================================================================== + + def setUp(self): + self.original_wd = os.getcwd() + # Create a sandbox in which case directories can be created + self.tempdir = tempfile.mkdtemp() + + def tearDown(self): + # Some tests trigger a chdir call in the SUT; make sure we return to the + # original directory at the end of the test + os.chdir(self.original_wd) + + shutil.rmtree(self.tempdir, ignore_errors=True) + + def setup_test_and_directories(self, casename1, run2_suffix): + """ + Returns test object + """ + + case1root = os.path.join(self.tempdir, casename1) + case1 = CaseFake(case1root) + mytest = SystemTestsCompareTwoFake(case1, run_two_suffix=run2_suffix) + mytest._case1.make_rundir() # pylint: disable=maybe-no-member + mytest._case2.make_rundir() # pylint: disable=maybe-no-member + + return mytest + + def create_file_in_rundir2(self, mytest, core_filename, run2_suffix): + """ + Creates a file in rundir2 named CASE2.CORE_FILENAME.nc.RUN2_SUFFIX + (where CASE2 is the casename of case2) + + Returns full path to the file created + """ + filename = "{}.{}.nc.{}".format( + mytest._case2.get_value("CASE"), core_filename, run2_suffix + ) + filepath = os.path.join(mytest._case2.get_value("RUNDIR"), filename) + open(filepath, "w").close() + return filepath + + # ======================================================================== + # Begin actual tests + # ======================================================================== + + def test_basic(self): + # Setup + casename1 = "mytest" + run2_suffix = "run2" + + mytest = self.setup_test_and_directories(casename1, run2_suffix) + filepath1 = self.create_file_in_rundir2(mytest, "clm2.h0", run2_suffix) + filepath2 = self.create_file_in_rundir2(mytest, "clm2.h1", run2_suffix) + + # Exercise + mytest._link_to_case2_output() + + # Verify + expected_link_filename1 = "{}.clm2.h0.nc.{}".format(casename1, run2_suffix) + expected_link_filepath1 = os.path.join( + mytest._case1.get_value("RUNDIR"), expected_link_filename1 + ) + self.assertTrue(os.path.islink(expected_link_filepath1)) + self.assertEqual(filepath1, os.readlink(expected_link_filepath1)) + + expected_link_filename2 = "{}.clm2.h1.nc.{}".format(casename1, run2_suffix) + expected_link_filepath2 = os.path.join( + mytest._case1.get_value("RUNDIR"), expected_link_filename2 + ) + self.assertTrue(os.path.islink(expected_link_filepath2)) + self.assertEqual(filepath2, os.readlink(expected_link_filepath2)) + + def test_existing_link(self): + # Setup + casename1 = "mytest" + run2_suffix = "run2" + + mytest = 
self.setup_test_and_directories(casename1, run2_suffix) + self.create_file_in_rundir2(mytest, "clm2.h0", run2_suffix) + + # Create initial link via a call to _link_to_case2_output + mytest._link_to_case2_output() + + # Exercise + # See what happens when we try to recreate that link + mytest._link_to_case2_output() + + # (No verification: Test passes if no exception was raised) + + +if __name__ == "__main__": + unittest.main() diff --git a/CIME/tests/test_unit_user_mod_support.py b/CIME/tests/test_unit_user_mod_support.py new file mode 100755 index 00000000000..8f54986f0e8 --- /dev/null +++ b/CIME/tests/test_unit_user_mod_support.py @@ -0,0 +1,219 @@ +#!/usr/bin/env python3 + +import unittest +import shutil +import tempfile +import os +from CIME.user_mod_support import apply_user_mods +from CIME.utils import CIMEError +import CIME.six + +# ======================================================================== +# Define some parameters +# ======================================================================== + +_SOURCEMODS = os.path.join("SourceMods", "src.drv") + + +class TestUserModSupport(unittest.TestCase): + + # ======================================================================== + # Test helper functions + # ======================================================================== + + def setUp(self): + self._caseroot = tempfile.mkdtemp() + self._caseroot_sourcemods = os.path.join(self._caseroot, _SOURCEMODS) + os.makedirs(self._caseroot_sourcemods) + self._user_mods_parent_dir = tempfile.mkdtemp() + + def tearDown(self): + shutil.rmtree(self._caseroot, ignore_errors=True) + shutil.rmtree(self._user_mods_parent_dir, ignore_errors=True) + + def createUserMod(self, name, include_dirs=None): + """Create a user_mods directory with the given name. + + This directory is created within self._user_mods_parent_dir + + For name='foo', it will contain: + + - A user_nl_cpl file with contents: + foo + + - A shell_commands file with contents: + echo foo >> /PATH/TO/CASEROOT/shell_commands_result + + - A file in _SOURCEMODS named myfile.F90 with contents: + foo + + If include_dirs is given, it should be a list of strings, giving names + of other user_mods directories to include. e.g., if include_dirs is + ['foo1', 'foo2'], then this will create a file 'include_user_mods' that + contains paths to the 'foo1' and 'foo2' user_mods directories, one per + line. 
+ """ + + mod_dir = os.path.join(self._user_mods_parent_dir, name) + os.makedirs(mod_dir) + mod_dir_sourcemods = os.path.join(mod_dir, _SOURCEMODS) + os.makedirs(mod_dir_sourcemods) + + with open(os.path.join(mod_dir, "user_nl_cpl"), "w") as user_nl_cpl: + user_nl_cpl.write(name + "\n") + with open(os.path.join(mod_dir, "shell_commands"), "w") as shell_commands: + command = "echo {} >> {}/shell_commands_result\n".format( + name, self._caseroot + ) + shell_commands.write(command) + with open(os.path.join(mod_dir_sourcemods, "myfile.F90"), "w") as f90_file: + f90_file.write(name + "\n") + + if include_dirs: + with open( + os.path.join(mod_dir, "include_user_mods"), "w" + ) as include_user_mods: + for one_include in include_dirs: + include_user_mods.write( + os.path.join(self._user_mods_parent_dir, one_include) + "\n" + ) + + def assertResults( + self, + expected_user_nl_cpl, + expected_shell_commands_result, + expected_sourcemod, + msg="", + ): + """Asserts that the contents of the files in self._caseroot match expectations + + If msg is provided, it is printed for some failing assertions + """ + + path_to_user_nl_cpl = os.path.join(self._caseroot, "user_nl_cpl") + self.assertTrue( + os.path.isfile(path_to_user_nl_cpl), + msg=msg + ": user_nl_cpl does not exist", + ) + with open(path_to_user_nl_cpl, "r") as user_nl_cpl: + contents = user_nl_cpl.read() + self.assertEqual(expected_user_nl_cpl, contents) + + path_to_shell_commands_result = os.path.join( + self._caseroot, "shell_commands_result" + ) + self.assertTrue( + os.path.isfile(path_to_shell_commands_result), + msg=msg + ": shell_commands_result does not exist", + ) + with open(path_to_shell_commands_result, "r") as shell_commands_result: + contents = shell_commands_result.read() + self.assertEqual(expected_shell_commands_result, contents) + + path_to_sourcemod = os.path.join(self._caseroot_sourcemods, "myfile.F90") + self.assertTrue( + os.path.isfile(path_to_sourcemod), + msg=msg + ": sourcemod file does not exist", + ) + with open(path_to_sourcemod, "r") as sourcemod: + contents = sourcemod.read() + self.assertEqual(expected_sourcemod, contents) + + # ======================================================================== + # Begin actual tests + # ======================================================================== + + def test_basic(self): + self.createUserMod("foo") + apply_user_mods(self._caseroot, os.path.join(self._user_mods_parent_dir, "foo")) + self.assertResults( + expected_user_nl_cpl="foo\n", + expected_shell_commands_result="foo\n", + expected_sourcemod="foo\n", + msg="test_basic", + ) + + def test_keepexe(self): + self.createUserMod("foo") +<<<<<<< HEAD:CIME/tests/test_user_mod_support.py + with CIME.six.assertRaisesRegex(self, CIMEError, "cannot have any source mods"): + apply_user_mods(self._caseroot, + os.path.join(self._user_mods_parent_dir, "foo"), keepexe=True) +======= + with six.assertRaisesRegex(self, CIMEError, "cannot have any source mods"): + apply_user_mods( + self._caseroot, + os.path.join(self._user_mods_parent_dir, "foo"), + keepexe=True, + ) +>>>>>>> master:CIME/tests/test_unit_user_mod_support.py + + def test_two_applications(self): + """If apply_user_mods is called twice, the second should appear after the first so that it takes precedence.""" + + self.createUserMod("foo1") + self.createUserMod("foo2") + apply_user_mods( + self._caseroot, os.path.join(self._user_mods_parent_dir, "foo1") + ) + apply_user_mods( + self._caseroot, os.path.join(self._user_mods_parent_dir, "foo2") + ) + self.assertResults( 
+ expected_user_nl_cpl="foo1\nfoo2\n", + expected_shell_commands_result="foo1\nfoo2\n", + expected_sourcemod="foo2\n", + msg="test_two_applications", + ) + + def test_include(self): + """If there is an included mod, the main one should appear after the included one so that it takes precedence.""" + + self.createUserMod("base") + self.createUserMod("derived", include_dirs=["base"]) + + apply_user_mods( + self._caseroot, os.path.join(self._user_mods_parent_dir, "derived") + ) + + self.assertResults( + expected_user_nl_cpl="base\nderived\n", + expected_shell_commands_result="base\nderived\n", + expected_sourcemod="derived\n", + msg="test_include", + ) + + def test_duplicate_includes(self): + """Test multiple includes, where both include the same base mod. + + The base mod should only be included once. + """ + + self.createUserMod("base") + self.createUserMod("derived1", include_dirs=["base"]) + self.createUserMod("derived2", include_dirs=["base"]) + self.createUserMod("derived_combo", include_dirs=["derived1", "derived2"]) + + apply_user_mods( + self._caseroot, os.path.join(self._user_mods_parent_dir, "derived_combo") + ) + + # NOTE(wjs, 2017-04-15) The ordering of derived1 vs. derived2 is not + # critical here: If this aspect of the behavior changes, the + # expected_contents can be changed to match the new behavior in this + # respect. + expected_contents = """base +derived2 +derived1 +derived_combo +""" + self.assertResults( + expected_user_nl_cpl=expected_contents, + expected_shell_commands_result=expected_contents, + expected_sourcemod="derived_combo\n", + msg="test_duplicate_includes", + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/CIME/tests/test_unit_user_nl_utils.py b/CIME/tests/test_unit_user_nl_utils.py new file mode 100755 index 00000000000..e5f585372b8 --- /dev/null +++ b/CIME/tests/test_unit_user_nl_utils.py @@ -0,0 +1,137 @@ +#!/usr/bin/env python3 + +import unittest +import os +import shutil +import tempfile +from CIME.SystemTests.test_utils import user_nl_utils +import CIME.six + + +class TestUserNLCopier(unittest.TestCase): + + # ======================================================================== + # Test helper functions + # ======================================================================== + + def setUp(self): + self._caseroot = tempfile.mkdtemp() + + def tearDown(self): + shutil.rmtree(self._caseroot, ignore_errors=True) + + def write_user_nl_file(self, component, contents, suffix=""): + """Write contents to a user_nl file in the case directory. Returns the + basename (i.e., not the full path) of the file that is created. + + For a component foo, with the default suffix of '', the file name will + be user_nl_foo + + If the suffix is '_0001', the file name will be user_nl_foo_0001 + """ + + filename = "user_nl_" + component + suffix + + with open(os.path.join(self._caseroot, filename), "w") as user_nl_file: + user_nl_file.write(contents) + + return filename + + def assertFileContentsEqual(self, expected, filepath, msg=None): + """Asserts that the contents of the file given by 'filepath' are equal to + the string given by 'expected'. 
'msg' gives an optional message to be + printed if the assertion fails.""" + + with open(filepath, "r") as myfile: + contents = myfile.read() + + self.assertEqual(expected, contents, msg=msg) + + # ======================================================================== + # Begin actual tests + # ======================================================================== + + def test_append(self): + # Define some variables + component = "foo" + # deliberately exclude new line from file contents, to make sure that's + # handled correctly + orig_contents = "bar = 42" + contents_to_append = "baz = 101" + + # Setup + filename = self.write_user_nl_file(component, orig_contents) + + # Exercise + user_nl_utils.append_to_user_nl_files( + caseroot=self._caseroot, component=component, contents=contents_to_append + ) + + # Verify + expected_contents = orig_contents + "\n" + contents_to_append + "\n" + self.assertFileContentsEqual( + expected_contents, os.path.join(self._caseroot, filename) + ) + + def test_append_multiple_files(self): + # Simulates a multi-instance test + component = "foo" + orig_contents1 = "bar = 42" + orig_contents2 = "bar = 17" + contents_to_append = "baz = 101" + + # Setup + filename1 = self.write_user_nl_file(component, orig_contents1, suffix="_0001") + filename2 = self.write_user_nl_file(component, orig_contents2, suffix="_0002") + + # Exercise + user_nl_utils.append_to_user_nl_files( + caseroot=self._caseroot, component=component, contents=contents_to_append + ) + + # Verify + expected_contents1 = orig_contents1 + "\n" + contents_to_append + "\n" + expected_contents2 = orig_contents2 + "\n" + contents_to_append + "\n" + self.assertFileContentsEqual( + expected_contents1, os.path.join(self._caseroot, filename1) + ) + self.assertFileContentsEqual( + expected_contents2, os.path.join(self._caseroot, filename2) + ) + + def test_append_without_files_raises_exception(self): + # This test verifies that you get an exception if you call + # append_to_user_nl_files when there are no user_nl files of interest + + # Define some variables + component_exists = "foo" + component_for_append = "bar" + + # Setup + # Create file in caseroot for component_exists, but not for component_for_append + self.write_user_nl_file(component_exists, "irrelevant contents") + + # Exercise & verify + CIME.six.assertRaisesRegex( + self, + RuntimeError, + "No user_nl files found", + user_nl_utils.append_to_user_nl_files, + caseroot=self._caseroot, + component=component_for_append, + contents="irrelevant contents to append", + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/CIME/tests/test_unit_utils.py b/CIME/tests/test_unit_utils.py new file mode 100755 index 00000000000..00e59d5194c --- /dev/null +++ b/CIME/tests/test_unit_utils.py @@ -0,0 +1,389 @@ +#!/usr/bin/env python3 + +import os +import shutil +import sys +import tempfile + +import unittest +from unittest import mock +from CIME.utils import ( + indent_string, + run_and_log_case_status, + import_from_file, + _line_defines_python_function, + file_contains_python_function, +) + +from CIME.tests import utils + + +class TestIndentStr(unittest.TestCase): + """Test the indent_string function.""" + + def test_indent_string_singleline(self): + """Test the indent_string function with a single-line string""" + mystr = "foo" + result = indent_string(mystr, 4) + expected = "    foo" + self.assertEqual(expected, result) + + def test_indent_string_multiline(self): + """Test the indent_string function with a multi-line string""" + mystr = """hello +hi +goodbye +""" + result = indent_string(mystr, 2) + expected = """  hello +  hi +  goodbye +""" + self.assertEqual(expected, result) + + +class TestLineDefinesPythonFunction(unittest.TestCase): + """Tests of _line_defines_python_function""" + + # ------------------------------------------------------------------------ + # Tests of _line_defines_python_function that should return True + # ------------------------------------------------------------------------ + + def test_def_foo(self): + """Test of a def of the function of interest""" + line = "def foo():" + self.assertTrue(_line_defines_python_function(line, "foo")) + + def test_def_foo_space(self): + """Test of a def of the function of interest, with an extra space before the parentheses""" + line = "def foo ():" + self.assertTrue(_line_defines_python_function(line, "foo")) + + def test_import_foo(self): + """Test of an import of the function of interest""" + line = "from bar.baz import foo" + self.assertTrue(_line_defines_python_function(line, "foo")) + + def test_import_foo_space(self): + """Test of an import of the function of interest, with trailing spaces""" + line = "from bar.baz import foo  " + self.assertTrue(_line_defines_python_function(line, "foo")) + + def test_import_foo_then_others(self): + """Test of an import of the function of interest, along with others""" + line = "from bar.baz import foo, bar" + self.assertTrue(_line_defines_python_function(line, "foo")) + + def test_import_others_then_foo(self): + """Test of an import of the function of interest, after others""" + line = "from bar.baz import bar, foo" + self.assertTrue(_line_defines_python_function(line, "foo")) + + # ------------------------------------------------------------------------ + # Tests of _line_defines_python_function that should return False + # ------------------------------------------------------------------------ + + def test_def_barfoo(self): + """Test of a def of a different function""" + line = "def barfoo():" + self.assertFalse(_line_defines_python_function(line, "foo")) + + def test_def_foobar(self): + """Test of a def of a different function""" + line = "def foobar():" + self.assertFalse(_line_defines_python_function(line, "foo")) + + def test_def_foo_indented(self): + """Test of a def of the function of interest, but indented""" + line = "    def foo():" + self.assertFalse(_line_defines_python_function(line, "foo")) + + def test_def_foo_no_parens(self): + """Test of a def of the function of interest, but without parentheses""" + line = "def foo:" + self.assertFalse(_line_defines_python_function(line, "foo")) + + def test_import_foo_indented(self): + """Test of an import of the function of interest, but indented""" + line = "    from bar.baz import foo" + self.assertFalse(_line_defines_python_function(line, "foo")) + + def test_import_barfoo(self): + """Test of an import of a different function""" + line = "from bar.baz import barfoo" + self.assertFalse(_line_defines_python_function(line, "foo"))
+ + def test_import_foobar(self): + """Test of an import of a different function""" + line = "from bar.baz import foobar" + self.assertFalse(_line_defines_python_function(line, "foo")) + + +class TestFileContainsPythonFunction(unittest.TestCase): + """Tests of file_contains_python_function""" + + def setUp(self): + self._workdir = tempfile.mkdtemp() + + def tearDown(self): + shutil.rmtree(self._workdir, ignore_errors=True) + + def create_test_file(self, contents): + """Creates a test file with the given contents, and returns the path to that file""" + + filepath = os.path.join(self._workdir, "testfile") + with open(filepath, "w") as fd: + fd.write(contents) + + return filepath + + def test_contains_correct_def_and_others(self): + """Test file_contains_python_function with a correct def mixed with other defs""" + contents = """ +def bar(): +def foo(): +def baz(): +""" + filepath = self.create_test_file(contents) + self.assertTrue(file_contains_python_function(filepath, "foo")) + + def test_does_not_contain_correct_def(self): + """Test file_contains_python_function without the correct def""" + contents = """ +def bar(): +def notfoo(): +def baz(): +""" + filepath = self.create_test_file(contents) + self.assertFalse(file_contains_python_function(filepath, "foo")) + + +class MockTime(object): + def __init__(self): + self._old = None + + def __enter__(self): + self._old = getattr(sys.modules["time"], "strftime") + setattr(sys.modules["time"], "strftime", lambda *args: "00:00:00 ") + + def __exit__(self, *args, **kwargs): + setattr(sys.modules["time"], "strftime", self._old) + + +def match_all_lines(data, lines): + for line in data: + for i, x in enumerate(lines): + if x == line: + lines.pop(i) + + continue + + if len(lines) == 0: + return True, [] + + return False, lines + + +class TestUtils(unittest.TestCase): + def setUp(self): + self.base_func = lambda *args: None + + # pylint: disable=unused-argument + def _error_func(*args): + raise Exception("Something went wrong") + + self.error_func = _error_func + + def assertMatchAllLines(self, tempdir, test_lines): + with open(os.path.join(tempdir, "CaseStatus")) as fd: + data = fd.readlines() + + result, missing = match_all_lines(data, test_lines) + + error = [] + + if len(missing) != 0: + error.extend(["Missing Lines", ""]) + error.extend([x.rstrip("\n") for x in missing]) + error.extend(["", "Tempfile contents", ""]) + error.extend([x.rstrip("\n") for x in data]) + + self.assertTrue(result, msg="\n".join(error)) + + def test_import_from_file(self): + with tempfile.NamedTemporaryFile() as fd: + fd.writelines( + [ + b"def test():\n", + b"    return 'value'", + ] + ) + + fd.flush() + + module = import_from_file("test.py", fd.name) + + assert module.test() == "value" + + def test_run_and_log_case_status(self): + test_lines = [ + "00:00:00 default starting \n", + "00:00:00 default success \n", + ] + + with utils.TemporaryDirectory() as tempdir, MockTime(): + run_and_log_case_status(self.base_func, "default", caseroot=tempdir) + + self.assertMatchAllLines(tempdir, test_lines) + + def test_run_and_log_case_status_case_submit_on_batch(self): + test_lines = [ + "00:00:00 case.submit starting \n", + "00:00:00 case.submit success \n", + ] + + with utils.TemporaryDirectory() as tempdir, MockTime(): + run_and_log_case_status( + self.base_func, "case.submit", caseroot=tempdir, is_batch=True + ) + + self.assertMatchAllLines(tempdir, test_lines) + + def test_run_and_log_case_status_case_submit_no_batch(self): + test_lines = [ + "00:00:00 case.submit starting \n", + "00:00:00 case.submit success \n", + ] + + with utils.TemporaryDirectory() as tempdir, MockTime(): + run_and_log_case_status( + self.base_func, "case.submit", caseroot=tempdir, is_batch=False + ) + + self.assertMatchAllLines(tempdir, test_lines) + + def test_run_and_log_case_status_case_submit_error_on_batch(self): + test_lines = [ + "00:00:00 case.submit starting \n", + "00:00:00 case.submit error \n", + "Something went wrong\n", + ] + + with tempfile.TemporaryDirectory() as tempdir, MockTime(): + with self.assertRaises(Exception): + run_and_log_case_status( + self.error_func, "case.submit", caseroot=tempdir, is_batch=True + ) + + self.assertMatchAllLines(tempdir, test_lines) + + def test_run_and_log_case_status_custom_msg(self): + test_lines = [ + "00:00:00 default starting starting extra\n", + "00:00:00 default success success extra\n", + ] + + starting_func = mock.MagicMock(return_value="starting extra") + success_func = mock.MagicMock(return_value="success extra") + + def normal_func(): + return "data" + + with utils.TemporaryDirectory() as tempdir, MockTime(): + run_and_log_case_status( + normal_func, + "default", + custom_starting_msg_functor=starting_func, + custom_success_msg_functor=success_func, + caseroot=tempdir, + ) + + self.assertMatchAllLines(tempdir, test_lines) + + starting_func.assert_called_with() + success_func.assert_called_with("data") + + def test_run_and_log_case_status_custom_msg_error_on_batch(self): + test_lines = [ + "00:00:00 default starting starting extra\n", + "00:00:00 default success success extra\n", + ] + + starting_func = mock.MagicMock(return_value="starting extra") + success_func = mock.MagicMock(return_value="success extra") + + def error_func(): + raise Exception("Error") + + with utils.TemporaryDirectory() as tempdir, MockTime(), self.assertRaises( + Exception + ): + run_and_log_case_status( + error_func, + "default", + custom_starting_msg_functor=starting_func, + custom_success_msg_functor=success_func, + caseroot=tempdir, + ) + + self.assertMatchAllLines(tempdir, test_lines) + + starting_func.assert_called_with() + success_func.assert_not_called()
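As context for the literal "00:00:00" expected lines above, here is a small sketch of the pattern these tests rely on: MockTime pins time.strftime to a constant string, so every CaseStatus entry begins with a fixed, predictable timestamp. This is illustrative only (it assumes MockTime from this module is in scope); the assertions above are the authoritative behavior:

    import time

    with MockTime():
        # Inside the context manager, every strftime call returns "00:00:00 ",
        # regardless of the format string passed in.
        stamp = time.strftime("%Y-%m-%d %H:%M:%S")
        assert stamp == "00:00:00 "
        assert "{}default starting ".format(stamp) == "00:00:00 default starting "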
+ + def test_run_and_log_case_status_error(self): + test_lines = [ + "00:00:00 default starting \n", + "00:00:00 default error \n", + "Something went wrong\n", + ] + + with tempfile.TemporaryDirectory() as tempdir, MockTime(): + with self.assertRaises(Exception): + run_and_log_case_status(self.error_func, "default", caseroot=tempdir) + + self.assertMatchAllLines(tempdir, test_lines) + + +if __name__ == "__main__": + unittest.main() diff --git a/CIME/tests/test_unit_xml_namelist_definition.py b/CIME/tests/test_unit_xml_namelist_definition.py new file mode 100644 index 00000000000..96cc9f6a527 --- /dev/null +++ b/CIME/tests/test_unit_xml_namelist_definition.py @@ -0,0 +1,42 @@ +import tempfile +import unittest + +from CIME.XML.namelist_definition import NamelistDefinition + +# pylint: disable=protected-access + + +class TestXMLNamelistDefinition(unittest.TestCase): + def test_set_nodes(self): + test_data = """<?xml version="1.0"?> +<entry_id version="2.0"> + + <entry id="test1"> + <type>char</type> + <category>test</category> + </entry> + + <entry id="test2"> + <type>char</type> + <category>test</category> + </entry> + +</entry_id> +""" + + with tempfile.NamedTemporaryFile() as temp: + temp.write(test_data.encode()) + temp.flush() + + nmldef = NamelistDefinition(temp.name) + + nmldef.set_nodes() + + assert len(nmldef._entry_nodes) == 2 + assert nmldef._entry_ids == ["test1", "test2"] + assert len(nmldef._nodes) == 2 + assert nmldef._entry_types == {"test1": "char", "test2": "char"} + assert nmldef._valid_values == {"test1": None, "test2": None} + assert nmldef._group_names == {"test1": None, "test2": None} + + +if __name__ == "__main__": + unittest.main()
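For readers unfamiliar with what set_nodes gathers from a definition file like the one above, here is a minimal, standard-library-only sketch of the same parsing idea: collect each entry element, its id, and its child type text. It is an illustration of the concept, not NamelistDefinition's actual code, which layers more bookkeeping (valid values, group names) on top:

    import xml.etree.ElementTree as ET

    xml_text = """<?xml version="1.0"?>
    <entry_id version="2.0">
      <entry id="test1"><type>char</type><category>test</category></entry>
      <entry id="test2"><type>char</type><category>test</category></entry>
    </entry_id>"""

    root = ET.fromstring(xml_text)
    # One node per <entry>, keyed lookups mirroring the assertions above
    entry_ids = [node.get("id") for node in root.findall("entry")]
    entry_types = {node.get("id"): node.findtext("type") for node in root.findall("entry")}
    assert entry_ids == ["test1", "test2"]
    assert entry_types == {"test1": "char", "test2": "char"}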
diff --git a/CIME/tests/test_user_mod_support.py b/CIME/tests/test_user_mod_support.py index b57a449d9aa..8f54986f0e8 100755 --- a/CIME/tests/test_user_mod_support.py +++ b/CIME/tests/test_user_mod_support.py @@ -14,6 +14,7 @@ _SOURCEMODS = os.path.join("SourceMods", "src.drv") + class TestUserModSupport(unittest.TestCase): # ======================================================================== @@ -61,42 +62,59 @@ def createUserMod(self, name, include_dirs=None): with open(os.path.join(mod_dir, "user_nl_cpl"), "w") as user_nl_cpl: user_nl_cpl.write(name + "\n") with open(os.path.join(mod_dir, "shell_commands"), "w") as shell_commands: - command = "echo {} >> {}/shell_commands_result\n".format(name, self._caseroot) + command = "echo {} >> {}/shell_commands_result\n".format( + name, self._caseroot + ) shell_commands.write(command) with open(os.path.join(mod_dir_sourcemods, "myfile.F90"), "w") as f90_file: f90_file.write(name + "\n") if include_dirs: - with open(os.path.join(mod_dir, "include_user_mods"), "w") as include_user_mods: + with open( + os.path.join(mod_dir, "include_user_mods"), "w" + ) as include_user_mods: for one_include in include_dirs: - include_user_mods.write(os.path.join(self._user_mods_parent_dir, one_include) + "\n") - - def assertResults(self, expected_user_nl_cpl, - expected_shell_commands_result, - expected_sourcemod, - msg = ""): + include_user_mods.write( + os.path.join(self._user_mods_parent_dir, one_include) + "\n" + ) + + def assertResults( + self, + expected_user_nl_cpl, + expected_shell_commands_result, + expected_sourcemod, + msg="", + ): """Asserts that the contents of the files in self._caseroot match expectations If msg is provided, it is printed for some failing assertions """ path_to_user_nl_cpl = os.path.join(self._caseroot, "user_nl_cpl") - self.assertTrue(os.path.isfile(path_to_user_nl_cpl), - msg = msg + ": user_nl_cpl does not exist") + self.assertTrue( + os.path.isfile(path_to_user_nl_cpl), + msg=msg + ": user_nl_cpl does not exist", + ) with open(path_to_user_nl_cpl, "r") as user_nl_cpl: contents = user_nl_cpl.read() self.assertEqual(expected_user_nl_cpl, contents) - path_to_shell_commands_result = os.path.join(self._caseroot, "shell_commands_result") - self.assertTrue(os.path.isfile(path_to_shell_commands_result), - msg = msg + ": shell_commands_result does not exist") + path_to_shell_commands_result = os.path.join( + self._caseroot, "shell_commands_result" + ) + self.assertTrue( + os.path.isfile(path_to_shell_commands_result), + msg=msg + ": shell_commands_result does not exist", + ) with open(path_to_shell_commands_result, "r") as shell_commands_result: contents = shell_commands_result.read() self.assertEqual(expected_shell_commands_result, contents) path_to_sourcemod = os.path.join(self._caseroot_sourcemods, "myfile.F90") - self.assertTrue(os.path.isfile(path_to_sourcemod), - msg = msg + ": sourcemod file does not exist") + self.assertTrue( + os.path.isfile(path_to_sourcemod), + msg=msg + ": sourcemod file does not exist", + ) with open(path_to_sourcemod, "r") as sourcemod: contents = sourcemod.read() self.assertEqual(expected_sourcemod, contents) @@ -107,32 +125,46 @@ def assertResults(self, expected_user_nl_cpl, def test_basic(self): self.createUserMod("foo") - apply_user_mods(self._caseroot, - os.path.join(self._user_mods_parent_dir, "foo")) - self.assertResults(expected_user_nl_cpl = "foo\n", - expected_shell_commands_result = "foo\n", - expected_sourcemod = "foo\n", - msg = "test_basic") + apply_user_mods(self._caseroot, os.path.join(self._user_mods_parent_dir, "foo")) + self.assertResults( + expected_user_nl_cpl="foo\n", + expected_shell_commands_result="foo\n", + expected_sourcemod="foo\n", + msg="test_basic", + ) def test_keepexe(self): self.createUserMod("foo") with CIME.six.assertRaisesRegex(self, CIMEError, "cannot have any source mods"): - apply_user_mods(self._caseroot, - os.path.join(self._user_mods_parent_dir, "foo"), keepexe=True) + apply_user_mods( + self._caseroot, + os.path.join(self._user_mods_parent_dir, "foo"), + keepexe=True, + ) def test_two_applications(self): """If apply_user_mods is called twice, the second should appear after the first so that it takes precedence.""" self.createUserMod("foo1") self.createUserMod("foo2") - apply_user_mods(self._caseroot, - os.path.join(self._user_mods_parent_dir, "foo1")) - apply_user_mods(self._caseroot, - os.path.join(self._user_mods_parent_dir, "foo2")) - self.assertResults(expected_user_nl_cpl = "foo1\nfoo2\n", - expected_shell_commands_result = "foo1\nfoo2\n", - expected_sourcemod = "foo2\n", - msg = "test_two_applications") + apply_user_mods( + self._caseroot, os.path.join(self._user_mods_parent_dir, "foo1") + ) + apply_user_mods( + self._caseroot, os.path.join(self._user_mods_parent_dir, "foo2") + ) + self.assertResults( + expected_user_nl_cpl="foo1\nfoo2\n", + expected_shell_commands_result="foo1\nfoo2\n", + expected_sourcemod="foo2\n", + msg="test_two_applications", + )
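For readers following the include semantics these tests encode, here is a minimal sketch of one way to produce the documented ordering: includes are applied before the including directory, and a directory already applied is not applied twice, so the requested directory lands last and its settings win. As the NOTE in test_duplicate_includes below says, the relative order of sibling includes is not guaranteed; this helper is an illustration, not CIME's implementation:

    import os

    def resolve_apply_order(user_mods_dir, applied=None):
        # Depth-first walk of include_user_mods files; each directory is
        # recorded once, and the requested directory is appended last so
        # that its settings take precedence.
        if applied is None:
            applied = []
        include_file = os.path.join(user_mods_dir, "include_user_mods")
        if os.path.isfile(include_file):
            with open(include_file) as fd:
                for line in fd:
                    included = line.strip()
                    if included:
                        resolve_apply_order(included, applied)
        if user_mods_dir not in applied:
            applied.append(user_mods_dir)
        return applied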
"derived") + ) - self.assertResults(expected_user_nl_cpl = "base\nderived\n", - expected_shell_commands_result = "base\nderived\n", - expected_sourcemod = "derived\n", - msg = "test_include") + self.assertResults( + expected_user_nl_cpl="base\nderived\n", + expected_shell_commands_result="base\nderived\n", + expected_sourcemod="derived\n", + msg="test_include", + ) def test_duplicate_includes(self): """Test multiple includes, where both include the same base mod. @@ -157,11 +192,11 @@ def test_duplicate_includes(self): self.createUserMod("base") self.createUserMod("derived1", include_dirs=["base"]) self.createUserMod("derived2", include_dirs=["base"]) - self.createUserMod("derived_combo", - include_dirs = ["derived1", "derived2"]) + self.createUserMod("derived_combo", include_dirs=["derived1", "derived2"]) - apply_user_mods(self._caseroot, - os.path.join(self._user_mods_parent_dir, "derived_combo")) + apply_user_mods( + self._caseroot, os.path.join(self._user_mods_parent_dir, "derived_combo") + ) # NOTE(wjs, 2017-04-15) The ordering of derived1 vs. derived2 is not # critical here: If this aspect of the behavior changes, the @@ -172,10 +207,13 @@ def test_duplicate_includes(self): derived1 derived_combo """ - self.assertResults(expected_user_nl_cpl = expected_contents, - expected_shell_commands_result = expected_contents, - expected_sourcemod = "derived_combo\n", - msg = "test_duplicate_includes") + self.assertResults( + expected_user_nl_cpl=expected_contents, + expected_shell_commands_result=expected_contents, + expected_sourcemod="derived_combo\n", + msg="test_duplicate_includes", + ) + -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/CIME/tests/test_utils.py b/CIME/tests/test_utils.py index bab651607fc..00e59d5194c 100755 --- a/CIME/tests/test_utils.py +++ b/CIME/tests/test_utils.py @@ -7,28 +7,34 @@ import unittest from unittest import mock -from CIME.utils import indent_string, run_and_log_case_status, \ - import_from_file, \ - _line_defines_python_function, file_contains_python_function - +from CIME.utils import ( + indent_string, + run_and_log_case_status, + import_from_file, + _line_defines_python_function, + file_contains_python_function, +) + +<<<<<<< HEAD:CIME/tests/test_utils.py class TestIndentStr(unittest.TestCase): """Test the indent_string function. 
+======= +from CIME.tests import utils - """ +>>>>>>> master:CIME/tests/test_unit_utils.py - def test_indent_string_singleline(self): - """Test the indent_string function with a single-line string +class TestIndentStr(unittest.TestCase): + """Test the indent_string function.""" - """ - mystr = 'foo' + def test_indent_string_singleline(self): + """Test the indent_string function with a single-line string""" + mystr = "foo" result = indent_string(mystr, 4) - expected = ' foo' + expected = " foo" self.assertEqual(expected, result) def test_indent_string_multiline(self): - """Test the indent_string function with a multi-line string - - """ + """Test the indent_string function with a multi-line string""" mystr = """hello hi goodbye @@ -40,6 +46,7 @@ def test_indent_string_multiline(self): """ self.assertEqual(expected, result) + class TestLineDefinesPythonFunction(unittest.TestCase): """Tests of _line_defines_python_function""" @@ -116,6 +123,7 @@ def test_import_foobar(self): line = "from bar.baz import foobar" self.assertFalse(_line_defines_python_function(line, "foo")) + class TestFileContainsPythonFunction(unittest.TestCase): """Tests of file_contains_python_function""" @@ -129,7 +137,7 @@ def create_test_file(self, contents): """Creates a test file with the given contents, and returns the path to that file""" filepath = os.path.join(self._workdir, "testfile") - with open(filepath, 'w') as fd: + with open(filepath, "w") as fd: fd.write(contents) return filepath @@ -154,6 +162,7 @@ def baz(): filepath = self.create_test_file(contents) self.assertFalse(file_contains_python_function(filepath, "foo")) + class MockTime(object): def __init__(self): self._old = None @@ -165,6 +174,7 @@ def __enter__(self): def __exit__(self, *args, **kwargs): setattr(sys.modules["time"], "strftime", self._old) + def match_all_lines(data, lines): for line in data: for i, x in enumerate(lines): @@ -178,11 +188,12 @@ def match_all_lines(data, lines): return False, lines + class TestUtils(unittest.TestCase): def setUp(self): self.base_func = lambda *args: None - #pylint: disable=unused-argument + # pylint: disable=unused-argument def _error_func(*args): raise Exception("Something went wrong") @@ -206,10 +217,12 @@ def assertMatchAllLines(self, tempdir, test_lines): def test_import_from_file(self): with tempfile.NamedTemporaryFile() as fd: - fd.writelines([ - b"def test():\n", - b" return 'value'", - ]) + fd.writelines( + [ + b"def test():\n", + b" return 'value'", + ] + ) fd.flush() @@ -223,9 +236,14 @@ def test_run_and_log_case_status(self): "00:00:00 default success \n", ] +<<<<<<< HEAD:CIME/tests/test_utils.py with tempfile.TemporaryDirectory() as tempdir, MockTime(): run_and_log_case_status(self.base_func, "default", caseroot=tempdir) +======= + with utils.TemporaryDirectory() as tempdir, MockTime(): + run_and_log_case_status(self.base_func, "default", caseroot=tempdir) +>>>>>>> master:CIME/tests/test_unit_utils.py self.assertMatchAllLines(tempdir, test_lines) @@ -235,9 +253,16 @@ def test_run_and_log_case_status_case_submit_on_batch(self): "00:00:00 case.submit success \n", ] +<<<<<<< HEAD:CIME/tests/test_utils.py with tempfile.TemporaryDirectory() as tempdir, MockTime(): run_and_log_case_status(self.base_func, "case.submit", caseroot=tempdir, is_batch=True) +======= + with utils.TemporaryDirectory() as tempdir, MockTime(): + run_and_log_case_status( + self.base_func, "case.submit", caseroot=tempdir, is_batch=True + ) +>>>>>>> master:CIME/tests/test_unit_utils.py self.assertMatchAllLines(tempdir, test_lines) @@ 
-247,9 +272,16 @@ def test_run_and_log_case_status_case_submit_no_batch(self): "00:00:00 case.submit success \n", ] +<<<<<<< HEAD:CIME/tests/test_utils.py with tempfile.TemporaryDirectory() as tempdir, MockTime(): run_and_log_case_status(self.base_func, "case.submit", caseroot=tempdir, is_batch=False) +======= + with utils.TemporaryDirectory() as tempdir, MockTime(): + run_and_log_case_status( + self.base_func, "case.submit", caseroot=tempdir, is_batch=False + ) +>>>>>>> master:CIME/tests/test_unit_utils.py self.assertMatchAllLines(tempdir, test_lines) @@ -262,8 +294,9 @@ def test_run_and_log_case_status_case_submit_error_on_batch(self): with tempfile.TemporaryDirectory() as tempdir, MockTime(): with self.assertRaises(Exception): - run_and_log_case_status(self.error_func, "case.submit", - caseroot=tempdir, is_batch=True) + run_and_log_case_status( + self.error_func, "case.submit", caseroot=tempdir, is_batch=True + ) self.assertMatchAllLines(tempdir, test_lines) @@ -279,11 +312,22 @@ def test_run_and_log_case_status_custom_msg(self): def normal_func(): return "data" +<<<<<<< HEAD:CIME/tests/test_utils.py with tempfile.TemporaryDirectory() as tempdir, MockTime(): run_and_log_case_status(normal_func, "default", custom_starting_msg_functor=starting_func, custom_success_msg_functor=success_func, caseroot=tempdir) +======= + with utils.TemporaryDirectory() as tempdir, MockTime(): + run_and_log_case_status( + normal_func, + "default", + custom_starting_msg_functor=starting_func, + custom_success_msg_functor=success_func, + caseroot=tempdir, + ) +>>>>>>> master:CIME/tests/test_unit_utils.py self.assertMatchAllLines(tempdir, test_lines) @@ -302,12 +346,25 @@ def test_run_and_log_case_status_custom_msg_error_on_batch(self): def error_func(): raise Exception("Error") +<<<<<<< HEAD:CIME/tests/test_utils.py with tempfile.TemporaryDirectory() as tempdir, MockTime(), \ self.assertRaises(Exception): run_and_log_case_status(error_func, "default", custom_starting_msg_functor=starting_func, custom_success_msg_functor=success_func, caseroot=tempdir) +======= + with utils.TemporaryDirectory() as tempdir, MockTime(), self.assertRaises( + Exception + ): + run_and_log_case_status( + error_func, + "default", + custom_starting_msg_functor=starting_func, + custom_success_msg_functor=success_func, + caseroot=tempdir, + ) +>>>>>>> master:CIME/tests/test_unit_utils.py self.assertMatchAllLines(tempdir, test_lines) @@ -323,10 +380,10 @@ def test_run_and_log_case_status_error(self): with tempfile.TemporaryDirectory() as tempdir, MockTime(): with self.assertRaises(Exception): - run_and_log_case_status(self.error_func, "default", - caseroot=tempdir) + run_and_log_case_status(self.error_func, "default", caseroot=tempdir) self.assertMatchAllLines(tempdir, test_lines) -if __name__ == '__main__': + +if __name__ == "__main__": unittest.main() diff --git a/CIME/tests/test_xml_env_batch.py b/CIME/tests/test_xml_env_batch.py index 3c992b8fbc9..36459a66397 100755 --- a/CIME/tests/test_xml_env_batch.py +++ b/CIME/tests/test_xml_env_batch.py @@ -7,8 +7,8 @@ # pylint: disable=unused-argument -class TestXMLEnvBatch(unittest.TestCase): +class TestXMLEnvBatch(unittest.TestCase): @mock.patch("CIME.XML.env_batch.EnvBatch.get") def test_get_queue_specs(self, get): node = mock.MagicMock() @@ -16,12 +16,28 @@ def test_get_queue_specs(self, get): batch = EnvBatch() get.side_effect = [ - "1", "1", None, None, "case.run", "08:00:00", "05:00:00", \ - "12:00:00", "false", + "1", + "1", + None, + None, + "case.run", + "08:00:00", + "05:00:00", + 
"12:00:00", + "false", ] - nodemin, nodemax, jobname, walltimedef, walltimemin, walltimemax, \ - jobmin, jobmax, strict = batch.get_queue_specs(node) + ( + nodemin, + nodemax, + jobname, + walltimedef, + walltimemin, + walltimemax, + jobmin, + jobmax, + strict, + ) = batch.get_queue_specs(node) self.assertTrue(nodemin == 1) self.assertTrue(nodemax == 1) @@ -35,20 +51,35 @@ def test_get_queue_specs(self, get): @mock.patch("CIME.XML.env_batch.EnvBatch.text", return_value="default") # nodemin, nodemax, jobname, walltimemin, walltimemax, jobmin, jobmax, strict - @mock.patch("CIME.XML.env_batch.EnvBatch.get_queue_specs", return_value=[ - 1, 1, "case.run", "10:00:00", "08:00:00", "12:00:00", 1, 1, False, - ]) + @mock.patch( + "CIME.XML.env_batch.EnvBatch.get_queue_specs", + return_value=[ + 1, + 1, + "case.run", + "10:00:00", + "08:00:00", + "12:00:00", + 1, + 1, + False, + ], + ) @mock.patch("CIME.XML.env_batch.EnvBatch.select_best_queue") @mock.patch("CIME.XML.env_batch.EnvBatch.get_default_queue") - def test_set_job_defaults_honor_walltimemax(self, get_default_queue, select_best_queue, - get_queue_specs, text): + def test_set_job_defaults_honor_walltimemax( + self, get_default_queue, select_best_queue, get_queue_specs, text + ): case = mock.MagicMock() batch_jobs = [ - ("case.run", { - "template": "template.case.run", - "prereq": "$BUILD_COMPLETE and not $TEST" - }) + ( + "case.run", + { + "template": "template.case.run", + "prereq": "$BUILD_COMPLETE and not $TEST", + }, + ) ] def get_value(*args, **kwargs): @@ -67,28 +98,44 @@ def get_value(*args, **kwargs): env_workflow = case.get_env.return_value - env_workflow.set_value.assert_any_call("JOB_QUEUE", "default", - subgroup="case.run", - ignore_type=False) - env_workflow.set_value.assert_any_call("JOB_WALLCLOCK_TIME", "20:00:00", - subgroup="case.run") + env_workflow.set_value.assert_any_call( + "JOB_QUEUE", "default", subgroup="case.run", ignore_type=False + ) + env_workflow.set_value.assert_any_call( + "JOB_WALLCLOCK_TIME", "20:00:00", subgroup="case.run" + ) @mock.patch("CIME.XML.env_batch.EnvBatch.text", return_value="default") # nodemin, nodemax, jobname, walltimemin, walltimemax, jobmin, jobmax, strict - @mock.patch("CIME.XML.env_batch.EnvBatch.get_queue_specs", return_value=[ - 1, 1, "case.run", "10:00:00", "08:00:00", "12:00:00", 1, 1, False, - ]) + @mock.patch( + "CIME.XML.env_batch.EnvBatch.get_queue_specs", + return_value=[ + 1, + 1, + "case.run", + "10:00:00", + "08:00:00", + "12:00:00", + 1, + 1, + False, + ], + ) @mock.patch("CIME.XML.env_batch.EnvBatch.select_best_queue") @mock.patch("CIME.XML.env_batch.EnvBatch.get_default_queue") - def test_set_job_defaults_honor_walltimemin(self, get_default_queue, select_best_queue, - get_queue_specs, text): + def test_set_job_defaults_honor_walltimemin( + self, get_default_queue, select_best_queue, get_queue_specs, text + ): case = mock.MagicMock() batch_jobs = [ - ("case.run", { - "template": "template.case.run", - "prereq": "$BUILD_COMPLETE and not $TEST" - }) + ( + "case.run", + { + "template": "template.case.run", + "prereq": "$BUILD_COMPLETE and not $TEST", + }, + ) ] def get_value(*args, **kwargs): @@ -107,29 +154,44 @@ def get_value(*args, **kwargs): env_workflow = case.get_env.return_value - env_workflow.set_value.assert_any_call("JOB_QUEUE", "default", - subgroup="case.run", - ignore_type=False) - env_workflow.set_value.assert_any_call("JOB_WALLCLOCK_TIME", "05:00:00", - subgroup="case.run") + env_workflow.set_value.assert_any_call( + "JOB_QUEUE", "default", subgroup="case.run", 
ignore_type=False + ) + env_workflow.set_value.assert_any_call( + "JOB_WALLCLOCK_TIME", "05:00:00", subgroup="case.run" + ) @mock.patch("CIME.XML.env_batch.EnvBatch.text", return_value="default") # nodemin, nodemax, jobname, walltimemax, jobmin, jobmax, strict - @mock.patch("CIME.XML.env_batch.EnvBatch.get_queue_specs", return_value=[ - 1, 1, "case.run", "10:00:00", "08:00:00", "12:00:00", 1, 1, False, - ]) + @mock.patch( + "CIME.XML.env_batch.EnvBatch.get_queue_specs", + return_value=[ + 1, + 1, + "case.run", + "10:00:00", + "08:00:00", + "12:00:00", + 1, + 1, + False, + ], + ) @mock.patch("CIME.XML.env_batch.EnvBatch.select_best_queue") @mock.patch("CIME.XML.env_batch.EnvBatch.get_default_queue") - def test_set_job_defaults_user_walltime(self, get_default_queue, - select_best_queue, - get_queue_specs, text): + def test_set_job_defaults_user_walltime( + self, get_default_queue, select_best_queue, get_queue_specs, text + ): case = mock.MagicMock() batch_jobs = [ - ("case.run", { - "template": "template.case.run", - "prereq": "$BUILD_COMPLETE and not $TEST" - }) + ( + "case.run", + { + "template": "template.case.run", + "prereq": "$BUILD_COMPLETE and not $TEST", + }, + ) ] def get_value(*args, **kwargs): @@ -148,28 +210,44 @@ def get_value(*args, **kwargs): env_workflow = case.get_env.return_value - env_workflow.set_value.assert_any_call("JOB_QUEUE", "default", - subgroup="case.run", - ignore_type=False) - env_workflow.set_value.assert_any_call("JOB_WALLCLOCK_TIME", "10:00:00", - subgroup="case.run") + env_workflow.set_value.assert_any_call( + "JOB_QUEUE", "default", subgroup="case.run", ignore_type=False + ) + env_workflow.set_value.assert_any_call( + "JOB_WALLCLOCK_TIME", "10:00:00", subgroup="case.run" + ) @mock.patch("CIME.XML.env_batch.EnvBatch.text", return_value="default") # nodemin, nodemax, jobname, walltimemax, jobmin, jobmax, strict - @mock.patch("CIME.XML.env_batch.EnvBatch.get_queue_specs", return_value=[ - 1, 1, "case.run", "10:00:00", "05:00:00", None, 1, 1, False, - ]) + @mock.patch( + "CIME.XML.env_batch.EnvBatch.get_queue_specs", + return_value=[ + 1, + 1, + "case.run", + "10:00:00", + "05:00:00", + None, + 1, + 1, + False, + ], + ) @mock.patch("CIME.XML.env_batch.EnvBatch.select_best_queue") @mock.patch("CIME.XML.env_batch.EnvBatch.get_default_queue") - def test_set_job_defaults_walltimemax_none(self, get_default_queue, select_best_queue, - get_queue_specs, text): + def test_set_job_defaults_walltimemax_none( + self, get_default_queue, select_best_queue, get_queue_specs, text + ): case = mock.MagicMock() batch_jobs = [ - ("case.run", { - "template": "template.case.run", - "prereq": "$BUILD_COMPLETE and not $TEST" - }) + ( + "case.run", + { + "template": "template.case.run", + "prereq": "$BUILD_COMPLETE and not $TEST", + }, + ) ] def get_value(*args, **kwargs): @@ -188,28 +266,44 @@ def get_value(*args, **kwargs): env_workflow = case.get_env.return_value - env_workflow.set_value.assert_any_call("JOB_QUEUE", "default", - subgroup="case.run", - ignore_type=False) - env_workflow.set_value.assert_any_call("JOB_WALLCLOCK_TIME", "08:00:00", - subgroup="case.run") + env_workflow.set_value.assert_any_call( + "JOB_QUEUE", "default", subgroup="case.run", ignore_type=False + ) + env_workflow.set_value.assert_any_call( + "JOB_WALLCLOCK_TIME", "08:00:00", subgroup="case.run" + ) @mock.patch("CIME.XML.env_batch.EnvBatch.text", return_value="default") # nodemin, nodemax, jobname, walltimemax, jobmin, jobmax, strict - @mock.patch("CIME.XML.env_batch.EnvBatch.get_queue_specs", 
return_value=[ - 1, 1, "case.run", "10:00:00", None, "12:00:00", 1, 1, False, - ]) + @mock.patch( + "CIME.XML.env_batch.EnvBatch.get_queue_specs", + return_value=[ + 1, + 1, + "case.run", + "10:00:00", + None, + "12:00:00", + 1, + 1, + False, + ], + ) @mock.patch("CIME.XML.env_batch.EnvBatch.select_best_queue") @mock.patch("CIME.XML.env_batch.EnvBatch.get_default_queue") - def test_set_job_defaults_walltimemin_none(self, get_default_queue, select_best_queue, - get_queue_specs, text): + def test_set_job_defaults_walltimemin_none( + self, get_default_queue, select_best_queue, get_queue_specs, text + ): case = mock.MagicMock() batch_jobs = [ - ("case.run", { - "template": "template.case.run", - "prereq": "$BUILD_COMPLETE and not $TEST" - }) + ( + "case.run", + { + "template": "template.case.run", + "prereq": "$BUILD_COMPLETE and not $TEST", + }, + ) ] def get_value(*args, **kwargs): @@ -228,28 +322,44 @@ def get_value(*args, **kwargs): env_workflow = case.get_env.return_value - env_workflow.set_value.assert_any_call("JOB_QUEUE", "default", - subgroup="case.run", - ignore_type=False) - env_workflow.set_value.assert_any_call("JOB_WALLCLOCK_TIME", "08:00:00", - subgroup="case.run") + env_workflow.set_value.assert_any_call( + "JOB_QUEUE", "default", subgroup="case.run", ignore_type=False + ) + env_workflow.set_value.assert_any_call( + "JOB_WALLCLOCK_TIME", "08:00:00", subgroup="case.run" + ) @mock.patch("CIME.XML.env_batch.EnvBatch.text", return_value="default") # nodemin, nodemax, jobname, walltimemax, jobmin, jobmax, strict - @mock.patch("CIME.XML.env_batch.EnvBatch.get_queue_specs", return_value=[ - 1, 1, "case.run", "10:00:00", "08:00:00", "12:00:00", 1, 1, False, - ]) + @mock.patch( + "CIME.XML.env_batch.EnvBatch.get_queue_specs", + return_value=[ + 1, + 1, + "case.run", + "10:00:00", + "08:00:00", + "12:00:00", + 1, + 1, + False, + ], + ) @mock.patch("CIME.XML.env_batch.EnvBatch.select_best_queue") @mock.patch("CIME.XML.env_batch.EnvBatch.get_default_queue") - def test_set_job_defaults_walltimedef(self, get_default_queue, select_best_queue, - get_queue_specs, text): + def test_set_job_defaults_walltimedef( + self, get_default_queue, select_best_queue, get_queue_specs, text + ): case = mock.MagicMock() batch_jobs = [ - ("case.run", { - "template": "template.case.run", - "prereq": "$BUILD_COMPLETE and not $TEST" - }) + ( + "case.run", + { + "template": "template.case.run", + "prereq": "$BUILD_COMPLETE and not $TEST", + }, + ) ] def get_value(*args, **kwargs): @@ -268,28 +378,44 @@ def get_value(*args, **kwargs): env_workflow = case.get_env.return_value - env_workflow.set_value.assert_any_call("JOB_QUEUE", "default", - subgroup="case.run", - ignore_type=False) - env_workflow.set_value.assert_any_call("JOB_WALLCLOCK_TIME", "10:00:00", - subgroup="case.run") + env_workflow.set_value.assert_any_call( + "JOB_QUEUE", "default", subgroup="case.run", ignore_type=False + ) + env_workflow.set_value.assert_any_call( + "JOB_WALLCLOCK_TIME", "10:00:00", subgroup="case.run" + ) @mock.patch("CIME.XML.env_batch.EnvBatch.text", return_value="default") # nodemin, nodemax, jobname, walltimemax, jobmin, jobmax, strict - @mock.patch("CIME.XML.env_batch.EnvBatch.get_queue_specs", return_value=[ - 1, 1, "case.run", None, "08:00:00", "12:00:00", 1, 1, False, - ]) + @mock.patch( + "CIME.XML.env_batch.EnvBatch.get_queue_specs", + return_value=[ + 1, + 1, + "case.run", + None, + "08:00:00", + "12:00:00", + 1, + 1, + False, + ], + ) @mock.patch("CIME.XML.env_batch.EnvBatch.select_best_queue") 
@mock.patch("CIME.XML.env_batch.EnvBatch.get_default_queue") - def test_set_job_defaults(self, get_default_queue, select_best_queue, - get_queue_specs, text): + def test_set_job_defaults( + self, get_default_queue, select_best_queue, get_queue_specs, text + ): case = mock.MagicMock() batch_jobs = [ - ("case.run", { - "template": "template.case.run", - "prereq": "$BUILD_COMPLETE and not $TEST" - }) + ( + "case.run", + { + "template": "template.case.run", + "prereq": "$BUILD_COMPLETE and not $TEST", + }, + ) ] def get_value(*args, **kwargs): @@ -308,11 +434,13 @@ def get_value(*args, **kwargs): env_workflow = case.get_env.return_value - env_workflow.set_value.assert_any_call("JOB_QUEUE", "default", - subgroup="case.run", - ignore_type=False) - env_workflow.set_value.assert_any_call("JOB_WALLCLOCK_TIME", "12:00:00", - subgroup="case.run") + env_workflow.set_value.assert_any_call( + "JOB_QUEUE", "default", subgroup="case.run", ignore_type=False + ) + env_workflow.set_value.assert_any_call( + "JOB_WALLCLOCK_TIME", "12:00:00", subgroup="case.run" + ) + -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/CIME/tests/test_xml_namelist_definition.py b/CIME/tests/test_xml_namelist_definition.py index e3f5b70a083..96cc9f6a527 100644 --- a/CIME/tests/test_xml_namelist_definition.py +++ b/CIME/tests/test_xml_namelist_definition.py @@ -5,8 +5,8 @@ # pylint: disable=protected-access -class TestXMLNamelistDefinition(unittest.TestCase): +class TestXMLNamelistDefinition(unittest.TestCase): def test_set_nodes(self): test_data = """ @@ -37,5 +37,6 @@ def test_set_nodes(self): assert nmldef._valid_values == {"test1": None, "test2": None} assert nmldef._group_names == {"test1": None, "test2": None} + if __name__ == "__main__": unittest.main() diff --git a/CIME/user_mod_support.py b/CIME/user_mod_support.py index 667526d2ec9..2c182a19dbd 100644 --- a/CIME/user_mod_support.py +++ b/CIME/user_mod_support.py @@ -8,8 +8,9 @@ logger = logging.getLogger(__name__) + def apply_user_mods(caseroot, user_mods_path, keepexe=None): - ''' + """ Recursivlely apply user_mods to caseroot - this includes updating user_nl_xxx, updating SourceMods and creating case shell_commands and xmlchange_cmds files @@ -20,9 +21,11 @@ def apply_user_mods(caseroot, user_mods_path, keepexe=None): keepexe is an optional argument that is needed for cases where apply_user_mods is called from create_clone - ''' - case_shell_command_files = [os.path.join(caseroot,"shell_commands"), - os.path.join(caseroot,"xmlchange_cmnds")] + """ + case_shell_command_files = [ + os.path.join(caseroot, "shell_commands"), + os.path.join(caseroot, "xmlchange_cmnds"), + ] for shell_command_file in case_shell_command_files: if os.path.isfile(shell_command_file): os.remove(shell_command_file) @@ -38,7 +41,7 @@ def apply_user_mods(caseroot, user_mods_path, keepexe=None): logger.debug("include_dirs are {}".format(include_dirs)) for include_dir in include_dirs: # write user_nl_xxx file in caseroot - for user_nl in glob.iglob(os.path.join(include_dir,"user_nl_*")): + for user_nl in glob.iglob(os.path.join(include_dir, "user_nl_*")): with open(os.path.join(include_dir, user_nl), "r") as fd: newcontents = fd.read() if len(newcontents) == 0: @@ -51,40 +54,62 @@ def apply_user_mods(caseroot, user_mods_path, keepexe=None): fd.write(newcontents) # update SourceMods in caseroot - for root, _, files in os.walk(include_dir,followlinks=True,topdown=False): + for root, _, files in os.walk(include_dir, followlinks=True, topdown=False): if "src" in 
os.path.basename(root): if keepexe is not None: - expect(False, - "cannot have any source mods in {} if keepexe is an option".format(user_mods_path)) + expect( + False, + "cannot have any source mods in {} if keepexe is an option".format( + user_mods_path + ), + ) for sfile in files: - source_mods = os.path.join(root,sfile) + source_mods = os.path.join(root, sfile) case_source_mods = source_mods.replace(include_dir, caseroot) # We overwrite any existing SourceMods file so that later # include_dirs take precedence over earlier ones if os.path.isfile(case_source_mods): - logger.warning("WARNING: Overwriting existing SourceMods in {}".format(case_source_mods)) + logger.warning( + "WARNING: Overwriting existing SourceMods in {}".format( + case_source_mods + ) + ) else: - logger.info("Adding SourceMod to case {}".format(case_source_mods)) + logger.info( + "Adding SourceMod to case {}".format(case_source_mods) + ) try: safe_copy(source_mods, case_source_mods) except Exception: - expect(False, "Could not write file {} in caseroot {}".format(case_source_mods,caseroot)) + expect( + False, + "Could not write file {} in caseroot {}".format( + case_source_mods, caseroot + ), + ) # create xmlchange_cmnds and shell_commands in caseroot - shell_command_files = glob.glob(os.path.join(include_dir,"shell_commands")) +\ - glob.glob(os.path.join(include_dir,"xmlchange_cmnds")) + shell_command_files = glob.glob( + os.path.join(include_dir, "shell_commands") + ) + glob.glob(os.path.join(include_dir, "xmlchange_cmnds")) for shell_commands_file in shell_command_files: case_shell_commands = shell_commands_file.replace(include_dir, caseroot) # add commands from both shell_commands and xmlchange_cmnds to # the same file (caseroot/shell_commands) - case_shell_commands = case_shell_commands.replace("xmlchange_cmnds","shell_commands") + case_shell_commands = case_shell_commands.replace( + "xmlchange_cmnds", "shell_commands" + ) # Note that use of xmlchange_cmnds has been deprecated and will soon # be removed altogether, so new tests should rely on shell_commands if shell_commands_file.endswith("xmlchange_cmnds"): - logger.warning("xmlchange_cmnds is deprecated and will be removed " +\ - "in a future release; please rename {} shell_commands".format(shell_commands_file)) - with open(shell_commands_file,"r") as fd: - new_shell_commands = fd.read().replace("xmlchange","xmlchange --force") + logger.warning( + "xmlchange_cmnds is deprecated and will be removed " + + "in a future release; please rename {} shell_commands".format( + shell_commands_file + ) + ) + with open(shell_commands_file, "r") as fd: + new_shell_commands = fd.read().replace("xmlchange", "xmlchange --force") # By appending the new commands to the end, settings from later # include_dirs take precedence over earlier ones with open(case_shell_commands, "a") as fd: @@ -93,23 +118,26 @@ def apply_user_mods(caseroot, user_mods_path, keepexe=None): for shell_command_file in case_shell_command_files: if os.path.isfile(shell_command_file): os.chmod(shell_command_file, 0o777) - run_cmd_no_fail(shell_command_file,verbose=True) + run_cmd_no_fail(shell_command_file, verbose=True) def build_include_dirs_list(user_mods_path, include_dirs=None): - ''' + """ If user_mods_path has a file "include_user_mods" read that file and add directories to the include_dirs, recursively check each of those directories for further directories. 
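# --- Editor's sketch (illustrative, not part of the patch) ---
# Example of the include mechanism this docstring describes, using a
# hypothetical layout:
#
#     /path/to/my_mods/
#         user_nl_cam          # merged into the case's user_nl_cam
#         shell_commands       # xmlchange calls, rewritten with --force
#         include_user_mods    # one path per line, e.g. /path/to/base_mods
#
# build_include_dirs_list("/path/to/my_mods") returns both directories;
# apply_user_mods then walks them so that, per the comments in the code
# above, settings from later directories take precedence.
# --------------------------------------------------------------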
The file may also include comments deleneated with # in the first column - ''' + """ include_dirs = [] if include_dirs is None else include_dirs - if user_mods_path is None or user_mods_path == 'UNSET': + if user_mods_path is None or user_mods_path == "UNSET": return include_dirs - expect(os.path.isabs(user_mods_path), - "Expected full directory path, got '{}'".format(user_mods_path)) - expect(os.path.isdir(user_mods_path), - "Directory not found {}".format(user_mods_path)) + expect( + os.path.isabs(user_mods_path), + "Expected full directory path, got '{}'".format(user_mods_path), + ) + expect( + os.path.isdir(user_mods_path), "Directory not found {}".format(user_mods_path) + ) norm_path = os.path.normpath(user_mods_path) for dir_ in include_dirs: @@ -119,7 +147,7 @@ def build_include_dirs_list(user_mods_path, include_dirs=None): logger.info("Adding user mods directory {}".format(norm_path)) include_dirs.append(norm_path) - include_file = os.path.join(norm_path,"include_user_mods") + include_file = os.path.join(norm_path, "include_user_mods") if os.path.isfile(include_file): with open(include_file, "r") as fd: for newpath in fd: @@ -130,6 +158,10 @@ def build_include_dirs_list(user_mods_path, include_dirs=None): if os.path.isabs(newpath): build_include_dirs_list(newpath, include_dirs) else: - logger.warning("Could not resolve path '{}' in file '{}'".format(newpath, include_file)) + logger.warning( + "Could not resolve path '{}' in file '{}'".format( + newpath, include_file + ) + ) return include_dirs diff --git a/CIME/utils.py b/CIME/utils.py index 7efabd2039a..77108fb95ca 100644 --- a/CIME/utils.py +++ b/CIME/utils.py @@ -2,8 +2,7 @@ Common functions used by cime python scripts Warning: you cannot use CIME Classes in this module as it causes circular dependencies """ -import io, logging, gzip, sys, os, time, re, shutil, glob, string, random, \ - importlib, fnmatch +import io, logging, gzip, sys, os, time, re, shutil, glob, string, random, importlib, fnmatch import importlib.util import errno, signal, warnings, filecmp import stat as statlib @@ -34,21 +33,24 @@ def import_from_file(name, file_path): return module + @contextmanager def redirect_stdout(new_target): - old_target, sys.stdout = sys.stdout, new_target # replace sys.stdout + old_target, sys.stdout = sys.stdout, new_target # replace sys.stdout try: - yield new_target # run some code with the replaced stdout + yield new_target # run some code with the replaced stdout finally: - sys.stdout = old_target # restore to the previous value + sys.stdout = old_target # restore to the previous value + @contextmanager def redirect_stderr(new_target): - old_target, sys.stderr = sys.stderr, new_target # replace sys.stdout + old_target, sys.stderr = sys.stderr, new_target # replace sys.stdout try: - yield new_target # run some code with the replaced stdout + yield new_target # run some code with the replaced stdout finally: - sys.stderr = old_target # restore to the previous value + sys.stderr = old_target # restore to the previous value + @contextmanager def redirect_stdout_stderr(new_target): @@ -59,6 +61,7 @@ def redirect_stdout_stderr(new_target): finally: sys.stdout, sys.stderr = old_stdout, old_stderr + @contextmanager def redirect_logger(new_target, logger_name): ch = logging.StreamHandler(stream=new_target) @@ -76,6 +79,7 @@ def redirect_logger(new_target, logger_name): root_log.handlers = orig_root_loggers log.handlers = orig_handlers + class IndentFormatter(logging.Formatter): def __init__(self, indent, fmt=None, datefmt=None): 
logging.Formatter.__init__(self, fmt, datefmt) @@ -86,6 +90,7 @@ def format(self, record): out = logging.Formatter.format(self, record) return out + def set_logger_indent(indent): root_log = logging.getLogger() root_log.handlers = [] @@ -95,6 +100,7 @@ def set_logger_indent(indent): handler.setFormatter(formatter) root_log.addHandler(handler) + class EnvironmentContext(object): """ Context manager for environment variables @@ -126,6 +132,7 @@ def __exit__(self, *args): else: del os.environ[k] + # This should be the go-to exception for CIME use. It's a subclass # of SystemExit in order suppress tracebacks, which users generally # hate seeing. It's a subclass of Exception because we want it to be @@ -134,6 +141,7 @@ def __exit__(self, *args): class CIMEError(SystemExit, Exception): pass + def expect(condition, error_msg, exc_type=CIMEError, error_prefix="ERROR:"): """ Similar to assert except doesn't generate an ugly stacktrace. Useful for @@ -150,13 +158,16 @@ def expect(condition, error_msg, exc_type=CIMEError, error_prefix="ERROR:"): if not condition: if logger.isEnabledFor(logging.DEBUG): import pdb - pdb.set_trace() + + pdb.set_trace() # pylint: disable=forgotten-debug-statement msg = error_prefix + " " + error_msg raise exc_type(msg) + def id_generator(size=6, chars=string.ascii_lowercase + string.digits): - return ''.join(random.choice(chars) for _ in range(size)) + return "".join(random.choice(chars) for _ in range(size)) + def check_name(fullname, additional_chars=None, fullpath=False): """ @@ -180,21 +191,24 @@ def check_name(fullname, additional_chars=None, fullpath=False): False """ - chars = '+*?<>/{}[\]~`@:' # pylint: disable=anomalous-backslash-in-string + chars = "+*?<>/{}[\]~`@:" # pylint: disable=anomalous-backslash-in-string if additional_chars is not None: chars += additional_chars - if fullname.endswith('/'): + if fullname.endswith("/"): return False if fullpath: _, name = os.path.split(fullname) else: name = fullname - match = re.search(r"["+re.escape(chars)+"]", name) + match = re.search(r"[" + re.escape(chars) + "]", name) if match is not None: - logger.warning("Illegal character {} found in name {}".format(match.group(0), name)) + logger.warning( + "Illegal character {} found in name {}".format(match.group(0), name) + ) return False return True + # Should only be called from get_cime_config() def _read_cime_config_file(): """ @@ -205,42 +219,92 @@ def _read_cime_config_file(): """ allowed_sections = ("main", "create_test") - allowed_in_main = ("cime_model", "project", "charge_account", "srcroot", "mail_type", - "mail_user", "machine", "mpilib", "compiler", "input_dir", "cime_driver") - allowed_in_create_test = ("mail_type", "mail_user", "save_timing", "single_submit", - "test_root", "output_root", "baseline_root", "clean", - "machine", "mpilib", "compiler", "parallel_jobs", "proc_pool", - "walltime", "job_queue", "allow_baseline_overwrite", "wait", - "force_procs", "force_threads", "input_dir", "pesfile", "retry", - "walltime") - - cime_config_file = os.path.abspath(os.path.join(os.path.expanduser("~"), - ".cime","config")) + allowed_in_main = ( + "cime_model", + "project", + "charge_account", + "srcroot", + "mail_type", + "mail_user", + "machine", + "mpilib", + "compiler", + "input_dir", + "cime_driver", + ) + allowed_in_create_test = ( + "mail_type", + "mail_user", + "save_timing", + "single_submit", + "test_root", + "output_root", + "baseline_root", + "clean", + "machine", + "mpilib", + "compiler", + "parallel_jobs", + "proc_pool", + "walltime", + "job_queue", + 
"allow_baseline_overwrite", + "wait", + "force_procs", + "force_threads", + "input_dir", + "pesfile", + "retry", + "walltime", + ) + + cime_config_file = os.path.abspath( + os.path.join(os.path.expanduser("~"), ".cime", "config") + ) cime_config = configparser.SafeConfigParser() - if(os.path.isfile(cime_config_file)): + if os.path.isfile(cime_config_file): cime_config.read(cime_config_file) for section in cime_config.sections(): - expect(section in allowed_sections,"Unknown section {} in .cime/config\nallowed sections are {}".format(section, allowed_sections)) - if cime_config.has_section('main'): - for item,_ in cime_config.items('main'): - expect(item in allowed_in_main,"Unknown option in config section \"main\": \"{}\"\nallowed options are {}".format(item, allowed_in_main)) - if cime_config.has_section('create_test'): - for item,_ in cime_config.items('create_test'): - expect(item in allowed_in_create_test,"Unknown option in config section \"test\": \"{}\"\nallowed options are {}".format(item, allowed_in_create_test)) + expect( + section in allowed_sections, + "Unknown section {} in .cime/config\nallowed sections are {}".format( + section, allowed_sections + ), + ) + if cime_config.has_section("main"): + for item, _ in cime_config.items("main"): + expect( + item in allowed_in_main, + 'Unknown option in config section "main": "{}"\nallowed options are {}'.format( + item, allowed_in_main + ), + ) + if cime_config.has_section("create_test"): + for item, _ in cime_config.items("create_test"): + expect( + item in allowed_in_create_test, + 'Unknown option in config section "test": "{}"\nallowed options are {}'.format( + item, allowed_in_create_test + ), + ) else: logger.debug("File {} not found".format(cime_config_file)) - cime_config.add_section('main') + cime_config.add_section("main") return cime_config + _CIMECONFIG = None + + def get_cime_config(): global _CIMECONFIG - if (not _CIMECONFIG): + if not _CIMECONFIG: _CIMECONFIG = _read_cime_config_file() return _CIMECONFIG + def reset_cime_config(): """ Useful to keep unit tests from interfering with each other @@ -258,9 +322,14 @@ def get_cime_root(case=None): if case is not None: case_cimeroot = os.path.abspath(case.get_value("CIMEROOT")) cimeroot = os.path.abspath(cimeroot) - expect(cimeroot == case_cimeroot, "Inconsistent CIMEROOT variable: case -> '{}', file location -> '{}'".format(case_cimeroot, cimeroot)) - - logger.debug( "CIMEROOT is " + cimeroot) + expect( + cimeroot == case_cimeroot, + "Inconsistent CIMEROOT variable: case -> '{}', file location -> '{}'".format( + case_cimeroot, cimeroot + ), + ) + + logger.debug("CIMEROOT is " + cimeroot) return cimeroot def get_config_path(): @@ -301,25 +370,32 @@ def get_src_root(): return srcroot + def get_cime_default_driver(): driver = os.environ.get("CIME_DRIVER") if driver: logger.debug("Setting CIME_DRIVER={} from environment".format(driver)) else: cime_config = get_cime_config() - if (cime_config.has_option('main','CIME_DRIVER')): - driver = cime_config.get('main','CIME_DRIVER') + if cime_config.has_option("main", "CIME_DRIVER"): + driver = cime_config.get("main", "CIME_DRIVER") if driver: - logger.debug("Setting CIME_driver={} from ~/.cime/config".format(driver)) + logger.debug( + "Setting CIME_driver={} from ~/.cime/config".format(driver) + ) if not driver: model = get_model() if model == "ufs" or model == "cesm": driver = "nuopc" else: driver = "mct" - expect(driver in ("mct", "nuopc", "moab"),"Attempt to set invalid driver {}".format(driver)) + expect( + driver in ("mct", "nuopc", 
"moab"), + "Attempt to set invalid driver {}".format(driver), + ) return driver + def get_all_cime_models(): config_path = get_config_path() models = [] @@ -332,16 +408,23 @@ def get_all_cime_models(): return models + def set_model(model): """ Set the model to be used in this session """ cime_config = get_cime_config() cime_models = get_all_cime_models() - if not cime_config.has_section('main'): - cime_config.add_section('main') - expect(model in cime_models,"model {} not recognized. The acceptable values of CIME_MODEL currently are {}".format(model, cime_models)) - cime_config.set('main','CIME_MODEL',model) + if not cime_config.has_section("main"): + cime_config.add_section("main") + expect( + model in cime_models, + "model {} not recognized. The acceptable values of CIME_MODEL currently are {}".format( + model, cime_models + ), + ) + cime_config.set("main", "CIME_MODEL", model) + def get_model(): """ @@ -368,10 +451,15 @@ def get_model(): if model in cime_models: logger.debug("Setting CIME_MODEL={} from environment".format(model)) else: - expect(model is None,"model {} not recognized. The acceptable values of CIME_MODEL currently are {}".format(model, cime_models)) + expect( + model is None, + "model {} not recognized. The acceptable values of CIME_MODEL currently are {}".format( + model, cime_models + ), + ) cime_config = get_cime_config() - if (cime_config.has_option('main','CIME_MODEL')): - model = cime_config.get('main','CIME_MODEL') + if cime_config.has_option("main", "CIME_MODEL"): + model = cime_config.get("main", "CIME_MODEL") if model is not None: logger.debug("Setting CIME_MODEL={} from ~/.cime/config".format(model)) @@ -380,13 +468,13 @@ def get_model(): srcroot = get_src_root() if os.path.isfile(os.path.join(srcroot, "Externals.cfg")): - model = 'cesm' + model = "cesm" with open(os.path.join(srcroot, "Externals.cfg")) as fd: for line in fd: - if re.search('ufs', line): - model = 'ufs' + if re.search("ufs", line): + model = "ufs" else: - model = 'e3sm' + model = "e3sm" # This message interfers with the correct operation of xmlquery # logger.debug("Guessing CIME_MODEL={}, set environment variable if this is incorrect".format(model)) @@ -397,23 +485,31 @@ def get_model(): modelroot = os.path.join(get_cime_root(), "CIME", "config") models = os.listdir(modelroot) msg = ".cime/config or environment variable CIME_MODEL must be set to one of: " - msg += ", ".join([model for model in models - if os.path.isdir(os.path.join(modelroot,model)) - and model != "xml_schemas"]) + msg += ", ".join( + [ + model + for model in models + if os.path.isdir(os.path.join(modelroot, model)) and model != "xml_schemas" + ] + ) expect(False, msg) + def _get_path(filearg, from_dir): if not filearg.startswith("/") and from_dir is not None: filearg = os.path.join(from_dir, filearg) return filearg + def _convert_to_fd(filearg, from_dir, mode="a"): filearg = _get_path(filearg, from_dir) return open(filearg, mode) -_hack=object() + +_hack = object() + def _line_defines_python_function(line, funcname): """Returns True if the given line defines the function 'funcname' as a top-level definition @@ -422,27 +518,67 @@ def _line_defines_python_function(line, funcname): be at the start of the line, not indented) """ - if (re.search(r"^def\s+{}\s*\(".format(funcname), line) or - re.search(r"^from\s.+\simport.*\s{}(?:,|\s|$)".format(funcname), line)): + if re.search(r"^def\s+{}\s*\(".format(funcname), line) or re.search( + r"^from\s.+\simport.*\s{}(?:,|\s|$)".format(funcname), line + ): return True return False + def 
file_contains_python_function(filepath, funcname): """Checks whether the given file contains a top-level definition of the function 'funcname' Returns a boolean value (True if the file contains this function definition, False otherwise) """ has_function = False - with open(filepath, 'r') as fd: + with open(filepath, "r") as fd: for line in fd.readlines(): - if (_line_defines_python_function(line, funcname)): + if _line_defines_python_function(line, funcname): has_function = True break return has_function -def run_sub_or_cmd(cmd, cmdargs, subname, subargs, logfile=None, case=None, - from_dir=None, timeout=None): + +def import_and_run_sub_or_cmd( + cmd, + cmdargs, + subname, + subargs, + config_dir, + compname, + logfile=None, + case=None, + from_dir=None, + timeout=None, +): + sys_path_old = sys.path + sys.path.insert(1, config_dir) + try: + mod = importlib.import_module(f"{compname}_cime_py") + getattr(mod, subname)(*subargs) + except (ModuleNotFoundError, AttributeError) as _: + # * ModuleNotFoundError if importlib can not find module, + # * AttributeError if importlib finds the module but + # {subname} is not defined in the module + expect( + os.path.isfile(cmd), + f"Could not find {subname} file for component {compname}", + ) + run_sub_or_cmd(cmd, cmdargs, subname, subargs, logfile, case, from_dir, timeout) + except Exception: + if logfile: + with open(logfile, "a") as log_fd: + log_fd.write(str(sys.exc_info()[1])) + expect(False, "{} FAILED, cat {}".format(cmd, logfile)) + else: + raise + sys.path = sys_path_old + + +def run_sub_or_cmd( + cmd, cmdargs, subname, subargs, logfile=None, case=None, from_dir=None, timeout=None +): """ This code will try to import and run each cmd as a subroutine if that fails it will run it as a program in a seperate shell @@ -460,7 +596,7 @@ def run_sub_or_cmd(cmd, cmdargs, subname, subargs, logfile=None, case=None, logger.info(" Calling {}".format(cmd)) # Careful: logfile code is not thread safe! 
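# --- Editor's sketch (illustrative, not part of the patch) ---
# The new import_and_run_sub_or_cmd above prefers an importable python entry
# point ({compname}_cime_py defining {subname}) and only falls back to the
# script invocation handled by run_sub_or_cmd. Hypothetical call for a
# component "foo" whose cime_config directory ships foo_cime_py.py:
#
#     import_and_run_sub_or_cmd(
#         "/path/to/foo/cime_config/buildnml",   # script fallback
#         [caseroot],                            # args for the script
#         "buildnml",                            # subroutine to look up
#         (case, caseroot, "foo"),               # args for the subroutine
#         "/path/to/foo/cime_config",            # dir placed on sys.path
#         "foo",                                 # component name
#         case=case,
#     )
#
# If foo_cime_py.py is importable and defines buildnml, it is called
# in-process; otherwise the buildnml script must exist and is run as before.
# --------------------------------------------------------------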
if logfile: - with open(logfile,"w") as log_fd: + with open(logfile, "w") as log_fd: with redirect_logger(log_fd, subname): with redirect_stdout_stderr(log_fd): getattr(mod, subname)(*subargs) @@ -468,7 +604,7 @@ def run_sub_or_cmd(cmd, cmdargs, subname, subargs, logfile=None, case=None, getattr(mod, subname)(*subargs) except (SyntaxError, AttributeError) as _: - pass # Need to try to run as shell command + pass # Need to try to run as shell command except Exception: if logfile: @@ -480,7 +616,7 @@ def run_sub_or_cmd(cmd, cmdargs, subname, subargs, logfile=None, case=None, raise else: - return # Running as python function worked, we're done + return # Running as python function worked, we're done logger.info(" Running {} ".format(cmd)) if case is not None: @@ -496,9 +632,10 @@ def run_sub_or_cmd(cmd, cmdargs, subname, subargs, logfile=None, case=None, if logfile: fullcmd += " >& {} ".format(logfile) - stat, output, _ = run_cmd("{}".format(fullcmd), combine_output=True, - from_dir=from_dir, timeout=timeout) - if output: # Will be empty if logfile + stat, output, _ = run_cmd( + "{}".format(fullcmd), combine_output=True, from_dir=from_dir, timeout=timeout + ) + if output: # Will be empty if logfile logger.info(output) if stat != 0: @@ -511,16 +648,26 @@ def run_sub_or_cmd(cmd, cmdargs, subname, subargs, logfile=None, case=None, if case is not None: case.read_xml() -def run_cmd(cmd, input_str=None, from_dir=None, verbose=None, - arg_stdout=_hack, arg_stderr=_hack, env=None, - combine_output=False, timeout=None, executable=None): + +def run_cmd( + cmd, + input_str=None, + from_dir=None, + verbose=None, + arg_stdout=_hack, + arg_stderr=_hack, + env=None, + combine_output=False, + timeout=None, + executable=None, +): """ Wrapper around subprocess to make it much more convenient to run shell commands >>> run_cmd('ls file_i_hope_doesnt_exist')[0] != 0 True """ - import subprocess # Not safe to do globally, module not available in older pythons + import subprocess # Not safe to do globally, module not available in older pythons # Real defaults for these value should be subprocess.PIPE if arg_stdout is _hack: @@ -533,35 +680,43 @@ def run_cmd(cmd, input_str=None, from_dir=None, verbose=None, elif isinstance(arg_stderr, CIME.six.string_types): arg_stderr = _convert_to_fd(arg_stdout, from_dir) - if (verbose != False and (verbose or logger.isEnabledFor(logging.DEBUG))): - logger.info("RUN: {}\nFROM: {}".format(cmd, os.getcwd() if from_dir is None else from_dir)) + if verbose != False and (verbose or logger.isEnabledFor(logging.DEBUG)): + logger.info( + "RUN: {}\nFROM: {}".format( + cmd, os.getcwd() if from_dir is None else from_dir + ) + ) - if (input_str is not None): + if input_str is not None: stdin = subprocess.PIPE else: stdin = None if timeout: with Timeout(timeout): - proc = subprocess.Popen(cmd, - shell=True, - stdout=arg_stdout, - stderr=arg_stderr, - stdin=stdin, - cwd=from_dir, - executable=executable, - env=env) + proc = subprocess.Popen( + cmd, + shell=True, + stdout=arg_stdout, + stderr=arg_stderr, + stdin=stdin, + cwd=from_dir, + executable=executable, + env=env, + ) output, errput = proc.communicate(input_str) else: - proc = subprocess.Popen(cmd, - shell=True, - stdout=arg_stdout, - stderr=arg_stderr, - stdin=stdin, - cwd=from_dir, - executable=executable, - env=env) + proc = subprocess.Popen( + cmd, + shell=True, + stdout=arg_stdout, + stderr=arg_stderr, + stdin=stdin, + cwd=from_dir, + executable=executable, + env=env, + ) output, errput = proc.communicate(input_str) @@ -572,12 +727,12 
@@ def run_cmd(cmd, input_str=None, from_dir=None, verbose=None, if not CIME.six.PY2: if output is not None: try: - output = output.decode('utf-8', errors='ignore') + output = output.decode("utf-8", errors="ignore") except AttributeError: pass if errput is not None: try: - errput = errput.decode('utf-8', errors='ignore') + errput = errput.decode("utf-8", errors="ignore") except AttributeError: pass @@ -589,18 +744,20 @@ def run_cmd(cmd, input_str=None, from_dir=None, verbose=None, stat = proc.wait() if CIME.six.PY2: - if isinstance(arg_stdout, file): # pylint: disable=undefined-variable - arg_stdout.close() # pylint: disable=no-member - if isinstance(arg_stderr, file) and arg_stderr is not arg_stdout: # pylint: disable=undefined-variable - arg_stderr.close() # pylint: disable=no-member + if isinstance(arg_stdout, file): # pylint: disable=undefined-variable + arg_stdout.close() # pylint: disable=no-member + if ( + isinstance(arg_stderr, file) # pylint: disable=undefined-variable + and arg_stderr is not arg_stdout + ): + arg_stderr.close() # pylint: disable=no-member else: if isinstance(arg_stdout, io.IOBase): - arg_stdout.close() # pylint: disable=no-member + arg_stdout.close() # pylint: disable=no-member if isinstance(arg_stderr, io.IOBase) and arg_stderr is not arg_stdout: - arg_stderr.close() # pylint: disable=no-member - + arg_stderr.close() # pylint: disable=no-member - if (verbose != False and (verbose or logger.isEnabledFor(logging.DEBUG))): + if verbose != False and (verbose or logger.isEnabledFor(logging.DEBUG)): if stat != 0: logger.info(" stat: {:d}\n".format(stat)) if output: @@ -610,9 +767,19 @@ def run_cmd(cmd, input_str=None, from_dir=None, verbose=None, return stat, output, errput -def run_cmd_no_fail(cmd, input_str=None, from_dir=None, verbose=None, - arg_stdout=_hack, arg_stderr=_hack, env=None, - combine_output=False, timeout=None, executable=None): + +def run_cmd_no_fail( + cmd, + input_str=None, + from_dir=None, + verbose=None, + arg_stdout=_hack, + arg_stderr=_hack, + env=None, + combine_output=False, + timeout=None, + executable=None, +): """ Wrapper around subprocess to make it much more convenient to run shell commands. Expects command to work. Just returns output string. @@ -629,9 +796,18 @@ def run_cmd_no_fail(cmd, input_str=None, from_dir=None, verbose=None, >>> run_cmd_no_fail('echo THE ERROR >&2', combine_output=True) == 'THE ERROR' True """ - stat, output, errput = run_cmd(cmd, input_str, from_dir, verbose, arg_stdout, - arg_stderr, env, combine_output, - executable=executable, timeout=timeout) + stat, output, errput = run_cmd( + cmd, + input_str, + from_dir, + verbose, + arg_stdout, + arg_stderr, + env, + combine_output, + executable=executable, + timeout=timeout, + ) if stat != 0: # If command produced no errput, put output in the exception since we # have nothing else to go on. @@ -647,10 +823,16 @@ def run_cmd_no_fail(cmd, input_str=None, from_dir=None, verbose=None, else: errput = "" - expect(False, "Command: '{}' failed with error '{}' from dir '{}'".format(cmd, errput, os.getcwd() if from_dir is None else from_dir)) + expect( + False, + "Command: '{}' failed with error '{}' from dir '{}'".format( + cmd, errput, os.getcwd() if from_dir is None else from_dir + ), + ) return output + def check_minimum_python_version(major, minor): """ Check your python version. 
@@ -658,9 +840,22 @@ def check_minimum_python_version(major, minor): >>> check_minimum_python_version(sys.version_info[0], sys.version_info[1]) >>> """ - msg = "Python " + str(major) + ", minor version " + str(minor) + " is required, you have " + str(sys.version_info[0]) + "." + str(sys.version_info[1]) - expect(sys.version_info[0] > major or - (sys.version_info[0] == major and sys.version_info[1] >= minor), msg) + msg = ( + "Python " + + str(major) + + ", minor version " + + str(minor) + + " is required, you have " + + str(sys.version_info[0]) + + "." + + str(sys.version_info[1]) + ) + expect( + sys.version_info[0] > major + or (sys.version_info[0] == major and sys.version_info[1] >= minor), + msg, + ) + def normalize_case_id(case_id): """ @@ -676,13 +871,18 @@ def normalize_case_id(case_id): 'ERT.ne16_g37.B1850C5.sandiatoss3_intel.test-mod' """ sep_count = case_id.count(".") - expect(sep_count >= 3 and sep_count <= 6, - "Case '{}' needs to be in form: TESTCASE.GRID.COMPSET.PLATFORM[.TESTMOD] or TESTCASE.GRID.COMPSET.PLATFORM[.TESTMOD].GC.TESTID".format(case_id)) - if (sep_count in [5, 6]): + expect( + sep_count >= 3 and sep_count <= 6, + "Case '{}' needs to be in form: TESTCASE.GRID.COMPSET.PLATFORM[.TESTMOD] or TESTCASE.GRID.COMPSET.PLATFORM[.TESTMOD].GC.TESTID".format( + case_id + ), + ) + if sep_count in [5, 6]: return ".".join(case_id.split(".")[:-2]) else: return case_id + def parse_test_name(test_name): """ Given a CIME test name TESTCASE[_CASEOPTS].GRID.COMPSET[.MACHINE_COMPILER[.TESTMODS]], @@ -733,36 +933,53 @@ def parse_test_name(test_name): rv = [None] * 7 num_dots = test_name.count(".") - rv[0:num_dots+1] = test_name.split(".") + rv[0 : num_dots + 1] = test_name.split(".") testcase_field_underscores = rv[0].count("_") - rv.insert(1, None) # Make room for caseopts + rv.insert(1, None) # Make room for caseopts rv.pop() - if (testcase_field_underscores > 0): + if testcase_field_underscores > 0: full_str = rv[0] - rv[0] = full_str.split("_")[0] - rv[1] = full_str.split("_")[1:] - - if (num_dots >= 3): - expect(check_name( rv[3] ), "Invalid compset name {}".format(rv[3])) - - expect(rv[4].count("_") == 1, - "Expected 4th item of '{}' ('{}') to be in form machine_compiler".format(test_name, rv[4])) + rv[0] = full_str.split("_")[0] + rv[1] = full_str.split("_")[1:] + + if num_dots >= 3: + expect(check_name(rv[3]), "Invalid compset name {}".format(rv[3])) + + expect( + rv[4].count("_") == 1, + "Expected 4th item of '{}' ('{}') to be in form machine_compiler".format( + test_name, rv[4] + ), + ) rv[4:5] = rv[4].split("_") rv.pop() - if (rv[-1] is not None): + if rv[-1] is not None: # The last element of the return value - testmods - will be a list of testmods, # built by separating the TESTMODS component on strings of double hyphens - testmods = rv[-1].split('--') + testmods = rv[-1].split("--") rv[-1] = [one_testmod.replace("-", "/") for one_testmod in testmods] - expect(num_dots <= 4, - "'{}' does not look like a CIME test name, expect TESTCASE.GRID.COMPSET[.MACHINE_COMPILER[.TESTMODS]]".format(test_name)) + expect( + num_dots <= 4, + "'{}' does not look like a CIME test name, expect TESTCASE.GRID.COMPSET[.MACHINE_COMPILER[.TESTMODS]]".format( + test_name + ), + ) return rv -def get_full_test_name(partial_test, caseopts=None, grid=None, compset=None, machine=None, compiler=None, - testmods_list=None, testmods_string=None): + +def get_full_test_name( + partial_test, + caseopts=None, + grid=None, + compset=None, + machine=None, + compiler=None, + testmods_list=None, + 
testmods_string=None, +): """ Given a partial CIME test name, return in form TESTCASE.GRID.COMPSET.MACHINE_COMPILER[.TESTMODS] Use the additional args to fill out the name if needed @@ -809,55 +1026,91 @@ def get_full_test_name(partial_test, caseopts=None, grid=None, compset=None, mac >>> get_full_test_name("ERS.ne16_fe16.JGF.melvin_gnu.mods-test--mods2-test2-subdir2--mods3-test3-subdir3", machine="melvin", compiler="gnu", testmods_list=["mods/test", "mods2/test2/subdir2", "mods3/test3/subdir3"]) 'ERS.ne16_fe16.JGF.melvin_gnu.mods-test--mods2-test2-subdir2--mods3-test3-subdir3' """ - partial_testcase, partial_caseopts, partial_grid, partial_compset, partial_machine, partial_compiler, partial_testmods = parse_test_name(partial_test) + ( + partial_testcase, + partial_caseopts, + partial_grid, + partial_compset, + partial_machine, + partial_compiler, + partial_testmods, + ) = parse_test_name(partial_test) required_fields = [ (partial_grid, grid, "grid"), (partial_compset, compset, "compset"), (partial_machine, machine, "machine"), (partial_compiler, compiler, "compiler"), - ] + ] result = partial_test for partial_val, arg_val, name in required_fields: - if (partial_val is None): + if partial_val is None: # Add to result based on args - expect(arg_val is not None, - "Could not fill-out test name, partial string '{}' had no {} information and you did not provide any".format(partial_test, name)) - result = "{}{}{}".format(result, "_" if name == "compiler" else ".", arg_val) - elif (arg_val is not None and partial_val != partial_compiler): - expect(arg_val == partial_val, - "Mismatch in field {}, partial string '{}' indicated it should be '{}' but you provided '{}'".format(name, partial_test, partial_val, arg_val)) + expect( + arg_val is not None, + "Could not fill-out test name, partial string '{}' had no {} information and you did not provide any".format( + partial_test, name + ), + ) + result = "{}{}{}".format( + result, "_" if name == "compiler" else ".", arg_val + ) + elif arg_val is not None and partial_val != partial_compiler: + expect( + arg_val == partial_val, + "Mismatch in field {}, partial string '{}' indicated it should be '{}' but you provided '{}'".format( + name, partial_test, partial_val, arg_val + ), + ) if testmods_string is not None: - expect(testmods_list is None, "Cannot provide both testmods_list and testmods_string") + expect( + testmods_list is None, + "Cannot provide both testmods_list and testmods_string", + ) # Convert testmods_string to testmods_list; after this point, the code will work # the same regardless of whether testmods_string or testmods_list was provided. 
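# --- Editor's sketch (illustrative, not part of the patch) ---
# For reference while reading this hunk: parse_test_name returns a 7-element
# list [testcase, caseopts, grid, compset, machine, compiler, testmods].
# A hypothetical example consistent with the docstring above:
#
#     parse_test_name("ERS_D.f19_g16.B1850.cheyenne_intel")
#     # -> ['ERS', ['D'], 'f19_g16', 'B1850', 'cheyenne', 'intel', None]
#
# get_full_test_name performs the inverse, filling any missing fields from
# its keyword arguments.
# --------------------------------------------------------------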
- testmods_list = testmods_string.split('--') - if (partial_testmods is None): - if (testmods_list is None): + testmods_list = testmods_string.split("--") + if partial_testmods is None: + if testmods_list is None: # No testmods for this test and that's OK pass else: - testmods_hyphenated = [one_testmod.replace("/", "-") for one_testmod in testmods_list] - result += ".{}".format('--'.join(testmods_hyphenated)) - elif (testmods_list is not None): - expect(testmods_list == partial_testmods, - "Mismatch in field testmods, partial string '{}' indicated it should be '{}' but you provided '{}'".format(partial_test, partial_testmods, testmods_list)) - - if (partial_caseopts is None): + testmods_hyphenated = [ + one_testmod.replace("/", "-") for one_testmod in testmods_list + ] + result += ".{}".format("--".join(testmods_hyphenated)) + elif testmods_list is not None: + expect( + testmods_list == partial_testmods, + "Mismatch in field testmods, partial string '{}' indicated it should be '{}' but you provided '{}'".format( + partial_test, partial_testmods, testmods_list + ), + ) + + if partial_caseopts is None: if caseopts is None: # No casemods for this test and that's OK pass else: - result = result.replace(partial_testcase, "{}_{}".format(partial_testcase, "_".join(caseopts)), 1) + result = result.replace( + partial_testcase, + "{}_{}".format(partial_testcase, "_".join(caseopts)), + 1, + ) elif caseopts is not None: - expect(caseopts == partial_caseopts, - "Mismatch in field caseopts, partial string '{}' indicated it should be '{}' but you provided '{}'".format(partial_test, partial_caseopts, caseopts)) + expect( + caseopts == partial_caseopts, + "Mismatch in field caseopts, partial string '{}' indicated it should be '{}' but you provided '{}'".format( + partial_test, partial_caseopts, caseopts + ), + ) return result + def get_current_branch(repo=None): """ Return the name of the current branch for a repository @@ -869,21 +1122,22 @@ def get_current_branch(repo=None): ... 
get_current_branch() == "foo" True """ - if ("GIT_BRANCH" in os.environ): + if "GIT_BRANCH" in os.environ: # This approach works better for Jenkins jobs because the Jenkins # git plugin does not use local tracking branches, it just checks out # to a commit branch = os.environ["GIT_BRANCH"] - if (branch.startswith("origin/")): + if branch.startswith("origin/"): branch = branch.replace("origin/", "", 1) return branch else: stat, output, _ = run_cmd("git symbolic-ref HEAD", from_dir=repo) - if (stat != 0): + if stat != 0: return None else: return output.replace("refs/heads/", "") + def get_current_commit(short=False, repo=None, tag=False): """ Return the sha1 of the current HEAD commit @@ -892,9 +1146,13 @@ def get_current_commit(short=False, repo=None, tag=False): True """ if tag: - rc, output, _ = run_cmd("git describe --tags $(git log -n1 --pretty='%h')", from_dir=repo) + rc, output, _ = run_cmd( + "git describe --tags $(git log -n1 --pretty='%h')", from_dir=repo + ) else: - rc, output, _ = run_cmd("git rev-parse {} HEAD".format("--short" if short else ""), from_dir=repo) + rc, output, _ = run_cmd( + "git rev-parse {} HEAD".format("--short" if short else ""), from_dir=repo + ) return output if rc == 0 else "unknown" @@ -911,6 +1169,7 @@ def get_scripts_root(): """ return os.path.join(get_cime_root(), "scripts") + def get_model_config_root(model=None): """ Get absolute path to model config area" @@ -921,11 +1180,13 @@ def get_model_config_root(model=None): model = get_model() if model is None else model return os.path.join(get_cime_root(), get_model_config_location_within_cime(model)) + def stop_buffering_output(): """ All stdout, stderr will not be buffered after this is called. """ - os.environ['PYTHONUNBUFFERED'] = '1' + os.environ["PYTHONUNBUFFERED"] = "1" + def start_buffering_output(): """ @@ -933,7 +1194,8 @@ def start_buffering_output(): default behavior. """ sys.stdout.flush() - sys.stdout = os.fdopen(sys.stdout.fileno(), 'w') + sys.stdout = os.fdopen(sys.stdout.fileno(), "w") + def match_any(item, re_list): """ @@ -941,11 +1203,12 @@ def match_any(item, re_list): """ for regex_str in re_list: regex = re.compile(regex_str) - if (regex.match(item)): + if regex.match(item): return True return False + def get_current_submodule_status(recursive=False, repo=None): """ Return the sha1s of the current currently checked out commit for each submodule, @@ -954,10 +1217,14 @@ def get_current_submodule_status(recursive=False, repo=None): >>> get_current_submodule_status() is not None True """ - rc, output, _ = run_cmd("git submodule status {}".format("--recursive" if recursive else ""), from_dir=repo) + rc, output, _ = run_cmd( + "git submodule status {}".format("--recursive" if recursive else ""), + from_dir=repo, + ) return output if rc == 0 else "unknown" + def safe_copy(src_path, tgt_path, preserve_meta=True): """ A flexbile and safe copy routine. Will try to copy file and metadata, but this @@ -975,7 +1242,11 @@ def safe_copy(src_path, tgt_path, preserve_meta=True): permissions of the src files. 
""" - tgt_path = os.path.join(tgt_path, os.path.basename(src_path)) if os.path.isdir(tgt_path) else tgt_path + tgt_path = ( + os.path.join(tgt_path, os.path.basename(src_path)) + if os.path.isdir(tgt_path) + else tgt_path + ) # Handle pre-existing file if os.path.isfile(tgt_path): @@ -989,11 +1260,20 @@ def safe_copy(src_path, tgt_path, preserve_meta=True): os.chmod(tgt_path, st.st_mode | statlib.S_IWRITE) else: # I won't be able to copy this file - raise OSError("Cannot copy over file {}, it is readonly and you are not the owner".format(tgt_path)) + raise OSError( + "Cannot copy over file {}, it is readonly and you are not the owner".format( + tgt_path + ) + ) if owner_uid == os.getuid(): # I am the owner, copy file contents, permissions, and metadata - file_util.copy_file(src_path, tgt_path, preserve_mode=preserve_meta, preserve_times=preserve_meta) + file_util.copy_file( + src_path, + tgt_path, + preserve_mode=preserve_meta, + preserve_times=preserve_meta, + ) else: # I am not the owner, just copy file contents shutil.copyfile(src_path, tgt_path) @@ -1001,12 +1281,20 @@ def safe_copy(src_path, tgt_path, preserve_meta=True): else: # We are making a new file, copy file contents, permissions, and metadata. # This can fail if the underlying directory is not writable by current user. - file_util.copy_file(src_path, tgt_path, preserve_mode=preserve_meta, preserve_times=preserve_meta) + file_util.copy_file( + src_path, + tgt_path, + preserve_mode=preserve_meta, + preserve_times=preserve_meta, + ) # If src file was executable, then the tgt file should be too st = os.stat(tgt_path) if os.access(src_path, os.X_OK) and st.st_uid == os.getuid(): - os.chmod(tgt_path, st.st_mode | statlib.S_IXUSR | statlib.S_IXGRP | statlib.S_IXOTH) + os.chmod( + tgt_path, st.st_mode | statlib.S_IXUSR | statlib.S_IXGRP | statlib.S_IXOTH + ) + def safe_recursive_copy(src_dir, tgt_dir, file_map): """ @@ -1016,10 +1304,16 @@ def safe_recursive_copy(src_dir, tgt_dir, file_map): """ for src_file, tgt_file in file_map: full_tgt = os.path.join(tgt_dir, tgt_file) - full_src = src_file if os.path.isabs(src_file) else os.path.join(src_dir, src_file) - expect(os.path.isfile(full_src), "Source dir '{}' missing file '{}'".format(src_dir, src_file)) + full_src = ( + src_file if os.path.isabs(src_file) else os.path.join(src_dir, src_file) + ) + expect( + os.path.isfile(full_src), + "Source dir '{}' missing file '{}'".format(src_dir, src_file), + ) safe_copy(full_src, full_tgt) + def symlink_force(target, link_name): """ Makes a symlink from link_name to target. Unlike the standard @@ -1035,35 +1329,43 @@ def symlink_force(target, link_name): else: raise e -def find_proc_id(proc_name=None, - children_only=False, - of_parent=None): + +def find_proc_id(proc_name=None, children_only=False, of_parent=None): """ Children implies recursive. 
""" - expect(proc_name is not None or children_only, - "Must provide proc_name if not searching for children") - expect(not (of_parent is not None and not children_only), - "of_parent only used with children_only") + expect( + proc_name is not None or children_only, + "Must provide proc_name if not searching for children", + ) + expect( + not (of_parent is not None and not children_only), + "of_parent only used with children_only", + ) parent = of_parent if of_parent is not None else os.getpid() - pgrep_cmd = "pgrep {} {}".format(proc_name if proc_name is not None else "", - "-P {:d}".format(parent if children_only else "")) + pgrep_cmd = "pgrep {} {}".format( + proc_name if proc_name is not None else "", + "-P {:d}".format(parent if children_only else ""), + ) stat, output, errput = run_cmd(pgrep_cmd) expect(stat in [0, 1], "pgrep failed with error: '{}'".format(errput)) rv = set([int(item.strip()) for item in output.splitlines()]) - if (children_only): + if children_only: pgrep_cmd = "pgrep -P {}".format(parent) stat, output, errput = run_cmd(pgrep_cmd) expect(stat in [0, 1], "pgrep failed with error: '{}'".format(errput)) for child in output.splitlines(): - rv = rv.union(set(find_proc_id(proc_name, children_only, int(child.strip())))) + rv = rv.union( + set(find_proc_id(proc_name, children_only, int(child.strip()))) + ) return list(rv) + def get_timestamp(timestamp_format="%Y%m%d_%H%M%S", utc_time=False): """ Get a string representing the current UTC time in format: YYYYMMDD_HHMMSS @@ -1076,6 +1378,7 @@ def get_timestamp(timestamp_format="%Y%m%d_%H%M%S", utc_time=False): time_tuple = time.localtime() return time.strftime(timestamp_format, time_tuple) + def get_project(machobj=None): """ Hierarchy for choosing PROJECT: @@ -1087,31 +1390,31 @@ def get_project(machobj=None): 5 config_machines.xml (if machobj provided) """ project = os.environ.get("PROJECT") - if (project is not None): + if project is not None: logger.info("Using project from env PROJECT: " + project) return project project = os.environ.get("ACCOUNT") - if (project is not None): + if project is not None: logger.info("Using project from env ACCOUNT: " + project) return project cime_config = get_cime_config() - if (cime_config.has_option('main','PROJECT')): - project = cime_config.get('main','PROJECT') - if (project is not None): + if cime_config.has_option("main", "PROJECT"): + project = cime_config.get("main", "PROJECT") + if project is not None: logger.info("Using project from .cime/config: " + project) return project projectfile = os.path.abspath(os.path.join(os.path.expanduser("~"), ".cesm_proj")) - if (os.path.isfile(projectfile)): - with open(projectfile,'r') as myfile: + if os.path.isfile(projectfile): + with open(projectfile, "r") as myfile: for line in myfile: project = line.rstrip() if not project.startswith("#"): break - if (project is not None): + if project is not None: logger.info("Using project from .cesm_proj: " + project) - cime_config.set('main','PROJECT',project) + cime_config.set("main", "PROJECT", project) return project if machobj is not None: @@ -1123,6 +1426,7 @@ def get_project(machobj=None): logger.info("No project info available") return None + def get_charge_account(machobj=None, project=None): """ Hierarchy for choosing CHARGE_ACCOUNT: @@ -1144,26 +1448,29 @@ def get_charge_account(machobj=None, project=None): >>> del os.environ["CHARGE_ACCOUNT"] """ charge_account = os.environ.get("CHARGE_ACCOUNT") - if (charge_account is not None): + if charge_account is not None: logger.info("Using 
charge_account from env CHARGE_ACCOUNT: " + charge_account) return charge_account cime_config = get_cime_config() - if (cime_config.has_option('main','CHARGE_ACCOUNT')): - charge_account = cime_config.get('main','CHARGE_ACCOUNT') - if (charge_account is not None): + if cime_config.has_option("main", "CHARGE_ACCOUNT"): + charge_account = cime_config.get("main", "CHARGE_ACCOUNT") + if charge_account is not None: logger.info("Using charge_account from .cime/config: " + charge_account) return charge_account if machobj is not None: charge_account = machobj.get_value("CHARGE_ACCOUNT") if charge_account is not None: - logger.info("Using charge_account from config_machines.xml: " + charge_account) + logger.info( + "Using charge_account from config_machines.xml: " + charge_account + ) return charge_account logger.info("No charge_account info available, using value from PROJECT") return project + def find_files(rootdir, pattern): """ recursively find all files matching a pattern @@ -1171,20 +1478,33 @@ def find_files(rootdir, pattern): result = [] for root, _, files in os.walk(rootdir): for filename in files: - if (fnmatch.fnmatch(filename, pattern)): + if fnmatch.fnmatch(filename, pattern): result.append(os.path.join(root, filename)) return result def setup_standard_logging_options(parser): - helpfile = os.path.join(os.getcwd(),os.path.basename("{}.log".format(sys.argv[0]))) - parser.add_argument("-d", "--debug", action="store_true", - help="Print debug information (very verbose) to file {}".format(helpfile)) - parser.add_argument("-v", "--verbose", action="store_true", - help="Add additional context (time and file) to log messages") - parser.add_argument("-s", "--silent", action="store_true", - help="Print only warnings and error messages") + helpfile = os.path.join(os.getcwd(), os.path.basename("{}.log".format(sys.argv[0]))) + parser.add_argument( + "-d", + "--debug", + action="store_true", + help="Print debug information (very verbose) to file {}".format(helpfile), + ) + parser.add_argument( + "-v", + "--verbose", + action="store_true", + help="Add additional context (time and file) to log messages", + ) + parser.add_argument( + "-s", + "--silent", + action="store_true", + help="Print only warnings and error messages", + ) + class _LessThanFilter(logging.Filter): def __init__(self, exclusive_maximum, name=""): @@ -1192,22 +1512,16 @@ def __init__(self, exclusive_maximum, name=""): self.max_level = exclusive_maximum def filter(self, record): - #non-zero return means we log this message + # non-zero return means we log this message return 1 if record.levelno < self.max_level else 0 -def parse_args_and_handle_standard_logging_options(args, parser=None): - """ - Guide to logging in CIME. - logger.debug -> Verbose/detailed output, use for debugging, off by default. Goes to a .log file - logger.info -> Goes to stdout (and log if --debug). Use for normal program output - logger.warning -> Goes to stderr (and log if --debug). Use for minor problems - logger.error -> Goes to stderr (and log if --debug) - """ +def configure_logging(verbose, debug, silent): root_logger = logging.getLogger() - verbose_formatter = logging.Formatter(fmt='%(asctime)s %(name)-12s %(levelname)-8s %(message)s', - datefmt='%m-%d %H:%M') + verbose_formatter = logging.Formatter( + fmt="%(asctime)s %(name)-12s %(levelname)-8s %(message)s", datefmt="%m-%d %H:%M" + ) # Change info to go to stdout. 
This handle applies to INFO exclusively stdout_stream_handler = logging.StreamHandler(stream=sys.stdout) @@ -1218,36 +1532,50 @@ def parse_args_and_handle_standard_logging_options(args, parser=None): stderr_stream_handler = logging.StreamHandler(stream=sys.stderr) stderr_stream_handler.setLevel(logging.WARNING) - # scripts_regression_tests is the only thing that should pass a None argument in parser - if parser is not None: - if "--help" not in args[1:]: - _check_for_invalid_args(args[1:]) - args = parser.parse_args(args[1:]) - # --verbose adds to the message format but does not impact the log level - if args.verbose: + if verbose: stdout_stream_handler.setFormatter(verbose_formatter) stderr_stream_handler.setFormatter(verbose_formatter) root_logger.addHandler(stdout_stream_handler) root_logger.addHandler(stderr_stream_handler) - if args.debug: + if debug: # Set up log file to catch ALL logging records log_file = "{}.log".format(os.path.basename(sys.argv[0])) - debug_log_handler = logging.FileHandler(log_file, mode='w') + debug_log_handler = logging.FileHandler(log_file, mode="w") debug_log_handler.setFormatter(verbose_formatter) debug_log_handler.setLevel(logging.DEBUG) root_logger.addHandler(debug_log_handler) root_logger.setLevel(logging.DEBUG) - elif args.silent: + elif silent: root_logger.setLevel(logging.WARN) else: root_logger.setLevel(logging.INFO) + + +def parse_args_and_handle_standard_logging_options(args, parser=None): + """ + Guide to logging in CIME. + + logger.debug -> Verbose/detailed output, use for debugging, off by default. Goes to a .log file + logger.info -> Goes to stdout (and log if --debug). Use for normal program output + logger.warning -> Goes to stderr (and log if --debug). Use for minor problems + logger.error -> Goes to stderr (and log if --debug) + """ + # scripts_regression_tests is the only thing that should pass a None argument in parser + if parser is not None: + if "--help" not in args[1:]: + _check_for_invalid_args(args[1:]) + args = parser.parse_args(args[1:]) + + configure_logging(args.verbose, args.debug, args.silent) + return args + def get_logging_options(): """ Use to pass same logging options as was used for current @@ -1255,13 +1583,14 @@ def get_logging_options(): """ root_logger = logging.getLogger() - if (root_logger.level == logging.DEBUG): + if root_logger.level == logging.DEBUG: return "--debug" - elif (root_logger.level == logging.WARN): + elif root_logger.level == logging.WARN: return "--silent" else: return "" + def convert_to_type(value, type_str, vid=""): """ Convert value from string to another type. 
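For example (an illustrative sketch; the "integer" spelling of type_str is assumed from the error messages below): convert_to_type("2", "integer") gives 2, convert_to_type("TRUE", "logical") gives True, and convert_to_type("3.5", "real") gives 3.5; a value that cannot be coerced trips the corresponding expect() failure below.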
@@ -1276,24 +1605,39 @@ try: value = int(eval(value)) except Exception: - expect(False, "Entry {} was listed as type int but value '{}' is not valid int".format(vid, value)) + expect( + False, + "Entry {} was listed as type int but value '{}' is not valid int".format( + vid, value + ), + ) elif type_str == "logical": - expect(value.upper() in ["TRUE", "FALSE"], - "Entry {} was listed as type logical but had val '{}' instead of TRUE or FALSE".format(vid, value)) + expect( + value.upper() in ["TRUE", "FALSE"], + "Entry {} was listed as type logical but had val '{}' instead of TRUE or FALSE".format( + vid, value + ), + ) value = value.upper() == "TRUE" elif type_str == "real": try: value = float(value) except Exception: - expect(False, "Entry {} was listed as type real but value '{}' is not valid real".format(vid, value)) + expect( + False, + "Entry {} was listed as type real but value '{}' is not valid real".format( + vid, value + ), + ) else: expect(False, "Unknown type '{}'".format(type_str)) return value + def convert_to_unknown_type(value): """ Convert value to its real type by probing conversions. @@ -1324,6 +1668,7 @@ return value + def convert_to_string(value, type_str=None, vid=""): """ Convert value back to string. @@ -1339,9 +1684,15 @@ """ if value is not None and not isinstance(value, CIME.six.string_types): if type_str == "char": - expect(isinstance(value, CIME.six.string_types), "Wrong type for entry id '{}'".format(vid)) + expect( + isinstance(value, CIME.six.string_types), + "Wrong type for entry id '{}'".format(vid), + ) elif type_str == "integer": - expect(isinstance(value, CIME.six.integer_types), "Wrong type for entry id '{}'".format(vid)) + expect( + isinstance(value, CIME.six.integer_types), + "Wrong type for entry id '{}'".format(vid), + ) value = str(value) elif type_str == "logical": expect(type(value) is bool, "Wrong type for entry id '{}'".format(vid)) @@ -1353,10 +1704,11 @@ expect(False, "Unknown type '{}'".format(type_str)) if value is None: value = "" - logger.debug("Attempt to convert None value for vid {} {}".format(vid,value)) + logger.debug("Attempt to convert None value for vid {} {}".format(vid, value)) return value + def convert_to_seconds(time_str): """ Convert time value in [[HH:]MM:]SS to seconds @@ -1381,6 +1733,7 @@ return result + def convert_to_babylonian_time(seconds): """ Convert a time value in seconds to HH:MM:SS @@ -1397,25 +1750,27 @@ return "{:02d}:{:02d}:{:02d}".format(hours, minutes, seconds) + def get_time_in_seconds(timeval, unit): """ Convert a time from 'unit' to seconds """ - if 'nyear' in unit: + if "nyear" in unit: dmult = 365 * 24 * 3600 - elif 'nmonth' in unit: + elif "nmonth" in unit: dmult = 30 * 24 * 3600 - elif 'nday' in unit: + elif "nday" in unit: dmult = 24 * 3600 - elif 'nhour' in unit: + elif "nhour" in unit: dmult = 3600 - elif 'nminute' in unit: + elif "nminute" in unit: dmult = 60 else: dmult = 1 return dmult * timeval + def compute_total_time(job_cost_map, proc_pool): """ Given a map: jobname -> (procs, est-time), return a total time @@ -1431,7 +1786,7 @@ current_time = 0 waiting_jobs = dict(job_cost_map) - running_jobs = {} # name -> (procs, est-time, start-time) + running_jobs = {} # name -> (procs,
est-time, start-time) while len(waiting_jobs) > 0 or len(running_jobs) > 0: launched_jobs = [] for jobname, data in waiting_jobs.items(): @@ -1454,10 +1809,11 @@ def compute_total_time(job_cost_map, proc_pool): for completed_job in completed_jobs: del running_jobs[completed_job] - current_time += 60 # minute time step + current_time += 60 # minute time step return current_time + def format_time(time_format, input_format, input_time): """ Converts the string input_time from input_format to time_format @@ -1477,37 +1833,55 @@ def format_time(time_format, input_format, input_time): '2, 09' """ input_fields = input_format.split("%") - expect(input_fields[0] == input_time[:len(input_fields[0])], - "Failed to parse the input time '{}'; does not match the header string '{}'".format(input_time, input_format)) - input_time = input_time[len(input_fields[0]):] + expect( + input_fields[0] == input_time[: len(input_fields[0])], + "Failed to parse the input time '{}'; does not match the header string '{}'".format( + input_time, input_format + ), + ) + input_time = input_time[len(input_fields[0]) :] timespec = {"H": None, "M": None, "S": None} maxvals = {"M": 60, "S": 60} - DIGIT_CHECK = re.compile('[^0-9]*') + DIGIT_CHECK = re.compile("[^0-9]*") # Loop invariants given input follows the specs: # field starts with H, M, or S # input_time starts with a number corresponding with the start of field for field in input_fields[1:]: # Find all of the digits at the start of the string spec = field[0] - value_re = re.match(r'\d*', input_time) - expect(value_re is not None, - "Failed to parse the input time for the '{}' specifier, expected an integer".format(spec)) + value_re = re.match(r"\d*", input_time) + expect( + value_re is not None, + "Failed to parse the input time for the '{}' specifier, expected an integer".format( + spec + ), + ) value = value_re.group(0) expect(spec in timespec, "Unknown time specifier '" + spec + "'") # Don't do anything if the time field is already specified if timespec[spec] is None: # Verify we aren't exceeding the maximum value if spec in maxvals: - expect(int(value) < maxvals[spec], - "Failed to parse the '{}' specifier: A value less than {:d} is expected".format(spec, maxvals[spec])) + expect( + int(value) < maxvals[spec], + "Failed to parse the '{}' specifier: A value less than {:d} is expected".format( + spec, maxvals[spec] + ), + ) timespec[spec] = value - input_time = input_time[len(value):] + input_time = input_time[len(value) :] # Check for the separator string - expect(len(re.match(DIGIT_CHECK, field).group(0)) == len(field), - "Numbers are not permissible in separator strings") - expect(input_time[:len(field) - 1] == field[1:], - "The separator string ({}) doesn't match '{}'".format(field[1:], input_time)) - input_time = input_time[len(field) - 1:] + expect( + len(re.match(DIGIT_CHECK, field).group(0)) == len(field), + "Numbers are not permissible in separator strings", + ) + expect( + input_time[: len(field) - 1] == field[1:], + "The separator string ({}) doesn't match '{}'".format( + field[1:], input_time + ), + ) + input_time = input_time[len(field) - 1 :] output_fields = time_format.split("%") output_time = output_fields[0] # Used when a value isn't given @@ -1516,8 +1890,10 @@ def format_time(time_format, input_format, input_time): # field starts with H, M, or S # output_time for field in output_fields[1:]: - expect(field == output_fields[-1] or len(field) > 1, - "Separator strings are required to properly parse times") + expect( + field == output_fields[-1] or 
len(field) > 1, + "Separator strings are required to properly parse times", + ) spec = field[0] expect(spec in timespec, "Unknown time specifier '" + spec + "'") if timespec[spec] is not None: @@ -1528,7 +1904,8 @@ def format_time(time_format, input_format, input_time): output_time += field[1:] return output_time -def append_status(msg, sfile, caseroot='.'): + +def append_status(msg, sfile, caseroot="."): """ Append msg to sfile in caseroot """ @@ -1542,17 +1919,24 @@ def append_status(msg, sfile, caseroot='.'): fd.write(ctime + msg + line_ending) fd.write(" ---------------------------------------------------" + line_ending) -def append_testlog(msg, caseroot='.'): + +def append_testlog(msg, caseroot="."): """ Add to TestStatus.log file """ append_status(msg, "TestStatus.log", caseroot) -def append_case_status(phase, status, msg=None, caseroot='.'): + +def append_case_status(phase, status, msg=None, caseroot="."): """ Update CaseStatus file """ - append_status("{} {}{}".format(phase, status, " {}".format(msg if msg else "")), "CaseStatus", caseroot) + append_status( + "{} {}{}".format(phase, status, " {}".format(msg if msg else "")), + "CaseStatus", + caseroot, + ) + def does_file_have_string(filepath, text): """ @@ -1560,6 +1944,7 @@ def does_file_have_string(filepath, text): """ return os.path.isfile(filepath) and text in open(filepath).read() + def is_last_process_complete(filepath, expect_text, fail_text): """ Search the filepath in reverse order looking for expect_text @@ -1567,10 +1952,10 @@ def is_last_process_complete(filepath, expect_text, fail_text): """ complete = False - fh = open(filepath, 'r') + fh = open(filepath, "r") fb = fh.readlines() - rfb = ''.join(reversed(fb)) + rfb = "".join(reversed(fb)) findex = re.search(fail_text, rfb) if findex is None: @@ -1589,6 +1974,7 @@ def is_last_process_complete(filepath, expect_text, fail_text): return complete + def transform_vars(text, case=None, subgroup=None, overrides=None, default=None): """ Do the variable substitution for any variables that need transforms @@ -1608,23 +1994,50 @@ def transform_vars(text, case=None, subgroup=None, overrides=None, default=None) m = directive_re.search(text) variable = m.groups()[0] whole_match = m.group() - if overrides is not None and variable.lower() in overrides and overrides[variable.lower()] is not None: + if ( + overrides is not None + and variable.lower() in overrides + and overrides[variable.lower()] is not None + ): repl = overrides[variable.lower()] - logger.debug("from overrides: in {}, replacing {} with {}".format(text, whole_match, str(repl))) + logger.debug( + "from overrides: in {}, replacing {} with {}".format( + text, whole_match, str(repl) + ) + ) text = text.replace(whole_match, str(repl)) - elif case is not None and hasattr(case, variable.lower()) and getattr(case, variable.lower()) is not None: + elif ( + case is not None + and hasattr(case, variable.lower()) + and getattr(case, variable.lower()) is not None + ): repl = getattr(case, variable.lower()) - logger.debug("from case members: in {}, replacing {} with {}".format(text, whole_match, str(repl))) + logger.debug( + "from case members: in {}, replacing {} with {}".format( + text, whole_match, str(repl) + ) + ) text = text.replace(whole_match, str(repl)) - elif case is not None and case.get_value(variable.upper(), subgroup=subgroup) is not None: + elif ( + case is not None + and case.get_value(variable.upper(), subgroup=subgroup) is not None + ): repl = case.get_value(variable.upper(), subgroup=subgroup) - logger.debug("from 
case: in {}, replacing {} with {}".format(text, whole_match, str(repl))) + logger.debug( + "from case: in {}, replacing {} with {}".format( + text, whole_match, str(repl) + ) + ) text = text.replace(whole_match, str(repl)) elif default is not None: - logger.debug("from default: in {}, replacing {} with {}".format(text, whole_match, str(default))) + logger.debug( + "from default: in {}, replacing {} with {}".format( + text, whole_match, str(default) + ) + ) text = text.replace(whole_match, default) else: @@ -1637,6 +2050,7 @@ def transform_vars(text, case=None, subgroup=None, overrides=None, default=None) return text + def wait_for_unlocked(filepath): locked = True file_object = None @@ -1644,7 +2058,7 @@ def wait_for_unlocked(filepath): try: buffer_size = 8 # Opening file in append mode and read the first 8 characters. - file_object = open(filepath, 'a', buffer_size) + file_object = open(filepath, "a", buffer_size) if file_object: locked = False except IOError: @@ -1654,10 +2068,12 @@ def wait_for_unlocked(filepath): if file_object: file_object.close() + def gunzip_existing_file(filepath): with gzip.open(filepath, "rb") as fd: return fd.read() + def gzip_existing_file(filepath): """ Gzips an existing file, removes the unzipped version, returns path to zip file. @@ -1678,7 +2094,7 @@ def gzip_existing_file(filepath): st = os.stat(filepath) orig_atime, orig_mtime = st[statlib.ST_ATIME], st[statlib.ST_MTIME] - gzpath = '{}.gz'.format(filepath) + gzpath = "{}.gz".format(filepath) with open(filepath, "rb") as f_in: with gzip.open(gzpath, "wb") as f_out: shutil.copyfileobj(f_in, f_out) @@ -1689,11 +2105,13 @@ def gzip_existing_file(filepath): return gzpath + def touch(fname): if os.path.exists(fname): os.utime(fname, None) else: - open(fname, 'a').close() + open(fname, "a").close() + def find_system_test(testname, case): """ @@ -1704,37 +2122,45 @@ def find_system_test(testname, case): Fail if the test is not found in any of the paths. 
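Illustrative usage (a sketch; ERS stands in for any system test name): test_class = find_system_test("ERS", case) returns the class object implementing the test, which the caller can then instantiate with the case.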
""" from importlib import import_module + system_test_path = None if testname.startswith("TEST"): - system_test_path = "CIME.SystemTests.system_tests_common.{}".format(testname) + system_test_path = "CIME.SystemTests.system_tests_common.{}".format(testname) else: components = ["any"] - components.extend( case.get_compset_components()) + components.extend(case.get_compset_components()) fdir = [] for component in components: - tdir = case.get_value("SYSTEM_TESTS_DIR", - attribute={"component":component}) + tdir = case.get_value( + "SYSTEM_TESTS_DIR", attribute={"component": component} + ) if tdir is not None: tdir = os.path.abspath(tdir) - system_test_file = os.path.join(tdir ,"{}.py".format(testname.lower())) + system_test_file = os.path.join(tdir, "{}.py".format(testname.lower())) if os.path.isfile(system_test_file): fdir.append(tdir) - logger.debug( "found "+system_test_file) + logger.debug("found " + system_test_file) if component == "any": - system_test_path = "CIME.SystemTests.{}.{}".format(testname.lower(), testname) + system_test_path = "CIME.SystemTests.{}.{}".format( + testname.lower(), testname + ) else: system_test_dir = os.path.dirname(system_test_file) if system_test_dir not in sys.path: sys.path.append(system_test_dir) system_test_path = "{}.{}".format(testname.lower(), testname) expect(len(fdir) > 0, "Test {} not found, aborting".format(testname)) - expect(len(fdir) == 1, "Test {} found in multiple locations {}, aborting".format(testname, fdir)) + expect( + len(fdir) == 1, + "Test {} found in multiple locations {}, aborting".format(testname, fdir), + ) expect(system_test_path is not None, "No test {} found".format(testname)) - path, m = system_test_path.rsplit('.',1) + path, m = system_test_path.rsplit(".", 1) mod = import_module(path) return getattr(mod, m) + def _get_most_recent_lid_impl(files): """ >>> files = ['/foo/bar/e3sm.log.20160905_111212', '/foo/bar/e3sm.log.20160906_111212.gz'] @@ -1751,28 +2177,36 @@ def _get_most_recent_lid_impl(files): if len(components) > 2: results.append(components[2]) else: - logger.warning("Apparent model log file '{}' did not conform to expected name format".format(item)) + logger.warning( + "Apparent model log file '{}' did not conform to expected name format".format( + item + ) + ) return sorted(list(set(results))) + def ls_sorted_by_mtime(path): - ''' return list of path sorted by timestamp oldest first''' + """return list of path sorted by timestamp oldest first""" mtime = lambda f: os.stat(os.path.join(path, f)).st_mtime return list(sorted(os.listdir(path), key=mtime)) + def get_lids(case): model = case.get_value("MODEL") rundir = case.get_value("RUNDIR") return _get_most_recent_lid_impl(glob.glob("{}/{}.log*".format(rundir, model))) + def new_lid(): lid = time.strftime("%y%m%d-%H%M%S") jobid = batch_jobid() if jobid is not None: - lid = jobid+'.'+lid + lid = jobid + "." 
+ lid os.environ["LID"] = lid return lid + def batch_jobid(): jobid = os.environ.get("PBS_JOBID") if jobid is None: @@ -1783,6 +2217,7 @@ def batch_jobid(): jobid = os.environ.get("COBALT_JOBID") return jobid + def analyze_build_log(comp, log, compiler): """ Capture and report warning count, @@ -1801,7 +2236,7 @@ def analyze_build_log(comp, log, compiler): # don't know enough about this compiler return - with open(log,"r") as fd: + with open(log, "r") as fd: for line in fd: if re.search(warn_re, line): warncnt += 1 @@ -1811,7 +2246,10 @@ def analyze_build_log(comp, log, compiler): logger.warning(line) if warncnt > 0: - logger.info("Component {} build complete with {} warnings".format(comp, warncnt)) + logger.info( + "Component {} build complete with {} warnings".format(comp, warncnt) + ) + def is_python_executable(filepath): first_line = None @@ -1822,20 +2260,27 @@ def is_python_executable(filepath): except Exception: pass - return first_line is not None and first_line.startswith("#!") and "python" in first_line + return ( + first_line is not None + and first_line.startswith("#!") + and "python" in first_line + ) return False + def get_umask(): current_umask = os.umask(0) os.umask(current_umask) return current_umask + def stringify_bool(val): val = False if val is None else val expect(type(val) is bool, "Wrong type for val '{}'".format(repr(val))) return "TRUE" if val else "FALSE" + def indent_string(the_string, indent_level): """Indents the given string by a given number of spaces @@ -1850,19 +2295,27 @@ def indent_string(the_string, indent_level): """ lines = the_string.splitlines(True) - padding = ' ' * indent_level + padding = " " * indent_level lines_indented = [padding + line for line in lines] - return ''.join(lines_indented) + return "".join(lines_indented) + def verbatim_success_msg(return_val): return return_val + CASE_SUCCESS = "success" CASE_FAILURE = "error" -def run_and_log_case_status(func, phase, caseroot='.', - custom_starting_msg_functor=None, - custom_success_msg_functor=None, - is_batch=False): + + +def run_and_log_case_status( + func, + phase, + caseroot=".", + custom_starting_msg_functor=None, + custom_success_msg_functor=None, + is_batch=False, +): starting_msg = None if custom_starting_msg_functor is not None: @@ -1876,26 +2329,35 @@ def run_and_log_case_status(func, phase, caseroot='.', try: rv = func() except BaseException: - custom_success_msg = custom_success_msg_functor(rv) \ - if custom_success_msg_functor and rv is not None else None + custom_success_msg = ( + custom_success_msg_functor(rv) + if custom_success_msg_functor and rv is not None + else None + ) if phase == "case.submit" and is_batch: - append_case_status(phase, "starting", msg=custom_success_msg, - caseroot=caseroot) + append_case_status( + phase, "starting", msg=custom_success_msg, caseroot=caseroot + ) e = sys.exc_info()[1] - append_case_status(phase, CASE_FAILURE, msg=("\n{}".format(e)), - caseroot=caseroot) + append_case_status( + phase, CASE_FAILURE, msg=("\n{}".format(e)), caseroot=caseroot + ) raise else: - custom_success_msg = custom_success_msg_functor(rv) \ - if custom_success_msg_functor else None + custom_success_msg = ( + custom_success_msg_functor(rv) if custom_success_msg_functor else None + ) if phase == "case.submit" and is_batch: - append_case_status(phase, "starting", msg=custom_success_msg, - caseroot=caseroot) - append_case_status(phase, CASE_SUCCESS, msg=custom_success_msg, - caseroot=caseroot) + append_case_status( + phase, "starting", msg=custom_success_msg, 
caseroot=caseroot + ) + append_case_status( + phase, CASE_SUCCESS, msg=custom_success_msg, caseroot=caseroot + ) return rv + def _check_for_invalid_args(args): if get_model() != "e3sm": for arg in args: @@ -1903,14 +2365,24 @@ if " " in arg or arg.startswith("--"): continue if arg.startswith("-") and len(arg) > 2: - sys.stderr.write( "WARNING: The {} argument is deprecated. Multi-character arguments should begin with \"--\" and single character with \"-\"\n Use --help for a complete list of available options\n".format(arg)) + sys.stderr.write( + 'WARNING: The {} argument is deprecated. Multi-character arguments should begin with "--" and single character with "-"\n Use --help for a complete list of available options\n'.format( + arg + ) + ) + def add_mail_type_args(parser): parser.add_argument("--mail-user", help="Email to be used for batch notification.") - parser.add_argument("-M", "--mail-type", action="append", - help="When to send user email. Options are: never, all, begin, end, fail.\n" - "You can specify multiple types with either comma-separated args or multiple -M flags.") + parser.add_argument( + "-M", + "--mail-type", + action="append", + help="When to send user email. Options are: never, all, begin, end, fail.\n" + "You can specify multiple types with either comma-separated args or multiple -M flags.", + ) + def resolve_mail_type_args(args): if args.mail_type is not None: @@ -1919,16 +2391,20 @@ resolved_mail_types.extend(mail_type.split(",")) for mail_type in resolved_mail_types: - expect(mail_type in ("never", "all", "begin", "end", "fail"), - "Unsupported mail-type '{}'".format(mail_type)) + expect( + mail_type in ("never", "all", "begin", "end", "fail"), + "Unsupported mail-type '{}'".format(mail_type), + ) args.mail_type = resolved_mail_types + def copyifnewer(src, dest): - """ if dest does not exist or is older than src copy src to dest """ + """if dest does not exist or is older than src copy src to dest""" if not os.path.isfile(dest) or not filecmp.cmp(src, dest): safe_copy(src, dest) + class SharedArea(object): """ Enable 0002 umask within this manager @@ -1936,7 +2412,7 @@ def __init__(self, new_perms=0o002): self._orig_umask = None - self._new_perms = new_perms + self._new_perms = new_perms def __enter__(self): self._orig_umask = os.umask(self._new_perms) @@ -1944,15 +2420,17 @@ def __exit__(self, *_): os.umask(self._orig_umask) + class Timeout(object): """ A context manager that implements a timeout. By default, it will raise an exception, but a custom function call can be provided.
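Illustrative usage (a sketch; do_work and my_handler are hypothetical): with Timeout(60): do_work() raises RuntimeError if the block runs longer than 60 seconds, while Timeout(60, action=my_handler) calls my_handler on expiry instead of raising.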
Providing None as seconds makes this class a no-op """ + def __init__(self, seconds, action=None): self._seconds = seconds - self._action = action if action is not None else self._handle_timeout + self._action = action if action is not None else self._handle_timeout def _handle_timeout(self, *_): raise RuntimeError("Timeout expired") @@ -1966,11 +2444,13 @@ def __exit__(self, *_): if self._seconds is not None: signal.alarm(0) + def filter_unicode(unistr): """ Sometimes unicode chars can cause problems """ - return "".join([i if ord(i) < 128 else ' ' for i in unistr]) + return "".join([i if ord(i) < 128 else " " for i in unistr]) + def run_bld_cmd_ensure_logging(cmd, arg_logger, from_dir=None, timeout=None): arg_logger.info(cmd) @@ -1979,9 +2459,11 @@ arg_logger.info(errput) expect(stat == 0, filter_unicode(errput)) + def get_batch_script_for_job(job): return job if "st_archive" in job else "." + job + def string_in_list(_string, _list): """Case insensitive search for string in list returns the matching list value @@ -1996,12 +2478,14 @@ return x return None + def model_log(model, arg_logger, msg, debug_others=True): if get_model() == model: arg_logger.info(msg) elif debug_others: arg_logger.debug(msg) + def get_htmlroot(machobj=None): """Get location for test HTML output @@ -2031,6 +2515,7 @@ logger.info("No htmlroot info available") return None + def get_urlroot(machobj=None): """Get URL to htmlroot @@ -2059,3 +2544,17 @@ logger.info("No urlroot info available") return None + + +def clear_folder(_dir): + if os.path.exists(_dir): + for the_file in os.listdir(_dir): + file_path = os.path.join(_dir, the_file) + try: + if os.path.isfile(file_path): + os.unlink(file_path) + else: + clear_folder(file_path) + os.rmdir(file_path) + except Exception as e: + print(e) diff --git a/CIME/wait_for_tests.py b/CIME/wait_for_tests.py index 740e9f965fb..74e50cfd157 100644 --- a/CIME/wait_for_tests.py +++ b/CIME/wait_for_tests.py @@ -1,7 +1,8 @@ #pylint: disable=import-error from CIME.six.moves import queue import os, time, threading, socket, signal, shutil, glob -#pylint: disable=import-error + +# pylint: disable=import-error from distutils.spawn import find_executable import logging import xml.etree.ElementTree as xmlet @@ -13,26 +14,28 @@ from CIME.provenance import save_test_success from CIME.case.case import Case -SIGNAL_RECEIVED = False -E3SM_MAIN_CDASH = "E3SM" +SIGNAL_RECEIVED = False +E3SM_MAIN_CDASH = "E3SM" CDASH_DEFAULT_BUILD_GROUP = "ACME_Latest" -SLEEP_INTERVAL_SEC = .1 +SLEEP_INTERVAL_SEC = 0.1 ############################################################################### def signal_handler(*_): -############################################################################### + ############################################################################### global SIGNAL_RECEIVED SIGNAL_RECEIVED = True + ############################################################################### def set_up_signal_handlers(): -############################################################################### + ############################################################################### signal.signal(signal.SIGTERM, signal_handler) signal.signal(signal.SIGINT, signal_handler) + ############################################################################### def get_test_time(test_path):
-############################################################################### + ############################################################################### ts = TestStatus(test_dir=test_path) comment = ts.get_comment(RUN_PHASE) if comment is None or "time=" not in comment: @@ -42,15 +45,17 @@ def get_test_time(test_path): time_data = [token for token in comment.split() if token.startswith("time=")][0] return int(time_data.split("=")[1]) + ############################################################################### def get_test_phase(test_path, phase): -############################################################################### + ############################################################################### ts = TestStatus(test_dir=test_path) return ts.get_status(phase) + ############################################################################### def get_nml_diff(test_path): -############################################################################### + ############################################################################### test_log = os.path.join(test_path, "TestStatus.log") diffs = "" @@ -67,23 +72,35 @@ def get_nml_diff(test_path): return diffs + ############################################################################### def get_test_output(test_path): -############################################################################### + ############################################################################### output_file = os.path.join(test_path, "TestStatus.log") - if (os.path.exists(output_file)): - return open(output_file, 'r').read() + if os.path.exists(output_file): + return open(output_file, "r").read() else: logging.warning("File '{}' not found".format(output_file)) return "" + ############################################################################### -def create_cdash_xml_boiler(phase, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, git_commit): -############################################################################### +def create_cdash_xml_boiler( + phase, + cdash_build_name, + cdash_build_group, + utc_time, + current_time, + hostname, + git_commit, +): + ############################################################################### site_elem = xmlet.Element("Site") - if ("JENKINS_START_TIME" in os.environ): - time_info_str = "Total testing time: {:d} seconds".format(int(current_time) - int(os.environ["JENKINS_START_TIME"])) + if "JENKINS_START_TIME" in os.environ: + time_info_str = "Total testing time: {:d} seconds".format( + int(current_time) - int(os.environ["JENKINS_START_TIME"]) + ) else: time_info_str = "" @@ -97,39 +114,81 @@ def create_cdash_xml_boiler(phase, cdash_build_name, cdash_build_group, utc_time phase_elem = xmlet.SubElement(site_elem, phase) xmlet.SubElement(phase_elem, "StartDateTime").text = time.ctime(current_time) - xmlet.SubElement(phase_elem, "Start{}Time".format("Test" if phase == "Testing" else phase)).text = str(int(current_time)) + xmlet.SubElement( + phase_elem, "Start{}Time".format("Test" if phase == "Testing" else phase) + ).text = str(int(current_time)) return site_elem, phase_elem + ############################################################################### -def create_cdash_config_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit): -############################################################################### - site_elem, config_elem = create_cdash_xml_boiler("Configure", cdash_build_name, cdash_build_group, 
utc_time, current_time, hostname, git_commit) +def create_cdash_config_xml( + results, + cdash_build_name, + cdash_build_group, + utc_time, + current_time, + hostname, + data_rel_path, + git_commit, +): + ############################################################################### + site_elem, config_elem = create_cdash_xml_boiler( + "Configure", + cdash_build_name, + cdash_build_group, + utc_time, + current_time, + hostname, + git_commit, + ) xmlet.SubElement(config_elem, "ConfigureCommand").text = "namelists" config_results = [] for test_name in sorted(results): test_path = results[test_name][0] - test_norm_path = test_path if os.path.isdir(test_path) else os.path.dirname(test_path) + test_norm_path = ( + test_path if os.path.isdir(test_path) else os.path.dirname(test_path) + ) nml_phase_result = get_test_phase(test_norm_path, NAMELIST_PHASE) if nml_phase_result == TEST_FAIL_STATUS: nml_diff = get_nml_diff(test_norm_path) - cdash_warning = "CMake Warning:\n\n{} NML DIFF:\n{}\n".format(test_name, nml_diff) + cdash_warning = "CMake Warning:\n\n{} NML DIFF:\n{}\n".format( + test_name, nml_diff + ) config_results.append(cdash_warning) xmlet.SubElement(config_elem, "Log").text = "\n".join(config_results) xmlet.SubElement(config_elem, "ConfigureStatus").text = "0" - xmlet.SubElement(config_elem, "ElapsedMinutes").text = "0" # Skip for now + xmlet.SubElement(config_elem, "ElapsedMinutes").text = "0" # Skip for now etree = xmlet.ElementTree(site_elem) etree.write(os.path.join(data_rel_path, "Configure.xml")) + ############################################################################### -def create_cdash_build_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit): -############################################################################### - site_elem, build_elem = create_cdash_xml_boiler("Build", cdash_build_name, cdash_build_group, utc_time, current_time, hostname, git_commit) +def create_cdash_build_xml( + results, + cdash_build_name, + cdash_build_group, + utc_time, + current_time, + hostname, + data_rel_path, + git_commit, +): + ############################################################################### + site_elem, build_elem = create_cdash_xml_boiler( + "Build", + cdash_build_name, + cdash_build_group, + utc_time, + current_time, + hostname, + git_commit, + ) xmlet.SubElement(build_elem, "ConfigureCommand").text = "case.build" @@ -141,7 +200,9 @@ def create_cdash_build_xml(results, cdash_build_name, cdash_build_group, utc_tim for idx, test_name in enumerate(sorted(results)): test_path, test_status, _ = results[test_name] - test_norm_path = test_path if os.path.isdir(test_path) else os.path.dirname(test_path) + test_norm_path = ( + test_path if os.path.isdir(test_path) else os.path.dirname(test_path) + ) if test_status == TEST_FAIL_STATUS and get_test_time(test_norm_path) == 0: error_elem = xmlet.SubElement(build_elem, "Error") xmlet.SubElement(error_elem, "Text").text = test_name @@ -150,15 +211,33 @@ def create_cdash_build_xml(results, cdash_build_name, cdash_build_group, utc_tim xmlet.SubElement(error_elem, "PostContext").text = "" xmlet.SubElement(error_elem, "RepeatCount").text = "0" - xmlet.SubElement(build_elem, "ElapsedMinutes").text = "0" # Skip for now + xmlet.SubElement(build_elem, "ElapsedMinutes").text = "0" # Skip for now etree = xmlet.ElementTree(site_elem) etree.write(os.path.join(data_rel_path, "Build.xml")) + ############################################################################### -def 
create_cdash_test_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit): -############################################################################### - site_elem, testing_elem = create_cdash_xml_boiler("Testing", cdash_build_name, cdash_build_group, utc_time, current_time, hostname, git_commit) +def create_cdash_test_xml( + results, + cdash_build_name, + cdash_build_group, + utc_time, + current_time, + hostname, + data_rel_path, + git_commit, +): + ############################################################################### + site_elem, testing_elem = create_cdash_xml_boiler( + "Testing", + cdash_build_name, + cdash_build_group, + utc_time, + current_time, + hostname, + git_commit, + ) test_list_elem = xmlet.SubElement(testing_elem, "TestList") for test_name in sorted(results): @@ -167,12 +246,14 @@ def create_cdash_test_xml(results, cdash_build_name, cdash_build_group, utc_time for test_name in sorted(results): test_path, test_status, _ = results[test_name] test_passed = test_status in [TEST_PASS_STATUS, NAMELIST_FAIL_STATUS] - test_norm_path = test_path if os.path.isdir(test_path) else os.path.dirname(test_path) + test_norm_path = ( + test_path if os.path.isdir(test_path) else os.path.dirname(test_path) + ) full_test_elem = xmlet.SubElement(testing_elem, "Test") if test_passed: full_test_elem.attrib["Status"] = "passed" - elif (test_status == TEST_PEND_STATUS): + elif test_status == TEST_PEND_STATUS: full_test_elem.attrib["Status"] = "notrun" else: full_test_elem.attrib["Status"] = "failed" @@ -189,11 +270,15 @@ def create_cdash_test_xml(results, cdash_build_name, cdash_build_group, utc_time results_elem = xmlet.SubElement(full_test_elem, "Results") named_measurements = ( - ("text/string", "Exit Code", test_status), - ("text/string", "Exit Value", "0" if test_passed else "1"), - ("numeric_double", "Execution Time", str(get_test_time(test_norm_path))), - ("text/string", "Completion Status", "Not Completed" if test_status == TEST_PEND_STATUS else "Completed"), - ("text/string", "Command line", "create_test") + ("text/string", "Exit Code", test_status), + ("text/string", "Exit Value", "0" if test_passed else "1"), + ("numeric_double", "Execution Time", str(get_test_time(test_norm_path))), + ( + "text/string", + "Completion Status", + "Not Completed" if test_status == TEST_PEND_STATUS else "Completed", + ), + ("text/string", "Command line", "create_test"), ) for type_attr, name_attr, value in named_measurements: @@ -206,21 +291,28 @@ def create_cdash_test_xml(results, cdash_build_name, cdash_build_group, utc_time measurement_elem = xmlet.SubElement(results_elem, "Measurement") value_elem = xmlet.SubElement(measurement_elem, "Value") - value_elem.text = ''.join([item for item in get_test_output(test_norm_path) if ord(item) < 128]) + value_elem.text = "".join( + [item for item in get_test_output(test_norm_path) if ord(item) < 128] + ) - xmlet.SubElement(testing_elem, "ElapsedMinutes").text = "0" # Skip for now + xmlet.SubElement(testing_elem, "ElapsedMinutes").text = "0" # Skip for now etree = xmlet.ElementTree(site_elem) etree.write(os.path.join(data_rel_path, "Test.xml")) + ############################################################################### -def create_cdash_xml_fakes(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname): -############################################################################### +def create_cdash_xml_fakes( + results, cdash_build_name, cdash_build_group, utc_time, 
current_time, hostname +): + ############################################################################### # We assume all cases were created from the same code repo first_result_case = os.path.dirname(list(results.items())[0][1][0]) try: - srcroot = run_cmd_no_fail("./xmlquery --value SRCROOT", from_dir=first_result_case) + srcroot = run_cmd_no_fail( + "./xmlquery --value SRCROOT", from_dir=first_result_case + ) except CIMEError: # Use repo containing this script as last resort srcroot = os.path.join(CIME.utils.get_cime_root(), "..") @@ -229,15 +321,45 @@ def create_cdash_xml_fakes(results, cdash_build_name, cdash_build_group, utc_tim data_rel_path = os.path.join("Testing", utc_time) - create_cdash_config_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit) - - create_cdash_build_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit) - - create_cdash_test_xml(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname, data_rel_path, git_commit) - -############################################################################### -def create_cdash_upload_xml(results, cdash_build_name, cdash_build_group, utc_time, hostname, force_log_upload): -############################################################################### + create_cdash_config_xml( + results, + cdash_build_name, + cdash_build_group, + utc_time, + current_time, + hostname, + data_rel_path, + git_commit, + ) + + create_cdash_build_xml( + results, + cdash_build_name, + cdash_build_group, + utc_time, + current_time, + hostname, + data_rel_path, + git_commit, + ) + + create_cdash_test_xml( + results, + cdash_build_name, + cdash_build_group, + utc_time, + current_time, + hostname, + data_rel_path, + git_commit, + ) + + +############################################################################### +def create_cdash_upload_xml( + results, cdash_build_name, cdash_build_group, utc_time, hostname, force_log_upload +): + ############################################################################### data_rel_path = os.path.join("Testing", utc_time) @@ -266,18 +388,35 @@ def create_cdash_upload_xml(results, cdash_build_name, cdash_build_group, utc_ti # it's possible that tests that failed very badly/early, and fake cases for testing # will not be able to support xmlquery try: - log_src_dir = run_cmd_no_fail("./xmlquery {} --value".format(param), from_dir=case_dir) + log_src_dir = run_cmd_no_fail( + "./xmlquery {} --value".format(param), + from_dir=case_dir, + ) except: continue - log_dst_dir = os.path.join(log_dir, "{}{}_{}_logs".format(test_name, "" if case_dir == test_case_dir else ".case2", param)) + log_dst_dir = os.path.join( + log_dir, + "{}{}_{}_logs".format( + test_name, + "" if case_dir == test_case_dir else ".case2", + param, + ), + ) os.makedirs(log_dst_dir) for log_file in glob.glob(os.path.join(log_src_dir, "*log*")): if os.path.isdir(log_file): - shutil.copytree(log_file, os.path.join(log_dst_dir, os.path.basename(log_file))) + shutil.copytree( + log_file, + os.path.join( + log_dst_dir, os.path.basename(log_file) + ), + ) else: safe_copy(log_file, log_dst_dir) - for log_file in glob.glob(os.path.join(log_src_dir, "*.cprnc.out*")): + for log_file in glob.glob( + os.path.join(log_src_dir, "*.cprnc.out*") + ): safe_copy(log_file, log_dst_dir) need_to_upload = True @@ -285,14 +424,15 @@ def create_cdash_upload_xml(results, cdash_build_name, cdash_build_group, utc_ti if need_to_upload: tarball = 
"{}.tar.gz".format(log_dir) - if (os.path.exists(tarball)): + if os.path.exists(tarball): os.remove(tarball) - run_cmd_no_fail("tar -cf - {} | gzip -c".format(log_dir), arg_stdout=tarball) + run_cmd_no_fail( + "tar -cf - {} | gzip -c".format(log_dir), arg_stdout=tarball + ) base64 = run_cmd_no_fail("base64 {}".format(tarball)) - xml_text = \ -r""" + xml_text = r""" "?> @@ -303,18 +443,28 @@ def create_cdash_upload_xml(results, cdash_build_name, cdash_build_group, utc_ti -""".format(cdash_build_name, utc_time, cdash_build_group, hostname, os.path.abspath(tarball), base64) +""".format( + cdash_build_name, + utc_time, + cdash_build_group, + hostname, + os.path.abspath(tarball), + base64, + ) with open(os.path.join(data_rel_path, "Upload.xml"), "w") as fd: fd.write(xml_text) finally: - if (os.path.isdir(log_dir)): + if os.path.isdir(log_dir): shutil.rmtree(log_dir) + ############################################################################### -def create_cdash_xml(results, cdash_build_name, cdash_project, cdash_build_group, force_log_upload=False): -############################################################################### +def create_cdash_xml( + results, cdash_build_name, cdash_project, cdash_build_group, force_log_upload=False +): + ############################################################################### # # Create dart config file @@ -326,12 +476,13 @@ def create_cdash_xml(results, cdash_build_name, cdash_project, cdash_build_group cdash_timestamp = time.strftime("%H:%M:%S", utc_time_tuple) hostname = Machines().get_machine_name() - if (hostname is None): + if hostname is None: hostname = socket.gethostname().split(".")[0] - logging.warning("Could not convert hostname '{}' into an E3SM machine name".format(hostname)) + logging.warning( + "Could not convert hostname '{}' into an E3SM machine name".format(hostname) + ) - dart_config = \ -""" + dart_config = """ SourceDirectory: {0} BuildDirectory: {0} @@ -359,35 +510,62 @@ def create_cdash_xml(results, cdash_build_name, cdash_project, cdash_build_group UseLaunchers: CurlOptions: CURLOPT_SSL_VERIFYPEER_OFF;CURLOPT_SSL_VERIFYHOST_OFF -""".format(os.getcwd(), hostname, cdash_build_name, cdash_project, - find_executable("scp"), cdash_timestamp) +""".format( + os.getcwd(), + hostname, + cdash_build_name, + cdash_project, + find_executable("scp"), + cdash_timestamp, + ) with open("DartConfiguration.tcl", "w") as dart_fd: dart_fd.write(dart_config) - utc_time = time.strftime('%Y%m%d-%H%M', utc_time_tuple) + utc_time = time.strftime("%Y%m%d-%H%M", utc_time_tuple) os.makedirs(os.path.join("Testing", utc_time)) # Make tag file with open("Testing/TAG", "w") as tag_fd: tag_fd.write("{}\n{}\n".format(utc_time, cdash_build_group)) - create_cdash_xml_fakes(results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname) + create_cdash_xml_fakes( + results, cdash_build_name, cdash_build_group, utc_time, current_time, hostname + ) - create_cdash_upload_xml(results, cdash_build_name, cdash_build_group, utc_time, hostname, force_log_upload) + create_cdash_upload_xml( + results, + cdash_build_name, + cdash_build_group, + utc_time, + hostname, + force_log_upload, + ) run_cmd_no_fail("ctest -VV -D NightlySubmit", verbose=True) + ############################################################################### -def wait_for_test(test_path, results, wait, check_throughput, check_memory, ignore_namelists, ignore_memleak, no_run): -############################################################################### - if 
(os.path.isdir(test_path)): +def wait_for_test( + test_path, + results, + wait, + check_throughput, + check_memory, + ignore_namelists, + ignore_memleak, + no_run, +): + ############################################################################### + if os.path.isdir(test_path): test_status_filepath = os.path.join(test_path, TEST_STATUS_FILENAME) else: test_status_filepath = test_path logging.debug("Watching file: '{}'".format(test_status_filepath)) - test_log_path = os.path.join(os.path.dirname(test_status_filepath), ".internal_test_status.log") + test_log_path = os.path.join( + os.path.dirname(test_status_filepath), ".internal_test_status.log" + ) # We don't want to make it a requirement that wait_for_tests has write access # to all case directories @@ -399,15 +577,18 @@ def wait_for_test(test_path, results, wait, check_throughput, check_memory, igno prior_ts = None with open(test_log_path, "w") as log_fd: - while (True): - if (os.path.exists(test_status_filepath)): + while True: + if os.path.exists(test_status_filepath): ts = TestStatus(test_dir=os.path.dirname(test_status_filepath)) test_name = ts.get_name() - test_status, test_phase = ts.get_overall_test_status(wait_for_run=not no_run, # Important - no_run=no_run, - check_throughput=check_throughput, - check_memory=check_memory, ignore_namelists=ignore_namelists, - ignore_memleak=ignore_memleak) + test_status, test_phase = ts.get_overall_test_status( + wait_for_run=not no_run, # Important + no_run=no_run, + check_throughput=check_throughput, + check_memory=check_memory, + ignore_namelists=ignore_namelists, + ignore_memleak=ignore_memleak, + ) if prior_ts is not None and prior_ts != ts: log_fd.write(ts.phase_statuses_dump()) @@ -415,29 +596,59 @@ def wait_for_test(test_path, results, wait, check_throughput, check_memory, igno prior_ts = ts - if (test_status == TEST_PEND_STATUS and (wait and not SIGNAL_RECEIVED)): + if test_status == TEST_PEND_STATUS and (wait and not SIGNAL_RECEIVED): time.sleep(SLEEP_INTERVAL_SEC) logging.debug("Waiting for test to finish") else: - results.put( (test_name, test_path, test_status, test_phase) ) + results.put((test_name, test_path, test_status, test_phase)) break else: - if (wait and not SIGNAL_RECEIVED): - logging.debug("File '{}' does not yet exist".format(test_status_filepath)) + if wait and not SIGNAL_RECEIVED: + logging.debug( + "File '{}' does not yet exist".format(test_status_filepath) + ) time.sleep(SLEEP_INTERVAL_SEC) else: test_name = os.path.abspath(test_status_filepath).split("/")[-2] - results.put( (test_name, test_path, "File '{}' doesn't exist".format(test_status_filepath), CREATE_NEWCASE_PHASE) ) + results.put( + ( + test_name, + test_path, + "File '{}' doesn't exist".format(test_status_filepath), + CREATE_NEWCASE_PHASE, + ) + ) break + ############################################################################### -def wait_for_tests_impl(test_paths, no_wait=False, check_throughput=False, check_memory=False, ignore_namelists=False, ignore_memleak=False, no_run=False): -############################################################################### +def wait_for_tests_impl( + test_paths, + no_wait=False, + check_throughput=False, + check_memory=False, + ignore_namelists=False, + ignore_memleak=False, + no_run=False, +): + ############################################################################### results = queue.Queue() for test_path in test_paths: - t = threading.Thread(target=wait_for_test, args=(test_path, results, not no_wait, check_throughput, check_memory, ignore_namelists, 
ignore_memleak, no_run)) + t = threading.Thread( + target=wait_for_test, + args=( + test_path, + results, + not no_wait, + check_throughput, + check_memory, + ignore_namelists, + ignore_memleak, + no_run, + ), + ) t.daemon = True t.start() @@ -446,45 +657,71 @@ def wait_for_tests_impl(test_paths, no_wait=False, check_throughput=False, check test_results = {} completed_test_paths = [] - while (not results.empty()): + while not results.empty(): test_name, test_path, test_status, test_phase = results.get() - if (test_name in test_results): + if test_name in test_results: prior_path, prior_status, _ = test_results[test_name] - if (test_status == prior_status): - logging.warning("Test name '{}' was found in both '{}' and '{}'".format(test_name, test_path, prior_path)) + if test_status == prior_status: + logging.warning( + "Test name '{}' was found in both '{}' and '{}'".format( + test_name, test_path, prior_path + ) + ) else: - raise CIMEError("Test name '{}' was found in both '{}' and '{}' with different results".format(test_name, test_path, prior_path)) - - expect(test_name is not None, "Failed to get test name for test_path: {}".format(test_path)) + raise CIMEError( + "Test name '{}' was found in both '{}' and '{}' with different results".format( + test_name, test_path, prior_path + ) + ) + + expect( + test_name is not None, + "Failed to get test name for test_path: {}".format(test_path), + ) test_results[test_name] = (test_path, test_status, test_phase) completed_test_paths.append(test_path) - expect(set(test_paths) == set(completed_test_paths), - "Missing results for test paths: {}".format(set(test_paths) - set(completed_test_paths))) + expect( + set(test_paths) == set(completed_test_paths), + "Missing results for test paths: {}".format( + set(test_paths) - set(completed_test_paths) + ), + ) return test_results + ############################################################################### -def wait_for_tests(test_paths, - no_wait=False, - check_throughput=False, - check_memory=False, - ignore_namelists=False, - ignore_memleak=False, - cdash_build_name=None, - cdash_project=E3SM_MAIN_CDASH, - cdash_build_group=CDASH_DEFAULT_BUILD_GROUP, - timeout=None, - force_log_upload=False, - no_run=False, - update_success=False, - expect_test_complete=True): -############################################################################### +def wait_for_tests( + test_paths, + no_wait=False, + check_throughput=False, + check_memory=False, + ignore_namelists=False, + ignore_memleak=False, + cdash_build_name=None, + cdash_project=E3SM_MAIN_CDASH, + cdash_build_group=CDASH_DEFAULT_BUILD_GROUP, + timeout=None, + force_log_upload=False, + no_run=False, + update_success=False, + expect_test_complete=True, +): + ############################################################################### # Set up signal handling, we want to print results before the program # is terminated set_up_signal_handlers() with Timeout(timeout, action=signal_handler): - test_results = wait_for_tests_impl(test_paths, no_wait, check_throughput, check_memory, ignore_namelists, ignore_memleak, no_run) + test_results = wait_for_tests_impl( + test_paths, + no_wait, + check_throughput, + check_memory, + ignore_namelists, + ignore_memleak, + no_run, + ) all_pass = True env_loaded = False @@ -492,25 +729,44 @@ def wait_for_tests(test_paths, test_path, test_status, phase = test_data case_dir = os.path.dirname(test_path) - if test_status not in [TEST_PASS_STATUS, TEST_PEND_STATUS, NAMELIST_FAIL_STATUS]: + if test_status not in [ + 
TEST_PASS_STATUS, + TEST_PEND_STATUS, + NAMELIST_FAIL_STATUS, + ]: # Report failed phases - logging.info( "{} {} (phase {})".format(test_status, test_name, phase)) + logging.info("{} {} (phase {})".format(test_status, test_name, phase)) all_pass = False else: # Be cautious about telling the user that the test passed since we might # not know that the test passed yet. if test_status == TEST_PEND_STATUS: if expect_test_complete: - logging.info( "{} {} (phase {} unexpectedly left in PEND)".format(TEST_PEND_STATUS, test_name, phase)) + logging.info( + "{} {} (phase {} unexpectedly left in PEND)".format( + TEST_PEND_STATUS, test_name, phase + ) + ) all_pass = False else: - logging.info( "{} {} (phase {} has not yet completed)".format(TEST_PEND_STATUS, test_name, phase)) + logging.info( + "{} {} (phase {} has not yet completed)".format( + TEST_PEND_STATUS, test_name, phase + ) + ) elif test_status == NAMELIST_FAIL_STATUS: - logging.info( "{} {} (but otherwise OK) {}".format(NAMELIST_FAIL_STATUS, test_name, phase)) + logging.info( + "{} {} (but otherwise OK) {}".format( + NAMELIST_FAIL_STATUS, test_name, phase + ) + ) all_pass = False else: - expect(test_status == TEST_PASS_STATUS, "Expected pass if we made it here, instead: {}".format(test_status)) + expect( + test_status == TEST_PASS_STATUS, + "Expected pass if we made it here, instead: {}".format(test_status), + ) logging.info("{} {} {}".format(test_status, test_name, phase)) logging.info(" Case dir: {}".format(case_dir)) @@ -528,12 +784,27 @@ def wait_for_tests(test_paths, env_loaded = True if update_success: - save_test_success(baseline_root, srcroot, test_name, test_status in [TEST_PASS_STATUS, NAMELIST_FAIL_STATUS]) + save_test_success( + baseline_root, + srcroot, + test_name, + test_status in [TEST_PASS_STATUS, NAMELIST_FAIL_STATUS], + ) except CIMEError as e: - logging.warning("Failed to update success / load_env for Case {}: {}".format(case_dir, e)) + logging.warning( + "Failed to update success / load_env for Case {}: {}".format( + case_dir, e + ) + ) if cdash_build_name: - create_cdash_xml(test_results, cdash_build_name, cdash_project, cdash_build_group, force_log_upload) + create_cdash_xml( + test_results, + cdash_build_name, + cdash_project, + cdash_build_group, + force_log_upload, + ) return all_pass diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index bfa36c38c4d..012b19301a3 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -27,7 +27,7 @@ find useful? Have a few minutes to tackle an issue? In this guide we will get yo integrated into contributing to CIME! ## What Can I Do? -* Tackle any unassigned [issues](https://github.com/ESMCI/CIME/issues) you wish! +* Tackle any unassigned [issues](https://github.com/ESMCI/CIME/issues) you wish! * Contribute code you already have. It doesn’t need to be perfect! We will help you clean things up, test it, etc. @@ -67,12 +67,22 @@ to tell CIME about your development machine. See the [CIME users guide](https:// Run the scripts_regression_test: - cd scripts/tests - scripts_regression_tests.py + cd scripts/lib/CIME/tests + python scripts_regression_tests.py + +Alternatively with `pytest`: + + pytest scripts/lib/CIME/tests Make your change. Add tests for your change. Make the tests pass to the same level as before your changes. - scripts_regression_tests.py + cd scripts/lib/CIME/tests + python scripts_regression_tests.py + +Run [pre-commit](https://pre-commit.com/#usage) before committing changes and submitting a PR. 
+ + pip install pre-commit + pre-commit run -a Commit the changes you made. Chris Beams has written a [guide](https://chris.beams.io/posts/git-commit/) on how to write good commit messages. @@ -90,8 +100,9 @@ Some things that will increase the chance that your pull request is accepted: * Follow [PEP8][pep8] for style. (The `flake8` utility can help with this.) * Write a [good commit message][commit]. -Pull requests will automatically have tests run by Travis. This includes -running both the unit tests as well as the `flake8` code linter. +Pull requests will automatically have tests run by a GitHub Action. This +includes running both the unit tests and `pre-commit`, which checks +linting. [pep8]: http://pep8.org [commit]: https://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html diff --git a/ChangeLog b/ChangeLog index 66b425d2cfc..8f0058c20be 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,6 +1,956 @@ ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer +Date: 1-11-22 +Tag: cime6.0.13 +Answer Changes: None +Tests: scripts_regression_tests, pytest, & by hand +Dependencies: + +Brief Summary: + - Handle ESMF_Profile format change (backward compatible). + - Extend throughput and memory checking in baseline tests. + - FUNIT needs CIME_NO_CMAKE_MACRO ON for now. + - New compset naming convention. + - Changes needed for new cesm grid split in ccs_config_cesm. + - Move cesm config files to new repository - ccs_config_cesm. + - Add enforcement of code formatting to pre-commit. + - Support importing buildnml from cime_config/{compname}_cime_py. + - Update the documentation of --extra-machines-dir. + - Fix returning correct test results. + +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +163752d4 Merge pull request #4150 from jedwards4b/esmf_profile_update +b0ada2143 Merge pull request #4151 from ESMCI/azamat/extend-perf-checks +f58bb131a Merge pull request #4149 from billsacks/funit_no_cmake_macro +7e63749f2 Merge pull request #4148 from jedwards4b/jedwards/refactor_compset_naming +bc80870bf changes needed for new cesm grid split in ccs_config_cesm (#4145) +a633c4521 Merge pull request #4144 from mvertens/feature/split_cesm_config +281c3d829 Merge pull request #4141 from jasonb5/precommit +178e9f83e Merge pull request #4130 from mnlevy1981/improve_cime_config +aba326d08 Merge pull request #4137 from billsacks/doc_extra_machines_dir +d6ffc88e3 Merge pull request #4143 from jasonb5/fix_testing + + +Modified files: git diff --name-status [previous_tag] +A .git-blame-ignore-revs +M .github/PULL_REQUEST_TEMPLATE +M .github/workflows/srt.yml +M .github/workflows/srt_nuopc.yml +M .gitignore +M .pre-commit-config.yaml +M CONTRIBUTING.md +M ChangeLog +M ChangeLog_template +M Externals.cfg +M LICENSE.TXT +D config/cesm/config_archive.xml +M config/cesm/config_files.xml +D config/cesm/config_grids.xml +D config/cesm/config_grids_common.xml +D config/cesm/config_grids_mct.xml +D config/cesm/config_grids_nuopc.xml +D config/cesm/config_inputdata.xml +D config/cesm/machines/Depends.babbageKnc +D config/cesm/machines/Depends.bluewaters +D config/cesm/machines/Depends.corip1 +D config/cesm/machines/Depends.cray +D config/cesm/machines/Depends.gnu +D config/cesm/machines/Depends.intel +D config/cesm/machines/Depends.intel14 +D config/cesm/machines/Depends.intelmic +D config/cesm/machines/Depends.intelmic14 +D config/cesm/machines/Depends.mira +D config/cesm/machines/Depends.nag +D 
config/cesm/machines/Depends.nvhpc-gpu +D config/cesm/machines/Depends.pgi-gpu +D config/cesm/machines/README +D config/cesm/machines/cmake_macros/CMakeLists.txt +D config/cesm/machines/cmake_macros/CNL.cmake +D config/cesm/machines/cmake_macros/Darwin.cmake +D config/cesm/machines/cmake_macros/Macros.cmake +D config/cesm/machines/cmake_macros/arm.cmake +D config/cesm/machines/cmake_macros/armgcc.cmake +D config/cesm/machines/cmake_macros/athena.cmake +D config/cesm/machines/cmake_macros/bluewaters.cmake +D config/cesm/machines/cmake_macros/casper.cmake +D config/cesm/machines/cmake_macros/centos7-linux.cmake +D config/cesm/machines/cmake_macros/cheyenne.cmake +D config/cesm/machines/cmake_macros/container.cmake +D config/cesm/machines/cmake_macros/cray.cmake +D config/cesm/machines/cmake_macros/cray_daint.cmake +D config/cesm/machines/cmake_macros/euler2.cmake +D config/cesm/machines/cmake_macros/euler3.cmake +D config/cesm/machines/cmake_macros/euler4.cmake +D config/cesm/machines/cmake_macros/frontera.cmake +D config/cesm/machines/cmake_macros/gnu.cmake +D config/cesm/machines/cmake_macros/gnu_cheyenne.cmake +D config/cesm/machines/cmake_macros/gnu_coeus.cmake +D config/cesm/machines/cmake_macros/gnu_hobart.cmake +D config/cesm/machines/cmake_macros/gnu_homebrew.cmake +D config/cesm/machines/cmake_macros/gnu_melvin.cmake +D config/cesm/machines/cmake_macros/gnu_modex.cmake +D config/cesm/machines/cmake_macros/hobart.cmake +D config/cesm/machines/cmake_macros/ibm.cmake +D config/cesm/machines/cmake_macros/ibm_AIX.cmake +D config/cesm/machines/cmake_macros/ibm_BGQ.cmake +D config/cesm/machines/cmake_macros/ibm_mira.cmake +D config/cesm/machines/cmake_macros/intel.cmake +D config/cesm/machines/cmake_macros/intel_Darwin.cmake +D config/cesm/machines/cmake_macros/intel_aleph.cmake +D config/cesm/machines/cmake_macros/intel_athena.cmake +D config/cesm/machines/cmake_macros/intel_bluewaters.cmake +D config/cesm/machines/cmake_macros/intel_casper.cmake +D config/cesm/machines/cmake_macros/intel_cheyenne.cmake +D config/cesm/machines/cmake_macros/intel_constance.cmake +D config/cesm/machines/cmake_macros/intel_cori-haswell.cmake +D config/cesm/machines/cmake_macros/intel_cori-knl.cmake +D config/cesm/machines/cmake_macros/intel_eastwind.cmake +D config/cesm/machines/cmake_macros/intel_edison.cmake +D config/cesm/machines/cmake_macros/intel_euler2.cmake +D config/cesm/machines/cmake_macros/intel_euler3.cmake +D config/cesm/machines/cmake_macros/intel_euler4.cmake +D config/cesm/machines/cmake_macros/intel_greenplanet-sib29.cmake +D config/cesm/machines/cmake_macros/intel_greenplanet-sky24.cmake +D config/cesm/machines/cmake_macros/intel_hobart.cmake +D config/cesm/machines/cmake_macros/intel_izumi.cmake +D config/cesm/machines/cmake_macros/intel_laramie.cmake +D config/cesm/machines/cmake_macros/intel_lawrencium-lr2.cmake +D config/cesm/machines/cmake_macros/intel_lawrencium-lr3.cmake +D config/cesm/machines/cmake_macros/intel_sandiatoss3.cmake +D config/cesm/machines/cmake_macros/intel_stampede2-knl.cmake +D config/cesm/machines/cmake_macros/intel_stampede2-skx.cmake +D config/cesm/machines/cmake_macros/intel_theia.cmake +D config/cesm/machines/cmake_macros/intel_zeus.cmake +D config/cesm/machines/cmake_macros/izumi.cmake +D config/cesm/machines/cmake_macros/laramie.cmake +D config/cesm/machines/cmake_macros/lonestar5.cmake +D config/cesm/machines/cmake_macros/nag.cmake +D config/cesm/machines/cmake_macros/nvhpc-gpu.cmake +D config/cesm/machines/cmake_macros/nvhpc-gpu_casper.cmake +D 
config/cesm/machines/cmake_macros/nvhpc.cmake +D config/cesm/machines/cmake_macros/nvhpc_casper.cmake +D config/cesm/machines/cmake_macros/pgi-gpu.cmake +D config/cesm/machines/cmake_macros/pgi-gpu_casper.cmake +D config/cesm/machines/cmake_macros/pgi.cmake +D config/cesm/machines/cmake_macros/pgi_bluewaters.cmake +D config/cesm/machines/cmake_macros/pgi_casper.cmake +D config/cesm/machines/cmake_macros/pgi_cheyenne.cmake +D config/cesm/machines/cmake_macros/pgi_constance.cmake +D config/cesm/machines/cmake_macros/pgi_daint.cmake +D config/cesm/machines/cmake_macros/pgi_eastwind.cmake +D config/cesm/machines/cmake_macros/pgi_euler2.cmake +D config/cesm/machines/cmake_macros/pgi_euler3.cmake +D config/cesm/machines/cmake_macros/pgi_euler4.cmake +D config/cesm/machines/cmake_macros/pgi_hobart.cmake +D config/cesm/machines/cmake_macros/pgi_izumi.cmake +D config/cesm/machines/cmake_macros/pgi_olympus.cmake +D config/cesm/machines/cmake_macros/pleiades-bro.cmake +D config/cesm/machines/cmake_macros/pleiades-has.cmake +D config/cesm/machines/cmake_macros/pleiades-ivy.cmake +D config/cesm/machines/cmake_macros/pleiades-san.cmake +D config/cesm/machines/cmake_macros/stampede2-knl.cmake +D config/cesm/machines/cmake_macros/stampede2-skx.cmake +D config/cesm/machines/cmake_macros/theta.cmake +D config/cesm/machines/cmake_macros/universal.cmake +D config/cesm/machines/cmake_macros/userdefined.cmake +D config/cesm/machines/cmake_macros/zeus.cmake +D config/cesm/machines/config_batch.xml +D config/cesm/machines/config_compilers.xml +D config/cesm/machines/config_machines.xml +D config/cesm/machines/config_pio.xml +D config/cesm/machines/config_workflow.xml +D config/cesm/machines/cylc_suite.rc.template +D config/cesm/machines/mpi_run_gpu.casper +D config/cesm/machines/nag_mpi_argument.txt +D config/cesm/machines/template.case.run +D config/cesm/machines/template.case.test +D config/cesm/machines/template.st_archive +D config/cesm/machines/userdefined_laptop_template/README.md +D config/cesm/machines/userdefined_laptop_template/config_compilers.xml +D config/cesm/machines/userdefined_laptop_template/config_machines.xml +D config/cesm/machines/userdefined_laptop_template/config_pes.xml +D config/ufs/config_archive.xml +M config/ufs/config_files.xml +D config/ufs/config_grids.xml +D config/ufs/config_inputdata.xml +D config/ufs/machines/Depends.cray +D config/ufs/machines/Depends.gnu +D config/ufs/machines/Depends.intel +D config/ufs/machines/README +D config/ufs/machines/config_batch.xml +D config/ufs/machines/config_compilers.xml +D config/ufs/machines/config_machines.xml +D config/ufs/machines/config_pio.xml +D config/ufs/machines/config_workflow.xml +D config/ufs/machines/cylc_suite.rc.template +D config/ufs/machines/template.case.run +D config/ufs/machines/template.case.test +D config/ufs/machines/template.chgres.run +D config/ufs/machines/template.gfs_post.run +D config/ufs/machines/template.st_archive +D config/ufs/machines/userdefined_laptop_template/README.md +D config/ufs/machines/userdefined_laptop_template/config_compilers.xml +D config/ufs/machines/userdefined_laptop_template/config_machines.xml +D config/ufs/machines/userdefined_laptop_template/config_pes.xml +M config/xml_schemas/config_grids_v2.2.xsd +M config/xml_schemas/config_machines.xsd +M conftest.py +M doc/Makefile +M doc/README +M doc/source/Tools_user/index.rst.template +M doc/source/_templates/layout.html +M doc/source/build_cpl/adding-components.rst +M doc/source/build_cpl/index.rst +M doc/source/build_cpl/introduction.rst +M 
doc/source/conf.py +M doc/source/glossary/index.rst +M doc/source/index.rst +M doc/source/misc_tools/ect.rst +M doc/source/misc_tools/load-balancing-tool.rst +M doc/source/users_guide/cime-internals.rst +M doc/source/users_guide/porting-cime.rst +M doc/source/users_guide/setting-up-a-case.rst +M doc/source/users_guide/testing.rst +M doc/source/users_guide/unit_testing.rst +M doc/source/what_cime/index.rst +M doc/source/xml_files/atmosphere.rst +M doc/source/xml_files/cesm.rst +M doc/source/xml_files/common.rst +M doc/source/xml_files/components.rst +M doc/source/xml_files/drivers.rst +M doc/source/xml_files/e3sm.rst +M doc/source/xml_files/esp.rst +M doc/source/xml_files/index.rst +M doc/source/xml_files/land.rst +M doc/source/xml_files/landice.rst +M doc/source/xml_files/ocean.rst +M doc/source/xml_files/river.rst +M doc/source/xml_files/seaice.rst +M doc/source/xml_files/wave.rst +M doc/tools_autodoc.cfg +M doc/tools_autodoc.py +M scripts/Tools/archive_metadata +M scripts/Tools/bld_diff +M scripts/Tools/bless_test_results +M scripts/Tools/case.build +M scripts/Tools/case.cmpgen_namelists +M scripts/Tools/case.qstatus +M scripts/Tools/case.setup +M scripts/Tools/case.submit +M scripts/Tools/case_diff +M scripts/Tools/check_case +M scripts/Tools/check_input_data +M scripts/Tools/check_lockedfiles +M scripts/Tools/cime_bisect +M scripts/Tools/code_checker +M scripts/Tools/compare_namelists +M scripts/Tools/compare_test_results +M scripts/Tools/component_compare_baseline +M scripts/Tools/component_compare_copy +M scripts/Tools/component_compare_test +M scripts/Tools/component_generate_baseline +M scripts/Tools/cs.status +M scripts/Tools/e3sm_check_env +M scripts/Tools/e3sm_compile_wrap.py +M scripts/Tools/generate_cylc_workflow.py +M scripts/Tools/getTiming +M scripts/Tools/get_case_env +M scripts/Tools/get_standard_makefile_args +M scripts/Tools/jenkins_generic_job +M scripts/Tools/list_e3sm_tests +M scripts/Tools/mvsource +M scripts/Tools/normalize_cases +M scripts/Tools/pelayout +M scripts/Tools/preview_namelists +M scripts/Tools/preview_run +M scripts/Tools/save_provenance +M scripts/Tools/simple_compare +M scripts/Tools/standard_script_setup.py +M scripts/Tools/testreporter.py +M scripts/Tools/wait_for_tests +M scripts/Tools/xmlchange +M scripts/Tools/xmlconvertors/config_pes_converter.py +M scripts/Tools/xmlconvertors/convert-grid-v1-to-v2 +M scripts/Tools/xmlconvertors/grid_xml_converter.py +M scripts/Tools/xmlquery +M scripts/Tools/xmltestentry +M scripts/climate_reproducibility/README.md +M scripts/create_clone +M scripts/create_newcase +M scripts/create_test +M scripts/fortran_unit_testing/Examples/circle_area/tests/pFUnit/CMakeLists.txt +M scripts/fortran_unit_testing/python/printer.py +M scripts/fortran_unit_testing/python/test_xml_test_list.py +M scripts/fortran_unit_testing/python/xml_test_list.py +M scripts/fortran_unit_testing/run_tests.py +M scripts/lib/CIME/BuildTools/cmakemacroswriter.py +M scripts/lib/CIME/BuildTools/configure.py +M scripts/lib/CIME/BuildTools/macroconditiontree.py +M scripts/lib/CIME/BuildTools/macrowriterbase.py +M scripts/lib/CIME/BuildTools/makemacroswriter.py +M scripts/lib/CIME/BuildTools/possiblevalues.py +M scripts/lib/CIME/BuildTools/valuesetting.py +M scripts/lib/CIME/Servers/__init__.py +M scripts/lib/CIME/Servers/ftp.py +M scripts/lib/CIME/Servers/generic_server.py +M scripts/lib/CIME/Servers/gftp.py +M scripts/lib/CIME/Servers/svn.py +M scripts/lib/CIME/Servers/wget.py +M scripts/lib/CIME/SystemTests/dae.py +M 
scripts/lib/CIME/SystemTests/eri.py +M scripts/lib/CIME/SystemTests/erio.py +M scripts/lib/CIME/SystemTests/erp.py +M scripts/lib/CIME/SystemTests/err.py +M scripts/lib/CIME/SystemTests/erri.py +M scripts/lib/CIME/SystemTests/ers.py +M scripts/lib/CIME/SystemTests/ers2.py +M scripts/lib/CIME/SystemTests/ert.py +M scripts/lib/CIME/SystemTests/funit.py +M scripts/lib/CIME/SystemTests/homme.py +M scripts/lib/CIME/SystemTests/hommebaseclass.py +M scripts/lib/CIME/SystemTests/hommebfb.py +M scripts/lib/CIME/SystemTests/icp.py +M scripts/lib/CIME/SystemTests/irt.py +M scripts/lib/CIME/SystemTests/ldsta.py +M scripts/lib/CIME/SystemTests/mcc.py +M scripts/lib/CIME/SystemTests/mvk.py +M scripts/lib/CIME/SystemTests/nck.py +M scripts/lib/CIME/SystemTests/ncr.py +M scripts/lib/CIME/SystemTests/nodefail.py +M scripts/lib/CIME/SystemTests/pea.py +M scripts/lib/CIME/SystemTests/pem.py +M scripts/lib/CIME/SystemTests/pet.py +M scripts/lib/CIME/SystemTests/pfs.py +M scripts/lib/CIME/SystemTests/pgn.py +M scripts/lib/CIME/SystemTests/pre.py +M scripts/lib/CIME/SystemTests/rep.py +M scripts/lib/CIME/SystemTests/restart_tests.py +M scripts/lib/CIME/SystemTests/seq.py +M scripts/lib/CIME/SystemTests/sms.py +M scripts/lib/CIME/SystemTests/system_tests_common.py +M scripts/lib/CIME/SystemTests/system_tests_compare_two.py +M scripts/lib/CIME/SystemTests/test_utils/user_nl_utils.py +M scripts/lib/CIME/SystemTests/tsc.py +M scripts/lib/CIME/XML/archive.py +M scripts/lib/CIME/XML/archive_base.py +M scripts/lib/CIME/XML/batch.py +M scripts/lib/CIME/XML/compilerblock.py +M scripts/lib/CIME/XML/compilers.py +M scripts/lib/CIME/XML/component.py +M scripts/lib/CIME/XML/compsets.py +M scripts/lib/CIME/XML/entry_id.py +M scripts/lib/CIME/XML/env_archive.py +M scripts/lib/CIME/XML/env_base.py +M scripts/lib/CIME/XML/env_batch.py +M scripts/lib/CIME/XML/env_build.py +M scripts/lib/CIME/XML/env_case.py +M scripts/lib/CIME/XML/env_mach_pes.py +M scripts/lib/CIME/XML/env_mach_specific.py +M scripts/lib/CIME/XML/env_run.py +M scripts/lib/CIME/XML/env_test.py +M scripts/lib/CIME/XML/env_workflow.py +M scripts/lib/CIME/XML/expected_fails_file.py +M scripts/lib/CIME/XML/files.py +M scripts/lib/CIME/XML/generic_xml.py +M scripts/lib/CIME/XML/grids.py +M scripts/lib/CIME/XML/headers.py +M scripts/lib/CIME/XML/inputdata.py +M scripts/lib/CIME/XML/machines.py +M scripts/lib/CIME/XML/namelist_definition.py +M scripts/lib/CIME/XML/pes.py +M scripts/lib/CIME/XML/pio.py +M scripts/lib/CIME/XML/standard_module_setup.py +M scripts/lib/CIME/XML/stream.py +M scripts/lib/CIME/XML/test_reporter.py +M scripts/lib/CIME/XML/testlist.py +M scripts/lib/CIME/XML/tests.py +M scripts/lib/CIME/XML/testspec.py +M scripts/lib/CIME/XML/workflow.py +M scripts/lib/CIME/aprun.py +M scripts/lib/CIME/bless_test_results.py +M scripts/lib/CIME/build.py +M scripts/lib/CIME/buildlib.py +M scripts/lib/CIME/buildnml.py +M scripts/lib/CIME/case/README +M scripts/lib/CIME/case/case.py +M scripts/lib/CIME/case/case_clone.py +M scripts/lib/CIME/case/case_cmpgen_namelists.py +M scripts/lib/CIME/case/case_run.py +M scripts/lib/CIME/case/case_setup.py +M scripts/lib/CIME/case/case_st_archive.py +M scripts/lib/CIME/case/case_submit.py +M scripts/lib/CIME/case/case_test.py +M scripts/lib/CIME/case/check_input_data.py +M scripts/lib/CIME/case/check_lockedfiles.py +M scripts/lib/CIME/case/preview_namelists.py +M scripts/lib/CIME/code_checker.py +M scripts/lib/CIME/compare_namelists.py +M scripts/lib/CIME/compare_test_results.py +M scripts/lib/CIME/cs_status.py +M 
scripts/lib/CIME/cs_status_creator.py +M scripts/lib/CIME/date.py +M scripts/lib/CIME/expected_fails.py +M scripts/lib/CIME/get_timing.py +M scripts/lib/CIME/hist_utils.py +M scripts/lib/CIME/locked_files.py +M scripts/lib/CIME/namelist.py +M scripts/lib/CIME/nmlgen.py +M scripts/lib/CIME/provenance.py +M scripts/lib/CIME/simple_compare.py +M scripts/lib/CIME/test_scheduler.py +M scripts/lib/CIME/test_status.py +M scripts/lib/CIME/test_utils.py +M scripts/lib/CIME/tests/base.py +M scripts/lib/CIME/tests/case_fake.py +M scripts/lib/CIME/tests/custom_assertions_test_status.py +M scripts/lib/CIME/tests/scripts_regression_tests.py +M scripts/lib/CIME/tests/test_sys_bless_tests_results.py +M scripts/lib/CIME/tests/test_sys_build_system.py +M scripts/lib/CIME/tests/test_sys_cime_case.py +M scripts/lib/CIME/tests/test_sys_cime_performance.py +M scripts/lib/CIME/tests/test_sys_cmake_macros.py +M scripts/lib/CIME/tests/test_sys_create_newcase.py +M scripts/lib/CIME/tests/test_sys_full_system.py +M scripts/lib/CIME/tests/test_sys_grid_generation.py +M scripts/lib/CIME/tests/test_sys_jenkins_generic_job.py +M scripts/lib/CIME/tests/test_sys_macro_basic.py +M scripts/lib/CIME/tests/test_sys_make_macros.py +M scripts/lib/CIME/tests/test_sys_manage_and_query.py +M scripts/lib/CIME/tests/test_sys_run_restart.py +M scripts/lib/CIME/tests/test_sys_save_timings.py +M scripts/lib/CIME/tests/test_sys_single_submit.py +M scripts/lib/CIME/tests/test_sys_test_scheduler.py +M scripts/lib/CIME/tests/test_sys_unittest.py +M scripts/lib/CIME/tests/test_sys_user_concurrent_mods.py +M scripts/lib/CIME/tests/test_sys_wait_for_tests.py +M scripts/lib/CIME/tests/test_unit_case.py +M scripts/lib/CIME/tests/test_unit_case_fake.py +M scripts/lib/CIME/tests/test_unit_case_setup.py +M scripts/lib/CIME/tests/test_unit_compare_test_results.py +M scripts/lib/CIME/tests/test_unit_compare_two.py +M scripts/lib/CIME/tests/test_unit_cs_status.py +M scripts/lib/CIME/tests/test_unit_custom_assertions_test_status.py +M scripts/lib/CIME/tests/test_unit_expected_fails_file.py +M scripts/lib/CIME/tests/test_unit_grids.py +M scripts/lib/CIME/tests/test_unit_nmlgen.py +M scripts/lib/CIME/tests/test_unit_provenance.py +M scripts/lib/CIME/tests/test_unit_test_status.py +M scripts/lib/CIME/tests/test_unit_two_link_to_case2_output.py +M scripts/lib/CIME/tests/test_unit_user_mod_support.py +M scripts/lib/CIME/tests/test_unit_user_nl_utils.py +M scripts/lib/CIME/tests/test_unit_utils.py +M scripts/lib/CIME/tests/test_unit_xml_namelist_definition.py +M scripts/lib/CIME/tests/test_xml_env_batch.py +M scripts/lib/CIME/tests/utils.py +M scripts/lib/CIME/user_mod_support.py +M scripts/lib/CIME/utils.py +M scripts/lib/CIME/wait_for_tests.py +M scripts/lib/get_tests.py +M scripts/lib/jenkins_generic_job.py +M scripts/lib/six.py +M scripts/lib/six_additions.py +M scripts/query_config +M scripts/query_testlists +M scripts/tests/CMakeLists.txt +M scripts/tests/list_tests +M scripts/tests/user_mods_test3/shell_commands +M setup.cfg +M src/CMake/CESM_utils.cmake +M src/CMake/mpiexec.cmake +M src/build_scripts/buildlib.cprnc +M src/build_scripts/buildlib.gptl +M src/build_scripts/buildlib.internal_components +M src/build_scripts/buildlib.kokkos +M src/build_scripts/buildlib.mct +M src/build_scripts/buildlib.mpi-serial +M src/build_scripts/buildlib.pio +M src/build_scripts/buildlib_cmake.internal_components +M src/components/stub_comps_nuopc/satm/cime_config/buildnml +M src/components/stub_comps_nuopc/sesp/cime_config/buildnml +M 
src/components/stub_comps_nuopc/sglc/cime_config/buildnml +M src/components/stub_comps_nuopc/sice/cime_config/buildnml +M src/components/stub_comps_nuopc/slnd/cime_config/buildnml +M src/components/stub_comps_nuopc/socn/cime_config/buildnml +M src/components/stub_comps_nuopc/srof/cime_config/buildnml +M src/components/stub_comps_nuopc/swav/cime_config/buildnml +M src/components/xcpl_comps_nuopc/xatm/cime_config/buildnml +M src/components/xcpl_comps_nuopc/xglc/cime_config/buildnml +M src/components/xcpl_comps_nuopc/xglc/src/glc_comp_nuopc.F90 +M src/components/xcpl_comps_nuopc/xice/cime_config/buildnml +M src/components/xcpl_comps_nuopc/xlnd/cime_config/buildnml +M src/components/xcpl_comps_nuopc/xocn/cime_config/buildnml +M src/components/xcpl_comps_nuopc/xrof/cime_config/buildnml +M src/components/xcpl_comps_nuopc/xshare/dead_methods_mod.F90 +M src/components/xcpl_comps_nuopc/xwav/cime_config/buildnml +M src/externals/genf90/ChangeLog +M src/share/README +M src/share/timing/ChangeLog +M src/share/timing/GPTLutil.c +M src/share/timing/gptl.c +M src/share/timing/gptl_papi.c +M tools/configure +M tools/load_balancing_tool/layouts.py +M tools/load_balancing_tool/load_balancing_solve.py +M tools/load_balancing_tool/load_balancing_submit.py +M tools/load_balancing_tool/optimize_model.py +M tools/load_balancing_tool/tests/atm_lnd.py +M tools/load_balancing_tool/tests/example.json +M tools/load_balancing_tool/tests/load_balancing_test.py +M tools/load_balancing_tool/tests/timing/timing_1 +M tools/load_balancing_tool/tests/timing/timing_2 +M tools/load_balancing_tool/tests/timing/timing_3 +M tools/mapping/check_maps/check_map.sh +M tools/mapping/check_maps/src/Makefile +M tools/mapping/gen_domain_files/INSTALL +M tools/mapping/gen_domain_files/README +M tools/mapping/gen_domain_files/src/gen_domain.F90 +M tools/mapping/gen_mapping_files/README +M tools/mapping/gen_mapping_files/gen_ESMF_mapping_file/README +M tools/mapping/gen_mapping_files/runoff_to_ocn/INSTALL +M tools/mapping/gen_mapping_files/runoff_to_ocn/README +M tools/mapping/map_field/INSTALL +M tools/mapping/map_field/README +M tools/statistical_ensemble_test/ensemble.py +M tools/statistical_ensemble_test/pyCECT/.gitignore +M tools/statistical_ensemble_test/pyCECT/CHANGES.rst +M tools/statistical_ensemble_test/pyCECT/EET.py +M tools/statistical_ensemble_test/pyCECT/README.rst +M tools/statistical_ensemble_test/pyCECT/docs/conf.py +M tools/statistical_ensemble_test/pyCECT/docs/source/installation.rst +M tools/statistical_ensemble_test/pyCECT/docs/source/pyCECT.rst +M tools/statistical_ensemble_test/pyCECT/docs/source/pyEnsSum.rst +M tools/statistical_ensemble_test/pyCECT/docs/source/pyEnsSumPop.rst +M tools/statistical_ensemble_test/pyCECT/docs/source/readme.rst +M tools/statistical_ensemble_test/pyCECT/pyCECT.py +M tools/statistical_ensemble_test/pyCECT/pyEnsLib.py +M tools/statistical_ensemble_test/pyCECT/pyEnsSum.py +M tools/statistical_ensemble_test/pyCECT/pyEnsSumPop.py +M tools/statistical_ensemble_test/pyCECT/pyPlots.py +M tools/statistical_ensemble_test/pyCECT/test_pyEnsSum.sh +M tools/statistical_ensemble_test/single_run.py + +====================================================================== +====================================================================== + +Originator: Chris Fischer +Date: 12-16-21 +Tag: cime6.0.12 +Answer Changes: None +Tests: scripts_regression_tests +Dependencies: + +Brief Summary: + - Rename izumi.unified to izumi.cgd + - Fix test ptr to run_cmd_no_fail. 
+ - chdir is redundant; already done in CMakeLists.txt. + - Fix paths in cdash testing. + - Hot fix cdash testing. + - Update unit tests and linting. + - Fix SSL certification for testing database. + - Fix probing machine. + - cprnc compares NaN. + - Update izumi ESMF lib to 8.2.0 release. + - Update CCE for crayenv2 on cheyenne. + - Clean up Scorpio I/O performance data in the run directory. + - Fix the N_TestUnitTest by setting CIME_NO_CMAKE_MACRO. + - Add python 3.7.0 module load for izumi (CESM only). + - Fix broken links in documentation. + - Update cime externals. + - tools/configure was using the deprecated config_compilers.xml system. + - Remove documentation sections for pieces no longer included in CIME. + - More grids for mizuRoute. + +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +5e99566ae Merge pull request #4142 from ESMCI/fischer/izumi_cgd +6ffadc5fd fix test ptr to run_cmd_no_fail +d271ebbd8 chdir here is redundent, done in CMakeLists.txt +1faf45586 fix paths in cdash testing +3955917de hot fix cdash testing +baf320df2 Merge pull request #4129 from jasonb5/updates_testing +e725ab7cb Merge pull request #4128 from ESMCI/fischer/testreporter_ssl +f0dca0d96 Merge pull request #4134 from billsacks/fix_machine_probe +4961f0357 Merge pull request #4132 from jedwards4b/cprnc_nan_detect +188e9fe2b Merge pull request #4124 from ESMCI/fischer/izumi_esmf +b0f7080a9 update CCE for crayenv2 on cheyenne +8c515e31c Merge pull request #4127 from ESMCI/jayeshkrishna/consolidate_spio_timing +5a98a511b fix the N_TestUnitTest by setting CIME_NO_CMAKE_MACRO +cde1dafda Merge pull request #4117 from peverwhee/python_load_module +f9fe41210 Merge pull request #4123 from billsacks/fix_doc_links +1686f03b8 update externals +9f263ed3d Merge pull request #4122 from ESMCI/jgfouca/fix_tools_configure +7f709b709 Merge pull request #4108 from billsacks/remove_some_docs +c7813bce7 Merge pull request #3823 from ekluzek/mizuRoute + + +Modified files: git diff --name-status [previous_tag] +M .github/workflows/srt.yml +M .github/workflows/srt_nuopc.yml +A .pre-commit-config.yaml +M Externals.cfg +M README.md +M config/cesm/config_grids.xml +M config/cesm/config_grids_nuopc.xml +M config/cesm/machines/config_batch.xml +M config/cesm/machines/config_machines.xml +A conftest.py +D doc/source/data_models/data-atm.rst +D doc/source/data_models/data-lnd.rst +D doc/source/data_models/data-model-science.rst +D doc/source/data_models/data-ocean.rst +D doc/source/data_models/data-river.rst +D doc/source/data_models/data-seaice.rst +D doc/source/data_models/data-wave.rst +D doc/source/data_models/design-details.rst +D doc/source/data_models/index.rst +D doc/source/data_models/input-namelists.rst +D doc/source/data_models/input-streams.rst +D doc/source/data_models/introduction.rst +D doc/source/driver_cpl/bit-for-bit-flag.rst +D doc/source/driver_cpl/budgets.rst +D doc/source/driver_cpl/cplug-02.1-figx1.jpg +D doc/source/driver_cpl/design.rst +D doc/source/driver_cpl/driver_threading_control.rst +D doc/source/driver_cpl/grids.rst +D doc/source/driver_cpl/history-and-restarts.rst +D doc/source/driver_cpl/implementation.rst +D doc/source/driver_cpl/index.rst +D doc/source/driver_cpl/initialization-and-restart.rst +D doc/source/driver_cpl/introduction.rst +D doc/source/driver_cpl/multi-instance.rst +D doc/source/driver_cpl/namelist-overview.rst +D doc/source/driver_cpl/time-management.rst +M doc/source/index.rst +M doc/source/users_guide/cime-change-namelist.rst +M doc/source/users_guide/grids.rst 
+M doc/source/users_guide/running-a-case.rst +M doc/source/users_guide/testing.rst +M doc/source/what_cime/index.rst +M scripts/lib/CIME/BuildTools/configure.py +M scripts/lib/CIME/BuildTools/macrowriterbase.py +M scripts/lib/CIME/SystemTests/ers2.py +M scripts/lib/CIME/SystemTests/pgn.py +M scripts/lib/CIME/XML/machines.py +M scripts/lib/CIME/XML/test_reporter.py +M scripts/lib/CIME/case/case_setup.py +M scripts/lib/CIME/provenance.py +D scripts/lib/CIME/tests/SystemTests/__init__.py +D scripts/lib/CIME/tests/SystemTests/test_utils/__init__.py +D scripts/lib/CIME/tests/XML/__init__.py +A scripts/lib/CIME/tests/base.py +A scripts/lib/CIME/tests/scripts_regression_tests.py +A scripts/lib/CIME/tests/test_sys_bless_tests_results.py +A scripts/lib/CIME/tests/test_sys_build_system.py +A scripts/lib/CIME/tests/test_sys_cime_case.py +A scripts/lib/CIME/tests/test_sys_cime_performance.py +A scripts/lib/CIME/tests/test_sys_cmake_macros.py +A scripts/lib/CIME/tests/test_sys_create_newcase.py +A scripts/lib/CIME/tests/test_sys_full_system.py +A scripts/lib/CIME/tests/test_sys_grid_generation.py +A scripts/lib/CIME/tests/test_sys_jenkins_generic_job.py +A scripts/lib/CIME/tests/test_sys_macro_basic.py +A scripts/lib/CIME/tests/test_sys_make_macros.py +A scripts/lib/CIME/tests/test_sys_manage_and_query.py +A scripts/lib/CIME/tests/test_sys_query_config.py +A scripts/lib/CIME/tests/test_sys_run_restart.py +A scripts/lib/CIME/tests/test_sys_save_timings.py +A scripts/lib/CIME/tests/test_sys_single_submit.py +A scripts/lib/CIME/tests/test_sys_test_scheduler.py +A scripts/lib/CIME/tests/test_sys_unittest.py +A scripts/lib/CIME/tests/test_sys_user_concurrent_mods.py +A scripts/lib/CIME/tests/test_sys_wait_for_tests.py +D scripts/lib/CIME/tests/test_test_scheduler.py +R100 scripts/lib/CIME/tests/test_case.py scripts/lib/CIME/tests/test_unit_case.py +R100 scripts/lib/CIME/tests/test_case_fake.py scripts/lib/CIME/tests/test_unit_case_fake.py +R100 scripts/lib/CIME/tests/test_case_setup.py scripts/lib/CIME/tests/test_unit_case_setup.py +R100 scripts/lib/CIME/tests/test_compare_test_results.py scripts/lib/CIME/tests/test_unit_compare_test_results.py +R100 scripts/lib/CIME/tests/SystemTests/test_system_tests_compare_two.py scripts/lib/CIME/tests/test_unit_compare_two.py +R100 scripts/lib/CIME/tests/test_cs_status.py scripts/lib/CIME/tests/test_unit_cs_status.py +R100 scripts/lib/CIME/tests/test_custom_assertions_test_status.py scripts/lib/CIME/tests/test_unit_custom_assertions_test_status.py +R100 scripts/lib/CIME/tests/XML/test_expected_fails_file.py scripts/lib/CIME/tests/test_unit_expected_fails_file.py +R100 scripts/lib/CIME/tests/XML/test_grids.py scripts/lib/CIME/tests/test_unit_grids.py +R100 scripts/lib/CIME/tests/test_nmlgen.py scripts/lib/CIME/tests/test_unit_nmlgen.py +R100 scripts/lib/CIME/tests/test_provenance.py scripts/lib/CIME/tests/test_unit_provenance.py +R100 scripts/lib/CIME/tests/test_test_status.py scripts/lib/CIME/tests/test_unit_test_status.py +R100 scripts/lib/CIME/tests/SystemTests/test_system_tests_compare_two_link_to_case2_output.py scripts/lib/CIME/tests/test_unit_two_link_to_case2_output.py +R100 scripts/lib/CIME/tests/test_user_mod_support.py scripts/lib/CIME/tests/test_unit_user_mod_support.py +R100 scripts/lib/CIME/tests/SystemTests/test_utils/test_user_nl_utils.py scripts/lib/CIME/tests/test_unit_user_nl_utils.py +R100 scripts/lib/CIME/tests/test_utils.py scripts/lib/CIME/tests/test_unit_utils.py +R100 scripts/lib/CIME/tests/test_xml_namelist_definition.py 
scripts/lib/CIME/tests/test_unit_xml_namelist_definition.py +M scripts/lib/CIME/tests/utils.py +M scripts/lib/CIME/utils.py +M scripts/tests/CMakeLists.txt +M scripts/tests/list_tests +D scripts/tests/scripts_regression_tests.py +A setup.cfg +M tools/cprnc/compare_vars_mod.F90.in +M tools/cprnc/run_tests +M tools/cprnc/test_inputs/README +A tools/cprnc/test_inputs/control_floatDoubleNan.nc +A tools/cprnc/test_inputs/diffs_in_nans.nc +M tools/mapping/gen_mapping_files/gen_ESMF_mapping_file/create_ESMF_map.sh + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 11-1-21 +Tag: cime6.0.11 +Answer Changes: None +Tests: scripts_regression_tests +Dependencies: + +Brief Summary: + - Add IRT test to exceptions for generate history. + - Replace use of MODEL with COMP_NAME in cmake_macros directory. + - Improve Macro.make generation. + - Update centos7-linux settings. + +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +b763b20a9 Merge pull request #4110 from jedwards4b/fix_IRT_generate +48f8ad154 Merge pull request #4119 from jedwards4b/cmake_MODEL_TO_COMP_NAME +90b4e37b7 Merge pull request #4113 from ESMCI/jgfouca/improve_make_macro_generation +5868385ad update centos7-linux settings + + +Modified files: git diff --name-status [previous_tag] +M config/cesm/machines/cmake_macros/CNL.cmake +M config/cesm/machines/cmake_macros/cheyenne.cmake +M config/cesm/machines/cmake_macros/container.cmake +M config/cesm/machines/cmake_macros/cray.cmake +M config/cesm/machines/cmake_macros/frontera.cmake +M config/cesm/machines/cmake_macros/gnu_cheyenne.cmake +M config/cesm/machines/cmake_macros/gnu_modex.cmake +M config/cesm/machines/cmake_macros/hobart.cmake +M config/cesm/machines/cmake_macros/ibm.cmake +M config/cesm/machines/cmake_macros/ibm_AIX.cmake +M config/cesm/machines/cmake_macros/intel_aleph.cmake +M config/cesm/machines/cmake_macros/intel_athena.cmake +M config/cesm/machines/cmake_macros/intel_cori-haswell.cmake +M config/cesm/machines/cmake_macros/intel_cori-knl.cmake +M config/cesm/machines/cmake_macros/intel_edison.cmake +M config/cesm/machines/cmake_macros/intel_lawrencium-lr2.cmake +M config/cesm/machines/cmake_macros/intel_lawrencium-lr3.cmake +M config/cesm/machines/cmake_macros/intel_zeus.cmake +M config/cesm/machines/cmake_macros/izumi.cmake +M config/cesm/machines/cmake_macros/laramie.cmake +M config/cesm/machines/cmake_macros/nag.cmake +M config/cesm/machines/cmake_macros/nvhpc-gpu.cmake +M config/cesm/machines/cmake_macros/nvhpc.cmake +M config/cesm/machines/cmake_macros/pgi-gpu.cmake +M config/cesm/machines/cmake_macros/pgi.cmake +M config/cesm/machines/cmake_macros/universal.cmake +M config/cesm/machines/cmake_macros/zeus.cmake +M config/cesm/machines/config_machines.xml +M scripts/lib/CIME/SystemTests/irt.py +M scripts/lib/CIME/build.py +M scripts/lib/CIME/hist_utils.py +M scripts/tests/scripts_regression_tests.py + + +====================================================================== + +====================================================================== + +Originator: Chris Fischer +Date: 10-25-2021 +Tag: cime6.0.10 +Answer Changes: [None, Round Off, Climate Changing] +Tests: scripts_regression_tests +Dependencies: + +Brief Summary: + - Update to Cheyenne to use ESMF_8_2_0_beta_snapshot_23. + - Remove reference to Argonne from License file. + - Remove debug print statements. 
+ - Port to nersc perlmutter and CPE on cheyenne. + - config_archive changes for running CISM with multiple ice sheets. + - wpc erio improvement. + - Hot fix Makefile for cesm. + - Fixes copying user cmake files. + - Remove MODEL as a setting for the build system, use COMP_NAME instead. + - Hot fix for scripts regression tests. + - Merge branch 'master' of github.com:/ESMCI/cime. + - Update Externals. + - Hot fix for nuopc mpi-serial tests. + - Remove dependency of cprnc on non-existent COMPARE_VARS target. + - Add cmake macros. + - cime_bisect: Improve robustness when using custom script. + - Update PGN and TSC test scripts for E3SM. + - Fix limited GitHub workflow nuopc scripts regression testing. + - Hot fix, add another cam target that uses kokkos. + - Hot fix for better cmake-macro toggling. + - Change E3SM to use cmake macro file system. + +User interface changes: + +PR summary: git log --oneline --first-parent [previous_tag]..master +1a77ba628 Merge pull request #4114 from ESMCI/fischer/ESMF_update +3708b0e26 Merge pull request #4112 from ESMCI/fischer/license +e6fb20994 hot fix, remove debug print statements +adc6adc3f Merge pull request #4106 from jedwards4b/perlmutter_port +c8af27966 Merge pull request #4101 from billsacks/remove_cism_config_archive +7eeb2c8e6 Merge pull request #4109 from ESMCI/wpc_erio_improvement +a26c8e393 hot fix Makefile for cesm +fb5934c63 Merge pull request #4105 from jasonb5/fixes_user_cmake +4a6b60438 Merge pull request #4104 from ESMCI/jgfouca/remove_MODEL_from_bld +828fd3227 hot fix for scripts regression tests +79159c14e Merge branch 'master' of github.com:/ESMCI/cime +ea1597e79 update externals +d6956db5c hot fix for nuopc mpi-serial test +3d1be0ed6 Merge pull request #4098 from bartgol/bartgol/remove-compare-vars-dep +2b6088308 Merge pull request #4093 from jedwards4b/replace_config_compilers_with_cmake_macros +258737591 Merge pull request #4099 from ESMCI/jgfouca/imprv_cime_bisect +802206017 Merge pull request #4092 from ESMCI/wlin/atm_pgn_tsc_tests +c89e85d10 Merge pull request #4094 from ESMCI/fischer/nuopc_srt +6a2cc11fd hotfix, add another cam target that uses kokkos +fbb407916 Hotfix for better cmake-macro toggling +86a0db6f3 Merge pull request #4088 from ESMCI/jgfouca/e3sm_cmake_macros + + +Modified files: git diff --name-status [previous_tag] +M .github/workflows/srt_nuopc.yml +M CMakeLists.txt +M Externals.cfg +M LICENSE.TXT +M config/cesm/config_archive.xml +M config/cesm/config_files.xml +M config/cesm/config_grids.xml +A config/cesm/machines/cmake_macros/CMakeLists.txt +A config/cesm/machines/cmake_macros/CNL.cmake +A config/cesm/machines/cmake_macros/Darwin.cmake +A config/cesm/machines/cmake_macros/Macros.cmake +A config/cesm/machines/cmake_macros/arm.cmake +A config/cesm/machines/cmake_macros/armgcc.cmake +A config/cesm/machines/cmake_macros/athena.cmake +A config/cesm/machines/cmake_macros/bluewaters.cmake +A config/cesm/machines/cmake_macros/casper.cmake +A config/cesm/machines/cmake_macros/centos7-linux.cmake +A config/cesm/machines/cmake_macros/cheyenne.cmake +A config/cesm/machines/cmake_macros/container.cmake +A config/cesm/machines/cmake_macros/cray.cmake +A config/cesm/machines/cmake_macros/cray_daint.cmake +A config/cesm/machines/cmake_macros/euler2.cmake +A config/cesm/machines/cmake_macros/euler3.cmake +A config/cesm/machines/cmake_macros/euler4.cmake +A config/cesm/machines/cmake_macros/frontera.cmake +A config/cesm/machines/cmake_macros/gnu.cmake +A config/cesm/machines/cmake_macros/gnu_cheyenne.cmake +A 
config/cesm/machines/cmake_macros/gnu_coeus.cmake +A config/cesm/machines/cmake_macros/gnu_hobart.cmake +A config/cesm/machines/cmake_macros/gnu_homebrew.cmake +A config/cesm/machines/cmake_macros/gnu_melvin.cmake +A config/cesm/machines/cmake_macros/gnu_modex.cmake +A config/cesm/machines/cmake_macros/hobart.cmake +A config/cesm/machines/cmake_macros/ibm.cmake +A config/cesm/machines/cmake_macros/ibm_AIX.cmake +A config/cesm/machines/cmake_macros/ibm_BGQ.cmake +A config/cesm/machines/cmake_macros/ibm_mira.cmake +A config/cesm/machines/cmake_macros/intel.cmake +A config/cesm/machines/cmake_macros/intel_Darwin.cmake +A config/cesm/machines/cmake_macros/intel_aleph.cmake +A config/cesm/machines/cmake_macros/intel_athena.cmake +A config/cesm/machines/cmake_macros/intel_bluewaters.cmake +A config/cesm/machines/cmake_macros/intel_casper.cmake +A config/cesm/machines/cmake_macros/intel_cheyenne.cmake +A config/cesm/machines/cmake_macros/intel_constance.cmake +A config/cesm/machines/cmake_macros/intel_cori-haswell.cmake +A config/cesm/machines/cmake_macros/intel_cori-knl.cmake +A config/cesm/machines/cmake_macros/intel_eastwind.cmake +A config/cesm/machines/cmake_macros/intel_edison.cmake +A config/cesm/machines/cmake_macros/intel_euler2.cmake +A config/cesm/machines/cmake_macros/intel_euler3.cmake +A config/cesm/machines/cmake_macros/intel_euler4.cmake +A config/cesm/machines/cmake_macros/intel_greenplanet-sib29.cmake +A config/cesm/machines/cmake_macros/intel_greenplanet-sky24.cmake +A config/cesm/machines/cmake_macros/intel_hobart.cmake +A config/cesm/machines/cmake_macros/intel_izumi.cmake +A config/cesm/machines/cmake_macros/intel_laramie.cmake +A config/cesm/machines/cmake_macros/intel_lawrencium-lr2.cmake +A config/cesm/machines/cmake_macros/intel_lawrencium-lr3.cmake +A config/cesm/machines/cmake_macros/intel_sandiatoss3.cmake +A config/cesm/machines/cmake_macros/intel_stampede2-knl.cmake +A config/cesm/machines/cmake_macros/intel_stampede2-skx.cmake +A config/cesm/machines/cmake_macros/intel_theia.cmake +A config/cesm/machines/cmake_macros/intel_zeus.cmake +A config/cesm/machines/cmake_macros/izumi.cmake +A config/cesm/machines/cmake_macros/laramie.cmake +A config/cesm/machines/cmake_macros/lonestar5.cmake +A config/cesm/machines/cmake_macros/nag.cmake +A config/cesm/machines/cmake_macros/nvhpc-gpu.cmake +A config/cesm/machines/cmake_macros/nvhpc-gpu_casper.cmake +A config/cesm/machines/cmake_macros/nvhpc.cmake +A config/cesm/machines/cmake_macros/nvhpc_casper.cmake +A config/cesm/machines/cmake_macros/pgi-gpu.cmake +A config/cesm/machines/cmake_macros/pgi-gpu_casper.cmake +A config/cesm/machines/cmake_macros/pgi.cmake +A config/cesm/machines/cmake_macros/pgi_bluewaters.cmake +A config/cesm/machines/cmake_macros/pgi_casper.cmake +A config/cesm/machines/cmake_macros/pgi_cheyenne.cmake +A config/cesm/machines/cmake_macros/pgi_constance.cmake +A config/cesm/machines/cmake_macros/pgi_daint.cmake +A config/cesm/machines/cmake_macros/pgi_eastwind.cmake +A config/cesm/machines/cmake_macros/pgi_euler2.cmake +A config/cesm/machines/cmake_macros/pgi_euler3.cmake +A config/cesm/machines/cmake_macros/pgi_euler4.cmake +A config/cesm/machines/cmake_macros/pgi_hobart.cmake +A config/cesm/machines/cmake_macros/pgi_izumi.cmake +A config/cesm/machines/cmake_macros/pgi_olympus.cmake +A config/cesm/machines/cmake_macros/pleiades-bro.cmake +A config/cesm/machines/cmake_macros/pleiades-has.cmake +A config/cesm/machines/cmake_macros/pleiades-ivy.cmake +A config/cesm/machines/cmake_macros/pleiades-san.cmake +A 
config/cesm/machines/cmake_macros/stampede2-knl.cmake +A config/cesm/machines/cmake_macros/stampede2-skx.cmake +A config/cesm/machines/cmake_macros/theta.cmake +A config/cesm/machines/cmake_macros/universal.cmake +A config/cesm/machines/cmake_macros/userdefined.cmake +A config/cesm/machines/cmake_macros/zeus.cmake +M config/cesm/machines/config_batch.xml +M config/cesm/machines/config_compilers.xml +M config/cesm/machines/config_machines.xml +M config/config_tests.xml +M config/e3sm/config_files.xml +M config/xml_schemas/config_archive.xsd +M doc/source/users_guide/cime-config.rst +M doc/source/users_guide/machine.rst +M doc/source/users_guide/porting-cime.rst +M doc/source/users_guide/troubleshooting.rst +M doc/source/users_guide/unit_testing.rst +M doc/source/xml_files/cesm.rst +M scripts/Tools/Makefile +M scripts/Tools/cime_bisect +M scripts/lib/CIME/BuildTools/configure.py +M scripts/lib/CIME/SystemTests/erio.py +M scripts/lib/CIME/SystemTests/pgn.py +M scripts/lib/CIME/SystemTests/tsc.py +M scripts/lib/CIME/XML/archive_base.py +M scripts/lib/CIME/XML/machines.py +M scripts/lib/CIME/build.py +M scripts/lib/CIME/buildlib.py +M scripts/lib/CIME/case/case_setup.py +M scripts/lib/CIME/test_scheduler.py +A scripts/lib/CIME/tests/test_case_setup.py +M scripts/lib/get_tests.py +M scripts/tests/scripts_regression_tests.py +M src/build_scripts/buildlib.gptl +M src/build_scripts/buildlib.mct +M src/build_scripts/buildlib.mpi-serial +M src/build_scripts/buildlib.pio +M tools/cprnc/CMakeLists.txt + +====================================================================== + +====================================================================== + +Originator: Chris Fischer Date: 9-09-2021 Tag: cime6.0.9 Answer Changes: None @@ -15,7 +965,7 @@ Brief Summary: - Update modules and build flags on cori. - Add a SourceMods/src.cdeps directory for shared cdeps sourcemods. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 4fa1194a2 Merge pull request #4085 from ESMCI/jgfouca/fix_scripts_reg @@ -42,11 +992,11 @@ M tools/statistical_ensemble_test/pyCECT/docs/requirements.txt ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 9-02-2021 Tag: cime6.0.8 Answer Changes: None -Tests: +Tests: Dependencies: Brief Summary: @@ -55,7 +1005,7 @@ Brief Summary: - Set nuopc as the default. - Remove moab. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 1c41537f7 Merge pull request #4080 from ESMCI/fischer/izumi_esmf @@ -76,11 +1026,11 @@ D src/drivers/moab ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 8-25-2021 Tag: cime6.0.7 Answer Changes: None -Tests: ran the test testlist_cdeps.xml +Tests: ran the test testlist_cdeps.xml Dependencies: Brief Summary: @@ -101,11 +1051,11 @@ M scripts/lib/CIME/case/case_setup.py ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 8-20-2021 Tag: cime6.0.6 Answer Changes: [None, Round Off, Climate Changing] -Tests: +Tests: Dependencies: Brief Summary: @@ -117,7 +1067,7 @@ Brief Summary: - Changed Addendum to Appendices. - Allow multiple methods for running unit tests. 
-User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 2ceb39021 make sure CLM_USRDAT_NAME is not UNSET if set @@ -135,11 +1085,11 @@ Modified files: git diff --name-status [previous_tag] ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 8-6-2021 Tag: cime6.0.5 Answer Changes: None -Tests: scripts_regression_tests +Tests: scripts_regression_tests Dependencies: Brief Summary: @@ -148,7 +1098,7 @@ Brief Summary: - Update esmf version. - Support the creation of multiple user_nl files for a component. -User interface changes: +User interface changes: - Allows components to define a get_user_nl_list function in their buildnml file, which supports the creation of multiple user_nl files for the component @@ -177,7 +1127,7 @@ M scripts/lib/CIME/utils.py ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 8-2-2021 Tag: cime6.0.4 Answer Changes: None @@ -194,7 +1144,7 @@ Brief Summary: - Fix type in centos7-linux definition. - jenkins_generic_job: no reason for this magic config setting here. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master c6f489356 Merge pull request #4053 from jasonb5/fix_replay_archive @@ -289,7 +1239,7 @@ M tools/cprnc/CMakeLists.txt ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 6-29-2021 Tag: cime6.0.2 Answer Changes: None @@ -302,7 +1252,7 @@ Brief Summary: - Add esmf support for casper pgi. - Fix testreporter issues with python3 -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 95644f2 update externals @@ -328,8 +1278,8 @@ M src/build_scripts/buildlib.pio ====================================================================== -Originator: Chris Fischer -Date: 6-29-2021 +Originator: Chris Fischer +Date: 6-29-2021 Tag: cime6.0.1 Answer Changes: None Tests: scripts_regression_tests @@ -343,9 +1293,9 @@ Brief Summary: - Multi gpus casper. - cime smartsim. - Refactor lilac build - only relevant to CESM. - - Adds default walltime for queues. + - Adds default walltime for queues. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master ccdf7376e Merge pull request #4019 from jedwards4b/fix_max_tasks_per_node @@ -402,7 +1352,7 @@ M scripts/tests/scripts_regression_tests.py ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 6-23-2021 Tag: cime6.0.0 Answer Changes: None @@ -430,7 +1380,7 @@ Brief Summary: - Updates for pleiades systems. - Add feature to capture CIME commands in a script. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 866d4f507 Merge pull request #4010 from ESMCI/fischer/rm_moved_share @@ -1812,7 +2762,7 @@ M tools/statistical_ensemble_test/pyCECT/docs/requirements.txt ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 5-11-2021 Tag: cime5.8.47 Answer Changes: None @@ -1822,7 +2772,7 @@ Dependencies: Brief Summary: - Add back domain for CLM_USRDAT for mct driver. 
-User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master f01e48081 Merge pull request #3954 from jedwards4b/fix_clm_usrdata_mct @@ -1834,7 +2784,7 @@ M config/cesm/config_grids.xml ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 5-10-2021 Tag: cime5.8.46 Answer Changes: None @@ -1844,7 +2794,7 @@ Dependencies: Brief Summary: - Rename GLC Greenland grid from "gland" to "gris" -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master a52e7faf6 Merge pull request #3943 from billsacks/rename_gland @@ -1859,7 +2809,7 @@ M src/drivers/mct/cime_config/config_component.xml ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 5-7-2021 Tag: cime5.8.45 Answer Changes: None @@ -1875,7 +2825,7 @@ Brief Summary: - Adds git submodule info to provenance. - Fixes phase 1 of multisubmit not executing during rerun. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 1e72b5db7 Merge pull request #3956 from jedwards4b/GLCMakefile @@ -1909,7 +2859,7 @@ M scripts/tests/scripts_regression_tests.py ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 5-3-2021 Tag: cime5.8.44 Answer Changes: None @@ -1921,7 +2871,7 @@ Brief Summary: - Revert cheyenne intel mpt to 2.22. - Change shebang to explict python3. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 6cbfdd632 Merge pull request #3951 from ESMCI/fischer/mpt_cheyenne @@ -1980,7 +2930,7 @@ M scripts/tests/scripts_regression_tests.py ====================================================================== -Originator: Chris Fiscehr +Originator: Chris Fiscehr Date: 4-26-2021 Tag: cime5.8.43 Answer Changes: None @@ -2002,7 +2952,7 @@ Brief Summary: - Update configuration xml for Casper and add openacc directives to enable GPU simulation. - Fix query_config for py3 output format. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 40be61f38 Merge pull request #3941 from ESMCI/fischer/cheyenne_intel @@ -2056,7 +3006,7 @@ M src/share/util/shr_spfn_mod.F90 ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 4-8-2021 Tag: cime5.8.42 Answer Changes: None @@ -2066,7 +3016,7 @@ Dependencies: Brief Summary: - No need to test ESMF_AWARE_THREADING here. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master b655c9127 Merge pull request #3918 from jedwards4b/ninst_fix @@ -2078,11 +3028,11 @@ M scripts/lib/CIME/XML/env_mach_pes.py ====================================================================== -Originator: Jim Edwards +Originator: Jim Edwards Date: 4-6-2021 Tag: cime5.8.41 Answer Changes: Climate Changing just for T compsets. -Tests: scripts_regression_tests +Tests: scripts_regression_tests SMS_Vnuopc_D_P1x1.f10_f10_mg37.I2000Clm50Sp.izumi_nag.clm-default Dependencies: @@ -2093,7 +3043,7 @@ Brief Summary: - Handle case when domain_root is not present. - Point to new dlnd scpl forcing data. 
-User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 81d4f2033 Merge pull request #3904 from ESMCI/jayeshkrishna/spio_stats_provenance @@ -2118,17 +3068,17 @@ M src/components/data_comps_mct/dlnd/cime_config/config_component.xml ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 3-30-2021 Tag: cime5.8.40 -Answer Changes: -Tests: +Answer Changes: +Tests: Dependencies: Brief Summary: - Bug fix for generating C/G nuopc compsets -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master a900c8ef6 Merge pull request #3900 from mvertens/feature/nuopc_grids_bugfix @@ -2141,7 +3091,7 @@ M scripts/lib/CIME/XML/grids.py ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 3-26-2021 Tag: cime5.8.39 Answer Changes: Round Off for nuopc/cmeps configurations. @@ -2162,7 +3112,7 @@ Brief Summary: - New nuopc/cmeps single/column single/point functionality. - Logs tail of cprnc outputs to TestStatus.log. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 1e705ee39 Merge pull request #3895 from ESMCI/jgfouca/add_rof_support_to_primary_comp @@ -2203,16 +3153,16 @@ M tools/statistical_ensemble_test/pyCECT/docs/requirements.txt ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 3-12-2021 Tag: cime5.8.38 -Answer Changes: bit-for-bit, climate-changing for trigrid +Answer Changes: bit-for-bit, climate-changing for trigrid Tests: scripts_regression_tests Dependencies: Brief Summary: -User interface changes: +User interface changes: - Disable kokkos tests when building for E3SM. - Remove references to CISM1-only grid for CESM. - NAG port for nuopc. @@ -2355,7 +3305,7 @@ M src/share/util/shr_reprosum_mod.F90 ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 1-27-2021 Tag: cime5.8.37 Answer Changes: Some fill_values and pgi on cheyenne @@ -2388,7 +3338,7 @@ Brief Summary: - Check if TEST_ROOT exists earlier in cleanup from scripts_regression_tests. - query_testlists: replace newlines in comments with spaces. -User interface changes: +User interface changes: - Slight change to output of query_testlists PR summary: git log --oneline --first-parent [previous_tag]..master @@ -2555,7 +3505,7 @@ A tools/statistical_ensemble_test/pyCECT/test_uf_cam_ect.sh ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 12-11-2020 Tag: cime5.8.36 Answer Changes: None @@ -2569,7 +3519,7 @@ Brief Summary: - Add memory usage logging for memory profiling. - Update OS process id error-checking in GPTL's get_memusage. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master a0fb01a33 Merge pull request #3796 from ESMCI/jgfouca/wait_for_tests_expect @@ -2983,7 +3933,7 @@ M tools/statistical_ensemble_test/single_run.py ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 12-7-2020 Tag: cime5.8.35 Answer Changes: None @@ -3004,7 +3954,7 @@ Brief Summary: - Update seq_io_write_time to work for CPLHIST run. 
- Fix python3 incompatibilities in case.py. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 184f5886f Merge pull request #3732 from jedwards4b/change_pio_default_version @@ -3164,12 +4114,12 @@ M src/share/util/shr_pio_mod.F90 ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 11-12-2020 Tag: cime5.8.34 Answer Changes: None Tests: code-checker, ERP_Ln9.f09_f09_mg17.F2000climo.cheyenne_intel.cam-outfrq9s_mg3 - github actions, scripts_regression_tests.py, P_TestJenkinsGenericJob + github actions, scripts_regression_tests.py, P_TestJenkinsGenericJob Dependencies: Brief Summary: @@ -3208,7 +4158,7 @@ Brief Summary: - Fix check_input_data to fail over to svn when a wget download fails. - Change permissions on ref case rpointer files. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 726e53cf7 Merge pull request #3775 from ESMCI/fischer/MPAS_build @@ -3308,7 +4258,7 @@ Brief Summary: - Revert "Use enhanced compiler flags for PUMAS/MG3 related F90 codes". - Use enhanced compiler flags for PUMAS/MG3 related F90 codes. - Create srt.yml. - - ESMF and pgi cheyenne update. + - ESMF and pgi cheyenne update. - Correctly attribute scorpio timers in cime_pre_init2. - Fix test Y for nuopc driver. - Add nuopc tests to cdash. @@ -3316,7 +4266,7 @@ Brief Summary: - Fix for pop ecosys rh files. - Merge branch 'master'. - Update ESMF on cheyenne. - - Change name ccsm cppdefs. + - Change name ccsm cppdefs. - Fixes Y_TestUserConcurrentMods by adding a more-robust waiting scheme. - Add a no-submit option to jenkins_generic_job. - In MEMLEAK checks, skip first day memory highwater while initializing. @@ -3346,7 +4296,7 @@ Brief Summary: - Cime changes required by the topounit inplementation in E3SM. - Update for UFS app. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 5316eabdf Merge pull request #3761 from ESMCI/jgfouca/sep_builds_for_tests @@ -3460,12 +4410,12 @@ M src/drivers/mct/shr/seq_flds_mod.F90 ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 8-28-2020 Tag: cime5.8.32 Answer Changes: None Tests: scripts_regression_tests, create_test_cime_developer, - SMS_D_Ld5_Vnuopc.f10_f10_musgs.I2000Clm50BgcCropGs.cheyenne_intel.clm-default, + SMS_D_Ld5_Vnuopc.f10_f10_musgs.I2000Clm50BgcCropGs.cheyenne_intel.clm-default, SMS.f19_g17.X.cheyenne_intel, cdeps_aux tests and testlist_drv.xml test Dependencies: @@ -3475,9 +4425,9 @@ Brief Summary: - Avoid artificial limit on string lengths in shr_string_listMerge. - Don't try to set a variable from env_test.xml if it does not otherwise exist in the case. - Fix logic in cdeps build. - - New cdeps stream schema and stream definition file. + - New cdeps stream schema and stream definition file. 
-User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 5ab9bf4bd Merge pull request #3693 from ESMCI/jgfouca/volatile_env @@ -3510,7 +4460,7 @@ M src/share/util/shr_string_mod.F90 ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 8-20-2020 Tag: cime5.8.31 Answer Changes: None @@ -3532,7 +4482,7 @@ Brief Summary: - CIME changes to support upcoming addition of MALI gis1to10km configuration. - Changes needed to have cdeps 3d stream input working. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master d575f612e Merge pull request #3679 from ekluzek/fv3gridfix2013endGSWP3 @@ -3575,7 +4525,7 @@ M src/externals/mct/mpeu/m_inpak90.F90 ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 8-12-2020 Tag: cime5.8.30 Answer Changes: [None, Round Off, Climate Changing] @@ -3601,7 +4551,7 @@ Brief Summary: - Archive atm_chunk_costs with other performance data. - Use SRCROOT instead of CIMEROOT in key places. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 899fcd90e Merge pull request #3663 from billsacks/cprnc_fix_int_singlevar @@ -3700,7 +4650,7 @@ M tools/statistical_ensemble_test/single_run.py ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 7-24-2020 Tag: cime5.8.29 Answer Changes: MOM6 @@ -3730,9 +4680,9 @@ Brief Summary: - Dict has_key is removed in python3. - Update TFREEZE_SALTWATER_OPTION value for MOM6. - Set PIO_REARR_COMM_TYPE: coll for mpi-serial. - -User interface changes: + +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 8cda0e75b Merge pull request #3633 from ESMCI/bdobbins-wget-inputdata-bugfix @@ -3895,7 +4845,7 @@ M tools/statistical_ensemble_test/single_run.py ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 6-30-2020 Tag: cime5.8.28 Answer Changes: None @@ -3907,7 +4857,7 @@ Brief Summary: - Improve error message for create newcase test option. - Clean up screen output & logging in check_input_data. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master a2de0598c Merge pull request #3602 from ESMCI/fischer/mapping_file_update @@ -3928,12 +4878,12 @@ M scripts/lib/CIME/test_scheduler.py ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 6-23-2020 Tag: cime5.8.27 Answer Changes: [None, Round Off, Climate Changing] Tests: SMS.ne30pg3_ne30pg3_mg17.A.cheyenne_intel - scripts_resgression_tests + scripts_resgression_tests SMS_D_Ld3.f45_g37_rx1.A.izumi_nag Dependencies: @@ -3953,7 +4903,7 @@ Brief Summary: - Remove nuopc data models to new repo CDEPS. - Fixed and cleaned-up scripts_regression_tests.py test Q_TestBlessTestResults with CIME_MODEL = cesm. 
-User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master b3fb69d87 Merge pull request #3587 from ESMCI/fischer/rename_ne30pg3_grid @@ -4067,11 +5017,11 @@ D src/share/streams_nuopc/dshr_tInterp_mod.F90 ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 6-16-2020 Tag: cime5.8.26 -Answer Changes: Climate Changing -Tests: hand tested with ERS_Vnuopc_D.f09_g17.A.cheyenne_intel modified for era5 +Answer Changes: Climate Changing +Tests: hand tested with ERS_Vnuopc_D.f09_g17.A.cheyenne_intel modified for era5 ../src/drivers/nuopc/cime_config/testdefs/testlist_drv.xml (cheyenne, intel) scripts_regression_tests (with PIO_VERSION=2) Dependencies: @@ -4089,7 +5039,7 @@ Brief Summary: - Pleiades machines updates. - Improved error message for incorrect setting of cime model. -User interface changes: +User interface changes: - For all CIME data models. PR summary: git log --oneline --first-parent [previous_tag]..master @@ -4202,7 +5152,7 @@ M src/share/util/shr_kind_mod.F90 ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 6-8-2020 Tag: cime5.8.25 Answer Changes: None @@ -4213,7 +5163,7 @@ Brief Summary: - Change grid alias for arctic var-res grids. - Couples HYCOM with data atmosphere for UFS HAFS application. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master e8dfe9356 Merge pull request #3566 from ESMCI/fischer/var_res_grids @@ -4234,11 +5184,11 @@ M src/components/data_comps_nuopc/datm/src/atm_comp_nuopc.F90 ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 6-3-2020 Tag: cime5.8.24 Answer Changes: Round Off on izumi -Tests: scripts_regression_tests, izumi tests +Tests: scripts_regression_tests, izumi tests Dependencies: Brief Summary: @@ -4257,7 +5207,7 @@ - Change type to double for derived vars in cprnc. - Fix typo. -User interface changes: +User interface changes: - New component added for cesm. PR summary: git log --oneline --first-parent [previous_tag]..master @@ -4347,7 +5297,7 @@ D tools/cprnc/test.csh ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 4-19-2020 Tag: cime5.8.23 Answer Changes: roundoff for some compsets, bfb for others @@ -4359,7 +5309,7 @@ Brief Summary: - Fix atm level spec so that it does not match 1x1_brazil. - Document provenance from COARE/Fairall surface flux scheme option. - Add missing ROF2OCN grid for r05_TO_gx3v7. - - Updates to CIME used for 0.1 degree JRA-forced G run with BGC enabled. + - Updates to CIME used for 0.1 degree JRA-forced G run with BGC enabled. - FV3 bug fix and Makefile update. - Update cheyenne ESMF libs. - case.setup: fix merge error from previous PR. @@ -4369,7 +5319,7 @@ - Remove code specific to ORNL machine. -User interface changes: +User interface changes: - Adds --extra-machines-dir argument to create_newcase.
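  As an illustrative sketch only (the case name, compset, grid and path below are
  placeholders, not taken from the PR), the new flag points create_newcase at a
  directory of site-local machine definitions, e.g. one holding an extra
  config_machines.xml kept outside the CIME tree:

      ./create_newcase --case mycase --compset X --res f19_g17 \
          --extra-machines-dir /path/to/site/machines    # placeholder path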
PR summary: git log --oneline --first-parent [previous_tag]..master @@ -4445,18 +5395,18 @@ M utils/perl5lib/Config/SetupTools.pm ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 4-26-2020 Tag: cime5.8.22 Answer Changes: b4b -Tests: scripts_regression_tests +Tests: scripts_regression_tests Dependencies: Brief Summary: - Fix issue with mpi-serial io. - Use esmf8.1.0b14 on cheyenne. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 3b4ea8a99 Merge pull request #3499 from jedwards4b/pio_serialio_fix @@ -4475,11 +5425,11 @@ M src/share/util/shr_pio_mod.F90 ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 4-17-2020 Tag: cime5.8.21 Answer Changes: None -Tests: scripts_regression_tests +Tests: scripts_regression_tests CIME_MODEL=cesm; CIME_DRIVER=nuopc; ./scripts_regression_tests.py Dependencies: @@ -4495,7 +5445,7 @@ Brief Summary: - Bugfixes and changes that permit cmeps to run without a mediator. - Some changes to cprnc's CMakeLists.txt. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 979ab8433 Merge pull request #3470 from jedwards4b/shr_map_getARptr @@ -4545,7 +5495,7 @@ M tools/cprnc/CMakeLists.txt ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 4-7-2020 Tag: cime5.8.20 Answer Changes: None @@ -4557,7 +5507,7 @@ Brief Summary: - E3SM 03-16-2020 merger. - Remove unneeded encodes. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 708a9338b Merge pull request #3479 from jedwards4b/ftp_timeout @@ -4701,8 +5651,8 @@ A tools/mapping/gen_domain_files/test_gen_domain.sh ====================================================================== -Originator: Chris Fischer -Date: 4-3-2020 +Originator: Chris Fischer +Date: 4-3-2020 Tag: cime5.8.19 Answer Changes: None Tests: scripts_regression_tests @@ -4714,7 +5664,7 @@ Brief Summary: - Change sst_aquap11 to sst_aquap_constant - case.setup: Add --keep option -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 2656300d5 Merge pull request #3462 from mnlevy1981/cleanup_maint-5.6_merge @@ -4736,7 +5686,7 @@ M src/drivers/mct/cime_config/namelist_definition_drv.xml ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 3-20-2020 Tag: cime5.8.18 Answer Changes: None @@ -4745,10 +5695,10 @@ Dependencies: Brief Summary: - Add ne5np4.pg4 grid. - - Ensure that api documentation is rebuilt whenever rebuilding + - Ensure that api documentation is rebuilt whenever rebuilding documentation. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master f990945a7 Merge pull request #3460 from ESMCI/fischer/ne5np4 @@ -5142,11 +6092,11 @@ M tools/cprnc/CMakeLists.txt ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 2-27-2020 Tag: cime5.8.16 Answer Changes: None -tests: scripts_regression_tests, aux_cam using fv3, various hand tests. +tests: scripts_regression_tests, aux_cam using fv3, various hand tests. 
Dependencies: Brief Summary: @@ -5205,7 +6155,7 @@ Brief Summary: - Fixed Empty .cesm_proj file results in a crash. - Improve scripts_regression_tests test cleanup. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master b2e292597 Merge pull request #3420 from ESMCI/fischer/segrids @@ -5449,7 +6399,7 @@ M src/share/nuopc/shr_ndep_mod.F90 ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 11-11-2019 Tag: cime5.8.14 Answer Changes: None, (except for X compsets with nuopc @@ -5474,7 +6424,7 @@ Brief Summary: - ACME merge 2019-10-25. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master cad8477bb Merge pull request #3297 from ESMCI/revert-3296-master_seq_flux_init @@ -5539,7 +6489,7 @@ M src/share/util/shr_mct_mod.F90 ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 10-24-2019 Tag: cime5.8.13 Answer Changes: None @@ -5550,7 +6500,7 @@ Brief Summary: - Add a raw xml option to the query_config interface. - Add --only-job option to case.submit. -User interface changes: +User interface changes: - Add --xml to query_config. - Add option --only-job to case.submit. - Fix an issue with Lockedfile error when case.submit is used with the --no-batch flag. @@ -5581,11 +6531,11 @@ M src/share/util/shr_scam_mod.F90 ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 10-15-2019 Tag: cime5.8.12 Answer Changes: None -Tests: ERS.T5_T5_mg37.QPC4, scripts_regression_tests +Tests: ERS.T5_T5_mg37.QPC4, scripts_regression_tests Dependencies: Brief Summary: @@ -5594,7 +6544,7 @@ Brief Summary: - Updates for cam testing updates. - Fix nuopc esmf paths. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 7f1d48fe9 Merge pull request #3263 from ESMCI/fischer/T5_grid_fix @@ -5620,18 +6570,18 @@ M src/share/util/shr_flux_mod.F90 ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 10-01-2019 Tag: cime5.8.11 Answer Changes: None -Tests: scripts_regression_tests, Hand tested SMS_D_Vnuopc.f09_g17.X.cheyenne_intel +Tests: scripts_regression_tests, Hand tested SMS_D_Vnuopc.f09_g17.X.cheyenne_intel Dependencies: Brief Summary: - Correct the way esmf is built to avoid library mismatch. - Update gnu compiler on cheyenne, bit for bit. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 693383167 Merge pull request #3252 from jedwards4b/nuopc_esmf_build_correction @@ -5646,7 +6596,7 @@ M scripts/Tools/Makefile ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 9-26-2019 Tag: cime5.8.10 Answer Changes: None @@ -5666,7 +6616,7 @@ Brief Summary: - Clean up some issues in scripts_regression_tests.py. - Add /cluster/torque/bin to path on izumi. 
-User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 809bcb3b8 Merge pull request #3248 from jedwards4b/mct_null_comm_fix @@ -5737,7 +6687,7 @@ M src/share/util/shr_scam_mod.F90 ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 10-24-2019 Tag: cime5.8.13 Answer Changes: None @@ -5748,7 +6698,7 @@ Brief Summary: - Add a raw xml option to the query_config interface. - Add --only-job option to case.submit. -User interface changes: +User interface changes: - Add --xml to query_config. - Add option --only-job to case.submit. - Fix an issue with Lockedfile error when case.submit is used with the --no-batch flag. @@ -5779,11 +6729,11 @@ M src/share/util/shr_scam_mod.F90 ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 10-15-2019 Tag: cime5.8.12 Answer Changes: None -Tests: ERS.T5_T5_mg37.QPC4, scripts_regression_tests +Tests: ERS.T5_T5_mg37.QPC4, scripts_regression_tests Dependencies: Brief Summary: @@ -5792,7 +6742,7 @@ Brief Summary: - Updates for cam testing updates. - Fix nuopc esmf paths. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 7f1d48fe9 Merge pull request #3263 from ESMCI/fischer/T5_grid_fix @@ -5818,18 +6768,18 @@ M src/share/util/shr_flux_mod.F90 ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 10-01-2019 Tag: cime5.8.11 Answer Changes: None -Tests: scripts_regression_tests, Hand tested SMS_D_Vnuopc.f09_g17.X.cheyenne_intel +Tests: scripts_regression_tests, Hand tested SMS_D_Vnuopc.f09_g17.X.cheyenne_intel Dependencies: Brief Summary: - Correct the way esmf is built to avoid library mismatch. - Update gnu compiler on cheyenne, bit for bit. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 693383167 Merge pull request #3252 from jedwards4b/nuopc_esmf_build_correction @@ -5844,7 +6794,7 @@ M scripts/Tools/Makefile ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 9-26-2019 Tag: cime5.8.10 Answer Changes: None @@ -5864,7 +6814,7 @@ Brief Summary: - Clean up some issues in scripts_regression_tests.py. - Add /cluster/torque/bin to path on izumi. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 809bcb3b8 Merge pull request #3248 from jedwards4b/mct_null_comm_fix @@ -5935,7 +6885,7 @@ M src/share/util/shr_scam_mod.F90 ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 8-29-2019 Tag: cime5.8.9 Answer Changes: None @@ -5945,7 +6895,7 @@ Dependencies: Brief Summary: - Fix nag compiler for CAM. 
-User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master - 6026ca341 Merge pull request #3219 from ESMCI/fischer/nag @@ -5958,11 +6908,11 @@ M config/cesm/machines/Depends.nag ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 8-26-2019 Tag: cime5.8.8 Answer Changes: None -Tests: scripts_regression_tests, aux_mom +Tests: scripts_regression_tests, aux_mom Dependencies: Brief Summary: @@ -5983,7 +6933,7 @@ Brief Summary: - ACME merge 2019-07-22 - Remove test SMS.T42_T42.S when testing with nuopc driver. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master a9c9289f1 Merge branch 'maint-5.6' @@ -6147,7 +7097,7 @@ M src/share/util/shr_pio_mod.F90 ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 7-19-209 Tag: cime5.8.7 Answer Changes: Climate changing for CAM ne120, ne120pg3, ne0CONUS grids @@ -6159,7 +7109,7 @@ Brief Summary: - Updates and fixes for MOM6 in cime. - Maint 5.6 merge. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 6efe21745 Merge pull request #3177 from ESMCI/fischer/SE_gx1v7MaskFix @@ -6214,17 +7164,17 @@ M src/share/util/shr_file_mod.F90 ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 7-16-2019 Tag: cime5.8.6 Answer Changes: None -Tests: scripts_regression_tests, SMS_Ld1.f19_f19_mg17.FXSD.cheyenne_intel.cam-outfrq1d +Tests: scripts_regression_tests, SMS_Ld1.f19_f19_mg17.FXSD.cheyenne_intel.cam-outfrq1d Dependencies: Brief Summary: - Add back ESMF modules on cheyenne. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 07f0945e6 Merge pull request #3172 from ESMCI/fischer/fix_esmfmodules @@ -6238,11 +7188,11 @@ M config/cesm/machines/config_machines.xml ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 7-12-2019 Tag: cime5.8.5 Answer Changes: bit for bit -Tests: scripts_regression_tests on izumi, SMS_Ln9.ne0CONUSne30x8_ne0CONUSne30x8_mg17.F2000climo +Tests: scripts_regression_tests on izumi, SMS_Ln9.ne0CONUSne30x8_ne0CONUSne30x8_mg17.F2000climo Dependencies: Brief Summary: @@ -6252,7 +7202,7 @@ Brief Summary: - Case insensitive user_nl. - ACME merge 2019-06-25. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master ba5ce7f68 Merge pull request #3167 from ESMCI/fischer/SE_conusmt12 @@ -6284,11 +7234,11 @@ M src/share/util/shr_flux_mod.F90 ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 6-24-2019 Tag: cime5.8.4 Answer Changes: None -Tests: scripts_regression_tests, code checker +Tests: scripts_regression_tests, code checker Dependencies: Brief Summary: @@ -6302,7 +7252,7 @@ Brief Summary: - ACME merge 2019-06-10 - Change nodefail test to allow for nuopc file name difference from mct. 
-User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 8f0e3d3 Merge pull request #3150 from ESMCI/theia_to_slurm @@ -6532,8 +7482,8 @@ A src/externals/pio2/tests/unit/run_tests.sh ====================================================================== -Originator: Chris Fischer -Date: 6-7-2019 +Originator: Chris Fischer +Date: 6-7-2019 Tag: cime5.8.3 Answer Changes: [None, Round Off, Climate Changing] Tests: scripts_regression_tests.py with CIME_DRIVER=nuopc, @@ -6544,8 +7494,8 @@ Brief Summary: - Add logic to control activation of glcshelf_c2_ice. - Fix nuopc build, update for stampede esmf lib. - Fixes for nuopc scripts_regression_tests. - - Merge branch master. - - Merge branch master. + - Merge branch master. + - Merge branch master. - Merge acme split 2019-5-28. - Remove the nuopc driver and mediator to a separate repository https://github.com/ESCOMP/CMEPS.git - New documentation explaining how to invoke the --user-mods-dir option to create_newcase. @@ -6568,7 +7518,7 @@ - Update modules cheyenne. - Properly link nag f90. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 9f39361 Add logic to control activation of glcshelf_c2_ice (#3131) @@ -6785,7 +7735,7 @@ M tools/statistical_ensemble_test/README ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 4-23-2019 Tag: cime5.8.2 Answer Changes: None @@ -6801,7 +7751,7 @@ Brief Summary: - Introduces a new stub IAC. - Master merge to nuopc cmeps. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 57cf4a5 Merge pull request #3086 from ESMCI/fischer/ne_conus @@ -6953,11 +7903,11 @@ M src/share/util/shr_pio_mod.F90 ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 04-08-2019 Tag: cime5.8.1 Answer Changes: None -Tests: scripts_regression_tests, many create_newcase with mangled compset names +Tests: scripts_regression_tests, many create_newcase with mangled compset names hand test xmllint Dependencies: @@ -6977,7 +7927,7 @@ Brief Summary: - PET and ERP tests were not setting compile_threaded correctly. - Implement 'share' field of test suites. -User interface changes: +User interface changes: - Stub components are now optional in compset long names. Also there is less order dependency. PR summary: git log --oneline --first-parent [previous_tag]..master @@ -7063,7 +8013,7 @@ M tools/mapping/gen_mapping_files/runoff_to_ocn/src/Makefile ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 03-12-2019 Tag: cime5.8.0 Answer Changes: None @@ -7074,7 +8024,7 @@ Brief Summary: - Merge maint-5.6 branch. - Cleanup of build. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 5f939ae Merge pull request #3039 from ESMCI/maint-5.6 @@ -7215,7 +8165,7 @@ Brief Summary: - Fix test issue for cesm. - Better handling of file permissions when copying files.
-User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 5f31182 Merge pull request #3014 from jedwards4b/config_grids_v2.1 @@ -7406,11 +8356,11 @@ A tools/cprnc/test_inputs/missing_variables.nc ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 1-15-2019 Tag: cime5.7.7 Answer Changes: None -Tests: scripts_regression_tests +Tests: scripts_regression_tests Dependencies: Brief Summary: @@ -7426,7 +8376,7 @@ Brief Summary: - docn_comp_mod needs fix for optional variable. - Remove support for CLM4.0. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master e5c2368 Minor fix to e3sm_cime_mgmt to properly handle exe files @@ -7524,11 +8474,11 @@ M src/share/util/shr_wv_sat_mod.F90 ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer Date: 01-07-2019 Tag: cime5.7.6 Answer Changes: None -Tests: scripts_regression_tests.py, hand testing of cesm cases +Tests: scripts_regression_tests.py, hand testing of cesm cases scripts_regression_tests.py with PIO_VERSION=2 code-checker, by-hand Dependencies: @@ -7554,7 +8504,7 @@ Brief Summary: - Merge latest maint-5.6 changes into master. - Fix new pylint errors. -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master 486c579 Merge pull request #2966 from jedwards4b/cmpgen_needs_write @@ -7842,20 +8792,20 @@ Brief Summary: Miscellaneous minor bug fixes, including getting User interface changes: none -Originator: Chris Fischer +Originator: Chris Fischer Date: 11-29-2018 Tag: cime_cesm2_1_rel_06 Answer Changes: None -Tests: scripts_regression_tests +Tests: scripts_regression_tests Dependencies: Brief Summary: - Fix new pylint errors. - Update addmetadata for POP-ECT tests. 
-User interface changes: +User interface changes: -PR summary: git log --oneline --first-parent [previous_tag]..master +PR summary: git log --oneline --first-parent [previous_tag]..master 5a86646 Merge pull request #2921 from jedwards4b/fixpylint3errors 98821ff Merge pull request #2919 from ESMCI/fischer/addmetadata @@ -7880,7 +8830,7 @@ M tools/statistical_ensemble_test/addmetadata.sh ====================================================================== -Originator: Chris Fischer +Originator: Chris Fischer PR summary: git log --oneline --first-parent [previous_tag]..master e882c1c06 Merge pull request #2897 from billsacks/fix_histutils_regex fc14d5870 Merge pull request #2895 from billsacks/fix_lnd2glc_averaged_now diff --git a/ChangeLog_template b/ChangeLog_template index e368a3a8ccb..27821ec1540 100644 --- a/ChangeLog_template +++ b/ChangeLog_template @@ -1,19 +1,18 @@ ====================================================================== -Originator: -Date: +Originator: +Date: Tag: cimeX.Y.Z Answer Changes: [None, Round Off, Climate Changing] -Tests: +Tests: Dependencies: Brief Summary: -User interface changes: +User interface changes: PR summary: git log --oneline --first-parent [previous_tag]..master Modified files: git diff --name-status [previous_tag] ====================================================================== - diff --git a/Externals.cfg b/Externals.cfg index 84c683b760c..8cb00f6944c 100644 --- a/Externals.cfg +++ b/Externals.cfg @@ -1,12 +1,19 @@ +[ccs_config] +tag = ccs_config_cesm0.0.3 +protocol = git +repo_url = https://github.com/ESMCI/ccs_config_cesm +local_path = ccs_config +required = True + [cmeps] -tag = cmeps0.13.32 +tag = cmeps0.13.40 protocol = git repo_url = https://github.com/ESCOMP/CMEPS.git local_path = components/cmeps required = True [cdeps] -tag = cdeps0.12.31 +tag = cdeps0.12.32 protocol = git repo_url = https://github.com/ESCOMP/CDEPS.git local_path = components/cdeps @@ -14,14 +21,14 @@ externals = Externals_CDEPS.cfg required = True [cpl7] -tag = cpl7.0.5 +tag = cpl7.0.10 protocol = git repo_url = https://github.com/ESCOMP/CESM_CPL7andDataComps local_path = components/cpl7 required = True [share] -tag = share1.0.8 +tag = share1.0.9 protocol = git repo_url = https://github.com/ESCOMP/CESM_share local_path = share diff --git a/LICENSE.TXT b/LICENSE.TXT index 1772716c8a3..2424196e5a6 100644 --- a/LICENSE.TXT +++ b/LICENSE.TXT @@ -1,7 +1,7 @@ Copyright (c) 2017, University Corporation for Atmospheric Research (UCAR) All rights reserved. and -Copyright (c) 2017, Sandia Corporation. +Copyright (c) 2017, Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain rights in this software. diff --git a/README.md b/README.md index 7ce3cc3c4ff..d4162788230 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,14 @@ # cime Common Infrastructure for Modeling the Earth -CIME, pronounced “SEAM”, contains the support scripts (configure, build, run, test), data models, essential -utility libraries, a “main” and other tools that are needed to build a single-executable coupled Earth System Model. -CIME is available in a stand-alone package that can be compiled and tested without active prognostic components -but is typically included in the source of a climate model. CIME does not contain: any active components, -any intra-component coupling capability (such as atmosphere physics-dynamics coupling). 
+CIME, pronounced "SEAM", primarily consists of a Case Control System that supports the configuration, compilation, execution, system testing and unit testing of an Earth System Model. The two main components of the Case Control System are: -*cime* (pronounced: seem) is currently used by the +1. Scripts to enable simple generation of model executables and associated input files for different scientific cases, component resolutions and combinations of full, data and stub components with a handful of commands. +2. Testing utilities to run defined system tests and report results for different configurations of the coupled system. + +CIME does **not** contain the source code for any Earth System Model drivers or components. It is typically included alongside the source code of a host model. However, CIME does include pointers to external repositories that contain drivers, data models and other test components. These external components can be easily assembled to facilitate end-to-end system tests of the CIME infrastructure, which are defined in the CIME repository. + +CIME is currently used by the Community Earth System Model (CESM) and the Energy Exascale Earth System Model (E3SM). @@ -19,16 +20,10 @@ See esmci.github.io/cime # Developers ## Lead Developers -Case Control System: Jim Edwards (NCAR), Jim Foucar (SNL) - -MCT-based Coupler/Driver: Mariana Vertenstein (NCAR), Robert Jacob (ANL) - -Data Models: Mariana Vertenstein (NCAR) +Jim Edwards (NCAR), Jim Foucar (SNL) ## Also Developed by -Alice Bertini (NCAR), Tony Craig (NCAR), Michael Deakin (SNL), Chris Fischer (NCAR), Steve Goldhaber (NCAR), -Erich Foster (SNL), Mike Levy (NCAR), Bill Sacks (NCAR), Andrew Salinger (SNL), Sean Santos (NCAR), Jason Sarich (ANL), -Andreas Wilke (ANL). +Alice Bertini (NCAR), Jason Boutte (LLNL), Tony Craig (NCAR), Michael Deakin (SNL), Chris Fischer (NCAR), Erich Foster (SNL), Steve Goldhaber (NCAR), Robert Jacob (ANL), Mike Levy (NCAR), Bill Sacks (NCAR), Andrew Salinger (SNL), Sean Santos (NCAR), Jason Sarich (ANL), Mariana Vertenstein (NCAR), Andreas Wilke (ANL). # Acknowledgements diff --git a/conftest.py b/conftest.py new file mode 100644 index 00000000000..38ce216f708 --- /dev/null +++ b/conftest.py @@ -0,0 +1,36 @@ +import os +import sys + +CIMEROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "scripts", "lib")) +sys.path.insert(0, CIMEROOT) + +import pytest + +from CIME import utils +from CIME.tests import scripts_regression_tests + +os.environ["CIME_GLOBAL_WALLTIME"] = "0:05:00" + + +def pytest_addoption(parser): + # set addoption as add_argument to use common argument setup + # pytest's addoption has same signature as add_argument + setattr(parser, "add_argument", parser.addoption) + + scripts_regression_tests.setup_arguments(parser) + + # verbose and debug flags already exist + parser.addoption("--silent", action="store_true", help="Disable all logging") + + +def pytest_configure(config): + kwargs = vars(config.option) + + utils.configure_logging(kwargs["verbose"], kwargs["debug"], kwargs["silent"]) + + scripts_regression_tests.configure_tests(**kwargs) + + +@pytest.fixture(scope="session", autouse=True) +def setup(pytestconfig): + os.chdir(CIMEROOT) diff --git a/doc/Makefile b/doc/Makefile index c3f55065335..ee5f25834b1 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -15,14 +15,14 @@ help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) # exit 0 is to prevent the api rule from being run by the catchall target -# and generating an error. 
-api: CIME_api Tools_api Tools_user +# and generating an error. +api: CIME_api Tools_api Tools_user exit 0 -CIME_api: +CIME_api: @$(SPHINXAPI) --force -o $(SOURCEDIR)/$@ $(SCRIPTSDIR)/lib/CIME -Tools_api: +Tools_api: @$(SPHINXAPI) --force -o $(SOURCEDIR)/$@ $(SCRIPTSDIR)/Tools Tools_user: diff --git a/doc/README b/doc/README index b8e0e530e94..deac674eb74 100644 --- a/doc/README +++ b/doc/README @@ -10,7 +10,7 @@ Check the sphinx version as follows: >sphinx-build --version -The documentation source is stored with the CIME master code base. However, +The documentation source is stored with the CIME master code base. However, the built html files are stored separately in the orphan gh-pages branch and can be viewed from a browser at URL: diff --git a/doc/source/Tools_user/index.rst.template b/doc/source/Tools_user/index.rst.template index 3da4239b722..06d03fddbb1 100644 --- a/doc/source/Tools_user/index.rst.template +++ b/doc/source/Tools_user/index.rst.template @@ -10,6 +10,3 @@ and **case.setup**. .. toctree:: :maxdepth: 1 - - - diff --git a/doc/source/_templates/layout.html b/doc/source/_templates/layout.html index 6ec6be4302a..af14da01ced 100644 --- a/doc/source/_templates/layout.html +++ b/doc/source/_templates/layout.html @@ -1,3 +1,3 @@ {% extends "!layout.html" %} -{% set script_files = script_files + ["_static/pop_ver.js"] %} \ No newline at end of file +{% set script_files = script_files + ["_static/pop_ver.js"] %} diff --git a/doc/source/build_cpl/adding-components.rst b/doc/source/build_cpl/adding-components.rst index 09ed5ebfd94..c9fcf234dd1 100644 --- a/doc/source/build_cpl/adding-components.rst +++ b/doc/source/build_cpl/adding-components.rst @@ -4,7 +4,7 @@ Adding components =================== -Here are the steps to add prognostic components to CIME models. +Here are the steps to add prognostic components to CIME models. There are a couple of aspects of a component interface to CIME, the scripts interface which controls setting up component inputs and diff --git a/doc/source/build_cpl/index.rst b/doc/source/build_cpl/index.rst index 46984c1a59f..a492a431548 100644 --- a/doc/source/build_cpl/index.rst +++ b/doc/source/build_cpl/index.rst @@ -12,7 +12,7 @@ Building a Coupled Model with CIME .. toctree:: :maxdepth: 3 :numbered: - + introduction.rst adding-components.rst @@ -22,4 +22,3 @@ Indices and tables * :ref:`genindex` * :ref:`modindex` * :ref:`search` - diff --git a/doc/source/build_cpl/introduction.rst b/doc/source/build_cpl/introduction.rst index f45c4b47844..0352da61bf7 100644 --- a/doc/source/build_cpl/introduction.rst +++ b/doc/source/build_cpl/introduction.rst @@ -7,7 +7,7 @@ How to add a new component model to cime. How to replace an existing cime model with another one. -How to integrate your model in to the cime build/configure system and coupler. +How to integrate your model in to the cime build/configure system and coupler. How to work with the CIME-supplied models. diff --git a/doc/source/conf.py b/doc/source/conf.py index a57408fde19..f1704a1d768 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -18,11 +18,13 @@ # import os import sys + # Note that we need a specific version of sphinx_rtd_theme. 
This can be obtained with: # pip install git+https://github.com/esmci/sphinx_rtd_theme.git@version-dropdown-with-fixes import sphinx_rtd_theme -sys.path.insert(0, os.path.abspath('../../scripts/lib')) -sys.path.insert(1, os.path.abspath('../../scripts')) + +sys.path.insert(0, os.path.abspath("../../scripts/lib")) +sys.path.insert(1, os.path.abspath("../../scripts")) # -- General configuration ------------------------------------------------ @@ -34,42 +36,42 @@ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ - 'sphinx.ext.todo', - 'sphinx.ext.coverage', - 'sphinx.ext.githubpages', - 'sphinx.ext.autodoc', - 'sphinx.ext.autosummary', - 'sphinx.ext.viewcode', - 'sphinx.ext.todo', - 'sphinxcontrib.programoutput' + "sphinx.ext.todo", + "sphinx.ext.coverage", + "sphinx.ext.githubpages", + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "sphinx.ext.viewcode", + "sphinx.ext.todo", + "sphinxcontrib.programoutput", ] -todo_include_todos=True +todo_include_todos = True # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +source_suffix = ".rst" # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. -project = u'CIME' -copyright = u'2017, U.S. National Science Foundation and U.S. Department of Energy' -author = u'Staff of the NSF/CESM and DOE/E3SM projects' +project = u"CIME" +copyright = u"2017, U.S. National Science Foundation and U.S. Department of Energy" +author = u"Staff of the NSF/CESM and DOE/E3SM projects" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -version = u'master' +version = u"master" # The full version, including alpha/beta/rc tags. -release = u'master' +release = u"master" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. @@ -84,7 +86,7 @@ exclude_patterns = [] # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = True @@ -95,17 +97,17 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -#html_theme = 'alabaster' -#html_theme = 'bizstyle' -#html_theme = 'classic' -#html_theme = 'sphinxdoc' -html_theme = 'sphinx_rtd_theme' +# html_theme = 'alabaster' +# html_theme = 'bizstyle' +# html_theme = 'classic' +# html_theme = 'sphinxdoc' +html_theme = "sphinx_rtd_theme" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # -#html_theme_options = {"stickysidebar": "true"} +# html_theme_options = {"stickysidebar": "true"} # The 'versions' option needs to have at least two versions to work, but it doesn't need # to have all versions: others will be added dynamically. Note that this maps from version @@ -113,18 +115,18 @@ # nothing). For the other version, we just add a place-holder; its name and value are # unimportant because these versions will get replaced dynamically. 
html_theme_options = {} -html_theme_options['versions'] = {version: ''} -html_theme_options['versions']['[placeholder]'] = '' +html_theme_options["versions"] = {version: ""} +html_theme_options["versions"]["[placeholder]"] = "" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. -htmlhelp_basename = 'ondoc' +htmlhelp_basename = "ondoc" # -- Options for LaTeX output --------------------------------------------- @@ -133,15 +135,12 @@ # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. # # 'preamble': '', - # Latex figure (float) alignment # # 'figure_align': 'htbp', @@ -151,8 +150,13 @@ # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, 'on.tex', u'on Documentation', - u'Staff of the NSF/CESM and DOE/E3SM projects', 'manual'), + ( + master_doc, + "on.tex", + u"on Documentation", + u"Staff of the NSF/CESM and DOE/E3SM projects", + "manual", + ), ] @@ -160,10 +164,7 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'on', u'on Documentation', - [author], 1) -] +man_pages = [(master_doc, "on", u"on Documentation", [author], 1)] # -- Options for Texinfo output ------------------------------------------- @@ -172,16 +173,24 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'on', u'on Documentation', - author, 'on', 'One line description of project.', - 'Miscellaneous'), + ( + master_doc, + "on", + u"on Documentation", + author, + "on", + "One line description of project.", + "Miscellaneous", + ), ] # -- Options for pdf output ------------------------------------------------- pdf_documents = [ - (master_doc, - u'CIME_Users_Guide', - u'CIME Users Guide (PDF)', - u'Staff of the NSF/CESM and DOE/E3SM projects'), + ( + master_doc, + u"CIME_Users_Guide", + u"CIME Users Guide (PDF)", + u"Staff of the NSF/CESM and DOE/E3SM projects", + ), ] diff --git a/doc/source/data_models/data-atm.rst b/doc/source/data_models/data-atm.rst deleted file mode 100644 index 311b265a294..00000000000 --- a/doc/source/data_models/data-atm.rst +++ /dev/null @@ -1,284 +0,0 @@ -.. _data-atm: - -Data Atmosphere (DATM) -====================== - -DATM is normally used to provide observational forcing data (or forcing data produced by a previous run using active components) to drive prognostic components. -In the case of CESM, these would be: CLM (I compset), POP2 (C compset), and POP2/CICE (G compset). -As a result, DATM variable settings are specific to the compset that will be targeted. -As examples, CORE2_NYF (CORE2 normal year forcing) is the DATM mode used in C and G compsets. -CLM_QIAN, CLMCRUNCEP, CLMGSWP3 and CLM1PT are DATM modes using observational data for forcing CLM in I compsets. - -.. _datm-xml-vars: - ------------------- -xml variables ------------------- -The following are ``$CASEROOT`` xml variables that CIME supports for DATM. 
-These variables are defined in ``$CIMEROOT/src/components/data_comps/datm/cime_config/config_component.xml``. -These variables will appear in ``env_run.xml`` and the resulting values are compset dependent. - -.. note:: These xml variables are used by the the datm's **cime_config/buildnml** script in conjunction with datm's **cime_config/namelist_definition_datm.xml** file to generate the namelist file ``datm_in``. - -.. csv-table:: "DATM xml variables" - :header: "xml variable", "description" - :widths: 20, 80 - - "DATM_MODE", "Mode for atmospheric component" - "", "Valid values are: CORE2_NYF,CORE2_IAF,CLM_QIAN,CLM_QIAN_WISO,CLM1PT,CLMCRUNCEP," - "", "CLMCRUNCEP_V5,CLMGSWP3,WW3,CPLHIST,CORE_IAF_JRA" - - "DATM_PRESAERO", "Optional prescribed aerosol forcing" - "DATM_TOPO", "Optional Surface topography" - "DATM_CO2_TSERIES", "Optional CO2 time series type" - - "DATM_CPLHIST_DOMAIN_FILE", "Coupler history forcing data mode - full pathname of model domain file " - "DATM_CPLHIST_CASE", "Coupler history forcing data mode - case name" - "DATM_CPLHIST_DIR", "Coupler history forcing data mode - directory containing coupler history data" - "DATM_CPLHIST_YR_ALIGN", "Coupler history forcing data mode - simulation year corresponding to DATM_CPLHIST_YR_START" - "DATM_CPLHIST_YR_START", "Coupler history forcing data mode - starting year to loop data over" - "DATM_CPLHIST_YR_END", "Coupler history forcing data mode - ending year to loop data over" - - "DATM_CLMNCEP_YR_ALIGN", "I compsets only - simulation year corresponding to data starting year" - "DATM_CLMNCEP_YR_START", "I compsets only - data model starting year to loop data over" - "DATM_CLMNCEP_YR_END", "I compsets only - data model ending year to loop data over" - -.. note:: If ``DATM_MODE`` is set to ``CPLHIST``, it is normally assumed that the model domain will be identical to **all** of the stream domains. To ensure this, the xml variables ``ATM_DOMAIN_PATH`` and ``ATM_DOMAIN_FILE`` are ignored and a valid setting **must be given** for ``DATM_CPLHIST_DOMAIN_FILE``. If ``DATM_CPLHIST_DOMAIN_FILE`` is set to ``null``, then the datm component domain information is read in from the first coupler history file in the target stream and it is assumed that the first coupler stream file that is pointed to contains the domain information for that stream. This is the default that should be used for this mode. Alternatively, ``DATM_CPLHIST_DOMAIN_FILE`` can be set to ``$ATM_DOMAIN_PATH/$ATM_DOMAIN_FILE`` in a non-default configuration. - -.. _datm-datamodes: - --------------------- -datamode values --------------------- - -The xml variable ``DATM_MODE`` (described in :ref:`datm_mode`) sets the streams that are associated with DATM and also sets the namelist variable ``datamode``. -``datamode`` (which appears in ``shr_strdata_nml``) specifies what additional operations need to be done by DATM on the streams before returning to the driver. - -Each data model has its own set of supported ``datamode`` values. The following are the supported DATM ``datamode`` values, as defined in the file ``namelist_definition_datm.xml``. - -.. csv-table:: "Valid values for datamode namelist variable" - :header: "datamode variable", "description" - :widths: 20, 80 - - "NULL", "This mode turns off the data model as a provider of data to the coupler. The ``atm_present`` flag will be set to ``false`` and the coupler assumes no exchange of data to or from the data model." - "COPYALL", "The default science mode of the data model is the COPYALL mode. 
This mode will examine the fields found in all input data streams; if any input field names match the field names used internally, they are copied into the export array and passed directly to the coupler without any special user code. Any required fields not found on an input stream will be set to zero except for aerosol deposition fields which will be set to a special value. " - "CLMNCEP", "In conjunction with NCEP climatological atmosphere data, provides the atmosphere forcing favored by the Land Model Working Group when coupling an active land model with observed atmospheric forcing. This mode replicates code previously found in CLM (circa 2005), before the LMWG started using the CIME coupling infrastructure and data models to do active-land-only simulations." - "CORE2_NYF", "Coordinated Ocean-ice Reference Experiments (CORE) Version 2 Normal Year Forcing." - "CORE2_IAF", "In conjunction with CORE Version 2 atmospheric forcing data, provides the atmosphere forcing favored by the Ocean Model Working Group when coupling an active ocean model with observed atmospheric forcing. This mode and associated data sets implement the CORE-IAF Version 2 forcing data, as developed by Large and Yeager (2008) at NCAR. Note that CORE2_NYF and CORE2_IAF work exactly the same way." - "CORE_IAF_JRA", "In conjunction with JRA-55 Project, provides the atmosphere forcing when coupling an active ocean model with observed atmospheric forcing. This mode and associated data sets implement the JRA-55 v1.3 forcing data." - -.. _datm_mode: - -------------------------------- -DATM_MODE, datamode and streams -------------------------------- - -The following table describes the valid values of ``DATM_MODE`` (defined in the ``config_component.xml`` file for DATM), and how they relate to the associated input streams and the ``datamode`` namelist variable. -CIME will generate a value of ``DATM_MODE`` based on the compset. - -.. 
csv-table:: "Relationship between DATM_MODE, datamode and streams" - :header: "DATM_MODE", "description-streams-datamode" - :widths: 15, 85 - - "NULL", "null mode" - "", "streams: none" - "", "datamode: NULL" - "CORE2_NYF","CORE2 normal year forcing (C ang G compsets)" - "", "streams: CORE2_NYF.GISS,CORE2_NYF.GXGXS,CORE2_NYF.NCEP" - "", "datamode: CORE2_NYF" - "CORE2_IAF","CORE2 interannual year forcing (C ang G compsets)" - "", "streams: CORE2_IAF.GCGCS.PREC,CORE2_IAF.GISS.LWDN,CORE2_IAF.GISS.SWDN,CORE2_IAF.GISS.SWUP," - "", "CORE2_IAF.NCEP.DN10,CORE2_IAF.NCEP.Q_10,CORE2_IAF.NCEP.SLP_,CORE2_IAF.NCEP.T_10,CORE2_IAF.NCEP.U_10," - "", "CORE2_IAF.NCEP.V_10,CORE2_IAF.CORE2.ArcFactor" - "", "datamode: CORE2_IAF" - "CORE_IAF_JRA",JRA-55 intra-annual year forcing(C ang G compsets)" - "", "streams: CORE_IAF_JRA.PREC,CORE_IAF_JRA.LWDN,CORE_IAF_JRA.SWDN," - "", "CORE_IAF_JRA.Q_10,CORE_IAF_JRA.SLP_,CORE_IAF_JRA.T_10,CORE_IAF_JRA.U_10," - "", "CORE_IAF_JRA.V_10,CORE_IAF_JRA.CORE2.ArcFactor" - "", "datamode: CORE_IAF_JRA" - "CLM_QIAN_WISO","QIAN atm input data with water isotopes (I compsets)" - "", "streams: CLM_QIAN_WISO.Solar,CLM_QIAN_WISO.Precip,CLM_QIAN_WISO.TPQW" - "", "datamode: CLMNCEP" - "CLM_QIAN", "QIAN atm input data (I compsets)" - "", "streams: CLM_QIAN.Solar,CLM_QIAN.Precip,CLM_QIAN.TPQW" - "", "datamode: CLMNCEP" - "CLMCRUNCEP","CRUNCEP atm input data (I compsets)" - "", "streams: CLMCRUNCEP.Solar,CLMCRUNCEP.Precip,CLMCRUNCEP.TPQW" - "", "datamode: CLMNCEP" - "CLMCRUNCEP_V5","CRUNCEP atm input data (I compsets)" - "","streams: CLMCRUNCEP_V5.Solar,CLMCRUNCEP_V5.Precip,CLMCRUNCEP_V5.TPQW" - "","datamode: CLMNCEP" - "CLMGSWP3","GSWP3 atm input data (I compsets)" - "","streams: CLMGSWP3.Solar,CLMGSWP3.Precip,CLMGSWP3.TPQW" - "","datamode: CLMNCEP" - "CLM1PT", "single point tower site atm input data" - "","streams: CLM1PT.$ATM_GRID" - "","datamode: CLMNCEP" - "CPLHIST","user generated forcing data from using coupler history files used to spinup relevant prognostic components (for CESM this is CLM, POP and CISM)" - "","streams: CPLHISTForcing.Solar,CPLHISTForcing.nonSolarFlux," - "","CPLHISTForcing.State3hr,CPLHISTForcing.State1hr" - "","datamode: CPLHIST" - "WW3","WW3 wave watch data from a short period of hi WW3 wave watch data from a short period of hi temporal frequency COREv2 data" - "","streams: WW3" - "","datamode: COPYALL" - --------------- -Namelists --------------- - -The DATM namelist file is ``datm_in`` (or ``datm_in_NNN`` for multiple instances). DATM namelists can be separated into two groups: *stream-independent* namelist variables that are specific to the DATM model and *stream-specific* namelist variables whose names are common to all the data models. - -Stream dependent input is in the namelist group ``"shr_strdata_nml`` which is discussed in :ref:`input streams ` and is the same for all data models. - -.. 
_datm-stream-independent-namelists: - -The stream-independent group is ``datm_nml`` and the DATM stream-independent namelist variables are: - -===================== ============================================================================================= -datm_nml vars description -===================== ============================================================================================= -decomp decomposition strategy (1d, root) - - 1d => vector decomposition, root => run on master task -restfilm master restart filename -restfils stream restart filename -force_prognostic_true TRUE => force prognostic behavior -bias_correct if set, include bias correction streams in namelist -anomaly_forcing if set, include anomaly forcing streams in namelist -factorfn filename containing correction factors for use in CORE2 modes (CORE2_IAF and CORE2_NYF) -presaero if true, prescribed aerosols are sent from datm -iradsw frequency to update radiation in number of time steps (or hours if negative) -wiso_datm if true, turn on water isotopes -===================== ============================================================================================= - -.. _datm-mode-independent-streams: - ------------------------------------------- -Streams independent of DATM_MODE value ------------------------------------------- - -In general, each ``DATM_MODE`` xml variable is identified with a unique set of streams. -However, there are several streams in DATM that can accompany any ``DATM_MODE`` setting. -Currently, these are streams associated with prescribed aerosols, co2 time series, topography, anomaly forcing and bias correction. -These mode-independent streams are activated differently, depending on the stream. - -- ``prescribed aerosol stream:`` - To add this stream, set ``$DATM_PRESAERO`` to a supported value other than ``none``. - -- ``co2 time series stream``: - To add this stream, set ``$DATM_CO2_TSERIES`` to a supported value other than ``none``. - -- ``topo stream``: - To add this stream, set ``$DATM_TOPO`` to a supported value other than ``none``. - -- ``anomaly forcing stream:`` - To add this stream, you need to add any of the following keyword/value pairs to the end of ``user_nl_datm``: - :: - - Anomaly.Forcing.Precip = - Anomaly.Forcing.Temperature = - Anomaly.Forcing.Pressure = - Anomaly.Forcing.Humidity = - Anomaly.Forcing.Uwind = - Anomaly.Forcing.Vwind = - Anomaly.Forcing.Shortwave = - Anomaly.Forcing.Longwave = - -- ``bias_correct stream:`` - To add this stream, you need to add any of the following keyword/value pairs to the end of ``user_nl_datm``: - :: - - BC.QIAN.CMAP.Precip = - BC.QIAN.GPCP.Precip = - BC.CRUNCEP.CMAP.Precip = - BC.CRUNCEP.GPCP.Precip = - -.. _datm-fields: - ---------------- -DATM Field names ---------------- - -DATM defines a set of pre-defined internal field names as well as mappings for how those field names map to the fields sent to the coupler. -In general, the stream input file should translate the stream input variable names into the ``datm_fld`` names for use within the data atmosphere model. - -..
csv-table:: "DATM internal field names" - :header: "datm_fld (avifld)", "driver_fld (avofld)" - :widths: 30, 30 - - "z", "Sa_z" - "topo", "Sa_topo" - "u", "Sa_u" - "v", "Sa_v" - "tbot", "Sa_tbot" - "ptem", "Sa_ptem" - "shum", "Sa_shum" - "dens", "Sa_dens" - "pbot", "Sa_pbot" - "pslv", "Sa_pslv" - "lwdn", "Faxa_lwdn" - "rainc", "Faxa_rainc" - "rainl", "Faxa_rainl" - "snowc", "Faxa_snowc" - "snowl", "Faxa_snowl" - "swndr", "Faxa_swndr" - "swvdr", "Faxa_swvdr" - "swndf", "Faxa_swndf" - "swvdf", "Faxa_swvdf" - "swnet", "Faxa_swnet" - "co2prog", "Sa_co2prog" - "co2diag", "Sa_co2diag" - "bcphidry", "Faxa_bcphidry" - "bcphodry", "Faxa_bcphodry" - "bcphiwet", "Faxa_bcphiwet" - "ocphidry", "Faxa_ocphidry" - "ocphodry", "Faxa_ocphodry" - "ocphiwet", "Faxa_ocphiwet" - "dstwet1", "Faxa_dstwet1" - "dstwet2", "Faxa_dstwet2" - "dstwet3", "Faxa_dstwet3" - "dstwet4", "Faxa_dstwet4" - "dstdry1", "Faxa_dstdry1" - "dstdry2", "Faxa_dstdry2" - "dstdry3", "Faxa_dstdry3" - "dstdry4", "Faxa_dstdry4" - "tref", "Sx_tref" - "qref", "Sx_qref" - "avsdr", "Sx_avsdr" - "anidr", "Sx_anidr" - "avsdf", "Sx_avsdf" - "anidf", "Sx_anidf" - "ts", "Sx_t" - "to", "So_t" - "snowhl", "Sl_snowh" - "lfrac", "Sf_lfrac" - "ifrac", "Sf_ifrac" - "ofrac", "Sf_ofrac" - "taux", "Faxx_taux" - "tauy", "Faxx_tauy" - "lat", "Faxx_lat" - "sen", "Faxx_sen" - "lwup", "Faxx_lwup" - "evap", "Faxx_evap" - "co2lnd", "Fall_fco2_lnd" - "co2ocn", "Faoo_fco2_ocn" - "dms", "Faoo_fdms_ocn" - "precsf", "Sa_precsf" - "prec_af", "Sa_prec_af" - "u_af", "Sa_u_af" - "v_af", "Sa_v_af" - "tbot_af", "Sa_tbot_af" - "pbot_af", "Sa_pbot_af" - "shum_af", "Sa_shum_af" - "swdn_af", "Sa_swdn_af" - "lwdn_af", "Sa_lwdn_af" - "rainc_18O", "Faxa_rainc_18O" - "rainc_HDO", "Faxa_rainc_HDO" - "rainl_18O", "Faxa_rainl_18O" - "rainl_HDO", "Faxa_rainl_HDO" - "snowc_18O", "Faxa_snowc_18O" - "snowc_HDO", "Faxa_snowc_HDO" - "snowl_18O", "Faxa_snowl_18O" - "snowl_HDO", "Faxa_snowl_HDO" - "shum_16O", "Sa_shum_16O" - "shum_18O", "Sa_shum_18O" diff --git a/doc/source/data_models/data-lnd.rst b/doc/source/data_models/data-lnd.rst deleted file mode 100644 index e22f6ec9f1f..00000000000 --- a/doc/source/data_models/data-lnd.rst +++ /dev/null @@ -1,154 +0,0 @@ -.. _data-lnd: - -Data Land (DLND) -================ - -The land model is unique because it supports land data and snow data (*lnd and sno*) almost as if they were two separate components, but they are in fact running in one component model through one interface. -The lnd (land) data consist of fields sent to the atmosphere. -This set of data is used when running DLND with an active atmosphere. -In general this is not a mode that is used or supported. -The sno (snow) data consist of fields sent to the glacier model. This set of data is used when running dlnd with an active glacier model (TG compsets). Both sets of data are assumed to be on the same grid. - -.. _dlnd-xml-vars: - ---------------- -xml variables ---------------- - -The following are xml variables that CIME supports for DLND. -These variables are defined in ``$CIMEROOT/src/components/data_comps/dlnd/cime_config/config_component.xml``. -These variables will appear in ``env_run.xml`` and are used by the DLND ``cime_config/buildnml`` script to generate the DLND namelist file ``dlnd_in`` and the required associated stream files for the case. - -.. note:: These xml variables are used by the the dlnd's **cime_config/buildnml** script in conjunction with dlnd's **cime_config/namelist_definition_dlnd.xml** file to generate the namelist file ``dlnd_in``. - -.. 
csv-table:: "DLND xml variables" - :header: "xml variable", "description" - :widths: 15, 85 - - "DLND_MODE", "Mode for data land component" - "", "Valid values are: NULL, CPLHIST, GLC_CPLHIST" - - "DLND_CPLHIST_DOMAIN_FILE", "Coupler history forcing data mode - full pathname of model domain file" - "DLND_CPLHIST_CASE", "Coupler history forcing data mode - case name" - "DLND_CPLHIST_DIR", "Coupler history forcing data mode - directory containing coupler history data" - "DLND_CPLHIST_YR_ALIGN", "Coupler history forcing data mode - simulation year corresponding to DLND_CPLHIST_YR_START" - "DLND_CPLHIST_YR_START", "Coupler history forcing data mode - starting year to loop data over" - "DLND_CPLHIST_YR_END", "Coupler history forcing data mode - ending year to loop data over" - -.. note:: If ``DLND_MODE`` is set to ``CPLHIST``, it is normally assumed that the model domain will be identical to **all** of the stream domains. To ensure this, the xml variables ``LND_DOMAIN_PATH`` and ``LND_DOMAIN_FILE`` are ignored and a valid setting **must be given** for ``DLND_CPLHIST_DOMAIN_FILE``. If ``DLND_CPLHIST_DOMAIN_FILE`` is set to ``null``, then the dlnd component domain information is read in from the first coupler history file in the target stream and it is assumed that the first coupler stream file that is pointed to contains the domain information for that stream. Alternatively, ``DLND_CPLHIST_DOMAIN_FILE`` can be set to ``$LND_DOMAIN_PATH/$LND_DOMAIN_FILE``. - -.. _dlnd-datamodes: - --------------------- -datamode values --------------------- - -The xml variable ``DLND_MODE`` (described in :ref:`dlnd_mode`) sets the streams that are associated with DLND and also sets the namelist variable ``datamode``. -``datamode`` (which appears in ``shr_strdata_nml``) specifies what additional operations need to be done by DLND on the streams before returning to the driver. - -Each data model has its own set of supported ``datamode`` values. The following are the supported DLND ``datamode`` values, as defined in the file ``namelist_definition_dlnd.xml``. - -.. csv-table:: "Valid values for datamode namelist variable" - :header: "datamode variable", "description" - :widths: 20, 80 - - "NULL", "Turns off the data model as a provider of data to the coupler. The ``lnd_present`` flag will be set to false and the coupler will assume no exchange of data to or from the data model." - "COPYALL", "The default science mode of the data model is the COPYALL mode. This mode will examine the fields found in all input data streams; if any input field names match the field names used internally, they are copied into the export array and passed directly to the coupler without any special user code. Any required fields not found on an input stream will be set to zero." - -.. _dlnd_mode: - -------------------------------- -DLND_MODE, datamode and streams -------------------------------- - -The following table describes the valid values of ``DLND_MODE`` (defined in the ``config_component.xml`` file for DLND), and how they relate to the associated input streams and the ``datamode`` namelist variable. -CIME will generate a value of ``DLND_MODE`` based on the compset. - -.. csv-table:: "Relationship between DLND_MODE, datamode and streams" - :header: "DLND_MODE", "description-streams-datamode" - :widths: 20, 80 - - "NULL", "null mode" - "", "streams: none" - "", "datamode: null" - "CPLHIST", "land forcing data (e.g. 
- -.. _dlnd_mode: - -------------------------------- -DLND_MODE, datamode and streams -------------------------------- - -The following table describes the valid values of ``DLND_MODE`` (defined in the ``config_component.xml`` file for DLND), and how they relate to the associated input streams and the ``datamode`` namelist variable. -CIME will generate a value of ``DLND_MODE`` based on the compset. - -.. csv-table:: "Relationship between DLND_MODE, datamode and streams" - :header: "DLND_MODE", "description-streams-datamode" - :widths: 20, 80 - - "NULL", "null mode" - "", "streams: none" - "", "datamode: null" - "CPLHIST", "land forcing data (e.g. produced by CESM/CLM) from a previous model run are read in from coupler history files" - "", "streams: lnd.cplhist" - "", "datamode: COPYALL" - "GLC_CPLHIST", "glc coupling fields (e.g. produced by CESM/CLM) from a previous model run are read in from coupler history files" - "", "streams: sno.cplhist" - "", "datamode: COPYALL" - ---------- -Namelists ---------- - -The namelist file for DLND is ``dlnd_in`` (or ``dlnd_in_NNN`` for multiple instances). - -As is the case for all data models, DLND namelists can be separated into two groups, stream-independent and stream-dependent. - -The stream dependent group is :ref:`shr_strdata_nml`. - -.. _dlnd-stream-independent-namelists: - -The stream-independent group is ``dlnd_nml`` and the DLND stream-independent namelist variables are: - -===================== ====================================================== -decomp                decomposition strategy (1d, root) - -                      1d => vector decomposition, root => run on master task -restfilm              master restart filename -restfils              stream restart filename -force_prognostic_true TRUE => force prognostic behavior -===================== ====================================================== - -To change the namelist settings in ``dlnd_in``, edit the file ``user_nl_dlnd`` (see the sketch after the field names table below). - -.. _dlnd-mode-independent-streams: - --------------------------------------- -Streams independent of DLND_MODE value --------------------------------------- - -There are no datamode independent streams for DLND. - -.. _dlnd-fields: - ------------ -Field names ------------ - -DLND defines a set of pre-defined internal field names as well as mappings for how those field names map to the fields sent to the coupler. -In general, the stream input file should translate the stream input variable names into the ``dlnd_fld`` names below for use within the data land model. - -.. csv-table:: "DLND internal field names" - :header: "dlnd_fld (avifld)", "driver_fld (avofld)" - :widths: 30, 30 - - "t", "Sl_t" - "tref", "Sl_tref" - "qref", "Sl_qref" - "avsdr", "Sl_avsdr" - "anidr", "Sl_anidr" - "avsdf", "Sl_avsdf" - "anidf", "Sl_anidf" - "snowh", "Sl_snowh" - "taux", "Fall_taux" - "tauy", "Fall_tauy" - "lat", "Fall_lat" - "sen", "Fall_sen" - "lwup", "Fall_lwup" - "evap", "Fall_evap" - "swnet", "Fall_swnet" - "lfrac", "Sl_landfrac" - "fv", "Sl_fv" - "ram1", "Sl_ram1" - "flddst1", "Fall_flxdst1" - "flxdst2", "Fall_flxdst2" - "flxdst3", "Fall_flxdst3" - "flxdst4", "Fall_flxdst4" - "tsrfNN", "Sl_tsrfNN" - "topoNN", "Sl_topoNN" - "qiceNN", "Flgl_qiceNN" - -where NN = (00,01,02,..., ``glc_nec``), and ``glc_nec`` is the number of glacier elevation classes. -Note that the number of elevation classes on the input files must be the same as in the run.
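As noted above, stream-independent settings are changed through ``user_nl_dlnd`` in the case directory rather than by editing ``dlnd_in`` directly. A minimal sketch; the value shown is purely illustrative (the valid ``decomp`` values are ``1d`` and ``root``)::

   decomp = 'root'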
diff --git a/doc/source/data_models/data-model-science.rst b/doc/source/data_models/data-model-science.rst deleted file mode 100644 index b92a7331e51..00000000000 --- a/doc/source/data_models/data-model-science.rst +++ /dev/null @@ -1,29 +0,0 @@ -.. _data-model-science: - -Data Model Science -================== - -When a given data model is run, the user must specify which *science mode* it will run in. -Each data model has a fixed set of fields that it must send to the coupler, but it is the choice of mode that specifies how that set of fields is to be computed. -Each mode activates various assumptions about what input fields are available from the input data streams, what input fields are available from the coupler, and how to use this input data to compute the output fields sent to the coupler. - -In general, a mode might specify... - -- that fields be set to a time invariant constant (so that no input data is needed) -- that fields be taken directly from input data files (the input streams) -- that fields be computed using data read in from input files -- that fields be computed using data received from the coupler -- some combination of the above. - -If a science mode is chosen that is not consistent with the input data provided, the model may abort (perhaps with a "missing data" error message), or the model may send erroneous data to the coupler (for example, if a mode assumes an input stream has temperature in Kelvin, but it really has temperature in Celsius). -Such an error is unlikely unless a user has edited the run scripts to specify either non-standard input data or a non-standard science mode. -When editing the run scripts to use non-standard stream data or modes, users must be careful that the input data is consistent with the science mode and should verify that the data model is providing data to the coupler as expected. - -The data model mode is a character string that is set in the namelist variable ``datamode`` in the namelist group ``shr_strdata_nml``. Although each data model, -``datm``, ``dlnd``, ``drof``, ``docn``, ``dice`` and ``dwav`` has its own set of valid datamode values, two modes are common to all data models: ``COPYALL`` and ``NULL``. - -``dataMode = "COPYALL"`` - The default mode is ``COPYALL`` -- the model will assume *all* the data that must be sent to the coupler will be found in the input data streams, and that this data can be sent to the coupler, unaltered, except for spatial and temporal interpolation. - -``dataMode = "NULL"`` - ``NULL`` mode turns off the data model as a provider of data to the coupler. The ``model_present`` flag (e.g. ``atm_present``) will be set to false and the coupler will assume no exchange of data to or from the data model. diff --git a/doc/source/data_models/data-ocean.rst b/doc/source/data_models/data-ocean.rst deleted file mode 100644 index 899b6b40529..00000000000 --- a/doc/source/data_models/data-ocean.rst +++ /dev/null @@ -1,309 +0,0 @@ -.. _data-ocean: - -=================== -Data Ocean (DOCN) -=================== - -Data ocean can be run both as a prescribed component, simply reading -in SST data from a stream, or as a prognostic slab ocean model -component. - -The data ocean component (DOCN) always returns SSTs to the driver. -The atmosphere/ocean fluxes are computed in the coupler. Therefore, -the data ocean model does not compute fluxes like the data ice (DICE) -model. DOCN has two distinct modes of operation. DOCN can run as a -pure data model, reading in ocean SSTs (normally climatological) from -input datasets, performing time/spatial interpolations, and passing -these to the coupler. Alternatively, DOCN can compute updated SSTs by -running as a slab ocean model where bottom ocean heat flux convergence -and boundary layer depths are read in and used with the -atmosphere/ocean and ice/ocean fluxes obtained from the driver. - -DOCN running in prescribed mode assumes that the only field in the -input stream is SST and also that SST is in Celsius and must be -converted to Kelvin. All other fields are set to zero except for -ocean salinity, which is set to a constant reference salinity value.
-Normally the ice fraction data (used for prescribed CICE) is found in -the same data files that provide SST data to the data ocean model -since SST and ice fraction data are derived from the same -observational data sets and are consistent with each other. For DOCN -prescribed mode, default yearly climatological datasets are provided -for various model resolutions. - -DOCN running as a slab ocean model is used in conjunction with active -ice mode running in full prognostic mode (e.g. CICE for CESM). This -mode computes a prognostic sea surface temperature and a freeze/melt -potential (surface Q-flux) used by the sea ice model. This -calculation requires an external SOM forcing data file that includes -ocean mixed layer depths and bottom-of-the-slab Q-fluxes. -Scientifically appropriate bottom-of-the-slab Q-fluxes are normally -ocean resolution dependent and are derived from the ocean model output -of a fully coupled CCSM run. Note that this mode no longer runs out -of the box; the default testing SOM forcing file is not scientifically -appropriate and is provided for testing and development purposes only. -Users must create scientifically appropriate data for their particular -application or use one of the standard SOM forcing files from the full -prognostic control runs. For CESM, some of these are available in the -`inputdata repository -`_. -The user then modifies the ``$DOCN_SOM_FILENAME`` variable in -env_run.xml to point to the appropriate SOM forcing dataset. - -.. note:: A tool is available to derive valid `SOM forcing `_ and more information on creating the SOM forcing is also available. - -.. _docn-xml-vars: - -------------- -xml variables -------------- - -The following are xml variables that CIME supports for DOCN. These -variables are defined in -``$CIMEROOT/src/components/data_comps/docn/cime_config/config_component.xml``. -These variables will appear in ``env_run.xml`` and are used by the -DOCN ``cime_config/buildnml`` script to generate the DOCN namelist -file ``docn_in`` and the required associated stream files for the -case. - -.. note:: These xml variables are used by the docn's **cime_config/buildnml** script in conjunction with docn's **cime_config/namelist_definition_docn.xml** file to generate the namelist file ``docn_in``. - -.. csv-table:: "DOCN xml variables" - :header: "xml variable", "description" - :widths: 15, 85 - - "DOCN_MODE", "Data mode" - "", "Valid values are: null, prescribed, som, interannual, ww3" - "DOCN_SOM_FILENAME", "Sets SOM forcing data filename for pres runs, only used in D and E compset" - "SSTICE_STREAM", "Prescribed SST and ice coverage stream name." - "", "Sets SST and ice coverage stream name for prescribed runs." - "SSTICE_DATA_FILENAME", "Prescribed SST and ice coverage data file name." - "", "Sets SST and ice coverage data file name for DOCN prescribed runs." - "SSTICE_YEAR_ALIGN", "The model year that corresponds to SSTICE_YEAR_START on the data file." - "", "Prescribed SST and ice coverage data will be aligned so that the first year of" - "", "data corresponds to SSTICE_YEAR_ALIGN in the model. For instance, if the first" - "", "year of prescribed data is the same as the first year of the model run, this" - "", "should be set to the year given in RUN_STARTDATE."
- "", "If SSTICE_YEAR_ALIGN is later than the model's starting year, or if the model is" - "", "run after the prescribed data ends (as determined by SSTICE_YEAR_END), the" - "", "default behavior is to assume that the data from SSTICE_YEAR_START to SSTICE_YEAR_END" - "", "cyclically repeats. This behavior is controlled by the *taxmode* stream option" - "SSTICE_YEAR_START", "The first year of data to use from SSTICE_DATA_FILENAME." - "", "This is the first year of prescribed SST and ice coverage data to use. For" - "", "example, if a data file has data for years 0-99, and SSTICE_YEAR_START is 10," - "", "years 0-9 in the file will not be used." - "SSTICE_YEAR_END", "The last year of data to use from SSTICE_DATA_FILENAME." - "", "This is the last year of prescribed SST and ice coverage data to use. For" - "", "example, if a data file has data for years 0-99, and value is 49," - "", "years 50-99 in the file will not be used." - -.. note:: For multi-year runs requiring AMIP datasets of sst/ice_cov fields, you need to set the xml variables for ``DOCN_SSTDATA_FILENAME``, ``DOCN_SSTDATA_YEAR_START``, and ``DOCN_SSTDATA_YEAR_END``. CICE in prescribed mode also uses these values. - -.. _docn-datamodes: - ---------------- -datamode values ---------------- - -The xml variable ``DOCN_MODE`` (described in :ref:`docn_mode`) sets the streams that are associated with DOCN and also sets the namelist variable ``datamode``. -``datamode`` (which appears in ``shr_strdata_nml``) specifies what additional operations need to be done by DOCN on the streams before returning to the driver. - -Each data model has its own set of supported ``datamode`` values. The following are the supported DOCN ``datamode`` values, as defined in the file ``namelist_definition_docn.xml``. - -.. csv-table:: "Valid values for datamode namelist variable" - :header: "datamode variable", "description" - :widths: 20, 80 - - "NULL", "Turns off the data model as a provider of data to the coupler. The ocn_present flag will be set to false and the coupler will assume no exchange of data to or from the data model." - "COPYALL", "The default science mode of the data model is the COPYALL mode. This mode will examine the fields found in all input data streams; if any input field names match the field names used internally, they are copied into the export array and passed directly to the coupler without any special user code. Any required fields not found on an input stream will be set to zero." - "SSTDATA", "assumes the only field in the input stream is SST. It also assumes the SST is in Celsius and must be converted to Kelvin. All other fields are set to zero except for ocean salinity, which is set to a constant reference salinity value. Normally the ice fraction data is found in the same data files that provide SST data to the data ocean model. They are normally found in the same file because the SST and ice fraction data are derived from the same observational data sets and are consistent with each other. They are normally found in the same file because the SST and ice fraction data are derived from the same observational data sets and are consistent with each other." - "IAF", "is the interannually varying version of SSTDATA" - "SOM", "(slab ocean model) mode is a prognostic mode. This mode computes a prognostic sea surface temperature and a freeze/melt potential (surface Q-flux) used by the sea ice model. This calculation requires an external SOM forcing data file that includes ocean mixed layer depths and bottom-of-the-slab Q-fluxes. 
Scientifically appropriate bottom-of-the-slab Q-fluxes are normally ocean resolution dependent and are derived from the ocean model output of a fully coupled CCSM run. Note that this mode no longer runs out of the box; the default SOM forcing file is not scientifically appropriate and is provided for testing and development purposes only. Users must create scientifically appropriate data for their particular application. A tool is available to derive valid SOM forcing." - -.. _docn_mode: - -------------------------------- -DOCN_MODE, datamode and streams -------------------------------- - -The following table describes the valid values of ``DOCN_MODE`` (defined in the ``config_component.xml`` file for DOCN), and how they relate to the associated input streams and the ``datamode`` namelist variable. -CIME will generate a value of ``DOCN_MODE`` based on the compset. - -.. csv-table:: "Relationship between DOCN_MODE, datamode and streams" - :header: "DOCN_MODE", "description-streams-datamode" - :widths: 20, 80 - - "null", "null mode" - "", "streams: none" - "", "datamode: null" - "prescribed","run with prescribed climatological SST and ice-coverage" - "","streams: prescribed" - "","datamode: SSTDATA" - "interannual", "run with interannual SST and ice-coverage" - "","streams: prescribed" - "","datamode: SSTDATA" - "som", "run in slab ocean mode" - "","streams: som" - "","datamode: SOM" - "ww3", "ww3 mode" - "", "streams: ww3" - "", "datamode: COPYALL" - -.. _docn-namelists: - ---------- -Namelists ---------- - -As is the case for all data models, DOCN namelists can be separated into two groups, stream-independent and stream-dependent. - -The namelist file for DOCN is ``docn_in`` (or ``docn_in_NNN`` for multiple instances). - -The stream dependent group is :ref:`shr_strdata_nml`. - -As part of the stream dependent namelist input, DOCN supports two science modes, ``SSTDATA`` (prescribed mode) and ``SOM`` (slab ocean mode). - -.. _docn-stream-independent-namelists: - -The stream-independent group is ``docn_nml`` and the DOCN stream-independent namelist variables are: - -===================== ====================================================== -decomp                decomposition strategy (1d, root) - -                      1d => vector decomposition, root => run on master task -restfilm              master restart filename -restfils              stream restart filename -force_prognostic_true TRUE => force prognostic behavior -===================== ====================================================== - -To change the namelist settings in ``docn_in``, edit the file ``user_nl_docn``. - -.. _docn-mode-independent-streams: - ---------------------------------- -Datamode independent streams ---------------------------------- - -There are no datamode independent streams for DOCN. - -.. _docn-fields: - ------------ -Field names ------------ - -DOCN defines a set of pre-defined internal field names as well as mappings for how those field names map to the fields sent to the coupler. - -.. note:: In general, the stream input file should translate the stream input variable names into the ``docn_fld`` names below for use within the data ocean model. - -.. csv-table:: "DOCN internal field names" - :header: "docn_fld (avifld)", "driver_fld (avofld)" - :widths: 30, 30 - - "t", "So_t" - "u", "So_u" - "v", "So_v" - "dhdx", "So_dhdx" - "dhdy", "So_dhdy" - "s", "So_s" - "h", "strm_h (internal to docn_comp_mod only)" - "qbot", "strm_qbot (internal to docn_comp_mod only)"
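To select the slab ocean configuration from the case directory, one might use ``xmlchange`` as in the following sketch; the SOM forcing filename is a hypothetical placeholder for scientifically appropriate data::

   > ./xmlchange DOCN_MODE=som
   > ./xmlchange DOCN_SOM_FILENAME=pop_frc.gx1v6.hypothetical.nc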
- -.. _creating-sstdata-input-from-prognostic-run: - ---------------------------------------------------------------------- -Creating SSTDATA mode input from a fully prognostic run (CESM only) ---------------------------------------------------------------------- - -The following outlines the steps you would take to create monthly averages of SST and ice coverage from a previous fully prognostic run that can then be read as stream data by DOCN. - -As an example, the following uses an f09_g16 CESM B-configuration simulation using CAM5 physics and with cosp enabled. The procedure to create the SST/ICE file is as follows: - -1. Save monthly averaged 'aice' information from cice code (this is the default). - -2. Save monthly averaged SST information from pop2. To do this, copy $SRCROOT/pop2/input_templates/gx1v6_tavg_contents to $CASEROOT/SourceMods/src.pop2 and change the 2 in front of SST to 1 for monthly frequency. - -3. Extract (using ncrcat) SST from monthly pop2 history files and form a single netcdf file containing just SST; change SST to SST_cpl. - :: - - > ncrcat -v SST case.pop.h.*.nc temp.nc - > ncrename -v SST,SST_cpl temp.nc sst_cpl.nc - -4. Extract aice from monthly cice history files and form a single netcdf file containing aice; change aice to ice_cov; divide values by 100 (to convert from percent to fraction). - :: - - > ncrcat -v aice case.cice.h.*.nc temp.nc - > ncrename -v aice,ice_cov temp.nc temp2.nc - > ncap2 -s 'ice_cov=ice_cov/100.' temp2.nc ice_cov.nc - -5. Modify fill values in the sst_cpl file (which are over land points) to have value -1.8 and remove fill and missing value designators; change coordinate lengths and names: to accomplish this, first run ncdump, then replace _ with -1.8 in SST_cpl, then remove lines with _FillValue and missing_value. - (Note: although it might be possible to merely change the fill value to -1.8, this is conforming to other SST/ICE files, which have SST_cpl explicitly set to -1.8 over land.) - To change coordinate lengths and names, replace nlon by lon, nlat by lat, TLONG by lon, TLAT by lat. - The last step is to run ncgen. Note: when using ncdump followed by ncgen, precision will be lost; however, one can specify -d 9,17 to maximize precision - as in the following example: - :: - - > ncdump -d 9,17 old.nc > old - > ncgen -o new.nc new - -6. Modify fill values in the ice_cov file (which are over land points) to have value 1 and remove fill and missing value designators; change coordinate lengths and names; patch longitude and latitude to replace missing values. - To accomplish this, first run ncdump, then replace _ with 1 in ice_cov, then remove lines with _FillValue and missing_value. - To change coordinate lengths and names, replace ni by lon, nj by lat, TLON by lon, TLAT by lat. - To patch longitude and latitude arrays, replace values of those arrays with those in sst_cpl file. - The last step is to run ncgen. - (Note: the replacement of longitude and latitude missing values by actual values should not be necessary but is safer.) - -7. Combine (using ncks) the two netcdf files. - :: - - > ncks -v ice_cov ice_cov.nc sst_cpl.nc - - Rename the file to ssticetemp.nc. - The time variable will refer to the number of days at the end of each month, counting from year 0, whereas the actual simulation began at year 1. - However, we want time values to be in the middle of each month, referenced to the first year of the simulation (first time value equals 15.5).
- Extract (using ncks) time variable from existing amip sst file (for correct number of months - 132 in this example) into working netcdf file. - :: - - > ncks -d time,0,131 -v time amipsst.nc ssticetemp.nc - - Add date variable: ncdump date variable from existing amip sst file; modify first year to be year 0 instead of 1949 (do not include leading zeroes or they will be interpreted as octal) and use correct number of months; ncgen to new netcdf file; extract date (using ncks) and place in working netcdf file. - :: - - > ncks -v date datefile.nc ssticetemp.nc - - Add datesec variable: extract (using ncks) datesec (correct number of months) from existing amip sst file and place in working netcdf file. - :: - - > ncks -d time,0,131 -v datesec amipsst.nc ssticetemp.nc - -8. At this point, you have an SST/ICE file in the correct format. - -9. Due to CAM's linear interpolation between mid-month values, you need to apply a procedure to assure that the computed monthly means are consistent with the input data. - To do this, invoke ``$SRCROOT/components/cam/tools/icesst/bcgen`` and follow these steps: - - a. Rename SST_cpl to SST, and ice_cov to ICEFRAC in the current SST/ICE file: - :: - - > ncrename -v SST_cpl,SST -v ice_cov,ICEFRAC ssticetemp.nc - - b. In driver.f90, sufficiently expand the lengths of variables prev_history and history (16384 should be sufficient); also comment out the test that the climate year be between 1982 and 2001 (lines 152-158). - - c. In bcgen.f90 and setup_outfile.f90, change the dimensions of xlon and xlat to (nlon,nlat); this is to accommodate the use of a non-cartesian ocean grid. - - d. In setup_outfile.f90, modify the 4th and 5th arguments in the calls to wrap_nf_def_var for *lon* and *lat* to be *2* and *dimids*; this is to accommodate the use of a non-cartesian ocean grid. - - e. Adjust Makefile to have proper path for LIB_NETCDF and INC_NETCDF. - - f. Modify namelist accordingly. - - g. Make bcgen and execute per instructions. The resulting sstice_ts.nc file is the desired ICE/SST file. - -10. Place the new SST/ICE file in desired location and modify ``env_run.xml`` to have: - - a. ``SSTICE_DATA_FILENAME`` point to the complete path of your SST/ICE file. - - b. ``SSTICE_GRID_FILENAME`` correspond to full path of (in this case) gx1v6 grid file. - - c. ``SSTICE_YEAR_START`` set to 0 - - d. ``SSTICE_YEAR_END`` to one less than the total number of years - - e. ``SSTICE_YEAR_ALIGN`` to 1 (for CESM, since CESM starts counting at year 1).
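Expressed as ``xmlchange`` commands, the settings above might look like the following sketch, with placeholder paths and the 132-month (11-year) example dataset assumed::

   > ./xmlchange SSTICE_DATA_FILENAME=/path/to/sstice_ts.nc
   > ./xmlchange SSTICE_GRID_FILENAME=/path/to/gx1v6_grid.nc
   > ./xmlchange SSTICE_YEAR_START=0
   > ./xmlchange SSTICE_YEAR_END=10
   > ./xmlchange SSTICE_YEAR_ALIGN=1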
diff --git a/doc/source/data_models/data-river.rst b/doc/source/data_models/data-river.rst deleted file mode 100644 index 25cc22b656a..00000000000 --- a/doc/source/data_models/data-river.rst +++ /dev/null @@ -1,132 +0,0 @@ -.. _data-river: - -================= -Data River (DROF) -================= - -The data river model (DROF) provides river runoff data primarily to be used by the prognostic ocean component. -This data can either be observational (climatological or interannual river data) or data from a previous model run that is output to coupler history files and then read back in by DROF. - -.. _drof-xml-vars: - -------------- -xml variables -------------- - -The following are xml variables that CIME supports for DROF. -These variables are defined in ``$CIMEROOT/src/components/data_comps/drof/cime_config/config_component.xml``. -These variables will appear in ``env_run.xml`` and are used by the DROF ``cime_config/buildnml`` script to generate the DROF namelist file ``drof_in`` and the required associated stream files for the case. - -.. note:: These xml variables are used by the drof's **cime_config/buildnml** script in conjunction with drof's **cime_config/namelist_definition_drof.xml** file to generate the namelist file ``drof_in``. - -.. csv-table:: "DROF xml variables" - :header: "xml variable", "description" - :widths: 15, 85 - - "DROF_MODE", "Data mode" - "", "Valid values are: NULL,CPLHIST,DIATREN_ANN_RX1,DIATREN_IAF_RX1,IAF_JRA" - - "DROF_CPLHIST_DOMAIN_FILE", "Coupler history forcing data mode - full pathname of model domain file" - "DROF_CPLHIST_CASE", "Coupler history forcing data mode - case name" - "DROF_CPLHIST_DIR", "Coupler history forcing data mode - directory containing coupler history forcing data" - "DROF_CPLHIST_YR_ALIGN", "Coupler history forcing data mode - simulation year corresponding to DROF_CPLHIST_YR_START" - "DROF_CPLHIST_YR_START", "Coupler history forcing data mode - starting year to loop forcing data over" - "DROF_CPLHIST_YR_END", "Coupler history forcing data mode - ending year to loop forcing data over" - -.. note:: If ``DROF_MODE`` is set to ``CPLHIST``, it is normally assumed that the model domain will be identical to **all** of the stream domains. To ensure this, the xml variables ``ROF_DOMAIN_PATH`` and ``ROF_DOMAIN_FILE`` are ignored and a valid setting **must be given** for ``DROF_CPLHIST_DOMAIN_FILE``. If ``DROF_CPLHIST_DOMAIN_FILE`` is set to ``null``, then the drof component domain information is read in from the first coupler history file in the target stream, which is then assumed to contain the domain information for that stream. This is the default configuration. Alternatively, ``DROF_CPLHIST_DOMAIN_FILE`` can be set to ``$ROF_DOMAIN_PATH/$ROF_DOMAIN_FILE`` in a non-default configuration. - -.. _drof-datamodes: - --------------------- -datamode values --------------------- - -The xml variable ``DROF_MODE`` (described in :ref:`drof_mode`) sets the streams that are associated with DROF and also sets the namelist variable ``datamode``. -``datamode`` (which appears in ``shr_strdata_nml``) specifies what additional operations need to be done by DROF on the streams before returning to the driver. - -Each data model has its own set of supported ``datamode`` values. The following are the supported DROF ``datamode`` values, as defined in the file ``namelist_definition_drof.xml``. - -.. csv-table:: "Valid values for datamode namelist variable" - :header: "datamode variable", "description" - :widths: 20, 80 - - "NULL", "Turns off the data model as a provider of data to the coupler. The rof_present flag will be set to false and the coupler will assume no exchange of data to or from the data model." - "COPYALL", "Copies all fields directly from the input data streams. Any required fields not found on an input stream will be set to zero." - ---------- -Namelists ---------- - -The data river runoff model (DROF) provides data river input to prognostic components such as the ocean. - -The namelist file for DROF is ``drof_in``. - -As is the case for all data models, DROF namelists can be separated into two groups, stream-independent and stream-dependent. -The stream dependent group is :ref:`shr_strdata_nml`.
-The stream-independent group is ``drof_nml`` and the DROF stream-independent namelist variables are: - -.. _drof-stream-independent-namelists: - -===================== ====================================================== -decomp                decomposition strategy (1d, root) - -                      1d => vector decomposition, root => run on master task -restfilm              master restart filename -restfils              stream restart filename -force_prognostic_true TRUE => force prognostic behavior -===================== ====================================================== - -To change the namelist settings in ``drof_in``, edit the file ``user_nl_drof`` in your case directory. - -.. _drof_mode: - -------------------------------- -DROF_MODE, datamode and streams -------------------------------- - -The following table describes the valid values of ``DROF_MODE`` (defined in the ``config_component.xml`` file for DROF), and how they relate to the associated input streams and the ``datamode`` namelist variable. -CIME will generate a value of ``DROF_MODE`` based on the compset (a configuration sketch follows the field names table below). - -.. csv-table:: "Relationship between DROF_MODE, datamode and streams" - :header: "DROF_MODE", "description-streams-datamode" - :widths: 15, 85 - - "NULL", "null mode" - "", "streams: none" - "", "datamode: NULL" - "DIATREN_ANN_RX1", "Reads in annual forcing river data used for CORE2 forcing runs." - "", "streams: rof.diatren_ann_rx1" - "", "datamode: COPYALL" - "DIATREN_IAF_RX1", "Reads in intra-annual forcing river data used for CORE2 forcing runs." - "", "streams: rof.diatren_iaf_rx1" - "", "datamode: COPYALL" - "CPLHIST", "Reads in data from coupler history files generated by a previous run." - "", "streams: rof.cplhist" - "", "datamode: COPYALL" - "IAF_JRA", "Reads in intra-annual forcing river data used for JRA-55 forcing runs." - "", "streams: rof.iaf_jra" - "", "datamode: COPYALL" - -.. _drof-mode-independent-streams: - ------------------------------------------- -Streams independent of DROF_MODE value ------------------------------------------- - -There are no datamode independent streams for DROF. - -.. _drof-fields: - ----------------- -DROF Field names ----------------- - -DROF defines a set of pre-defined internal field names as well as mappings for how those field names map to the fields sent to the coupler. -In general, the stream input file should translate the stream input variable names into the ``drof_fld`` names for use within the data river model. - -.. csv-table:: "DROF internal field names" - :header: "drof_fld (avifld)", "driver_fld (avofld)" - :widths: 30, 30 - - "roff", "Forr_rofl" - "ioff", "Forr_rofi"
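A CPLHIST runoff case is configured the same way as the other data models; a minimal sketch with hypothetical case name, directory, and forcing years::

   > ./xmlchange DROF_MODE=CPLHIST
   > ./xmlchange DROF_CPLHIST_CASE=mycase001
   > ./xmlchange DROF_CPLHIST_DIR=/path/to/cplhist/files
   > ./xmlchange DROF_CPLHIST_YR_ALIGN=1
   > ./xmlchange DROF_CPLHIST_YR_START=2
   > ./xmlchange DROF_CPLHIST_YR_END=21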
diff --git a/doc/source/data_models/data-seaice.rst b/doc/source/data_models/data-seaice.rst deleted file mode 100644 index 1c44c623b59..00000000000 --- a/doc/source/data_models/data-seaice.rst +++ /dev/null @@ -1,180 +0,0 @@ -.. _data-seaice: - -Data Ice (DICE) -================ - -DICE is a combination of a data model and a prognostic model. -The data functionality reads in ice coverage. -The prognostic functionality calculates the ice/atmosphere and ice/ocean fluxes. -DICE receives the same atmospheric input from the coupler as the active CICE model (i.e., atmospheric states, shortwave fluxes, and ocean ice melt flux) and acts very similarly to CICE running in prescribed mode. -Currently, this component is only used to drive POP in "C" compsets. - -.. _dice-xml-vars: - ---------------- -xml variables ---------------- -The following are xml variables that CIME supports for DICE. -These variables are defined in ``$CIMEROOT/src/components/data_comps/dice/cime_config/config_component.xml``. -These variables will appear in ``env_run.xml`` and are used by the DICE ``cime_config/buildnml`` script to generate the DICE namelist file ``dice_in`` and the required associated stream files for the case. - -.. note:: These xml variables are used by the dice's **cime_config/buildnml** script in conjunction with dice's **cime_config/namelist_definition_dice.xml** file to generate the namelist file ``dice_in``. - -.. csv-table:: "DICE xml variables" - :header: "xml variable", "description" - :widths: 15, 85 - - "DICE_MODE", "Mode for sea-ice component" - "","Valid values are: null, prescribed, ssmi, ssmi_iaf, ww3" - -.. _dice-datamodes: - --------------------- -datamode values --------------------- - -The xml variable ``DICE_MODE`` (described in :ref:`dice_mode`) sets the streams that are associated with DICE and also sets the namelist variable ``datamode``. -``datamode`` (which appears in ``shr_strdata_nml``) specifies what additional operations need to be done by DICE on the streams before returning to the driver. - -Each data model has its own set of supported ``datamode`` values. The following are the supported DICE ``datamode`` values, as defined in the file ``namelist_definition_dice.xml``. - -.. csv-table:: "Valid values for datamode namelist variable" - :header: "datamode variable", "description" - :widths: 20, 80 - - "NULL", "Turns off the data model as a provider of data to the coupler. The ice_present flag will be set to false and the coupler will assume no exchange of data to or from the data model." - "COPYALL", "The default science mode of the data model is the COPYALL mode. This mode will examine the fields found in all input data streams; if any input field names match the field names used internally, they are copied into the export array and passed directly to the coupler without any special user code. Any required fields not found on an input stream will be set to zero." - "SSTDATA","Is a prognostic mode. It requires data be sent to the ice model. Ice fraction (extent) data is read from an input stream, atmosphere state variables are received from the coupler, and then an atmosphere-ice surface flux is computed and sent to the coupler. It is called ``SSTDATA`` mode because normally the ice fraction data is found in the same data files that provide SST data to the data ocean model. They are normally found in the same file because the SST and ice fraction data are derived from the same observational data sets and are consistent with each other." - -.. _dice_mode: - -------------------------------- -DICE_MODE, datamode and streams -------------------------------- - -The following table describes the valid values of ``DICE_MODE`` (defined in the ``config_component.xml`` file for DICE), and how they relate to the associated input streams and the ``datamode`` namelist variable. -CIME will generate a value of ``DICE_MODE`` based on the compset.
csv-table:: "Relationship between DICE_MODE, datamode and streams" - :header: "DICE_MODE, "description-streams-datamode" - :widths: 20, 80 - - "null", "null mode" - "", "streams: none" - "", "datamode: null" - "prescribed","prognostic mode - requires data to be sent to DICE" - "","streams: prescribed" - "","datamode: SSTDATA" - "ssmi", "Special Sensor Microwave Imager climatological data" - "","streams: SSMI" - "","datamode: SSTDATA" - "ssmi", "Special Sensor Microwave Imager inter-annual forcing data" - "","streams: SSMI_IAF" - "","datamode: SSTDATA" - "ww3", "ww3 mode" - "", "streams: ww3" - "", "datamode: COPYALL" - -NIf DICE_MODE is set to ``ssmi``, ``ssmi_iaf`` or ``prescribed``, it is a prognostic mode and requires data be sent to the ice model. -Ice fraction (extent) data is read from an input stream, atmosphere state variables are received from the coupler, and then an atmosphere-ice surface flux is computed and sent to the coupler. -Normally the ice fraction data is found in the same data files that provide SST data to the data ocean model. -They are normally found in the same file because the SST and ice fraction data are derived from the same observational data sets and are consistent with each other. - -.. _dice-namelists: - ---------- -Namelists ---------- - -The namelist file for DICE is ``dice_in`` (or ``dice_in_NNN`` for multiple instances). - -As is the case for all data models, DICE namelists can be separated into two groups, stream-independent and stream-dependent. - -The stream dependent group is :ref:`shr_strdata_nml`. - -.. _dice-stream-independent-namelists: - -The stream-independent group is ``dice_nml`` and the DICE stream-independent namelist variables are: - -===================== ====================================================== -decomp decomposition strategy (1d, root) - - 1d => vector decomposition, root => run on master task -flux_qacc activates water accumulation/melt wrt Q -flux_qacc0 initial water accumulation value -flux_qmin bound on melt rate -flux_swpf short-wave penetration factor -restfilm master restart filename -restfils stream restart filename -force_prognostic_true TRUE => force prognostic behavior -===================== ====================================================== - -To change the namelist settings in ``dice_in``, edit the file ``user_nl_dice``. - -.. _dice-mode-independent-streams: - --------------------------------------- -Streams independent of DICE_MODE value --------------------------------------- - -There are no datamode independent streams for DICE. - -.. _dice-fields: - ------------ -Field names ------------ - -DICE defines a set of pre-defined internal field names as well as mappings for how those field names map to the fields sent to the coupler. - -.. note:: In general, the stream input file should translate the stream input variable names into the ``docn_fld`` names below for use within the data ocn model. - -.. csv-table:: "DICE internal field names" - :header: "dice_fld (avifld)", "driver_fld (avofld)" - :widths: 30, 30 - - "ifrac", "Si_ifrac" - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/doc/source/data_models/data-wave.rst b/doc/source/data_models/data-wave.rst deleted file mode 100644 index 53ea0f9ae46..00000000000 --- a/doc/source/data_models/data-wave.rst +++ /dev/null @@ -1,127 +0,0 @@ -.. 
diff --git a/doc/source/data_models/data-wave.rst b/doc/source/data_models/data-wave.rst deleted file mode 100644 index 53ea0f9ae46..00000000000 --- a/doc/source/data_models/data-wave.rst +++ /dev/null @@ -1,127 +0,0 @@ -.. _data-wave: - -================= -Data Wave (DWAV) -================= - -The data wave model (DWAV) provides data wave forcing primarily to be used by the prognostic ocean component. -Currently, this data is climatological. - -.. _dwav-xml-vars: - -------------- -xml variables -------------- - -The following are XML variables that CIME supports for DWAV. -These variables will appear in ``env_run.xml`` and are used by the DWAV ``cime_config/buildnml`` script to generate the DWAV namelist file ``dwav_in`` and the required associated stream files for the case. - -.. note:: These XML variables are used by the DWAV **cime_config/buildnml** script in conjunction with the DWAV **cime_config/namelist_definition_dwav.xml** file to generate the namelist file ``dwav_in``. - -.. csv-table:: "DWAV xml variables" - :header: "xml variable", "description" - :widths: 15, 85 - - "DWAV_MODE", "Data mode" - "", "Valid values are: NULL, CLIMO" - -.. _dwav-datamodes: - --------------------- -DWAV datamode values --------------------- - -One of the variables in ``shr_strdata_nml`` is ``datamode``, whose value is a character string. -Each data model has a unique set of ``datamode`` values that it supports. - -The valid values for ``datamode`` are set by the XML variable ``DWAV_MODE`` in the ``config_component.xml`` file for DWAV. -CIME will generate a value of ``datamode`` that is compset dependent. - -The following are the supported DWAV datamode values and their relationship to the ``DWAV_MODE`` xml variable value. - -.. csv-table:: Relationship between ``DWAV_MODE`` xml variables and ``datamode`` namelist variables - :header: "DWAV_MODE (xml)", "datamode (namelist)" - :widths: 15, 90 - - "NULL", "NULL" - "", "This mode turns off the data model as a provider of data to the coupler." - "", "The ``wav_present`` flag will be set to ``false`` and the coupler assumes no exchange of data to or from the data model." - "CLIMO", "COPYALL" - "", "Examines the fields found in all input data streams and if any input field names match the field names used internally," - "", "they are copied into the export array and passed directly to the coupler without any special user code." - -.. _dwav-namelists: - ---------- -Namelists ---------- - -The data wave model (DWAV) provides data wave input to prognostic components such as the ocean. - -The namelist file for DWAV is ``dwav_in``. - -As is the case for all data models, DWAV namelists can be separated into two groups, stream-independent and stream-dependent. -The stream dependent group is :ref:`shr_strdata_nml`. -The stream-independent group is ``dwav_nml`` and the DWAV stream-independent namelist variables are: - -.. _dwav-stream-independent-namelists: - -===================== ====================================================== -decomp                decomposition strategy (1d, root) - -                      1d => vector decomposition, root => run on master task -restfilm              master restart filename -restfils              stream restart filename -force_prognostic_true TRUE => force prognostic behavior -===================== ====================================================== - -To change the namelist settings in ``dwav_in``, edit the file ``user_nl_dwav`` in your case directory. - -.. _dwav_mode: - -------------------------------- -DWAV_MODE, datamode and streams -------------------------------- - -The following table describes the valid values of ``DWAV_MODE`` (defined in the ``config_component.xml`` file for DWAV), and how they relate to the associated input streams and the ``datamode`` namelist variable.
-CIME will generate a value of ``DWAV_MODE`` based on the compset. - -.. csv-table:: "Relationship between DWAV_MODE, datamode and streams" - :header: "DWAV_MODE", "description-streams-datamode" - :widths: 15, 85 - - "NULL", "null mode" - "", "streams: none" - "", "datamode: NULL" - -.. _dwav-mode-independent-streams: - --------------------------------------- -Streams independent of DWAV_MODE value --------------------------------------- - -There are no datamode independent streams for DWAV. - -.. _dwav-fields: - ----------------- -Field names ----------------- - -DWAV defines a set of pre-defined internal field names as well as mappings for how those field names map to the fields sent to the coupler. -In general, the stream input file should translate the stream input variable names into the ``dwav_fld`` names below for use within the data wave model. - -.. csv-table:: "DWAV internal field names" - :header: "dwav_fld (avifld)", "driver_fld (avofld)" - :widths: 30, 30 - - "lamult", "Sw_lamult" - "ustokes","Sw_ustokes" - "vstokes", "Sw_vstokes" diff --git a/doc/source/data_models/design-details.rst b/doc/source/data_models/design-details.rst deleted file mode 100644 index ad24102b214..00000000000 --- a/doc/source/data_models/design-details.rst +++ /dev/null @@ -1,225 +0,0 @@ -.. _design-details: - -================ - Design Details -================ - ----------------------- -Data Model Performance ----------------------- - -There are two primary costs associated with strdata: reading data and spatially mapping data. -Time interpolation is relatively cheap in the current implementation. -As much as possible, redundant operations are minimized. -Fill and mapping weights are generated at initialization and saved. -The upper and lower bound mapped input data is saved between time steps to reduce mapping costs in cases where data is time interpolated more often than new data is read. -If the input data timestep is relatively small (for example, hourly data as opposed to daily or monthly data) the cost of reading input data can be quite large. -Also, there can be significant variation in cost of the data model over the course of the run, for instance when new input data must be read and interpolated, although this variation is relatively predictable. -The present implementation doesn't support changing the order of operations, for instance, time interpolating the data before spatial mapping. -Because the present computations are always linear, changing the order of operations will not fundamentally change the results. -The present order of operations generally minimizes the mapping cost for typical data model use cases. - ----------------------- -Data Model Limitations ----------------------- - -There are several limitations in both options and usage within the data models at the present time. -Spatial interpolation can only be performed from a two-dimensional latitude-longitude input grid. -The target grid can be arbitrary but the source grid must be able to be described by simple one-dimensional lists of longitudes and latitudes, although they don't have to be equally spaced. - ----------------------- -IO Through Data Models ----------------------- - -At the present time, data models can only read netcdf data, and IO is handled through either standard netcdf interfaces or through the PIO library using either netcdf or pnetcdf. -If standard netcdf is used, global fields are read and then scattered one field at a time.
-If PIO is used, then data will be read either serially or in parallel in chunks that are approximately the global field size divided by the number of IO tasks. -If pnetcdf is used through PIO, then the pnetcdf library must be included during the build of the model. -The pnetcdf path and option are hardwired into the ``Macros.make`` file for the specific machine. -To turn on ``pnetcdf`` in the build, make sure the ``Macros.make`` variables ``PNETCDF_PATH``, ``INC_PNETCDF``, and ``LIB_PNETCDF`` are set and that the PIO ``CONFIG_ARGS`` sets the ``PNETCDF_PATH`` argument. - -Beyond just the option of selecting IO with PIO, several variables are available to help optimize PIO IO performance; these include, for example, the ``PIO_STRIDE``, ``PIO_NUMTASKS``, ``PIO_ROOT``, and ``PIO_TYPENAME`` settings. -The total number of MPI tasks that can be used for IO is limited to the total number of tasks used by the data model. -Often though, using fewer IO tasks results in improved performance. -In general, [io_root + (num_iotasks-1)*io_stride + 1] has to be less than the total number of data model tasks. -In practice, PIO seems to perform optimally somewhere between the extremes of 1 task and all tasks, and is highly machine and problem dependent. - -------------- -Restart Files -------------- -Restart files are generated automatically by the data models based on a flag sent from the driver. -The restart files must meet the CIME naming convention and an ``rpointer`` file is generated at the same time. -An ``rpointer`` file is a *restart pointer* file which contains the name of the most recently created restart file. -Normally, if restart files are read, the restart filenames are specified in the ``rpointer`` file. -Optionally though, there are namelist variables such as ``restfilm`` to specify the restart filenames via namelist. If those namelist variables are set, the ``rpointer`` file will be ignored. - -In most cases, no restart file is required for the data models to restart exactly. -This is because there is no memory between timesteps in many of the data model science modes. -If a restart file is required, it will be written automatically and then must be used to continue the previous run. - -There are separate stream restart files that only exist for performance reasons. -A stream restart file contains information about the time axis of the input streams. -This information helps reduce the startup costs associated with reading the input dataset time axis information. -If a stream restart file is missing, the code will restart without it but may need to reread data from the input data files that would have been stored in the stream restart file. -This will take extra time but will not impact the results.
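For example, to restart a data ocean component from an explicit restart file instead of the ``rpointer`` file, one might add a line like the following to ``user_nl_docn`` (the filename is hypothetical)::

   restfilm = 'mycase.docn.r.0002-01-01-00000.nc'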
- -.. _data-structures: - ---------------- -Data Structures ---------------- - -The data models all use three fundamental routines. - -- $CIMEROOT/src/utils/shr_dmodel_mod.F90 - -- $CIMEROOT/src/utils/shr_stream_mod.F90 - -- $CIMEROOT/src/utils/shr_strdata.F90 - -These routines contain three data structures that are leveraged by all the data model code. - -The most basic type, ``shr_stream_fileType``, is contained in ``shr_stream_mod.F90`` and specifies basic information related to a given stream file. - -.. code-block:: Fortran - -   type shr_stream_fileType -      character(SHR_KIND_CL)            :: name = shr_stream_file_null ! the file name -      logical                           :: haveData = .false.          ! has t-coord data been read in? -      integer (SHR_KIND_IN)             :: nt = 0                      ! size of time dimension -      integer (SHR_KIND_IN),allocatable :: date(:)                     ! t-coord date: yyyymmdd -      integer (SHR_KIND_IN),allocatable :: secs(:)                     ! t-coord secs: elapsed on date -   end type shr_stream_fileType - -The following type, ``shr_stream_streamType``, encapsulates the information related to all files -specific to a target stream. These are the list of files found in the ``domainInfo`` -and ``fieldInfo`` blocks of the target stream description file (see the overview of the :ref:`stream_description_file`). - -.. code-block:: Fortran - -   type shr_stream_streamType -      !private ! no public access to internal components -      !--- input data file names and data --- -      logical                :: init ! has stream been initialized? -      integer (SHR_KIND_IN),pointer :: initarr(:) => null()! surrogate for init flag -      integer (SHR_KIND_IN)  :: nFiles ! number of data files -      character(SHR_KIND_CS) :: dataSource ! meta data identifying data source -      character(SHR_KIND_CL) :: filePath ! remote location of data files -      type(shr_stream_fileType), allocatable :: file(:) ! data specific to each file - -      !--- specifies how model dates align with data dates --- -      integer(SHR_KIND_IN)   :: yearFirst ! first year to use in t-axis (yyyymmdd) -      integer(SHR_KIND_IN)   :: yearLast ! last year to use in t-axis (yyyymmdd) -      integer(SHR_KIND_IN)   :: yearAlign ! align yearFirst with this model year -      integer(SHR_KIND_IN)   :: offset ! offset in seconds of stream data -      character(SHR_KIND_CS) :: taxMode ! cycling option for time axis - -      !--- useful for quicker searching --- -      integer(SHR_KIND_IN) :: k_lvd,n_lvd ! file/sample of least valid date -      logical              :: found_lvd ! T <=> k_lvd,n_lvd have been set -      integer(SHR_KIND_IN) :: k_gvd,n_gvd ! file/sample of greatest valid date -      logical              :: found_gvd ! T <=> k_gvd,n_gvd have been set - -      !---- for keeping files open -      logical                :: fileopen ! is current file open -      character(SHR_KIND_CL) :: currfile ! current filename -      type(file_desc_t)      :: currpioid ! current pio file desc - -      !--- stream data not used by stream module itself --- -      character(SHR_KIND_CXX):: fldListFile ! field list: file's field names -      character(SHR_KIND_CXX):: fldListModel ! field list: model's field names -      character(SHR_KIND_CL) :: domFilePath ! domain file: file path of domain file -      character(SHR_KIND_CL) :: domFileName ! domain file: name -      character(SHR_KIND_CS) :: domTvarName ! domain file: time-dim var name -      character(SHR_KIND_CS) :: domXvarName ! domain file: x-dim var name -      character(SHR_KIND_CS) :: domYvarName ! domain file: y-dim var name -      character(SHR_KIND_CS) :: domZvarName ! domain file: z-dim var name -      character(SHR_KIND_CS) :: domAreaName ! domain file: area var name -      character(SHR_KIND_CS) :: domMaskName ! domain file: mask var name - -      character(SHR_KIND_CS) :: tInterpAlgo ! Algorithm to use for time interpolation -      character(SHR_KIND_CL) :: calendar ! stream calendar -   end type shr_stream_streamType - -Finally, the ``shr_strdata_type`` is the heart of the CIME data -model implementation and contains information for all the streams -that are active for the target data model. The first part of the -``shr_strdata_type`` is filled in by the namelist values read in from the -namelist group (see the :ref:`stream data namelist section <shr-strdata-nml>`). - -.. code-block:: Fortran - -   type shr_strdata_type -      ! --- set by input namelist --- -      character(CL) :: dataMode ! flags physics options wrt input data -      character(CL) :: domainFile ! file containing domain info -      character(CL) :: streams (nStrMax) ! stream description file names -      character(CL) :: taxMode (nStrMax) ! time axis cycling mode -      real(R8)      :: dtlimit (nStrMax) !
dt max/min limit -      character(CL) :: vectors (nVecMax) ! define vectors to vector map -      character(CL) :: fillalgo(nStrMax) ! fill algorithm -      character(CL) :: fillmask(nStrMax) ! fill mask -      character(CL) :: fillread(nStrMax) ! fill mapping file to read -      character(CL) :: fillwrit(nStrMax) ! fill mapping file to write -      character(CL) :: mapalgo (nStrMax) ! scalar map algorithm -      character(CL) :: mapmask (nStrMax) ! scalar map mask -      character(CL) :: mapread (nStrMax) ! regrid mapping file to read -      character(CL) :: mapwrit (nStrMax) ! regrid mapping file to write -      character(CL) :: tintalgo(nStrMax) ! time interpolation algorithm -      integer(IN)   :: io_type ! io type, currently pnetcdf or netcdf - -      !--- data required by cosz t-interp method, --- -      real(R8) :: eccen ! orbital eccentricity -      real(R8) :: mvelpp ! moving vernal equinox long -      real(R8) :: lambm0 ! mean long of perihelion at vernal equinox (radians) -      real(R8) :: obliqr ! obliquity in radians -      integer(IN) :: modeldt ! data model dt in seconds (set to the coupling frequency) - -      ! --- data model grid, public --- -      integer(IN)     :: nxg ! data model grid lon size -      integer(IN)     :: nyg ! data model grid lat size -      integer(IN)     :: nzg ! data model grid vertical size -      integer(IN)     :: lsize ! data model grid local size -      type(mct_gsmap) :: gsmap ! data model grid global seg map -      type(mct_ggrid) :: grid ! data model grid ggrid -      type(mct_avect) :: avs(nStrMax) ! data model stream attribute vectors - -      ! --- stream specific arrays, stream grid --- -      type(shr_stream_streamType) :: stream(nStrMax) -      type(iosystem_desc_t), pointer :: pio_subsystem => null() -      type(io_desc_t)  :: pio_iodesc(nStrMax) -      integer(IN)      :: nstreams ! actual number of streams -      integer(IN)      :: strnxg(nStrMax) ! stream grid lon sizes -      integer(IN)      :: strnyg(nStrMax) ! stream grid lat sizes -      integer(IN)      :: strnzg(nStrMax) ! stream grid vertical sizes -      logical          :: dofill(nStrMax) ! true if stream grid is different from data model grid -      logical          :: domaps(nStrMax) ! true if stream grid is different from data model grid -      integer(IN)      :: lsizeR(nStrMax) ! stream local size of gsmapR on processor -      type(mct_gsmap)  :: gsmapR(nStrMax) ! stream global seg map -      type(mct_rearr)  :: rearrR(nStrMax) ! rearranger -      type(mct_ggrid)  :: gridR(nStrMax) ! local stream grid on processor -      type(mct_avect)  :: avRLB(nStrMax) ! Read attrvect -      type(mct_avect)  :: avRUB(nStrMax) ! Read attrvect -      type(mct_avect)  :: avFUB(nStrMax) ! Final attrvect -      type(mct_avect)  :: avFLB(nStrMax) ! Final attrvect -      type(mct_avect)  :: avCoszen(nStrMax) ! data associated with coszen time interp -      type(mct_sMatP)  :: sMatPf(nStrMax) ! sparse matrix map for fill on stream grid -      type(mct_sMatP)  :: sMatPs(nStrMax) ! sparse matrix map for mapping from stream to data model grid -      integer(IN)      :: ymdLB(nStrMax) ! lower bound time for stream -      integer(IN)      :: todLB(nStrMax) ! lower bound time for stream -      integer(IN)      :: ymdUB(nStrMax) ! upper bound time for stream -      integer(IN)      :: todUB(nStrMax) ! upper bound time for stream -      real(R8)         :: dtmin(nStrMax) -      real(R8)         :: dtmax(nStrMax) - -      ! --- internal --- -      integer(IN) :: ymd ,tod -      character(CL) :: calendar ! model calendar for ymd,tod -      integer(IN) :: nvectors !
number of vectors -      integer(IN) :: ustrm (nVecMax) -      integer(IN) :: vstrm (nVecMax) -      character(CL) :: allocstring -   end type shr_strdata_type - diff --git a/doc/source/data_models/index.rst b/doc/source/data_models/index.rst deleted file mode 100644 index fb7660a4bee..00000000000 --- a/doc/source/data_models/index.rst +++ /dev/null @@ -1,33 +0,0 @@ -.. _data-models: - -.. on documentation master file, created by - sphinx-quickstart on Tue Jan 31 19:46:36 2017. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -#################### - Data Models -#################### - -.. toctree:: - :maxdepth: 2 - :numbered: - - introduction.rst - input-namelists.rst - input-streams.rst - design-details.rst - data-model-science.rst - data-atm.rst - data-lnd.rst - data-seaice.rst - data-ocean.rst - data-river.rst - data-wave.rst - -Indices and tables -================== - -* :ref:`genindex` - * :ref:`modindex` - * :ref:`search` diff --git a/doc/source/data_models/input-namelists.rst b/doc/source/data_models/input-namelists.rst deleted file mode 100644 index 533f1c1b2f9..00000000000 --- a/doc/source/data_models/input-namelists.rst +++ /dev/null @@ -1,87 +0,0 @@ -.. _input-namelists: - -Input Namelists -=============== - -Each data model has two namelist groups in its input namelist file: a **stream-dependent** and a **stream-independent** namelist group. - -The stream-dependent namelist group (``shr_strdata_nml``) specifies the data model mode, stream description text files, and interpolation options. -The stream description files will be provided as separate input files and contain the files and fields that need to be read. -The stream-independent namelist group (one of ``[datm_nml, dice_nml, dlnd_nml, docn_nml, drof_nml, dwav_nml]``) contains namelist input such as the data model decomposition. - -For users wanting to introduce new data sources for any data model, it is important to know what modes are supported and the internal field names in the data model. -That information will be used in the ``shr_strdata_nml`` namelist and stream input files. - -Users will primarily set up different data model configurations through namelist settings. -**The stream input options and format are identical for all data models**. -The data model-specific namelist has significant overlap between data models, but each data model has a slightly different set of input namelist variables and each model reads that namelist from a unique filename. -The detailed namelist options for each data model will be described later, but each model will specify a filename or filenames for stream namelist input and each ``shr_strdata_nml`` namelist will specify a set of stream input files. - -The following example illustrates the basic set of namelist inputs:: - -   &dlnd_nml -      decomp = '1d' -   / -   &shr_strdata_nml -      dataMode   = 'CPLHIST' -      domainFile = 'grid.nc' -      streams    = 'streama', 'streamb', 'streamc' -      mapalgo    = 'interpa', 'interpb', 'interpc' -   / - -As mentioned above, the ``dataMode`` namelist variable that is associated with each data model specifies if there are any additional operations that need to be performed on that data model's input streams before returning to the driver. -At a minimum, all data models support ``datamode`` values of ``NULL`` and ``COPYALL``. - -- ``NULL`` - turns off the data model as a provider of data to the coupler. - -- ``COPYALL`` - copies all fields directly from the input data streams.
Any required fields not found on an input stream will be set to zero.
-
-Three stream description files (see :ref:`input streams`) are then expected to be available, ``streama``, ``streamb`` and ``streamc``.
-Those files specify the input data filenames, input data grids, and input fields that are expected, among other things.
-The stream files are **not** in Fortran namelist format.
-Their format and options will be described later.
-As an example, one of the stream description files might look like
-::
-
-   <stream>
-      <dataSource>
-         GENERIC
-      </dataSource>
-      <fieldInfo>
-         <variableNames>
-            dn10 dens
-            slp_ pslv
-            q_10 shum
-            t_10 tbot
-            u_10 u
-            v_10 v
-         </variableNames>
-         <filePath>
-            /glade/proj3/cseg/inputdata/atm/datm7/NYF
-         </filePath>
-         <offset>
-            0
-         </offset>
-         <fileNames>
-            nyf.ncep.T62.050923.nc
-         </fileNames>
-      </fieldInfo>
-      <domainInfo>
-         <variableNames>
-            time time
-            lon lon
-            lat lat
-            area area
-            mask mask
-         </variableNames>
-         <filePath>
-            /glade/proj3/cseg/inputdata/atm/datm7/NYF
-         </filePath>
-         <fileNames>
-            nyf.ncep.T62.050923.nc
-         </fileNames>
-      </domainInfo>
-   </stream>
-
-In general, these examples of input files are not complete, but they do show the general hierarchy and feel of the data model input.
diff --git a/doc/source/data_models/input-streams.rst b/doc/source/data_models/input-streams.rst
deleted file mode 100644
index 3c4c0080924..00000000000
--- a/doc/source/data_models/input-streams.rst
+++ /dev/null
@@ -1,469 +0,0 @@
-.. _input-streams:
-
-Input Streams
-=============
-
---------
-Overview
---------
-An *input data stream* is a time-series of input data files in which all the fields in the stream are located in the same data file and all share the same spatial and temporal coordinates (i.e., they are all on the same grid and share the same time axis). Normally a time axis has a uniform dt, but this is not a requirement.
-
-The data models can have multiple input streams.
-
-The data for one stream may be all in one file or may be spread over several files. For example, 50 years of monthly average data might be contained all in one data file or it might be spread over 50 files, each containing one year of data.
-
-The data models can *loop* over stream data -- i.e., repeatedly cycle over some subset of an input stream's time axis. When looping, the models can only loop over whole years. For example, an input stream might have SST data for years 1950 through 2000, but a model could loop over the data for years 1960 through 1980. A model *cannot* loop over partial years, for example, from 1950-Feb-10 through 1980-Mar-15.
-
-The input data must be in a netCDF file and the time axis in that file must be CF-1.0 compliant.
-
-There are two main categories of information that the data models need to know about a stream:
-
-- data that describes what a user wants -- what streams to use and how to use them -- things that can be changed by a user.
-- data that describes the stream data -- meta-data about the inherent properties of the data itself -- things that cannot be changed by a user.
-
-Generally, information about what streams a user wants to use and how to use them is input via the strdata ("stream data") Fortran namelist, while meta-data that describes the stream data itself is found in an xml-like text file called a "stream description file."
-
--------------------------------------------------
-Stream Data and shr_strdata_nml namelists
--------------------------------------------------
-The stream data (referred to as *strdata*) input is set via a Fortran namelist called ``shr_strdata_nml``.
-That namelist, the associated strdata datatype, and the methods are contained in the share source code file, ``shr_strdata_mod.F90``.
-In general, strdata input defines an array of input streams and operations to perform on those streams.
-Therefore, many namelist inputs are arrays of character strings.
-Different variables of the same index are associated. For instance, the mapalgo(1) spatial interpolation will be performed between streams(1) and the target domain.
-
-Each data model has an associated input namelist file, ``xxx_in``, where ``xxx=[datm,dlnd,dice,docn,drof,dwav]``.
-
-The input namelist file for each data model has a stream-dependent namelist group, ``shr_strdata_nml``, and a stream-independent namelist group.
-The ``shr_strdata_nml`` namelist variables **are the same for all data models**.
-
-=========== ==========================================================================================================================
-File        Namelist Groups
-=========== ==========================================================================================================================
-datm_in     datm_nml, shr_strdata_nml
-dice_in     dice_nml, shr_strdata_nml
-dlnd_in     dlnd_nml, shr_strdata_nml
-docn_in     docn_nml, shr_strdata_nml
-drof_in     drof_nml, shr_strdata_nml
-dwav_in     dwav_nml, shr_strdata_nml
-=========== ==========================================================================================================================
-
-.. _shr-strdata-nml:
-
-The following table summarizes the ``shr_strdata_nml`` entries.
-
-=========== ==========================================================================================================================
-Namelist    Description
-=========== ==========================================================================================================================
-dataMode    component-specific mode.
-
-            Each CIME data model has its own datamode values as described below:
-
-            :ref:`datm dataMode`
-
-            :ref:`dice dataMode`
-
-            :ref:`dlnd dataMode`
-
-            :ref:`docn dataMode`
-
-            :ref:`drof dataMode`
-
-            :ref:`dwav dataMode`
-
-domainFile  component domain (all streams will be mapped to this domain).
-
-            Spatial gridfile associated with the strdata. Grid information will
-            be read from this file and that grid will serve as the target grid
-            for all input data for this strdata input.
-            If the value is **null**, then the domain of the first stream
-            will be used as the component domain.
-
-            default="null"
-
-streams     character array (up to 30 elements) of input stream filenames and associated years of data.
-
-            Each array entry consists of a stream_input_filename year_align year_first year_last.
-            The stream_input_filename is a stream text input file; its format and options are described elsewhere.
-            The year_align, year_first, and year_last provide information about the time axis of the file and how to relate
-            the input time axis to the model time axis.
-
-            default="null"
-
-fillalgo    array (up to 30 elements) of fill algorithms associated with the array of streams.
-
-            Valid options are just copy (i.e., no fill), special value, nearest neighbor, nearest neighbor in "i" direction,
-            or nearest neighbor in "j" direction.
-
-            valid values: 'copy','spval','nn','nnoni','nnonj'
-
-            default value='nn'
-
-fillmask    array (up to 30 elements) of fill masks.
-
-            valid values: "nomask,srcmask,dstmask,bothmask"
-
-            default="nomask"
-
-fillread    array (up to 30 elements) of fill mapping files to read. Specifies the weights file to read in instead of
-            computing the weights on the fly for the fill operation. If this is set, fillalgo and fillmask are ignored.
-
-            default='NOT_SET'
-
-fillwrite   array (up to 30 elements) of fill mapping files to write (optional).
-
-            default='NOT_SET'
-
-mapalgo     array (up to 30 elements) of spatial interpolation algorithms.
-
-            default="bilinear"
-
-mapmask     array (up to 30 elements) of spatial interpolation masks.
-
-            default='NOT_SET'
-
-mapread     array (up to 30 elements) of spatial interpolation mapping files to read (optional).
-
-            default='NOT_SET'
-
-mapwrite    array (up to 30 elements) of spatial interpolation mapping files to write (optional). Specifies the weights file
-            to generate after weights are computed on the fly for the mapping (interpolation) operation, thereby allowing
-            users to save and reuse a set of weights later.
-
-            default='NOT_SET'
-
-tintalgo    array (up to 30 elements) of time interpolation algorithm options associated with the array of streams.
-
-            valid values: lower,upper,nearest,linear,coszen
-
-            lower = Use lower time-value
-
-            upper = Use upper time-value
-
-            nearest = Use the nearest time-value
-
-            linear = Linearly interpolate between the two time-values
-
-            coszen = Scale according to the cosine of the solar zenith angle (for solar)
-
-            default="linear"
-
-taxMode     array (up to 30 elements) of time interpolation modes.
-
-            Time axis interpolation modes are associated with the array of streams for
-            handling data outside the specified stream time axis.
-            Valid options are to cycle the data based on the first, last, and align
-            settings associated with the stream dataset, to extend the first and last
-            valid value indefinitely, or to limit the interpolated data to fall only between
-            the least and greatest valid value of the time array.
-
-            valid values: cycle,extend,limit
-
-            extend = extrapolate before and after the period by using the first or last value.
-
-            cycle = cycle between the range of data
-
-            limit = restrict to the period for which the data is valid
-
-            default="cycle"
-
-dtlimit     array (up to 30 elements) setting the delta time axis limit.
-
-            Specifies delta time ratio limits placed on the time interpolation
-            associated with the array of streams. Causes the model to stop if
-            the ratio of the running maximum delta time divided by the minimum delta time
-            is greater than the dtlimit for that stream. For instance, with daily data,
-            the delta time should be exactly one day throughout the dataset and
-            the computed maximum divided by minimum delta time should always be 1.0.
-            For monthly data, the delta time should be between 28 and 31 days and the
-            maximum ratio should be about 1.1. The running value of the delta
-            time is computed as data is read, and any wraparound or cycling is also
-            included. This input helps trap missing data or errors in cycling.
-            To turn off trapping, set the value to 1.0e30 or something similar.
-
-            default=1.5
-
-vectors     paired vector field names
-=========== ==========================================================================================================================
-
-
-``shr_strdata_nml`` contains a namelist variable, ``streams``, that specifies a list of input stream description files and, for each file, what years of data to use and how to align the input stream time axis with the model run time axis.
-
-The general input format for the ``streams`` namelist variable is:
-::
-
-   &shr_strdata_nml
-     streams = 'stream1.txt year_align year_first year_last ',
-               'stream2.txt year_align year_first year_last ',
-               ...
-               'streamN.txt year_align year_first year_last '
-   /
-
-where:
-
-.. code-block:: none
-
-   streamN.txt
-      the stream description file, a plain text file containing details about the input stream (see below)
-   year_first
-      the first year of data that will be used
-   year_last
-      the last year of data that will be used
-   year_align
-      a model year that will be aligned with data for year_first
-
----------------------
-Details on year_align
----------------------
-
-The ``year_align`` value gives the simulation year corresponding to
-``year_first``. A common usage is to set this to the year of
-``RUN_STARTDATE``. With this setting, the forcing in the first year of
-the run will be the forcing of year ``year_first``. Another use case is
-to align the calendar of transient forcing with the model calendar. For
-example, setting ``year_align`` = ``year_first`` will lead to the
-forcing calendar being the same as the model calendar. The forcing for a
-given model year would be the forcing of the same year. This would be
-appropriate in transient runs where the model calendar is set up to span
-the same year range as the forcing data.
-
-For some data model modes, ``year_align`` can be set via an xml variable
-whose name ends with ``YR_ALIGN`` (there are a few such xml variables,
-each pertaining to a particular data model mode).
-
-An example of this is land-only historical simulations in which we run
-the model for 1850 to 2010 using atmospheric forcing data that is only
-available for 1901 to 2010. In this case, we want to run the model for
-years 1850 (so ``RUN_STARTDATE`` has year 1850) through 1900 by looping
-over the forcing data for 1901-1920, and then run the model for years
-1901-2010 using the forcing data from 1901-2010. To do this, we
-initially set::
-
-   ./xmlchange DATM_CLMNCEP_YR_ALIGN=1901
-   ./xmlchange DATM_CLMNCEP_YR_START=1901
-   ./xmlchange DATM_CLMNCEP_YR_END=1920
-
-When the model has completed year 1900, then we set::
-
-   ./xmlchange DATM_CLMNCEP_YR_ALIGN=1901
-   ./xmlchange DATM_CLMNCEP_YR_START=1901
-   ./xmlchange DATM_CLMNCEP_YR_END=2010
-
-With this setup, the correlation between model run year and forcing year
-looks like this::
-
-   RUN   Year : 1850 ... 1860 1861 ... 1870 ... 1880 1881 ... 1890 ... 1900 1901 ... 2010
-   FORCE Year : 1910 ... 1920 1901 ... 1910 ... 1920 1901 ... 1910 ... 1920 1901 ... 2010
-
-Setting ``DATM_CLMNCEP_YR_ALIGN`` to 1901 tells the code that you want
-to align model year 1901 with forcing data year 1901, and then it
-calculates what the forcing year should be if the model starts in
-year 1850.
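-
-The cycling arithmetic above can be expressed compactly. The following
-is a small illustrative sketch (in Python; this is not CIME code, and the
-function name and arguments are hypothetical) of how a forcing year can
-be derived from a model year given the align/start/end settings:
-
-.. code-block:: python
-
-   def forcing_year(model_year, yr_align, yr_start, yr_end):
-       """Map a model year to a forcing year, cycling over
-       [yr_start, yr_end] such that model year yr_align lines up
-       with forcing year yr_start."""
-       nyears = yr_end - yr_start + 1  # length of the forcing cycle
-       # Python's % always returns a non-negative result, so model
-       # years before yr_align wrap around correctly.
-       return yr_start + (model_year - yr_align) % nyears
-
-   # Reproduces the correlation table above:
-   # forcing_year(1850, 1901, 1901, 1920) -> 1910
-   # forcing_year(1861, 1901, 1901, 1920) -> 1901
-   # forcing_year(1900, 1901, 1901, 1920) -> 1920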
-
--------------------------------------------------
-Customizing shr_strdata_nml values
--------------------------------------------------
-
-The contents of ``shr_strdata_nml`` are automatically generated by that data model's **cime_config/buildnml** script.
-These contents are easily customizable for your target experiment.
-As an example, we refer to the following ``datm_in`` contents (which would appear in both ``$CASEROOT/CaseDocs`` and ``$RUNDIR``):
-::
-
-   &shr_strdata_nml
-     datamode = 'CLMNCEP'
-     domainfile = '/glade/proj3/cseg/inputdata/share/domains/domain.lnd.fv1.9x2.5_gx1v6.090206.nc'
-     dtlimit = 1.5,1.5,1.5,1.5
-     fillalgo = 'nn','nn','nn','nn'
-     fillmask = 'nomask','nomask','nomask','nomask'
-     mapalgo = 'bilinear','bilinear','bilinear','bilinear'
-     mapmask = 'nomask','nomask','nomask','nomask'
-     streams = "datm.streams.txt.CLM_QIAN.Solar 1895 1948 1972 ",
-               "datm.streams.txt.CLM_QIAN.Precip 1895 1948 1972 ",
-               "datm.streams.txt.CLM_QIAN.TPQW 1895 1948 1972 ",
-               "datm.streams.txt.presaero.trans_1850-2000 1849 1849 2006"
-     taxmode = 'cycle','cycle','cycle','cycle'
-     tintalgo = 'coszen','nearest','linear','linear'
-     vectors = 'null'
-   /
-
-As is discussed in the :ref:`CIME User's Guide`, to change the contents of ``datm_in``, you must edit ``$CASEROOT/user_nl_datm``.
-In the above example, you can do this to change any of the above settings **except for the names**
-
-.. code-block:: none
-
-   datm.streams.txt.CLM_QIAN.Solar
-   datm.streams.txt.CLM_QIAN.Precip
-   datm.streams.txt.CLM_QIAN.TPQW
-   datm.streams.txt.presaero.trans_1850-2000
-
-Other than these names, any namelist variable from ``shr_strdata_nml`` can be modified by adding the appropriate keyword/value pairs to ``user_nl_datm``.
-
-As an example, the following could be the contents of ``$CASEROOT/user_nl_datm``:
-::
-
-   !------------------------------------------------------------------------
-   ! Users should ONLY USE user_nl_datm to change namelist variables
-   ! Users should add all user-specific namelist changes below in the form of
-   !   namelist_var = new_namelist_value
-   ! Note that any namelist variable from shr_strdata_nml and datm_nml can
-   ! be modified below using the above syntax
-   ! Use preview_namelists to view (not modify) the output namelist in the
-   ! directory $CASEROOT/CaseDocs
-   ! To modify the contents of a stream txt file, first use preview_namelists
-   ! to obtain the contents of the stream txt files in CaseDocs, and then
-   ! place a copy of the modified stream txt file in $CASEROOT with the string
-   ! user_ prepended.
-   !------------------------------------------------------------------------
-   streams = "datm.streams.txt.CLM_QIAN.Solar 1895 1948 1900 ",
-             "datm.streams.txt.CLM_QIAN.Precip 1895 1948 1900 ",
-             "datm.streams.txt.CLM_QIAN.TPQW 1895 1948 1900 ",
-             "datm.streams.txt.presaero.trans_1850-2000 1849 1849 2006"
-
-and the contents of ``shr_strdata_nml`` (in both ``$CASEROOT/CaseDocs`` and ``$RUNDIR``) would be
-::
-
-   datamode = 'CLMNCEP'
-   domainfile = '/glade/proj3/cseg/inputdata/share/domains/domain.lnd.fv1.9x2.5_gx1v6.090206.nc'
-   dtlimit = 1.5,1.5,1.5,1.5
-   fillalgo = 'nn','nn','nn','nn'
-   fillmask = 'nomask','nomask','nomask','nomask'
-   mapalgo = 'bilinear','bilinear','bilinear','bilinear'
-   mapmask = 'nomask','nomask','nomask','nomask'
-   streams = "datm.streams.txt.CLM_QIAN.Solar 1895 1948 1900 ",
-             "datm.streams.txt.CLM_QIAN.Precip 1895 1948 1900 ",
-             "datm.streams.txt.CLM_QIAN.TPQW 1895 1948 1900 ",
-             "datm.streams.txt.presaero.trans_1850-2000 1849 1849 2006"
-   taxmode = 'cycle','cycle','cycle','cycle'
-   tintalgo = 'coszen','nearest','linear','linear'
-   vectors = 'null'
-
-As is discussed in the :ref:`CIME User's Guide`, you should use **preview_namelists** to view (not modify) the output namelist in ``CaseDocs``.
-
-.. _stream_description_file:
-
------------------------
-Stream Description File
------------------------
-The *stream description file* is not a Fortran namelist, but a locally built xml-like parsing implementation.
-Sometimes it is called a "stream dot-text file" because it has a ".txt." in the filename.
-Stream description files contain data that specifies the names of the fields in the stream, the names of the input data files, and the file system directory where the data files are located.
-
-The data elements found in the stream description file are:
-
-``dataSource``
-   A comment about the source of the data -- always set to GENERIC and present only for backwards compatibility.
-
-``domainInfo``
-   Information about the domain data for this stream, specified by the following 3 sub-elements.
-
-   ``variableNames``
-      A list of the domain variable names. This is a paired list with the name of the variable in the netCDF file on the left and the name of the corresponding model variable on the right. The data models require five variables in this list. The names of the model's variables (names on the right) must be: "time," "lon," "lat," "area," and "mask."
-
-   ``filePath``
-      The file system directory where the domain data file is located.
-
-   ``fileNames``
-      The name of the domain data file. Often the domain data is located in the same file as the field data (above), in which case the name of the domain file could simply be the name of the first field data file. Sometimes the field data files don't contain the domain data required by the data models; in this case, a new file can be created that contains the required data.
-
-``fieldInfo``
-   Information about the stream data for this stream, specified by the following 3 required sub-elements and an optional offset element.
-
-   ``variableNames``
-      A list of the field variable names. This is a paired list with the name of the variable in the netCDF file on the left and the name of the corresponding model variable on the right. This is the list of fields to read in from the data file; there may be other fields in the file which are not read in (i.e., they won't be used).
-
-   ``filePath``
-      The file system directory where the data files are located.
-
-   ``fileNames``
-      The list of data files to use. If there is more than one file, the files must be in chronological order; that is, the dates in the time axis of the first file are before the dates in the time axis of the second file.
-
-   ``offset``
-      The offset allows a user to shift the time axis of a data stream by a fixed and constant number of seconds. For instance, if a data set contains daily average data with timestamps for the data at the end of the day, it might be appropriate to shift the time axis by 12 hours so the data is taken to be at the middle of the day instead of the end of the day. This feature supports only simple shifts in seconds as a way of correcting input data time axes without having to modify the input data time axis manually. This feature does not support more complex shifts such as end of month to mid-month. But in conjunction with the time interpolation methods in the strdata input, hopefully most user needs can be accommodated with the two settings. Note that a positive offset advances the input data time axis forward by that number of seconds.
-
-The data models advance in time discretely.
-At a given time, they read/derive fields from input files.
-Those input files have data on a discrete time axis as well.
-Each data point in the input files is associated with a discrete time (as opposed to a time interval).
-Depending on whether you pick lower, upper, nearest, linear, or coszen, the data in the input file will be "interpolated" to the time in the model.
-
-The offset shifts the time axis of the input data by the given number of seconds.
-So if the input data is at 0, 3600, 7200, 10800 seconds (hourly) and you set an offset of 1800, then the input data will be set at times 1800, 5400, 9000, and 12600.
-So a model at time 3600 using linear interpolation would get data at "n=2" with an offset of 0, but data at "n=(2+3)/2" with an offset of 1800.
-Here n=2 is the 2nd data point in the time list 0, 3600, 7200, 10800, and n=(2+3)/2 is the average of the 2nd and 3rd data points in that list.
-The offset can be positive or negative.
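-
-To make the offset and linear time interpolation arithmetic concrete,
-here is a small illustrative sketch (in Python; this is not the CIME
-implementation, and the function name is hypothetical):
-
-.. code-block:: python
-
-   def interp_weights(model_time, data_times, offset=0):
-       """Return ((lower_index, upper_index), (w_lower, w_upper)) for
-       linear interpolation of a stream to model_time, after shifting
-       the stream's time axis by offset seconds."""
-       times = [t + offset for t in data_times]  # apply the offset
-       # find the bracketing lower/upper bound times
-       for i in range(len(times) - 1):
-           if times[i] <= model_time <= times[i + 1]:
-               frac = (model_time - times[i]) / (times[i + 1] - times[i])
-               return (i, i + 1), (1.0 - frac, frac)
-       raise ValueError("model_time outside the stream time axis")
-
-   # model at 3600 s, hourly data at 0, 3600, 7200, 10800 s:
-   print(interp_weights(3600, [0, 3600, 7200, 10800]))
-   # ((0, 1), (0.0, 1.0)) -- all weight on the point at 3600 s
-   print(interp_weights(3600, [0, 3600, 7200, 10800], offset=1800))
-   # ((0, 1), (0.5, 0.5)) -- the average of the points at 1800 s and 5400 s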
-
-Actual example:
-::
-
-   <stream>
-      <domainInfo>
-         <variableNames>
-            time time
-            lon lon
-            lat lat
-            area area
-            mask mask
-         </variableNames>
-         <filePath>
-            /glade/proj3/cseg/inputdata/atm/datm7/NYF
-         </filePath>
-         <fileNames>
-            nyf.ncep.T62.050923.nc
-         </fileNames>
-      </domainInfo>
-      <fieldInfo>
-         <variableNames>
-            dn10 dens
-            slp_ pslv
-            q10 shnum
-            t_10 tbot
-            u_10 u
-            v_10 v
-         </variableNames>
-         <filePath>
-            /glade/proj3/cseg/inputdata/atm/datm7/NYF
-         </filePath>
-         <offset>
-            0
-         </offset>
-         <fileNames>
-            nyf.ncep.T62.050923.nc
-         </fileNames>
-      </fieldInfo>
-   </stream>
-
--------------------------------------------------
-Customizing stream description files
--------------------------------------------------
-
-Each data model's **cime_config/buildnml** utility automatically generates the required stream description files for the case.
-The directory contents of each data model will look like the following (using DATM as an example)
-::
-
-   $CIMEROOT/components/data_comps/datm/cime_config/buildnml
-   $CIMEROOT/components/data_comps/datm/cime_config/namelist_definition_datm.xml
-
-The ``namelist_definition_datm.xml`` file defines and sets default values for all the namelist variables and associated groups and also provides out-of-the-box settings for the target data model and target stream.
-**buildnml** utilizes these two files to construct the stream files for the given compset settings. You can modify the generated stream files for your particular needs by doing the following:
-
-1. Copy the relevant description file from ``$CASEROOT/CaseDocs`` to ``$CASEROOT`` and prepend a ``user_`` string to the filename. Change the permission of the file to writable. For example, assuming you are in **$CASEROOT**
-   ::
-
-      cp $CASEROOT/CaseDocs/datm.streams.txt.CLM_QIAN.Solar $CASEROOT/user_datm.streams.txt.CLM_QIAN.Solar
-      chmod u+w $CASEROOT/user_datm.streams.txt.CLM_QIAN.Solar
-
-2. Edit ``$CASEROOT/user_datm.streams.txt.CLM_QIAN.Solar`` with your desired changes.
-
-   - *Be sure not to put any tab characters in the file: use spaces instead*.
-
-   - In contrast to other user_nl_xxx files, be sure to set all relevant data model settings in the xml files, issue the **preview_namelists** command, and THEN edit the ``user_datm.streams.txt.CLM_QIAN.Solar`` file.
-
-   - **Once you have created a user_xxx.streams.txt.* file, further modifications to the relevant data model settings in the xml files will be ignored.**
-
-   - If you later realize that you need to change some settings in an xml file, you should remove the user_xxx.streams.txt.* file(s), make the modifications in the xml file, rerun **preview_namelists**, and then reintroduce your modifications into a new user_xxx.streams.txt.* stream file(s).
-
-3. Call **preview_namelists** and verify that your changes do indeed appear in the resultant stream description file in ``CaseDocs/datm.streams.txt.CLM_QIAN.Solar``. These changes will also appear in ``$RUNDIR/datm.streams.txt.CLM_QIAN.Solar``.
diff --git a/doc/source/data_models/introduction.rst b/doc/source/data_models/introduction.rst
deleted file mode 100644
index c2b668ff380..00000000000
--- a/doc/source/data_models/introduction.rst
+++ /dev/null
@@ -1,98 +0,0 @@
-.. _data-model-introduction:
-
-Introduction
-============
-
---------
-Overview
---------
-The CIME data models perform the basic function of reading external data files, modifying those data, and then sending the data to the driver via the CIME coupling interfaces.
-The fields sent to the driver are the same as those that would be sent by an active component.
-This takes advantage of the fact that the driver and other models have no fundamental knowledge of whether another component is fully active or just a data model.
-So, for example, the data atmosphere model (datm) sends the same fields as the prognostic Community Atmosphere Model (CAM).
-However, rather than determining these fields prognostically, most data models simply read prescribed data.
-
-The data models typically read gridded data from observations or reanalysis products.
-Out of the box, they often provide a few possible data sources and/or time periods that you can choose from when setting up a case.
-However, data models can also be configured to read output from a previous coupled run.
-For example, you can perform a fully-coupled run in which you ask for particular extra output streams; you can then use these saved "coupler history" files as inputs to datm to run a later land-only spinup.
-
-In some cases, data models have prognostic functionality, that is, they also receive and use data sent by the driver.
-However, in most cases, the data models are not running prognostically and have no need to receive any data from the driver.
-
-The CIME data models have parallel capability and share significant amounts of source code.
-Methods for reading and interpolating data have been established and can easily be reused:
-the data model calls strdata ("stream data") methods which then call stream methods.
-The stream methods are responsible for managing lists of input data files and their time axes.
-The information is then passed up to the strdata methods where the data is read and interpolated in space and time.
-The interpolated data is passed up to the data model where final fields are derived, packed, and returned to the driver.
-
-------
-Design
-------
-Data models function by reading in different streams of input data and interpolating those data both spatially and temporally to the appropriate final model grid and model time.
-The strdata implementation does the following:
-
-1. determines nearest lower and upper bound data from the input dataset
-2. if that is new data then read lower and upper bound data
-3. fill lower and upper bound data
-4. spatially map lower and upper bound data to model grid
-5. time interpolate lower and upper bound data to model time
-6. return fields to data model
-
-The two timestamps of input data that bracket the present model time are read first.
-These are called the lower and upper bounds of data and will change as the model advances.
-Those two sets of input data are first filled based on the user setting of the namelist variables ``str_fillalgo`` and ``str_fillmask``.
-That operation occurs on the input data grid.
-The lower and upper bound data are then spatially mapped to the model grid based upon the user setting of the namelist variables ``str_mapalgo`` and ``str_mapmask``.
-Spatial interpolation only occurs if the input data grid and model grid are not identical, and this is determined in the strdata module automatically.
-Time interpolation is the final step and is done using a time interpolation method specified by the user in namelist (via the ``shr_strdata_nml`` namelist variable ``tintalgo``).
-A final set of fields is then available to the data model on the model grid and for the current model time.
-(See the :ref:`stream data namelist section <shr-strdata-nml>` for details on these and other namelist variables.)
-
-**Each data model**
-
-- communicates with the driver with fields on only the data model grid
-
-- can be associated with multiple :ref:`streams`
-
-- has an xml variable in ``env_run.xml`` that specifies its mode.
-  These are: ``DATM_MODE``, ``DICE_MODE``, ``DLND_MODE``, ``DOCN_MODE``, ``DROF_MODE``, ``DWAV_MODE``.
-  Each data model mode specifies the streams that are associated with that data model.
-
-- has two :ref:`namelist` groups in its input namelist file: a **stream-dependent** and a **stream-independent** namelist group.
-
-- is associated with only one stream-independent namelist variable, ``datamode`` (specified in the ``shr_strdata_nml`` namelist group), that determines if additional operations need to be performed on the input streams before returning to the driver.
-
-**Each** ``DXXX_MODE`` **xml variable specifies 2 things:**
-
-- the list of streams that are associated with the data model.
-
-- a ``datamode`` namelist variable that is associated with each data model and that determines if additional operations need to be performed on the input streams before returning to the driver.
-
-  At a minimum, all data models support ``datamode`` values of ``NULL`` and ``COPYALL``.
-
-  - ``NULL`` - turns off the data model as a provider of data to the coupler.
-
-  - ``COPYALL`` - copies all fields directly from the input data streams. Any required fields not found on an input stream will be set to zero.
-
-**Each data model stream**
-
-- can be associated with multiple stream input files (specified in the ``shr_strdata_nml`` namelist group).
-
-**Each stream input file**
-
-- can contain data on a unique grid and unique temporal time stamps.
-
-- is interpolated to a single model grid and the present model time.
-
-More details of the data model design are covered in :ref:`design details`.
-
--------------
-Next Sections
--------------
-In the next sections, more details will be presented, including a full description of the science modes and namelist settings for the data atmosphere, data land, data runoff, data ocean, and data ice models; namelist settings for the strdata namelist input; a description of the format and options for the stream description input files; and a list of internal field names for each of the data components.
-The internal data model field names are important because they are used to set up the stream description files and to map the input data fields to the internal data model field names.
diff --git a/doc/source/driver_cpl/bit-for-bit-flag.rst b/doc/source/driver_cpl/bit-for-bit-flag.rst
deleted file mode 100644
index 0505e53441b..00000000000
--- a/doc/source/driver_cpl/bit-for-bit-flag.rst
+++ /dev/null
@@ -1,12 +0,0 @@
-Bit-for-bit flag
-============================
-
-The driver namelist variable ``bfbflag`` provides the option of preserving bit-for-bit results on different coupler processor counts.
-This flag has no impact on other components and their ability to generate bit-for-bit results on different pe counts.
-When this flag is set, all mappings become "X" types, where the source data is rearranged to the destination processor and then local mapping is carried out.
-The order of operations of this mapping is independent of the pe count or decomposition of the grids.
-
-The other feature that is changed by the ``bfbflag`` is the global sum diagnostics.
-
-- When ``bfbflag`` is set to *.false.*, a partial sum is done on each processor and those partial sums are added together to form a global sum. This is generally not order-of-operations independent for different pe counts or decompositions.
-- When ``bfbflag`` is set to *.true.*, the global sums are computed by gathering the global field on the root processor and doing an ordered sum there.
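-
-The reason partial sums are not reproducible across pe counts is the
-non-associativity of floating-point addition. A tiny illustrative
-demonstration (in Python; not CIME code) of how summation grouping
-changes the result:
-
-.. code-block:: python
-
-   # Floating-point addition is not associative, so the grouping of
-   # partial sums (which follows the decomposition) affects the result.
-   field = [0.1, 0.2, 0.3]
-
-   # one task: ordered left-to-right sum
-   one_task = (field[0] + field[1]) + field[2]
-
-   # two tasks: task 0 holds [0.1, 0.2], task 1 holds [0.3], summed as
-   # two partial sums -- here identical to one_task -- versus task 0
-   # holding [0.1] and task 1 holding [0.2, 0.3]:
-   two_tasks = field[0] + (field[1] + field[2])
-
-   print(one_task == two_tasks)  # False: 0.6000000000000001 vs 0.6
-
-Gathering the field and summing in a fixed order, as the *.true.*
-setting does, removes this dependence at the cost of a global gather.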
diff --git a/doc/source/driver_cpl/budgets.rst b/doc/source/driver_cpl/budgets.rst
deleted file mode 100644
index 4c595494e74..00000000000
--- a/doc/source/driver_cpl/budgets.rst
+++ /dev/null
@@ -1,23 +0,0 @@
-Mass and Heat Budgets
-=====================
-
-Mass and heat are conserved in the coupler to several digits over centuries.
-Several steps have been taken to ensure this level of conservation, and these are described in other sections of the document.
-In addition, efforts have been made to make sure each component is internally conservative with respect to mass and heat.
-
-The budgets can be turned on and off using the namelist variable ``do_budgets``.
-The value of that namelist is set by the ``$CASEROOT/env_run.xml`` variable, ``BUDGETS``.
-
-The driver coupler can diagnose heat and mass budgets at several levels and over different periods.
-The periods are *instantaneous*, *daily average*, *monthly average*, *annual average*, or since the start of the run.
-The budget output for each of these periods is controlled by the driver namelist variables ``budget_inst``, ``budget_daily``, ``budget_month``, ``budget_ann``, ``budget_ltann``, and ``budget_ltend``.
-``budget_ltann`` and ``budget_ltend`` are used to write the long-term budget at either the end of every year or the end of every run.
-Other budgets are written at their period interval.
-
-The namelist input is an integer specifying what to write.
-The budget flags are controlled by the ``$CASEROOT/env_run.xml`` variables ``BUDGET_INST``, ``BUDGET_DAILY``, ``BUDGET_MONTHLY``, ``BUDGET_ANNUAL``, ``BUDGET_LONGTERM_EOY``, and ``BUDGET_LONGTERM_STOP``, respectively.
-Valid values are 0, 1, 2, or 3.
-If 0 is set, no budget data is written.
-The value 1 generates a net heat and water budget for each component, 2 adds a detailed heat and water budget for each component, and 3 adds a detailed heat and water budget of the different components on the atmosphere grid.
-Normally values of 0 or 1 are specified.
-Values of 2 or 3 are generally used only when debugging problems involving conservation.
diff --git a/doc/source/driver_cpl/cplug-02.1-figx1.jpg b/doc/source/driver_cpl/cplug-02.1-figx1.jpg
deleted file mode 100644
index 17fd91a1993..00000000000
Binary files a/doc/source/driver_cpl/cplug-02.1-figx1.jpg and /dev/null differ
diff --git a/doc/source/driver_cpl/design.rst b/doc/source/driver_cpl/design.rst
deleted file mode 100644
index a2cf516529f..00000000000
--- a/doc/source/driver_cpl/design.rst
+++ /dev/null
@@ -1,116 +0,0 @@
-Design
-======
-
---------
-Overview
---------
-cpl7 is built as a single executable with a single high-level driver.
-The driver runs on all processors and handles coupler sequencing, model concurrency, and communication of data between components.
-The driver calls all model components via common and standard interfaces.
-The driver also directly calls coupler methods for mapping (interpolation), rearranging, merging, an atmosphere/ocean flux calculation, and diagnostics.
-The model components and the coupler methods can run on subsets of all the processors.
-In other words, cpl7 consists of a driver that controls the top-level sequencing, the processor decomposition, and communication between components and the coupler, while coupler operations such as mapping and merging run under the driver on a subset of processors as if there were a unique coupler model component.
-
-In general, an active component both needs data from and provides data to the coupler, while data models generally read data from I/O and then just provide data to the coupler.
-Currently, the atmosphere, land, river, and sea ice models are always tightly coupled to better resolve the diurnal cycle.
-This coupling is typically half-hourly, although at higher resolutions it can be more frequent.
-The ocean model coupling is typically once or a few times per day.
-The diurnal cycle of ocean surface albedo is computed in the coupler for use by the atmosphere model.
-The looser ocean coupling frequency means the ocean forcing and response is lagged in the system.
-There is an option in cpl7 to run the ocean tightly coupled without any lags, but this is more often used only when running with data ocean components.
-
---------------------------
-Sequencing and Concurrency
---------------------------
-The component processor layouts and MPI communicators are derived from namelist input.
-At the present time, there are ten basic processor groups in cpl7.
-These are associated with the atmosphere, land, river, ocean, sea ice, land ice, wave, external-system-process, coupler, and global groups, although others could be easily added later.
-Each of the processor groups can be distinct, but that is not a requirement of the system.
-A user can overlap processor groups relatively arbitrarily.
-If all processor sets overlap each other in at least one processor, then the model runs sequentially.
-If all processor sets are distinct, the model runs as concurrently as science allows.
-The processor sets for each component group are described via 3 basic scalar parameters at the present time: the number of mpi tasks, the number of openmp threads per mpi task, and the global mpi task rank of the root mpi task for that group.
-For example, a layout where the number of mpi tasks is 8, the number of threads per mpi task is 4, and the root mpi task is 16 would create a processor group that consisted of 32 hardware processors, starting on global mpi task number 16, and it would contain 8 mpi tasks.
-The global group would have at least 24 tasks and at least 48 hardware processors.
-The driver derives all MPI communicators at initialization and passes them to the component models for use.
-More information on the coupler concurrency can be found in the Craig et al IJHPCA 2012 reference mentioned in the top section of this document.
-
-As mentioned above, there are two issues related to whether the component models run concurrently.
-The first is whether unique chunks of work are running on distinct processor sets.
-The second is the sequencing of this work in the driver.
-As much as possible, the driver sequencing has been implemented to maximize the potential amount of concurrency of work between different components.
-Ideally, in a single coupling step, the forcing for all models would be computed first, the models could then all run concurrently, and then the driver would advance.
-However, scientific requirements, such as the coordination of surface albedo and atmosphere radiation computations, as well as general computational stability issues, prevent this ideal implementation in cpl7.
-Figure 1 (below) shows the maximum amount of concurrency supported for a fully active system.
-In practice, the scientific constraints mean the active atmosphere model cannot run concurrently with the land, runoff, and sea-ice models.
-Again, Figure 1 does not necessarily represent the optimum processor layout for performance for any configuration, but it provides a practical limit to the amount of concurrency in the system due to scientific constraints.
-Results are bit-for-bit identical regardless of the component sequencing because the scientific lags are fixed by the implementation, not the processor layout.
-
-.. image:: cplug-02.1-figx1.jpg
-
-Figure 1: Maximum potential processor concurrency designed to support scientific requirements and stability.
-
---------------------
-Component Interfaces
---------------------
-The standard cpl7 component model interfaces are based upon the ESMF design.
-Each component provides an init, run, and finalize method with consistent arguments.
-The component interface arguments currently consist of Fortran and MCT datatypes.
-The physical coupling fields are passed through the interfaces in the init, run, and finalize phases.
-As part of initialization, an MPI communicator is passed from the driver to the component, and grid and decomposition information is passed from the component back to the driver.
-The driver/coupler acquires all information about resolution, configurations, and processor layout at run-time, from either namelist or communication with components.
-
-Initialization of the system is relatively straightforward.
-First, the MPI communicators are computed in the driver.
-Then the component model initialization methods are called on the appropriate processor sets, an mpi communicator is sent to each component, and the grid and decomposition information are passed back to the driver.
-Once the driver has all the grid and decomposition information from the components, various rearrangers and mappers are initialized that will move data between processors, decompositions, and grids as needed at the driver level.
-No distinction is made in the coupler implementation for sequential versus concurrent execution.
-In general, even for cases where two components have identical grids and processor layouts, their decompositions are often different for performance reasons.
-In cases where the grid, decomposition, and processor layout are identical between components, the mapping or rearranging operation will degenerate to a local data copy.
-
-The interface to the components' run method consists of two distinct bundles of fields.
-One is the data sent to force the model.
-The second is data received from the model for coupling to other components.
-The run interface also contains a clock that specifies the current time and the run length for the model, and a data type that encapsulates grid, decomposition, and scalar coupling information.
-These interfaces generally follow the ESMF design principles.
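-
-The init/run/finalize contract described above can be sketched
-abstractly. The following is an illustrative toy sketch (in Python; the
-actual cpl7 interfaces are Fortran/MCT, and all names here are
-hypothetical) of the driver/component calling pattern:
-
-.. code-block:: python
-
-   class Component:
-       """Toy component obeying the init/run/finalize contract."""
-
-       def init(self, comm):
-           # receive a communicator from the driver; return grid and
-           # decomposition information back to the driver
-           self.comm = comm
-           return {"grid": "lat-lon", "decomp": "1d"}
-
-       def run(self, clock, forcing):
-           # forcing: the bundle of fields sent to force the model;
-           # returns: the bundle of fields for coupling to other components
-           return {"coupling_fields": {}, "time": clock}
-
-       def finalize(self):
-           pass
-
-   # driver sequence: init all components, advance the coupled run loop,
-   # then finalize
-   components = {"atm": Component(), "ocn": Component()}
-   meta = {name: c.init(comm=None) for name, c in components.items()}
-   print(meta)  # the driver now knows each component's grid/decomposition
-   for step in range(2):  # coupling steps
-       for name, c in components.items():
-           c.run(clock=step, forcing={})
-   for c in components.values():
-       c.finalize()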
-
--------------------------------
-MCT, The Model Coupling Toolkit
--------------------------------
-In cpl7, the MCT attribute_vector, global_segmap, and general_grid datatypes have been adopted at the highest levels of the driver, and they are used directly in the component init, run, and finalize interfaces.
-In addition, MCT is used for all data rearranging and mapping (interpolation).
-The clock used by cpl7 at the driver level is based on the ESMF specification.
-Mapping weights are still generated off-line using the SCRIP or ESMF packages as a preprocessing step.
-They are read using a subroutine that reads and distributes the mapping weights in reasonably small chunks to minimize the memory footprint.
-Development of the cpl7 coupler not only relies on MCT, but MCT developers contributed significantly to the design and implementation of the cpl7 driver.
-Development of the cpl7 coupler resulted from a particularly strong and close collaboration between NCAR and the Department of Energy Argonne National Lab.
-
-------------------------------------
-Memory, Parallel IO, and Performance
-------------------------------------
-Scaling to tens-of-thousands of processors requires reasonable performance scaling of the models, and all components have worked at improving scaling via changes to algorithms, infrastructure, or decompositions.
-In particular, decompositions using shared memory blocking, space filling curves, and all three spatial dimensions have been implemented to varying degrees in all components to increase parallelization and improve scalability.
-The Craig et al IJHPCA 2012 reference mentioned in the first section of this document provides a summary of scaling performance of cpl7 for several coupler kernels.
-
-In practice, performance, load balance, and scalability are limited as a result of the size, complexity, and multiple-model character of the system.
-Within the system, each component has its own scaling characteristics.
-First, each may have processor count "sweet-spots" where the individual component model performs particularly well.
-This might occur within a component because of internal load balance, decomposition capabilities, communication patterns, or cache usage.
-Second, component performance can vary over the length of the model run.
-This occurs because of seasonal variability of the cost of physics in models, changes in performance during an adjustment (spin-up) phase, and temporal variability in calling certain model operations like radiation, dynamics, or I/O.
-Third, the hardware or batch queueing system might have some constraints on the total number of processors that are available.
-For instance, on a 16- or 32-way shared memory node, a user is typically charged based on node usage, not processor usage.
-So there is no cost savings in running on 40 processors versus 64 processors on a 32-way node system.
-As a result of all of these issues, perfect load-balancing is generally not possible.
-But to a large degree, if one accepts the limitations, a load balance configuration with acceptable idle-time and reasonably good throughput is nearly always possible to configure.
-
-Load-balancing requires a number of considerations such as which components are run, their absolute resolution, and their relative resolution; cost, scaling, and processor count sweet-spots for each component; and internal load imbalance within a component.
-It is often best to load balance the system with all significant run-time I/O turned off because this generally occurs very infrequently (typically one timestep per month), is best treated as a separate cost, and can bias interpretation of the overall model load balance.
-Also, the use of OpenMP threading in some or all of the system is dependent on the hardware/OS support as well as whether the system supports running all MPI and mixed MPI/OpenMP on overlapping processors for different components.
-Finally, one must decide whether the components should run sequentially, concurrently, or in some combination of the two.
-Typically, a series of short test runs is done with the desired production configuration to establish a reasonable load balance setup for the production job.
diff --git a/doc/source/driver_cpl/driver_threading_control.rst b/doc/source/driver_cpl/driver_threading_control.rst
deleted file mode 100644
index ddc8cfd70fa..00000000000
--- a/doc/source/driver_cpl/driver_threading_control.rst
+++ /dev/null
@@ -1,12 +0,0 @@
-Driver Threading Control
-========================
-
-OpenMP thread counts are controlled at three levels.
-
-- The coarsest level is prior to launching the model. The environment variable OMP_NUM_THREADS is usually set to the largest value any mpi task will use. At a minimum, this will ensure threading is turned on to the maximum desired value in the run.
-
-- The next level is during the driver initialization phase. When the mpi communicators are initialized, the maximum number of threads per mpi task can be computed based on the ccsm_pes namelist input. At that point, there is an initial fortran call to the OpenMP routine omp_set_num_threads. When that happens and if that call is successful, the number of threads will be set to the maximum needed in the system on an mpi task-by-task basis.
-
-- Finally, there is the ability of CESM to change the thread count per task as each component is individually called and as the model integrates through the driver run loop. In other words, for components that share the same hardware processor but have different threads per task, this feature allows those components to run with the exact value set by the user in the ccsm_pes namelist. This final level of thread control is turned off by default, but it can be turned on using the driver namelist variable ``drv_threading``.
-
-This fine control of threading is likely of limited use at this point given the current driver implementation.
diff --git a/doc/source/driver_cpl/grids.rst b/doc/source/driver_cpl/grids.rst
deleted file mode 100644
index a40e546b90c..00000000000
--- a/doc/source/driver_cpl/grids.rst
+++ /dev/null
@@ -1,285 +0,0 @@
-=====================
-Grids
-=====================
-
-----------------------------
-Standard Grid Configurations
-----------------------------
-The standard implementation for grids in CIME has been that the atmosphere and land models are run on identical grids and the ocean and sea ice models are run on identical grids.
-The ocean model mask is used to derive a complementary mask for the land grid such that for any given combination of atmosphere/land and ocean/ice grids, there is a unique land mask.
-This approach for dealing with grids is still used a majority of the time.
-But there is a new capability, called ``trigrid``, that allows the atmosphere and land grids to be unique.
-A typical grid is the finite volume "1 degree" atmosphere/land grid matched with the "1 degree" ocean/ice grid.
-The runoff grid is generally unique to runoff, and the land ice grid is coupled on the land grid with interpolation carried out to a unique land ice grid inside that component.
-
-Historically, the ocean grid has been the higher resolution grid in CIME model configurations.
-While that is no longer always the case, the current driver implementation largely reflects that presumption.
-The atmosphere/ocean fluxes in the coupler are computed on the ocean grid.
-The driver namelist variable ``aoflux_grid`` allows users to specify the atmosphere/ocean flux computation grid in the coupler in the future.
-In addition, the default mapping approach used also reflects the presumption that the ocean is generally higher resolution.
-Fluxes are always mapped using a locally conservative area average method to preserve conservation.
-However, states are mapped using bilinear interpolation from the atmosphere grid to the ocean grid to better preserve gradients, while they are mapped using a locally conservative area average approach from the ocean grid to the atmosphere grid.
-These choices are based on the presumption that the ocean grid is higher resolution.
-
-There has always been an option that all grids (atmosphere, land, ocean, and ice) could be identical, and this is still supported.
-There are a couple of namelist variables, ``samegrid_ao``, ``samegrid_al``, and ``samegrid_ro``, that tell the coupler whether to expect that the atmosphere/ocean, atmosphere/land, and runoff/ocean grids, respectively, are identical.
-These are set automatically in the driver namelist depending on the grid chosen, and they impact mapping as well as domain checking.
-
-----------------------
-Trigrid Configurations
-----------------------
-Grid configurations are allowed where the atmosphere and land grids are unique.
-
-The trigrid implementation introduces an ambiguity in the definition of the mask.
-This ambiguity is associated with an inability to define an absolutely consistent ocean/land mask across all grids in the system.
-A summary of trigrid support follows:
-
-- The land mask is defined on the atmosphere grid as the complement of the ocean mask mapped conservatively to the atmosphere grid.
-- Then the land and ocean masks are exactly complementary on the atmosphere grid, where conservative merging is critical.
-- No precise land fraction needs to be defined in the land grid.
-- The only requirement is that the land model compute data on a masked grid such that when mapped to the atmosphere grid, all atmosphere grid points that contain some fraction of land have valid values computed in the land model.
-- There are an infinite number of land fraction masks that can accomplish this, including a fraction field that is exactly one at every grid cell.
-- In the land model, all land fraction masks produce internally conservative results.
-- The only place where the land fraction becomes important is mapping the land model output to the runoff model.
-- In that case, the land fraction on the land grid is applied to the land to runoff mapping.
-
----------
-Fractions
----------
-The component grid fractions in the coupler are defined and computed in ``$CIMEROOT/driver_cpl/driver/seq_frac_mct``.
-A slightly modified version of the notes from this file is pasted below.
-To clarify some of the terms:
-
-- fractions_a, fractions_l, fractions_i, and fractions_o are the fractions on the atmosphere, land, ice, and ocean grids.
-- afrac, lfrac, ifrac, and ofrac are the atmosphere, land, ice, and ocean fractions on those grids.
-
-So fractions_a(lfrac) is the land fraction on the atmosphere grid.
-lfrin is the land fraction defined in the land model.
-This can be different from lfrac because of the trigrid implementation.
-lfrac is the land fraction consistent with the ocean mask and lfrin is the land fraction in the land model.
-ifrad and ofrad are fractions at the last radiation timestep.
-These fractions preserve conservation of heat in the net shortwave calculation because the net shortwave calculation is one timestep behind the ice fraction evolution in the system.
-When the variable "dom" is mentioned below, that refers to a field sent from a component at initialization.
-::
-
-    ! the fractions fields are now afrac, ifrac, ofrac, lfrac, and lfrin.
-    !   afrac = fraction of atm on a grid
-    !   lfrac = fraction of lnd on a grid
-    !   ifrac = fraction of ice on a grid
-    !   ofrac = fraction of ocn on a grid
-    !   lfrin = land fraction defined by the land model
-    !   ifrad = fraction of ice on a grid at last radiation time
-    !   ofrad = fraction of ocn on a grid at last radiation time
-    ! afrac, lfrac, ifrac, and ofrac are the self-consistent values in the
-    ! system. lfrin is the fraction on the land grid and is allowed to
-    ! vary from the self-consistent value as described below. ifrad
-    ! and ofrad are needed for the swnet calculation.
-    ! the fractions fields are defined for each grid in the fraction bundles as
-    ! needed as follows.
-    !   character(*),parameter :: fraclist_a = 'afrac:ifrac:ofrac:lfrac:lfrin'
-    !   character(*),parameter :: fraclist_o = 'afrac:ifrac:ofrac:ifrad:ofrad'
-    !   character(*),parameter :: fraclist_i = 'afrac:ifrac:ofrac'
-    !   character(*),parameter :: fraclist_l = 'afrac:lfrac:lfrin'
-    !   character(*),parameter :: fraclist_g = 'gfrac'
-    !
-    ! we assume ocean and ice are on the same grids, same masks
-    ! we assume ocn2atm and ice2atm are masked maps
-    ! we assume lnd2atm is a global map
-    ! we assume that the ice fraction evolves in time but that
-    ! the land model fraction does not. the ocean fraction then
-    ! is just the complement of the ice fraction over the region
-    ! of the ocean/ice mask.
-    ! we assume that component domains are filled with the total
-    ! potential mask/fraction on that grid, but that the fractions
-    ! sent at run time are always the relative fraction covered.
-    ! for example, if an atm cell can be up to 50% covered in
-    ! ice and 50% land, then the ice domain should have a fraction
-    ! value of 0.5 at that grid cell. at run time though, the ice
-    ! fraction will be between 0.0 and 1.0 meaning that grid cell
-    ! is covered with between 0.0 and 0.5 by ice. the "relative" fractions
-    ! sent at run-time are corrected by the model to be total fractions
-    ! such that
-    ! in general, on every grid,
-    !   fractions_*(afrac) = 1.0
-    !   fractions_*(ifrac) + fractions_*(ofrac) + fractions_*(lfrac) = 1.0
-    ! where fractions_* are a bundle of fractions on a particular grid and
-    ! *frac (ie afrac) is the fraction of a particular component in the bundle.
-    !
-    ! the fractions are computed fundamentally as follows (although the
-    ! detailed implementation might be slightly different)
-    ! initialization (frac_init):
-    !   afrac is set on all grids
-    !     fractions_a(afrac) = 1.0
-    !     fractions_o(afrac) = mapa2o(fractions_a(afrac))
-    !     fractions_i(afrac) = mapa2i(fractions_a(afrac))
-    !     fractions_l(afrac) = mapa2l(fractions_a(afrac))
-    !   initially assume ifrac on all grids is zero
-    !     fractions_*(ifrac) = 0.0
-    !   fractions/masks provided by surface components
-    !     fractions_o(ofrac) = dom_o(frac)  ! ocean "mask"
-    !     fractions_l(lfrin) = dom_l(frac)  ! land model fraction
-    !   then mapped to the atm model
-    !     fractions_a(ofrac) = mapo2a(fractions_o(ofrac))
-    !     fractions_a(lfrin) = mapl2a(fractions_l(lfrin))
-    !   and a few things are then derived
-    !     fractions_a(lfrac) = 1.0 - fractions_a(ofrac)
-    !       this is truncated to zero for very small values (< 0.001)
-    !       to attempt to preserve non-land gridcells.
-    !     fractions_l(lfrac) = mapa2l(fractions_a(lfrac))
-    !   one final term is computed
-    !     dom_a(ascale) = fractions_a(lfrac)/fractions_a(lfrin)
-    !     dom_l(ascale) = mapa2l(dom_a(ascale))
-    !     these are used to correct land fluxes in budgets and lnd2rtm coupling
-    !     and are particularly important when the land model is running on
-    !     a different grid than the atm model. in the old system, this term
-    !     was treated as effectively 1.0 since there was always a check that
-    !     fractions_a(lfrac) ~ fractions_a(lfrin), namely that the land model
-    !     provided a land frac that complemented the ocean grid. this is
-    !     no longer a requirement in this new system and as a result, the
-    !     ascale term can be thought of as a rescaling of the land fractions
-    !     in the land model to be exactly complementary to the ocean model
-    !     on whatever grid it may be running.
-    ! run-time (frac_set):
-    !   update fractions on ice grid
-    !     fractions_i(ifrac) = i2x_i(Si_ifrac)  ! ice frac from ice model
-    !     fractions_i(ofrac) = 1.0 - fractions_i(ifrac)
-    !       note: the relative fractions are corrected to total fractions
-    !     fractions_o(ifrac) = mapi2o(fractions_i(ifrac))
-    !     fractions_o(ofrac) = mapi2o(fractions_i(ofrac))
-    !     fractions_a(ifrac) = mapi2a(fractions_i(ifrac))
-    !     fractions_a(ofrac) = mapi2a(fractions_i(ofrac))
-    !
-    ! fractions used in merging are as follows
-    !   mrg_x2a uses fractions_a(lfrac,ofrac,ifrac)
-    !   mrg_x2o needs to use fractions_o(ofrac,ifrac) normalized to one
-    !     normalization happens in mrg routine
-    !
-    ! fraction corrections in mapping are as follows
-    !   mapo2a uses *fractions_o(ofrac) and /fractions_a(ofrac)
-    !   mapi2a uses *fractions_i(ifrac) and /fractions_a(ifrac)
-    !   mapl2a uses *fractions_l(lfrin) and /fractions_a(lfrin)
-    !   mapa2* should use *fractions_a(afrac) and /fractions_*(afrac) but this
-    !     has been deferred since the ratio is always close to 1.0
-    !
-    ! budgets use the standard afrac, ofrac, ifrac, and lfrac to compute
-    ! quantities except in the land budget which uses lfrin multiplied
-    ! by the scale factor, dom_l(ascale) to compute budgets.
-    !
-    ! fraction and domain checks
-    !   initialization:
-    !     dom_i = mapo2i(dom_o)  ! lat, lon, mask, area
-    !     where fractions_a(lfrac) > 0.0, fractions_a(lfrin) is also > 0.0
-    !       this ensures the land will provide data everywhere the atm needs it
-    !       and allows the land frac to be subtly different from the
-    !       land fraction specified in the atm.
-    !     dom_a = mapl2a(dom_l)  ! if atm/lnd same grids
-    !     dom_a = mapo2a(dom_o)  ! if atm/ocn same grids
-    !     dom_a = mapi2a(dom_i)  ! if atm/ice same grids
-    !     0.0-eps < fractions_*(*) < 1.0+eps
-    !     fractions_l(lfrin) = fractions_l(lfrac)
-    !       only if atm/lnd same grids (but this is not formally required)
-    !       this is needed until dom_l(ascale) is sent to the land model
-    !       as an additional field for use in l2r mapping.
-    !   run time:
-    !     fractions_a(lfrac) + fractions_a(ofrac) + fractions_a(ifrac) ~ 1.0
0.0-eps < fractions_*(*) < 1.0+eps - ---------------- -Domain Checking ---------------- -Domain checking is a very important initialization step in the system. -Domain checking verifies that the longitudes, latitudes, areas, masks, and fractions of different grids are consistent with each other. -The subroutine that carries out domain checking is in ``$CIMEROOT/driver_cpl/driver/seq_domain_mct``. -Tolerances for checking the domains can be set in the drv_in driver namelist via the namelist variables, ``eps_frac``, ``eps_amask``, ``eps_agrid``, ``eps_aarea``, ``eps_omask``, ``eps_ogrid``, and ``eps_oarea``. -These values are derived in the coupler namelist from the script env variables EPS_FRAC, EPS_AMASK, EPS_AGRID, EPS_AAREA, EPS_OMASK, EPS_OGRID, and EPS_OAREA in env_run.xml. -If an error is detected in the domain checking, the model will write an error message and abort. - -The domain checking depends on the grids and, in particular, on the samegrid input namelist settings, but it basically does the following, -:: - - ocean/ice grid comparison: - verifies the grids are the same size - verifies the difference in longitudes and latitudes is less than eps_ogrid. - verifies the difference in masks is less than eps_omask - verifies the difference in areas is less than eps_oarea - - atmosphere/land grid comparison (if samegrid_al): - verifies the grids are the same size - verifies the difference in longitudes and latitudes is less than eps_agrid. - verifies the difference in masks is less than eps_amask - verifies the difference in areas is less than eps_aarea - - atmosphere/ocean grid comparison (if samegrid_ao): - verifies the grids are the same size - verifies the difference in longitudes and latitudes is less than eps_agrid. - verifies the difference in masks is less than eps_amask - verifies the difference in areas is less than eps_aarea - - fractions - verifies that the land fraction on the atmosphere grid and the ocean fraction - on the atmosphere grid add to one within a tolerance of eps_frac. - -There are a number of subtle aspects in the domain checking, like whether to check over masked grid cells, but these issues are less important than recognizing that errors in the domain checking should be treated seriously. -It is easy to make the errors go away by changing the tolerances, but by doing so, critical grid errors that can impact conservation and consistency in a simulation might be overlooked. - - ------------------------ -Mapping (Interpolation) ------------------------ -Mapping files to support interpolation of fields between grids are computed offline. -This is done using the ESMF offline regridding utility. -First, note that historically, the ocean grid has been the higher resolution grid. -While that is no longer always the case, the current implementation largely reflects that presumption. -In general, mapping of fluxes is done using a locally conservative area average approach to preserve conservation. -State fields are generally mapped using bilinear interpolation from the atmosphere grid to the ocean grid to better preserve gradients, and using the conservative area average approach from the ocean grid to the atmosphere grid. -But this is not a requirement of the system. -The individual state and flux mapping files are specified at runtime using the ``seq_maps.rc`` input file, and any valid mapping file using any mapping approach can be specified in that input file.
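Offline mapping files store sparse interpolation weights, and the fractions comments earlier describe the fraction corrections applied around these maps (for example, mapi2a multiplies by fractions_i(ifrac) and divides by fractions_a(ifrac)). The following is a minimal, illustrative Python sketch of that pattern, not the coupler's Fortran implementation; the ``(row, col, S)`` triplets mirror the sparse-weight convention of offline mapping files, and all names are hypothetical. The demo numbers reproduce the worked ice-temperature example later in this section::

    import numpy as np

    def map_with_fraction_correction(row, col, S, src_field, src_frac, dst_frac):
        # scale by the (time-varying) source fraction before mapping ...
        dst = np.zeros(dst_frac.shape)
        for r, c, w in zip(row, col, S):
            dst[r] += w * src_frac[c] * src_field[c]
        # ... and unscale by the mapped fraction afterwards
        ok = dst_frac > 0.0
        dst[ok] /= dst_frac[ok]
        return dst

    row, col, S = [0, 0], [0, 1], [0.5, 0.5]  # two ice cells under one atm cell
    ifrac = np.array([0.3, 0.5])              # relative ice fractions
    temp = np.array([-1.0, -2.0])             # ice temperatures
    ifrac_a = np.zeros(1)
    for r, c, w in zip(row, col, S):
        ifrac_a[r] += w * ifrac[c]            # mapped ice fraction -> 0.4
    print(map_with_fraction_correction(row, col, S, temp, ifrac, ifrac_a))  # [-1.625]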
- -The ``seq_maps.rc`` file contains information about the mapping files as well as the mapping type. -There are currently two types of mapping implementations, "X" and "Y". - -- "X" mapping rearranges the source data to the destination grid decomposition and then a local mapping is done from the source to the destination grid on the destination decomposition. In the "X" type, the source grid is rearranged. -- "Y" mapping does a local mapping from the source grid to the destination grid on the source grid decomposition. That generates a partial sum of the destination values which are then rearranged to the destination decomposition and summed. Both options produce reasonable results, although they may differ in value by "roundoff" due to differences in order of operations. The type chosen impacts performance. In both implementations, the number of flops is basically identical. The difference is the communication. In the "Y" type, the destination grid is rearranged. - -Since historically, the ocean grid is higher resolution than the atmosphere grid, "X" mapping is used for atmosphere to ocean/ice mapping and "Y" mapping is used for ocean/ice to atmosphere mapping to optimize mapping performance. - -Mapping corrections are made in some cases in the polar region. -In particular, the current bilinear and area conservative mapping approaches introduce relatively large errors in mapping vector fields around the pole. -The current coupler can correct the interpolated surface wind velocity near the pole when mapping from the atmosphere to the ocean and ice grids. -There are several options that correct the vector mapping and these are set in the env variable VECT_MAP. -The npfix option only affects ocean and ice grid cells that are northward of the last latitude line of the atmospheric grid. -The algorithm is contained in the file models/drv/driver/map_atmocn_mct.F90 and is only valid when the atmosphere grid is a longitude/latitude grid. -This feature is generally on by default. -The other alternative is the cart3d option which converts the surface u and v velocity to 3d x,y,z vectors, then maps those three vectors before converting back to u and v east and north directions on the surface. -Both vector mapping methods introduce errors of different degrees but are generally much better than just mapping vector fields as if they were individual scalars. -The ``vect_map`` namelist input is set in the ``drv_in`` file. - -The input mapping files are assumed to be valid for grids with masks of value zero or one where grid points with a mask of zero are never considered in the mapping. -Well defined, locally conservative area mapping files as well as bilinear mapping files can be generated using this masked approach. -However, there is another issue, which is that a grid fraction in an active cell might actually change over time. -This is not the case for land fraction, but it is the case for relative ice and ocean fractions. -The ice fraction is constantly evolving in the system in general. -To improve the accuracy of the ice and ocean mapping, the ocean/ice fields are scaled by the local fraction before mapping and unscaled by the mapped fraction after mapping. -The easiest way to demonstrate this is via an example. -Consider a case where two ice cells of equal area underlie a single atmosphere cell completely.
-The mapping weight of each ice cell generated offline would be 0.5 in this case, and if ice temperatures of -1.0 and -2.0 in the two cells respectively were mapped to the atmosphere grid, the resulting ice temperature on the atmosphere grid would be -1.5. -Now consider the case where one cell has an ice fraction of 0.3 and the other has a fraction of 0.5. -Mapping the ice fraction to the atmospheric cell results in a value of 0.4. -If the same temperatures are mapped in the same way, a temperature of -1.5 results, which is reasonable, but not entirely accurate. -Because of the relative ice fractions, the weight of the second cell should be greater than the weight of the first cell. -Taking this into account properly results in a fraction weighted ice temperature of -1.625 in this example. -This is the fraction correction that is carried out whenever ocean and ice fields are mapped to the atmosphere grid. -Time-varying fraction corrections are not required in other mappings to improve accuracy because their relative fractions remain static. - -------------------------- -Area Correction of Fluxes -------------------------- -To improve conservation in the system, all fluxes sent to and received from components are corrected for the area differences between the components. -There are many reasonable ways to compute an area of a grid cell, but they are not generally consistent. -One assumption with respect to conservation of fluxes is that the area acting upon the flux is well defined. -Differences in area calculations can result in area differences of up to a few percent, and if these are not corrected, they will impact overall mass and heat conservation. -Areas are extracted for each grid from the mapping files. -In this implementation, it is assumed that the areas in all mapping files are computed reasonably and consistently for each grid and on different grids. -Those mapping areas are used to correct the fluxes for each component by scaling the fluxes sent to and received by the component by the ratio of the mapping area and the component area. -The areas from the components are provided to the coupler by the component at initialization. -The minimum and maximum values of each area correction are written to the coupler log file at initialization. -One critical point is that if mapping files are generated by different tools offline and used in the driver, an error could be introduced that is related to inconsistent areas provided by different mapping files. - - diff --git a/doc/source/driver_cpl/history-and-restarts.rst b/doc/source/driver_cpl/history-and-restarts.rst deleted file mode 100644 index eed38f345de..00000000000 --- a/doc/source/driver_cpl/history-and-restarts.rst +++ /dev/null @@ -1,24 +0,0 @@ -History and Restarts -==================== - -In addition to log files, component models also produce history and restart files. -History files are generally in netcdf format and contain fields associated with the state of the model. -History files are implemented and controlled independently in the component models, although support for monthly average history files is a standard output of most production runs. -The driver has a file naming standard for history files which includes the case name, component name, and model date. - -All CIME-compliant component models must be able to stop in the middle of a run and then subsequently restart in a bit-for-bit fashion. -For most models, this requires the writing of a restart file.
-The restart file can be in any format, although netcdf has become relatively standard, and it should contain any scalars, fields, or information that is required to restart the component model in exactly the same state as when the restart was written and the model was stopped. -The expectation in CIME is that a restart of a model run will be bit-for-bit identical, and this is regularly tested as part of component model development by running the model for 10 days, writing a restart at the end of 5 days, and then restarting at day 5 and comparing the result with the 10 day run. -Unlike history files, restart files must be coordinated across different components. -The restart frequency is set in the driver time manager namelist by the driver namelist variables ``restart_option``, ``restart_n``, and ``restart_ymd``. -The driver will trigger a restart alarm in the clocks when a coordinated restart is requested. -The components are required to check this alarm whenever they are called and to write a restart file at the end of the current coupling period. -This method ensures all components are writing restart files at a consistent timestamp. -The restart filenames are normally set in a generic rpointer file. -The rpointer file evolves over the integration and keeps track of the current restart filenames. -When a model is restarted, both the rpointer file and the actual restart file are generally required. - -Many models are also able to restart accumulating history files in the middle of an accumulation period, but this is not a current requirement for CIME-compliant components. -In production, the model is usually started and stopped on monthly boundaries so monthly average history files are produced cleanly. -The run length of a CESM1 production run is usually specified using the nmonths or nyears option and restart files are normally written only at the end of the run. diff --git a/doc/source/driver_cpl/implementation.rst b/doc/source/driver_cpl/implementation.rst deleted file mode 100644 index e4d74740f15..00000000000 --- a/doc/source/driver_cpl/implementation.rst +++ /dev/null @@ -1,15 +0,0 @@ -Implementation -============== - -.. toctree:: - :maxdepth: 3 - - time-management.rst - grids.rst - initialization-and-restart.rst - driver_threading_control.rst - bit-for-bit-flag.rst - history-and-restarts.rst - budgets.rst - multi-instance.rst - namelist-overview.rst diff --git a/doc/source/driver_cpl/index.rst b/doc/source/driver_cpl/index.rst deleted file mode 100644 index 18ffa1b9023..00000000000 --- a/doc/source/driver_cpl/index.rst +++ /dev/null @@ -1,26 +0,0 @@ -.. _driver-cpl: - -.. on documentation master file, created by - sphinx-quickstart on Tue Jan 31 19:46:36 2017. - You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -####################### - Driver/Coupler -####################### - -..
toctree:: - :maxdepth: 3 - :numbered: - - introduction.rst - design.rst - implementation.rst - -Indices and tables -================== - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` - diff --git a/doc/source/driver_cpl/initialization-and-restart.rst b/doc/source/driver_cpl/initialization-and-restart.rst deleted file mode 100644 index e7680922d6c..00000000000 --- a/doc/source/driver_cpl/initialization-and-restart.rst +++ /dev/null @@ -1,34 +0,0 @@ -==================================== -Initialization and Restart -==================================== - -The initialization has been developed over the last two decades to meet the scientific goals, minimize the communication required, and ensure a consistent and well-defined climate system. -The order of operations is critical. The initialization is basically as follows: - -- The ``ccsm_pes`` namelist is read and mpi communicators are initialized. -- The ``seq_infodata`` namelist is read and configuration settings are established. -- The ``prof_inparm`` namelist is read and the timing tool is initialized. -- The ``pio_inparm`` namelist is read and the driver IO is initialized. -- The ``seq_timemgr`` namelist is read and the driver time manager and clocks are initialized. -- The atmosphere init routine is called, the mpi communicator and clock are sent, and the atmosphere grid is returned. -- The land init routine is called, the mpi communicator and clock are sent, and the land grid is returned. -- The runoff init routine is called, the mpi communicator and clock are sent, and the runoff grid is returned. -- The ocean init routine is called, the mpi communicator and clock are sent, and the ocean grid is returned. -- The ice init routine is called, the mpi communicator and clock are sent, and the ice grid is returned. -- The land ice init routine is called, the mpi communicator and clock are sent, and the land ice grid is returned. -- The infodata buffer is synchronized across all processors. This buffer contains many model configuration settings set by the driver but also sent from the components. -- The atmosphere, land, runoff, ice, land ice, and ocean rearrangers are initialized. - These rearrangers move component data between the component pes and the coupler pes. -- The remaining associated attribute datatypes are initialized. -- The mapping weights and areas are read. -- The component grids are checked using the domain checking method (see the sketch after this list). -- The flux area corrections are initialized on the component pes and applied to the initial fields sent by each component on the component pes. Those initial fields are then rearranged to the coupler pes. -- The fractions are initialized on the coupler pes. -- The atmosphere/ocean flux computation is initialized and initial ocean albedos are computed on the coupler pes. -- The land, ocean, and ice initial albedos are mapped to the atmosphere grid and merged to generate initial surface albedos. -- The initial atmosphere forcing data (albedos) is rearranged from the coupler pes to the atmosphere pes, and the area corrections are applied. -- The second phase of the atmosphere init method is to initialize the atmosphere radiation from the surface albedos. -- The new atmosphere initial data is area corrected and rearranged to the coupler pes. -- The budget diagnostics are zeroed out. -- The coupler restart file is read. -- Initialization is complete.
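The "domain checking method" referenced in the list above is the tolerance comparison described in the Domain Checking section earlier; the real checks live in ``seq_domain_mct``. A minimal, illustrative Python sketch of one such grid comparison, with hypothetical array arguments (raising an exception stands in for the model writing an error message and aborting)::

    import numpy as np

    def check_grids(lon_a, lat_a, lon_b, lat_b, mask_a, mask_b,
                    area_a, area_b, eps_grid, eps_mask, eps_area):
        # the two grids must be the same size
        if lon_a.size != lon_b.size:
            raise RuntimeError("grids are not the same size")
        # longitudes/latitudes, masks, and areas must agree within tolerances
        if max(np.abs(lon_a - lon_b).max(), np.abs(lat_a - lat_b).max()) > eps_grid:
            raise RuntimeError("lon/lat difference exceeds tolerance")
        if np.abs(mask_a - mask_b).max() > eps_mask:
            raise RuntimeError("mask difference exceeds tolerance")
        if np.abs(area_a - area_b).max() > eps_area:
            raise RuntimeError("area difference exceeds tolerance")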
- diff --git a/doc/source/driver_cpl/introduction.rst b/doc/source/driver_cpl/introduction.rst deleted file mode 100644 index f7fe10aaceb..00000000000 --- a/doc/source/driver_cpl/introduction.rst +++ /dev/null @@ -1,18 +0,0 @@ -Introduction -============ - -The following provides an overview of the CIME driver/coupler. -We will cover the top level driver implementation as well as the coupler component within the system. -The driver runs on all hardware processors, runs the top level instructions, and executes the driver time loop. -The coupler is a component of the CIME infrastructure that is run from within the driver. -It can be run on a subset of the total processors, and carries out mapping (interpolation), merging, diagnostics, and other calculations. -The name cpl7 refers to the source code associated with both the driver and the coupler parts of the model. -cpl7 code is located in the CIME source tree under driver_cpl/, and the main program is ``driver_cpl/driver/cesm_driver.F90``. - -We also provide a general overview of the cpl7 design. -Specific implementation issues are then discussed individually. -Finally, there is a section summarizing all of the cpl7 namelist input. -This document is written primarily to help users understand the inputs and controls within the cpl7 system, but also to provide some background about the associated implementation. -`Coupler flow diagrams `_ are provided in a separate document. -Some additional documentation on how the coupler works can be found in Craig et al, `"A New Flexible Coupler for Earth System Modeling Developed for CCSM4 and CESM1" `_, International Journal of High Performance Computing Applications 2012 26: 31 DOI: 10.1177/1094342011428141. - diff --git a/doc/source/driver_cpl/multi-instance.rst b/doc/source/driver_cpl/multi-instance.rst deleted file mode 100644 index ebfe8d923c6..00000000000 --- a/doc/source/driver_cpl/multi-instance.rst +++ /dev/null @@ -1,15 +0,0 @@ -Multi-instance Functionality -============================= - -The multi-instance feature allows multiple instances of a given component to run in a single CESM run. -This might be useful for data assimilation or to average results from multiple instances to force another model. - -The multi-instance implementation is fairly basic at this point. -It does not do any averaging or other statistics between multiple instances, and it requires that all prognostic components run the same number of instances to ensure correct coupling. -The multi-instance feature is set via the ``$CASEROOT/env_mach_pes.xml`` variables that have an ``NINST_`` prefix. -The tasks and threads that are specified in multi-instance cases are distributed evenly between the multiple instances. -In other words, if 16 tasks are requested for the atmosphere and there are two atmosphere instances, each instance will run on 8 of those tasks. -The ``NINST_*_LAYOUT`` value should always be set to *concurrent* at this time. -Running multiple instances sequentially is not yet a robust feature. -The number of instances is a build-time setting in env_mach_pes.xml. -Multiple instance capabilities are expected to be extended in the future.
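The even task distribution described above is easy to illustrate. A small, hypothetical Python helper (not CIME code) that computes the task range each instance would own::

    def instance_task_range(ntasks, ninst, inst):
        # divide the component's tasks evenly among the instances
        per_instance = ntasks // ninst
        first = inst * per_instance
        return first, first + per_instance - 1

    # 16 atmosphere tasks with two instances -> 8 tasks per instance
    print(instance_task_range(16, 2, 0))  # (0, 7)
    print(instance_task_range(16, 2, 1))  # (8, 15)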
diff --git a/doc/source/driver_cpl/namelist-overview.rst b/doc/source/driver_cpl/namelist-overview.rst deleted file mode 100644 index ad0641d65bf..00000000000 --- a/doc/source/driver_cpl/namelist-overview.rst +++ /dev/null @@ -1,14 +0,0 @@ -More on Driver Namelists -========================= - -There are a series of driver/coupler namelist input files created by the driver namelist generator ``$CIMEROOT/driver_cpl/cime_config/buildnml``. These are - -- drv_in -- drv_flds_in -- cpl_modelio.nml, atm_modelio.nml, esp_modelio.nml, glc_modelio.nml, ice_modelio.nml, lnd_modelio.nml, ocn_modelio.nml, rof_modelio.nml, wav_modelio.nml -- seq_maps.rc - -The ``*_modelio.nml`` files set the filename for the primary standard output file and also provide settings for the parallel IO library, PIO. -The drv_in namelist file contains several different namelist groups associated with general options, time manager options, pe layout, timing output, and parallel IO settings. -The seq_maps.rc file specifies the mapping files for the configuration. -Note that seq_maps.rc is NOT a Fortran namelist file but the format should be relatively clear from the default settings. diff --git a/doc/source/driver_cpl/time-management.rst b/doc/source/driver_cpl/time-management.rst deleted file mode 100644 index da2ea973dda..00000000000 --- a/doc/source/driver_cpl/time-management.rst +++ /dev/null @@ -1,194 +0,0 @@ -.. _time-management: - -=============== -Time Management -=============== - -------------- -Driver Clocks -------------- -The driver manages the main clock in the system. The main clock -advances at the shortest coupling period and uses alarms to trigger -component coupling and other events. In addition, the driver -maintains a clock that is associated with each component. The -driver's component clocks have a timestep associated with the coupling -period of that component. The main driver clock and the component -clocks in the driver advance in a coordinated manner and are always -synchronized. The advancement of time is managed as follows in the -main run loop. First, the main driver clock advances one timestep and -the component clocks are advanced in a synchronous fashion. The clock -time represents the time at the end of the next model timestep. -Alarms may be triggered at that timestep to call the atmosphere, -land, runoff, sea ice, land ice, or ocean run methods. If a component -run alarm is triggered, the run method is called and the driver passes -that component's clock to that component. The component clock -contains information about the length of the next component -integration and the expected time of the component at the end of the -integration period. - -Generally, the component models have independent time management -software. When a component run method is called, the component must -advance the proper period and also check that its internal clock is -consistent with the coupling clock before returning to the driver. -The clock passed to the component by the driver contains this -information. Component models are also responsible for making sure -the coupling period is consistent with their internal timestep. -History files are managed independently by each component, but restart -files are coordinated by the driver.
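A schematic of this coordinated clock advancement, written as a short Python sketch rather than the model's Fortran time manager; the coupling periods here are hypothetical::

    # the main clock steps at the shortest coupling period; a component's
    # run alarm triggers whenever its coupling period divides the time
    shortest = 1800                                      # seconds
    periods = {"atm": 1800, "lnd": 1800, "ice": 1800, "ocn": 86400}

    time = 0
    for _ in range(3):
        time += shortest                                 # all clocks advance together
        run_now = [c for c, dt in periods.items() if time % dt == 0]
        print(time, run_now)                             # e.g. 1800 ['atm', 'lnd', 'ice']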
- -The driver clocks are based on the ESMF clock datatype and are supported -in software by either an official ESMF library or by software included -in CIME called ``esmf_wrf_timemgr``, which is a much simplified -Fortran implementation of a subset of the ESMF time manager -interfaces. - --------------------- -The Driver Time Loop --------------------- -The driver time loop is hardwired to sequence the component models in -a specific way to meet scientific requirements and to otherwise -provide the maximum amount of potential concurrency of work. The -results of the model integration are not dependent on the processor -layout of the components. See the Craig et al IJHPCA 2012 reference -for further details. - -In addition, the driver is currently configured to couple the -atmosphere, land, and sea ice models using the same coupling frequency -while the runoff, land ice, and ocean model can be coupled at the same -or at a lower frequency. To support this feature, the driver does -temporal averaging of coupling inputs to the ocean and runoff, and the -driver also computes the surface ocean albedo at the higher coupling -frequency. There is no averaging of coupling fields for other -component coupling interactions and the land and sea ice models' -surface albedos are computed inside those components. Averaging -functionality could be added to the driver to support alternative -relative coupling schemes in the future if desired, with the additional -caveat that the interaction between the surface albedo computation in -each component and the atmospheric radiation calculation has to be -carefully considered. In addition, some other features may need to be -extended to support other coupling schemes and still allow model -concurrency. - -The coupler processors (pes) handle the interaction of data between -components, so there are separate tasks associated with deriving -fields on the coupler pes, transferring data to and from the coupler -pes and other components, and then running the component models on -their processors. The driver time loop is basically sequenced as -follows, -:: - - - The driver clock is advanced first and alarms set. - - Input data for ocean, land, sea ice, and runoff is computed. - - Ocean data is rearranged from the coupler to the ocean pes. - - Land data is rearranged from the coupler to the land pes. - - Ice data is rearranged from the coupler to the ice pes. - - Runoff data is rearranged from the coupler to the runoff pes. - - The ice model is run. - - The land model is run. - - The runoff model is run. - - The ocean model is run. - - The ocean inputs are accumulated, and the atmosphere/ocean fluxes are - computed on the coupler pes based on the results from the previous - atmosphere and ocean coupled timestep. - - Land data is rearranged from the land pes to the coupler pes. - - Land ice input is computed. - - Land ice data is rearranged from the coupler to the land ice pes. - - River output (runoff) data is rearranged from the runoff pes to the coupler pes. - - Ice data is rearranged from the ice pes to the coupler pes. - - Coupler fractions are updated. - - Atmospheric forcing data is computed on the coupler pes. - - Atmospheric data is rearranged from the coupler pes to the atmosphere pes. - - The atmosphere model is run. - - The land ice model is run. - - Land ice data is rearranged from the land ice pes to the coupler pes. - - Atmospheric data is rearranged from the atmosphere pes to the coupler pes. - - Ocean data is rearranged from the ocean pes to the coupler pes.
- - The loop returns - - Within this loop, as much as possible, coupler work associated - with mapping data, merging fields, diagnosing, applying area corrections, - and computing fluxes is overlapped with component work. - -The land ice model interaction is slightly different. -:: - - - The land ice model is run on the land grid - - Land model output is passed to the land ice model every land coupling period. - - The driver accumulates this data, interpolates the data to the land ice grid, - and the land ice model advances about once a year. - -The runoff model should be coupled at a frequency between the land -coupling and ocean coupling frequencies. The runoff model runs at the -same time as the land and sea ice models when it runs. - -The current driver sequencing has been developed over nearly two -decades, and it plays a critical role in conserving mass and heat, -minimizing lags, and providing stability in the system. The above -description is consistent with the `concurrency limitations -`_. -Just to reiterate, the land, runoff, and sea ice models will always -run before the atmospheric model, and the coupler and ocean models are -able to run concurrently with all other components. The coupling -between the atmosphere, land, sea ice, and atmosphere/ocean flux -computation incurs no lags, but the coupling to the ocean state is -lagged by one ocean coupling period in the system. `Mass and heat -`_ -are conserved in the system, as described in more detail there. - - -It is possible to reduce the ocean lag in the system. A driver -namelist variable, ``ocean_tight_coupling``, moves the step where -ocean data is rearranged from the ocean pes to the coupler pes from -the end of the loop to before the atmosphere/ocean flux computation. -If ocean_tight_coupling is set to true, then the ocean lag is reduced -by one atmosphere coupling period, but the ability of the ocean model -to run concurrently with the atmosphere model is also reduced or -eliminated. This flag is most useful when the ocean coupling -frequency matches that of the other components. - ------------------- -Coupling Frequency ------------------- -In the current implementation, the coupling period must be identical -for the atmosphere, sea ice, and land components. The ocean coupling -period can be the same or greater. The runoff coupling period should -be between or the same as the land and ocean coupling periods. All -coupling periods must be integer multiples of the smallest coupling -period and will evenly divide the NCPL_BASE_PERIOD, typically one day, -set in env_run.xml. The coupling periods are set using the NCPL env -variables in env_run.xml. - -The coupling periods are set in the driver namelist for each component -via variables called something like atm_cpl_dt and atm_cpl_offset. -The units of these inputs are seconds. The coupler template file -derives these values from CIME script variable names like ATM_NCPL, -which is the coupling frequency per day. The \*_cpl_dt input -specifies the coupling period in seconds and the \*_cpl_offset input -specifies the temporal offset of the coupling time relative to initial -time. An example of an offset might be a component that couples every -six hours. That would normally be on the 6th, 12th, 18th, and 24th -hour of every day. An offset of 3600 seconds would change the -coupling to the 1st, 7th, 13th, and 19th hour of every day. -The short sketch below works through this offset arithmetic.
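A minimal illustration of that offset arithmetic, in Python with times in seconds::

    cpl_dt, offset = 21600, 3600          # six-hourly coupling, one-hour positive offset
    alarms = [(k * cpl_dt + offset) % 86400 for k in range(1, 5)]
    print([t // 3600 for t in alarms])    # [7, 13, 19, 1] -> the 7th, 13th, 19th, and 1st hours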
The -offsets cannot be larger than the coupling period and the sign of the -offsets is such that a positive offset shifts the alarm time forward -by that number of seconds. The offsets are of limited use right now -because of the limitations of the relative coupling frequencies. - -Offsets play an important role in supporting concurrency. There is an -offset of the smallest coupling period automatically introduced in -every coupling run alarm for each component clock. This is only -mentioned because it is an important but subtle point of the -implementation and changing the coupling offset could have an impact -on concurrency performance. Without this explicit automatic offset, -the component run alarms would trigger at the end of the coupling -period. This is fine for components that are running at the shortest -coupling period, but it will limit the ability of models that couple -at longer periods to run concurrently. What is really -required for concurrency is that the run alarm be triggered as early -as possible and that the data not be copied from that component to the -coupler pes until the coupling period has ended. The detailed -implementation of this feature is documented in the seq_timemgr_mod.F90 -file and the impact of it for the ocean coupling is implemented in -the ccsm_driver.F90 code via use of the ocnrun_alarm and ocnnext_alarm -variables. - diff --git a/doc/source/glossary/index.rst b/doc/source/glossary/index.rst index e8fc6fb8303..6ab8f033fa9 100644 --- a/doc/source/glossary/index.rst +++ b/doc/source/glossary/index.rst @@ -14,14 +14,14 @@ General .. glossary:: - active or prognostic component + active or prognostic component Solves a complex set of equations to describe a sub-model’s behavior. case (CASE) An instance of a global climate model simulation. A case is defined by a component set, a model grid, a machine, a compiler, and any other additional customizations. - component + component A sub-model coupled with other components to constitute a global climate modeling system. Example components: atmosphere, ocean, land, etc. @@ -29,17 +29,17 @@ General A complete set of components to be linked together into a climate model to run a specific case. - data component + data component Replacement for an active component. Sends and receives the same variables to and from other models (but ignores the variables received). grid (GRID) A set of numerical grids of a case. Each active component operates on its own numerical grid. - resolution + resolution Used to refer to a set of grids. Each grid within a set may have different resolution. - stub component + stub component Simply occupies the required place in the climate execution sequence and does not send or receive any data. @@ -49,25 +49,25 @@ Coupling .. glossary:: - coupler + coupler A component of the CIME infrastructure that is run from within the driver. It can be run on a subset of the total processors, and carries out mapping (interpolation), merging, diagnostics, and other calculations. - driver + driver The hub that connects all components. CIME driver runs on all hardware processors, runs the top level instructions, and executes the driver time loop. forcing An imposed perturbation of Earth's energy balance - Model Coupling Toolkit or MCT + Model Coupling Toolkit or MCT A library used by CIME for all data rearranging and mapping (interpolation) - mask + mask Determines land/ocean boundaries in the model - mapping + mapping Interpolation of fields between components.
********************* @@ -85,13 +85,13 @@ Files and Directories case root (CASEROOT) The directory where the case is created. Includes namelist files, xml files, and scripts to setup, - build, and run the case. Also, includes logs and timing output files. + build, and run the case. Also, includes logs and timing output files. CIME root (CIMEROOT) The directory where the CIME source code resides - history files - NetCDF files that contain fields associated with the state of the model at a given time slice. + history files + NetCDF files that contain fields associated with the state of the model at a given time slice. initial files Files required to start a run stream A time-series of input data files where all the fields in the stream are located in the same data file and all share the same spatial and temporal coordinates. - namelist files + namelist files Each namelist file includes input parameters for a specific component. run directory (RUNDIR) Where the case is run. - restart files + restart files Written and read by each component in the RUNDIR to stop and subsequently restart in a bit-for-bit fashion. - rpointer files + rpointer files Text file written by the coupler in the RUNDIR with a list of necessary files required for model restart. - XML files + XML files Elements and attributes in these files configure a case. (building, running, batch, etc.) These files include env_archive.xml, env_batch.xml, env_build.xml, env_case.xml, env_mach_pes.xml, env_mach_specific.xml, env_run.xml - in CASEROOT and can be queried and modifed using the xmlquery and xmlchange tools. + in CASEROOT and can be queried and modified using the xmlquery and xmlchange tools. *********** Development @@ -131,7 +131,7 @@ Development one or more source files that are modified by the user. Before building a case, CIME replaces the original source files with these files. - tag + tag A snapshot of the source code. With each consecutive tag, one or more answer-changing modifications to the source code of a component are introduced. @@ -144,7 +144,7 @@ Testing .. glossary:: - baseline + baseline A set of test cases that is run using a tag which is complete, tested, and has no modifications in the source code. Used to assess the performance/accuracy of a case that is run using a sandbox. @@ -152,13 +152,13 @@ Testing A test that fails in its comparison with a baseline. blessing - Part of the unit testing framework used by CIME scripts regression tests. + Part of the unit testing framework used by CIME scripts regression tests. regression test A test that compares with baseline results to determine if any new errors have been introduced into the code base. - unit testing + unit testing A fast, self-verifying test of a small piece of code. ************* Miscellaneous @@ -167,5 +167,5 @@ Miscellaneous .. glossary:: - ESP - External System Processing: handles data assimilation \ No newline at end of file + ESP + External System Processing: handles data assimilation diff --git a/doc/source/index.rst b/doc/source/index.rst index 940dc2d9340..4e26bf773b3 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -15,11 +15,9 @@ Table of contents ----------------- .. toctree:: :maxdepth: 2 - + what_cime/index.rst users_guide/index.rst - data_models/index.rst - driver_cpl/index.rst build_cpl/index.rst misc_tools/index.rst @@ -43,6 +41,6 @@ Python Module Indices and Search -CIME is developed by the -`E3SM `_ and +CIME is developed by the +`E3SM `_ and `CESM `_ projects.
diff --git a/doc/source/misc_tools/ect.rst b/doc/source/misc_tools/ect.rst index 58113747e0a..5a2af9737de 100644 --- a/doc/source/misc_tools/ect.rst +++ b/doc/source/misc_tools/ect.rst @@ -14,16 +14,16 @@ UF-CAM-ECT - detects issues in CAM and CLM (9 time step runs) POP-ECT - detects issues in POP and CICE (12 month runs) The ECT process involves comparing runs generated with -the new scenario ( 3 for CAM-ECT and UF-CAM-ECT, and 1 for POP-ECT) +the new scenario (3 for CAM-ECT and UF-CAM-ECT, and 1 for POP-ECT) to an ensemble built on a trusted machine (currently cheyenne). The python ECT tools are located in the pyCECT subdirectory or at https://github.com/NCAR/PyCECT/releases. -OR- - -We now provide a web server for CAM-ECT and UF-CAM-ECT, where + +We now provide a web server for CAM-ECT and UF-CAM-ECT, where you can upload the (3) generated runs for comparison to our ensemble. -Please see the webpage at http://www.cesm.ucar.edu/models/cesm2/verification/ +Please see the webpage at http://www.cesm.ucar.edu/models/cesm2/verification/ for further instructions. ----------------------------------- @@ -42,12 +42,12 @@ $CESMDATAROOT/inputdata/validation/pop_ensembles If none of our ensembles are suitable for your needs, then you may create your own ensemble (and summary file) using the following instructions: - -(1) To create a new ensemble, use the ensemble.py script in this directory. + +(1) To create a new ensemble, use the ensemble.py script in this directory. This script creates and compiles a case, then creates clones of the original case, where the initial temperature perturbation is slightly modified for each ensemble member. At this time, cime includes functionality -to create ensembles for CAM-ECT, UF-CAM-ECT, and POP-ECT. +to create ensembles for CAM-ECT, UF-CAM-ECT, and POP-ECT. (2) Use --ect to specify whether ensemble is for CAM or POP. (See 'python ensemble.py -h' for additional details). @@ -73,21 +73,21 @@ POP-ECT: python ensemble.py --case /glade/scratch/cesm_user/cesm_tag/uf_ensemble/ensemble.cesm_tag.000 --mach cheyenne --ensemble 40 --ect pop --project P99999999 -Notes: +Notes: (a) ensemble.py accepts (most of) the arguments of create_newcase (b) case name must end in ".000" and include the full path (c) ensemble size must be specified, and suggested defaults are listed - above. Note that for CAM-ECT and UF-CAM-ECT, the ensemble size + above. Note that for CAM-ECT and UF-CAM-ECT, the ensemble size needs to be larger than the number of variables that ECT will evaluate. -(5) Once all ensemble simulations have run successfully, copy every cam history -file (*.cam.h0.*) for CAM-ECT and UF-CAM-ECT) or monthly pop history file -(*.pop.h.*) for POP-ECT from each ensemble run directory into a separate directory. +(5) Once all ensemble simulations have run successfully, copy every cam history +file (*.cam.h0.*, for CAM-ECT and UF-CAM-ECT) or monthly pop history file +(*.pop.h.*, for POP-ECT) from each ensemble run directory into a separate directory. Next create the ensemble summary using the pyCECT tool pyEnsSum.py (for CAM-ECT and -UF-CAM-ECT) or pyEnsSumPop.py (for POP-ECT). For details see README_pyEnsSum.rst +UF-CAM-ECT) or pyEnsSumPop.py (for POP-ECT). For details see README_pyEnsSum.rst and README_pyEnsSumPop.rst with the pyCECT tools. ------------------- @@ -105,14 +105,14 @@ attributes give this information.
(2) For example, for CAM-ECT: python ensemble.py --case /glade/scratch/cesm_user/cesm_tag/camcase.cesm_tag.000 --ect cam --mach cheyenne --project P99999999 ---compset F2000climo --res f19_f19 +--compset F2000climo --res f19_f19 For example, for UF-CAM-ECT: -python ensemble.py --case /glade/scratch/cesm_user/cesm_tag/uf.camcase.cesm_tag.000 --ect cam --uf --mach cheyenne --project P99999999 --compset F2000climo --res f19_f19 +python ensemble.py --case /glade/scratch/cesm_user/cesm_tag/uf.camcase.cesm_tag.000 --ect cam --uf --mach cheyenne --project P99999999 --compset F2000climo --res f19_f19 For example, for POP-ECT: -python ensemble.py --case /glade/scratch/cesm_user/cesm_tag/popcase.cesm_tag.000 --ect pop --mach cheyenne --project P99999999 --compset G --res T62_g17 +python ensemble.py --case /glade/scratch/cesm_user/cesm_tag/popcase.cesm_tag.000 --ect pop --mach cheyenne --project P99999999 --compset G --res T62_g17 (3) Next verify the new simulation(s) with the pyCECT tool pyCECT.py (see README_pyCECT.rst with the pyCECT tools). diff --git a/doc/source/misc_tools/load-balancing-tool.rst b/doc/source/misc_tools/load-balancing-tool.rst index da96f353632..2701b9e1b6f 100644 --- a/doc/source/misc_tools/load-balancing-tool.rst +++ b/doc/source/misc_tools/load-balancing-tool.rst @@ -36,13 +36,13 @@ Also in this documentation is:: 1. set PYTHONPATH to include $CIME_DIR/scripts:$CIME_DIR/tools/load_balancing_tool - + 2. create PE XML file to describe the PE layouts for the timing runs 3. $ ./load_balancing_submit.py --res --compset --pesfile - + 4. ... wait for jobs to run ... - + 5. $ ./load_balancing_solve.py --total-tasks --blocksize 8 @@ -55,7 +55,7 @@ Simulations can be run on a given system by executing the load_balancing_tool.py script, located in cime/tools/load_balancing_tool/load_balancing_tool_submit.py. This creates timing files in the case directory which will be used to solve a mixed integer linear program optimizing the layout. If there is already timing -information available, then a +information available, then a As with the create_newcase and create_test scripts, command line options are used to tailor the simulations for a given model. These values will be @@ -345,7 +345,7 @@ To permanently add to CIME: Testing ******* -To run the provided test suite: +To run the provided test suite: 1. set PYTHONPATH to include CIME libraries:: @@ -377,6 +377,3 @@ To run the provided test suite: $ cd $CIME_DIR/tools/load_balancing_tool $ ./tests/load_balancing_test.py - - - diff --git a/doc/source/users_guide/cime-change-namelist.rst b/doc/source/users_guide/cime-change-namelist.rst index 34bda3feec0..54497898d2b 100644 --- a/doc/source/users_guide/cime-change-namelist.rst +++ b/doc/source/users_guide/cime-change-namelist.rst @@ -195,7 +195,7 @@ Each data model can be runtime-configured with its own namelist. Data Atmosphere (DATM) ~~~~~~~~~~~~~~~~~~~~~~ -DATM is discussed in detail in :ref:`data atmosphere overview `. +DATM is discussed in detail in :ref:`data atmosphere overview ` (**link currently broken**). DATM can be user-customized by changing either its *namelist input files* or its *stream files*. The namelist file for DATM is **datm_in** (or **datm_in_NNN** for multiple instances). @@ -217,7 +217,7 @@ After calling `preview_namelists <../Tools_user/preview_namelists.html>`_ again, Data Ocean (DOCN) ~~~~~~~~~~~~~~~~~~~~~~ -DOCN is discussed in detail in :ref:`data ocean overview `. +DOCN is discussed in detail in :ref:`data ocean overview ` (**link currently broken**). 
DOCN can be user-customized by changing either its namelist input or its stream files. The namelist file for DOCN is **docn_in** (or **docn_in_NNN** for multiple instances). @@ -239,7 +239,7 @@ After changing this file and calling `preview_namelists <../Tools_user/preview_n Data Sea-ice (DICE) ~~~~~~~~~~~~~~~~~~~~~~ -DICE is discussed in detail in :ref:`data sea-ice overview `. +DICE is discussed in detail in :ref:`data sea-ice overview ` (**link currently broken**). DICE can be user-customized by changing either its namelist input or its stream files. The namelist file for DICE is ``dice_in`` (or ``dice_in_NNN`` for multiple instances) and its values can be changed by editing the ``$CASEROOT`` file ``user_nl_dice`` (or ``user_nl_dice_NNN`` for multiple instances). @@ -256,7 +256,7 @@ The namelist file for DICE is ``dice_in`` (or ``dice_in_NNN`` for multiple insta Data Land (DLND) ~~~~~~~~~~~~~~~~~~~~~~ -DLND is discussed in detail in :ref:`data land overview `. +DLND is discussed in detail in :ref:`data land overview ` (**link currently broken**). DLND can be user-customized by changing either its namelist input or its stream files. The namelist file for DLND is ``dlnd_in`` (or ``dlnd_in_NNN`` for multiple instances) and its values can be changed by editing the ``$CASEROOT`` file ``user_nl_dlnd`` (or ``user_nl_dlnd_NNN`` for multiple instances). @@ -273,7 +273,7 @@ The namelist file for DLND is ``dlnd_in`` (or ``dlnd_in_NNN`` for multiple insta Data River (DROF) ~~~~~~~~~~~~~~~~~~~~~~ -DROF is discussed in detail in :ref:`data river overview `. +DROF is discussed in detail in :ref:`data river overview ` (**link currently broken**). DROF can be user-customized by changing either its namelist input or its stream files. The namelist file for DROF is ``drof_in`` (or ``drof_in_NNN`` for multiple instances) and its values can be changed by editing the ``$CASEROOT`` file ``user_nl_drof`` (or ``user_nl_drof_NNN`` for multiple instances). @@ -298,7 +298,7 @@ CIME calls **$SRCROOT/components/cam/cime_config/buildnml** to generate the CAM' CAM-specific CIME xml variables are set in **$SRCROOT/components/cam/cime_config/config_component.xml** and are used by CAM's **buildnml** script to generate the namelist. -For complete documentation of namelist settings, see `CAM namelist variables `_. +For complete documentation of namelist settings, see `CAM namelist variables `_. To modify CAM namelist settings, add the appropriate keyword/value pair at the end of the **$CASEROOT/user_nl_cam** file. (See the documentation for each file at the top of that file.) @@ -316,7 +316,7 @@ CIME calls **$SRCROOT/components/clm/cime_config/buildnml** to generate the CLM CLM-specific CIME xml variables are set in **$SRCROOT/components/clm/cime_config/config_component.xml** and are used by CLM's **buildnml** script to generate the namelist. -For complete documentation of namelist settings, see `CLM namelist variables `_. +For complete documentation of namelist settings, see `CLM namelist variables `_. To modify CLM namelist settings, add the appropriate keyword/value pair at the end of the **$CASEROOT/user_nl_clm** file. @@ -336,34 +336,32 @@ CICE CIME calls **$SRCROOT/components/cice/cime_config/buildnml** to generate the CICE namelist variables. -For complete documentation of namelist settings, see `CICE namelist variables `_. +For complete documentation of namelist settings, see `CICE namelist variables `_. 
To modify CICE namelist settings, add the appropriate keyword/value pair at the end of the **$CASEROOT/user_nl_cice** file. (See the documentation for each file at the top of that file.) To see the result of your change, call `preview_namelists <../Tools_user/preview_namelists.html>`_ and verify that the changes appear correctly in **CaseDocs/ice_in**. -In addition, `case.setup <../Tools_user/case.setup.html>`_ creates CICE's compile time `block decomposition variables `_ in **env_build.xml** as follows: +In addition, `case.setup <../Tools_user/case.setup.html>`_ creates CICE's compile time `block decomposition variables `_ in **env_build.xml**. POP2 ~~~~ CIME calls **$SRCROOT/components/pop2/cime_config/buildnml** to generate the POP2 namelist variables. -For complete documentation of namelist settings, see `POP2 namelist variables `_. +For complete documentation of namelist settings, see `POP2 namelist variables `_. To modify POP2 namelist settings, add the appropriate keyword/value pair at the end of the **$CASEROOT/user_nl_pop2** file. (See the documentation for each file at the top of that file.) To see the result of your change, call `preview_namelists <../Tools_user/preview_namelists.html>`_ and verify that the changes appear correctly in **CaseDocs/ocn_in**. -In addition, `case.setup <../Tools_user/case.setup.html>`_ generates POP2's compile-time `block decomposition variables `_ in **env_build.xml** as shown here: - CISM ~~~~ -See `CISM namelist variables `_ for a complete description of the CISM runtime namelist variables. This includes variables that appear both in **cism_in** and in **cism.config**. +See `CISM namelist variables `_ for a complete description of the CISM runtime namelist variables. This includes variables that appear both in **cism_in** and in **cism.config**. To modify any of these settings, add the appropriate keyword/value pair at the end of the **user_nl_cism** file. (See the documentation for each file at the top of that file.) Note that there is no distinction between variables that will appear in **cism_in** and those that will appear in **cism.config**: simply add a new variable setting in **user_nl_cism**, and it will be added to the appropriate place in **cism_in** or **cism.config**. To see the result of your change, call `preview_namelists <../Tools_user/preview_namelists.html>`_ and verify that the changes appear correctly in **CaseDocs/cism_in** and **CaseDocs/cism.config**. -Some CISM runtime settings are sets via **env_run.xml**, as documented in `CISM runtime variables `_. +Some CISM runtime settings are set via **env_run.xml**, as documented in `CISM runtime variables `_. diff --git a/doc/source/users_guide/cime-internals.rst b/doc/source/users_guide/cime-internals.rst index 17d8e5633d1..3f31dd7cac6 100644 --- a/doc/source/users_guide/cime-internals.rst +++ b/doc/source/users_guide/cime-internals.rst @@ -48,4 +48,3 @@ The file **$CIMEROOT/config/[cesm,e3sm]/config_files.xml** contains all model-sp user-mods directories: - diff --git a/doc/source/users_guide/grids.rst b/doc/source/users_guide/grids.rst index a5fe82a3e88..55cb2661475 100644 --- a/doc/source/users_guide/grids.rst +++ b/doc/source/users_guide/grids.rst @@ -94,7 +94,7 @@ The steps for adding a new component grid to the model system follow. This proce If you are introducing just one new grid, you can leverage SCRIP grid files that are already in place for the other components. There is no supported functionality for creating the SCRIP format file. -2.
Build the **check_map** utility by following the instructions in **$CCSMROOT/mapping/check_maps/INSTALL**. Also confirm that the `ESMF `_ toolkit is installed on your machine. +2. Build the **check_map** utility by following the instructions in **$CCSMROOT/mapping/check_maps/INSTALL**. Also confirm that the ESMF toolkit is installed on your machine. When you add new user-defined grid files, you also need to generate a set of mapping files so the coupler can send data from a component on one grid to a component on another grid. There is an ESMF tool that tests the mapping file by comparing a mapping of a smooth function to its true value on the destination grid. diff --git a/doc/source/users_guide/porting-cime.rst b/doc/source/users_guide/porting-cime.rst index f017de860f3..1e9236a646b 100644 --- a/doc/source/users_guide/porting-cime.rst +++ b/doc/source/users_guide/porting-cime.rst @@ -106,7 +106,7 @@ In what follows we outline the process for method (2) above: xmllint --noout --schema $CIME/config/xml_schemas/config_machines.xsd $HOME/.cime/config_machines.xml - If you find that you need to introduce compiler settings specific to your machine, create a **$HOME/.cime/*.cmake** file. - The default compiler settings are defined in **$CIME/config/$model/machines/cmake_macros/**. + The default compiler settings are defined in **$CIME/config/$model/machines/cmake_macros/**. - If you have a batch system, you may also need to create a **$HOME/.cime/config_batch.xml** file. Out-of-the-box batch settings are set in **$CIME/config/$model/machines/config_batch.xml**. diff --git a/doc/source/users_guide/running-a-case.rst b/doc/source/users_guide/running-a-case.rst index 0fee6f50ffe..b8366775716 100644 --- a/doc/source/users_guide/running-a-case.rst +++ b/doc/source/users_guide/running-a-case.rst @@ -484,8 +484,7 @@ for a component set using full active model components. If short-term archiving is turned on, the model archives the component restart data sets and pointer files into **$DOUT_S_ROOT/rest/yyyy-mm-dd-sssss**, where yyyy-mm-dd-sssss is the -model date at the time of the restart. (See `below for more details -`_.) +model date at the time of the restart. (See below for more details.) --------------------------------- Backing up to a previous restart diff --git a/doc/source/users_guide/setting-up-a-case.rst b/doc/source/users_guide/setting-up-a-case.rst index aea931bf54a..feff58aaf49 100644 --- a/doc/source/users_guide/setting-up-a-case.rst +++ b/doc/source/users_guide/setting-up-a-case.rst @@ -16,9 +16,9 @@ After creating a case or changing aspects of a case, such as the pe-layout, call This creates the following additional files and directories in ``$CASEROOT``: ============================= =============================================================================================================================== - .case.run A (hidden) file with the commands that will be used to run the model (such as “mpirun”) and any batch directives needed. + .case.run A (hidden) file with the commands that will be used to run the model (such as “mpirun”) and any batch directives needed. The directive values are generated using the contents - of **env_mach_pes.xml**. Running `case.setup --clean <../Tools_user/case.setup.html>`_ will remove this file. + of **env_mach_pes.xml**. Running `case.setup --clean <../Tools_user/case.setup.html>`_ will remove this file. This file should not be edited directly and instead controlled through XML variables in **env_batch.xml**. 
It should also *never* be run directly. diff --git a/doc/source/users_guide/testing.rst b/doc/source/users_guide/testing.rst index 1d89dd9eeb5..ea8c6288749 100644 --- a/doc/source/users_guide/testing.rst +++ b/doc/source/users_guide/testing.rst @@ -4,10 +4,10 @@ Testing ********** -`create_test <../Tools_user/create_test.html>`_ -is the tool we use to test both CIME and CIME-driven models. -It can be used as an easy way to run a single basic test or an entire suite of tests. -`create_test <../Tools_user/create_test.html>`_ runs a test suite in parallel for improved performance. +`create_test <../Tools_user/create_test.html>`_ +is the tool we use to test both CIME and CIME-driven models. +It can be used as an easy way to run a single basic test or an entire suite of tests. +`create_test <../Tools_user/create_test.html>`_ runs a test suite in parallel for improved performance. It is the driver behind the automated nightly testing of cime-driven models. Running create_test is generally resource intensive, so run it in a manner appropriate for your system, @@ -18,7 +18,7 @@ It will create and submit additional jobs to the batch queue (if one is availabl An individual test can be run as:: - $CIMEROOT/scripts/create_test $test_name + $CIMEROOT/scripts/create_test $test_name Multiple tests can be run similarly, by listing all of the test names on the command line:: or by putting the test names into a file, one name per line:: A pre-defined suite of tests can be run using the ``--xml`` options to create_test, which harvest test names from testlist*.xml files. As described in https://github.com/ESCOMP/ctsm/wiki/System-Testing-Guide, -to determine what pre-defined test suites are available and what tests they contain, -you can run query_testlists_. +to determine what pre-defined test suites are available and what tests they contain, +you can run query_testlists_. Test suites are retrieved in create_test via 3 selection attributes:: - --xml-category your_category The test category. + --xml-category your_category The test category. --xml-machine your_machine The machine. --xml-compiler your_compiler The compiler. @@ -46,7 +46,7 @@ Test suites are retrieved in create_test via 3 selection attributes:: The search for test names can be restricted to a single test list using:: - --xml-testlist your_testlist + --xml-testlist your_testlist Omitting this results in searching all testlists listed in:: @@ -69,30 +69,30 @@ MODIFIERS_ These are changes to the default settings for the test. See the following table and test_scheduler.py. GRID The model grid (can be an alias). COMPSET alias of the compset, or long name, if no ``--xml`` arguments are used. -MACHINE This is optional; if this value is not supplied, `create_test <../Tools_user/create_test.html>`_ - will probe the underlying machine. +MACHINE This is optional; if this value is not supplied, `create_test <../Tools_user/create_test.html>`_ + will probe the underlying machine. COMPILER If this value is not supplied, use the default compiler for MACHINE. -GROUP-TESTMODS_ This is optional. This points to a directory with ``user_nl_xxx`` files or a ``shell_commands`` +GROUP-TESTMODS_ This is optional. This points to a directory with ``user_nl_xxx`` files or a ``shell_commands`` that can be used to make namelist and ``XML`` modifications prior to running a test. | ================= ===================================================================================== - + ..
_TESTTYPE: - + ============ ===================================================================================== TESTTYPE Description ============ ===================================================================================== - ERS Exact restart from startup (default 6 days + 5 days) - | Do an 11 day initial test - write a restart at day 6. (file suffix: base) - | Do a 5 day restart test, starting from restart at day 6. (file suffix: rest) + ERS Exact restart from startup (default 6 days + 5 days) + | Do an 11 day initial test - write a restart at day 6. (file suffix: base) + | Do a 5 day restart test, starting from restart at day 6. (file suffix: rest) | Compare component history files '.base' and '.rest' at day 11. | They should be identical. ERS2 Exact restart from startup (default 6 days + 5 days). | Do an 11 day initial test without making restarts. (file suffix: base) - | Do an 11 day restart test stopping at day 6 with a restart, + | Do an 11 day restart test stopping at day 6 with a restart, then resuming from restart at day 6. (file suffix: rest) | Compare component history files ".base" and ".rest" at day 11. @@ -106,14 +106,14 @@ TESTTYPE Description ERRI Exact restart from startup with resubmit, (default 4 days + 3 days). Tests incomplete logs option for st_archive. - ERI hybrid/branch/exact restart test, default (by default STOP_N is 22 days) + ERI hybrid/branch/exact restart test, default (by default STOP_N is 22 days) ref1case Do an initial run for 3 days writing restarts at day 3. ref1case is a clone of the main case. Short term archiving is on. ref2case (Suffix hybrid) Do a hybrid run for default 19 days running with ref1 restarts from day 3, - and writing restarts at day 10. + and writing restarts at day 10. ref2case is a clone of the main case. Short term archiving is on. case @@ -195,7 +195,7 @@ MODIFIERS Description _I Marker to distinguish tests with same name - ignored. _Lo# Run length set by o (STOP_OPTION) and # (STOP_N). - | o = {"y":"nyears", "m":"nmonths", "d":"ndays", + | o = {"y":"nyears", "m":"nmonths", "d":"ndays", | \ "h":"nhours", "s":"nseconds", "n":"nsteps"} _Mx Set MPI library to x. @@ -206,7 +206,7 @@ MODIFIERS Description _R For testing in PTS_MODE or Single Column Model (SCM) mode. For PTS_MODE, compile with mpi-serial. - + _Vx Set driver to x. | @@ -221,10 +221,10 @@ GROUP A subdirectory of testmods_dirs and the parent directory of various `-` Replaces '/' in the path name where the testmods are found. TESTMODS A subdirectory of GROUP containing files which set non-default values of the set-up and run-time variables via namelists or xml_change commands. - See "Adding tests": CESM_. + See "Adding tests": CESM_. Examples include - | GROUP-TESTMODS = cam-outfrq9s points to + | GROUP-TESTMODS = cam-outfrq9s points to | $cesm/components/cam/cime_config/testdefs/testmods_dirs/cam/outfrq9s | while allactive-defaultio points to | $cesm/cime_config/testmods_dirs/allactive/defaultio @@ -271,10 +271,10 @@ Query_testlists =================== .. _query_testlists: -**$CIMEROOT/scripts/query_testlists** gathers descriptions of the tests and testlists available +**$CIMEROOT/scripts/query_testlists** gathers descriptions of the tests and testlists available for CESM, the components, and projects. -The ``--xml-{compiler,machine,category,testlist}`` arguments can be used +The ``--xml-{compiler,machine,category,testlist}`` arguments can be used as in create_test (above) to focus the search. 
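For example, the selection attributes and the test-name grammar described above can be exercised directly. Every value below is a placeholder: the machine/compiler pair must exist at your site, and the last line spells out a single hypothetical test name built from the tables above (TESTTYPE ``ERS`` with run-length modifier ``_Ld5``, a grid alias, a compset alias, and the ``allactive-defaultio`` GROUP-TESTMODS)::

    ./query_testlists --xml-machine your_machine --xml-compiler your_compiler
    ./create_test --xml-category your_category --xml-machine your_machine --xml-compiler your_compiler
    ./create_test ERS_Ld5.f19_g16.B1850.your_machine_your_compiler.allactive-defaultio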
The 'category' descriptor of a test can be used to run a group of associated tests at the same time. The available categories, with the tests they encompass, can be listed by:: @@ -413,7 +413,7 @@ CESM .. _CESM: -Select a compset to test. If you need to test a non-standard compset, +Select a compset to test. If you need to test a non-standard compset, define an alias for it in the most appropriate config_compsets.xml in :: $cesm/components/$component/cime_config @@ -424,7 +424,7 @@ If you want to test non-default namelist or xml variable values for your chosen you might find them in a suitable existing testmods directory (see "branching", this section, for locations). If not, then populate a new testmods directory with the needed files (see "contents", below). Note: do not use '-' in the testmods directory name because it has a special meaning to create_test. -Testlists and testmods live in different paths for cime, drv, and components. +Testlists and testmods live in different paths for cime, drv, and components. The relevant directory branching looks like :: @@ -441,7 +441,7 @@ The relevant directory branching looks like The contents of each testmods directory can include :: - user_nl_$components namelist variable=value pairs + user_nl_$components namelist variable=value pairs shell_commands xmlchange commands user_mods a list of other GROUP-TESTMODS which should be imported but at a lower precedence than the local testmods. @@ -455,21 +455,21 @@ CIME's scripts regression tests =============================== .. _`CIME's scripts regression tests`: -**$CIMEROOT/scripts/tests/scripts_regression_tests.py** is the suite of internal tests we run +**$CIMEROOT/scripts/lib/CIME/tests/scripts_regression_tests.py** is the suite of internal tests we run for the stand-alone CIME testing. With no arguments, it will run the full suite. You can limit testing to a specific test class or even a specific test within a test class. Run full suite:: - ./scripts_regression_tests.py + python scripts/lib/CIME/tests/scripts_regression_tests.py Run a test class:: - ./scripts_regression_tests.py K_TestCimeCase + python scripts/lib/CIME/tests/scripts_regression_tests.py CIME.tests.test_unit_case Run a specific test:: - ./scripts_regression_tests.py K_TestCimeCase.test_cime_case + python scripts/lib/CIME/tests/scripts_regression_tests.py CIME.tests.test_unit_case.TestCaseSubmit.test_check_case If a test fails, the unittest module that drives scripts_regression_tests will note the failure, but won't print the output of the test until testing has completed. When there are failures for a @@ -477,4 +477,27 @@ test, the case directories for that test will not be cleaned up so that the user analysis. The user will be notified of the specific directories that will be left for them to examine. +The test suite can also be run with `pytest` and `pytest-cov`. After the test suite is done running, +a coverage report will be presented.
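Because this is plain `pytest`, its standard selection options work here as well; for example, once the dependencies listed next are installed, ``-k`` narrows the run by a test-name expression (the expression below is only illustrative)::

    pytest -vvv -k "unit_case"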
+ +Install dependencies:: + + python -m pip install pytest pytest-cov + +Run full suite:: + + pytest -vvv + +Run just unit tests:: + + pytest -vvv scripts/lib/CIME/tests/test_unit* + +Run a test class:: + + pytest -vvv scripts/lib/CIME/tests/test_unit_case.py + +Run a specific test:: + + pytest -vvv scripts/lib/CIME/tests/test_unit_case.py::TestCaseSubmit::test_check_case + More description can be found in https://github.com/ESCOMP/ctsm/wiki/System-Testing-Guide diff --git a/doc/source/users_guide/unit_testing.rst b/doc/source/users_guide/unit_testing.rst index 0e001f11a4d..916a19a7c17 100644 --- a/doc/source/users_guide/unit_testing.rst +++ b/doc/source/users_guide/unit_testing.rst @@ -175,7 +175,7 @@ Adding to the xml file ~~~~~~~~~~~~~~~~~~~~~~ After you build pFUnit, tell CIME about your build or builds. -To do this, specify the appropriate path(s) using the ``PFUNIT_PATH`` element in ** *MACH*_*COMPILER*.cmake** file. +To do this, specify the appropriate path(s) using the ``PFUNIT_PATH`` element in ** *MACH*_*COMPILER*.cmake** file. The ``MPILIB`` attribute should be either: diff --git a/doc/source/what_cime/index.rst b/doc/source/what_cime/index.rst index a8c2bce9f76..f87b3bd6704 100644 --- a/doc/source/what_cime/index.rst +++ b/doc/source/what_cime/index.rst @@ -6,56 +6,30 @@ contain the root `toctree` directive. ##################################### - What is CIME? + What is CIME? ##################################### .. toctree:: :maxdepth: 3 :numbered: - - -CIME, pronounced "SEAM", contains the support scripts (configure, -build, run, test), data models, essential utility libraries, a “main” -and other tools that are needed to build a single-executable coupled -Earth System Model. CIME is available in a stand-alone package that -can be compiled and tested without active prognostic components but is -typically included in the source of a climate model. CIME does not -contain: any active components, any intra-component coupling -capability (such as atmosphere physics-dynamics coupling). ********* Overview ********* -CIME is comprised of: - -1. A Case Control System to support configuration, compilation, execution, system testing and unit testing of a earth system model: - - i. Scripts to enable simple generation of model executables and associated input files for different scientific cases, component resolutions and combinations of full, data and stub components with a handful of commands. - ii. Testing utilities to run defined system tests and report results for different configurations of the coupled system. - -2. A default coupled model architecture: - - i. A programmer interface and libraries to implement a hub-and-spoke inter-component coupling architecture. - ii. An implementation of a "hub" that needs 7 components (atm, ocn, lnd, sea-ice, land-ice, river, wave). a.k.a. “the driver”. - iii. The ability to allow active and data components to be mixed in any combination as long as each component implements the coupling programmer interface. +CIME, pronounced "SEAM", primarily consists of a Case Control System that supports the configuration, compilation, execution, system testing and unit testing of an Earth System Model. The two main components of the Case Control System are: -3. Non-active Data and Stub components: +1. Scripts to enable simple generation of model executables and associated input files for different scientific cases, component resolutions and combinations of full, data and stub components with a handful of commands. +2. 
Testing utilities to run defined system tests and report results for different configurations of the coupled system. - i. “Data-only” versions of 6 of the 7 components that can replace active components at build-time. - ii. “Stub” versions of all 7 components for building a complete system. +CIME also contains additional stand-alone tools, including: -4. Source code for external libraries useful in scientific applications in general and climate models in particular. - i. Parallel I/O library. - ii. The Model Coupling Toolkit. - iii. Timing library. +1. Parallel regridding weight generation program +2. Scripts to automate off-line load-balancing. +3. Scripts to conduct ensemble-based statistical consistency tests. +4. Netcdf file comparison program (for bit-for-bit). -5. Additional stand-alone tools: - - i. Parallel regridding weight generation program - ii. Scripts to automate off-line load-balancing. - iii. Scripts to conduct ensemble-based statistical consistency tests. - iv. Netcdf file comparison program (for bit-for-bit). +CIME does **not** contain the source code for any Earth System Model drivers or components. It is typically included alongside the source code of a host model. However, CIME does include pointers to external repositories that contain drivers, data models and other test components. These external components can be easily assembled to facilitate end-to-end system tests of the CIME infrastructure, which are defined in the CIME repository. ************************* Development @@ -64,6 +38,3 @@ Development CIME is developed in an open-source, public repository hosted under the Earth System Model Computational Infrastructure (ESMCI) organization on Github at http://github.com/ESMCI/cime. - - - diff --git a/doc/source/xml_files/atmosphere.rst b/doc/source/xml_files/atmosphere.rst index 6bb2a2282f7..a6b544bc02c 100644 --- a/doc/source/xml_files/atmosphere.rst +++ b/doc/source/xml_files/atmosphere.rst @@ -4,7 +4,7 @@ CIME Atmosphere Data and Stub XML Files ####################################### -Atmosphere component XML files for data, stub, and dead components. +Atmosphere component XML files for data, stub, and dead components. .. toctree:: :maxdepth: 1 @@ -19,7 +19,7 @@ XML specification for archiving datm output files. .. literalinclude:: ../../../src/components/data_comps/datm/cime_config/config_archive.xml -XML variables and component descriptions specific to datm. +XML variables and component descriptions specific to datm. .. literalinclude:: ../../../src/components/data_comps/datm/cime_config/config_component.xml @@ -35,7 +35,7 @@ CIMEROOT/src/components/stub_comps/satm/cime_config The atmosphere stub model, **satm**, does not output any files in the RUNDIR nor does it have any namelist settings. -XML variables and component descriptions specific to satm. +XML variables and component descriptions specific to satm. .. literalinclude:: ../../../src/components/stub_comps/satm/cime_config/config_component.xml @@ -47,11 +47,6 @@ CIMEROOT/src/components/xcpl_comps/xatm/cime_config The atmosphere dead model, **xatm**, does not output any files in the RUNDIR nor does it have any namelist settings. -XML variables and component descriptions specific to xatm. +XML variables and component descriptions specific to xatm. .. 
literalinclude:: ../../../src/components/xcpl_comps/xatm/cime_config/config_component.xml - - - - - diff --git a/doc/source/xml_files/cesm.rst b/doc/source/xml_files/cesm.rst index 223b27f1e2e..0b255f4eb9f 100644 --- a/doc/source/xml_files/cesm.rst +++ b/doc/source/xml_files/cesm.rst @@ -4,7 +4,7 @@ CESM Coupled Model XML Files ############################# -XML files for CESM in CIMEROOT/config/cesm. +XML files for CESM in CIMEROOT/config/cesm. .. toctree:: :maxdepth: 1 @@ -42,7 +42,7 @@ CESM XML settings for supported machines. .. literalinclude:: ../../../config/cesm/machines/config_machines.xml -CESM XML settings for Parallel Input/Output (PIO) library. +CESM XML settings for Parallel Input/Output (PIO) library. .. literalinclude:: ../../../config/cesm/machines/config_pio.xml @@ -66,4 +66,3 @@ CESM XML settings for all-active test configurations. CESM XML settings for optimized processor elements (PEs) layout configurations. .. literalinclude:: ../../../../cime_config/config_pes.xml - diff --git a/doc/source/xml_files/common.rst b/doc/source/xml_files/common.rst index 1f57771df56..4fb201eea5a 100644 --- a/doc/source/xml_files/common.rst +++ b/doc/source/xml_files/common.rst @@ -4,7 +4,7 @@ Common XML Files ################# -Common XML files in CIMEROOT/config. +Common XML files in CIMEROOT/config. .. toctree:: :maxdepth: 1 @@ -22,9 +22,6 @@ Headers for the CASEROOT env_*.xml files created by create_newcase. CIMEROOT/config/config_tests.xml ******************************** -Descriptions and XML settings for the CIME regression tests. +Descriptions and XML settings for the CIME regression tests. .. literalinclude:: ../../../config/config_tests.xml - - - diff --git a/doc/source/xml_files/components.rst b/doc/source/xml_files/components.rst index 7053df0301a..7fdb45dc8bd 100644 --- a/doc/source/xml_files/components.rst +++ b/doc/source/xml_files/components.rst @@ -16,7 +16,4 @@ Component XML files in CIMEROOT/src sub-directories. ocean.rst river.rst seaice.rst - wave.rst - - - + wave.rst diff --git a/doc/source/xml_files/drivers.rst b/doc/source/xml_files/drivers.rst index 4153e903957..a3c9ab8ea89 100644 --- a/doc/source/xml_files/drivers.rst +++ b/doc/source/xml_files/drivers.rst @@ -15,7 +15,7 @@ CIMEROOT/src/drivers/mct/cime_config The Model Coupling Toolkit (MCT) based driver/coupler is treated as a component by CIME with associated XML files -to define behavior. +to define behavior. XML specification for archiving coupler output files. @@ -43,7 +43,7 @@ XML settings for driver/coupler defined component set (compset) PE layouts. ************************************************************** -CIMEROOT/src/drivers/mct/cime_config/namelist_definition_*.xml +CIMEROOT/src/drivers/mct/cime_config/namelist_definition_*.xml ************************************************************** XML namelist definitions for the driver/coupler. @@ -57,6 +57,3 @@ XML namelist definitions for the driver/coupler fields. XML namelist definitions for the driver/coupler model input/output settings. .. literalinclude:: ../../../src/drivers/mct/cime_config/namelist_definition_modelio.xml - - - diff --git a/doc/source/xml_files/e3sm.rst b/doc/source/xml_files/e3sm.rst index a52c1248140..ae3659165e8 100644 --- a/doc/source/xml_files/e3sm.rst +++ b/doc/source/xml_files/e3sm.rst @@ -4,7 +4,7 @@ E3SM Coupled Model XML Files ############################# -XML files for E3SM in CIMEROOT/config/e3sm. +XML files for E3SM in CIMEROOT/config/e3sm. .. 
toctree:: :maxdepth: 1 @@ -59,7 +59,6 @@ E3SM XML settings for supported machines. .. literalinclude:: ../../../config/e3sm/machines/config_machines.xml -E3SM XML settings for Parallel Input/Output (PIO) library. +E3SM XML settings for Parallel Input/Output (PIO) library. .. literalinclude:: ../../../config/e3sm/machines/config_pio.xml - diff --git a/doc/source/xml_files/esp.rst b/doc/source/xml_files/esp.rst index 9688fa0c1b9..1874c535111 100644 --- a/doc/source/xml_files/esp.rst +++ b/doc/source/xml_files/esp.rst @@ -4,7 +4,7 @@ CIME ESP Data and Stub XML Files ################################ -External System Processing **ESP** component XML files for data, stub, and dead components. +External System Processing **ESP** component XML files for data, stub, and dead components. .. toctree:: :maxdepth: 1 @@ -15,7 +15,7 @@ CIMEROOT/src/components/data_comps/desp/cime_config ESP data model, **desp**, XML files and settings. -XML variables and component descriptions specific to desp. +XML variables and component descriptions specific to desp. .. literalinclude:: ../../../src/components/data_comps/desp/cime_config/config_component.xml @@ -31,12 +31,6 @@ CIMEROOT/src/components/stub_comps/sesp/cime_config The ESP stub model, **sesp**, does not output any files in the RUNDIR nor does it have any namelist settings. -XML variables and component descriptions specific to sesp. +XML variables and component descriptions specific to sesp. .. literalinclude:: ../../../src/components/stub_comps/sesp/cime_config/config_component.xml - - - - - - diff --git a/doc/source/xml_files/index.rst b/doc/source/xml_files/index.rst index 923e4b8e1ec..1affd98a2ee 100644 --- a/doc/source/xml_files/index.rst +++ b/doc/source/xml_files/index.rst @@ -11,9 +11,9 @@ CIMEROOT. Modifcations to XML settings is case specific and the tools and modify these settings while ensuring the continued schema integrity of the XML. -For advanced CIME developers, there are XML schema definition files +For advanced CIME developers, there are XML schema definition files in the CIMEROOT/config/xml_schemas directory that can be used with -**xmllint** to verify the XML. +**xmllint** to verify the XML. .. toctree:: :maxdepth: 2 @@ -23,7 +23,3 @@ in the CIMEROOT/config/xml_schemas directory that can be used with common.rst components.rst drivers.rst - - - - diff --git a/doc/source/xml_files/land.rst b/doc/source/xml_files/land.rst index dbe556e5f16..e275f466434 100644 --- a/doc/source/xml_files/land.rst +++ b/doc/source/xml_files/land.rst @@ -4,7 +4,7 @@ CIME Land Data and Stub XML Files ################################# -Land component XML files for data, stub, and dead components. +Land component XML files for data, stub, and dead components. .. toctree:: :maxdepth: 1 @@ -19,7 +19,7 @@ XML specification for archiving dlnd output files. .. literalinclude:: ../../../src/components/data_comps/dlnd/cime_config/config_archive.xml -XML variables and component descriptions specific to dlnd. +XML variables and component descriptions specific to dlnd. .. literalinclude:: ../../../src/components/data_comps/dlnd/cime_config/config_component.xml @@ -35,7 +35,7 @@ CIMEROOT/src/components/stub_comps/slnd/cime_config The land stub model, **slnd**, does not output any files in the RUNDIR nor does it have any namelist settings. -XML variables and component descriptions specific to slnd. +XML variables and component descriptions specific to slnd. .. 
literalinclude:: ../../../src/components/stub_comps/slnd/cime_config/config_component.xml @@ -47,11 +47,6 @@ CIMEROOT/src/components/xcpl_comps/xlnd/cime_config The land dead model, **xlnd**, does not output any files in the RUNDIR nor does it have any namelist settings. -XML variables and component descriptions specific to xlnd. +XML variables and component descriptions specific to xlnd. .. literalinclude:: ../../../src/components/xcpl_comps/xlnd/cime_config/config_component.xml - - - - - diff --git a/doc/source/xml_files/landice.rst b/doc/source/xml_files/landice.rst index a0873c54040..cdb00e69d90 100644 --- a/doc/source/xml_files/landice.rst +++ b/doc/source/xml_files/landice.rst @@ -4,8 +4,8 @@ CIME Land Ice Data and Stub XML Files ##################################### -Land ice component XML files for stub and dead components. -The land ice component does not currently have a data model. +Land ice component XML files for stub and dead components. +The land ice component does not currently have a data model. .. toctree:: :maxdepth: 1 @@ -17,7 +17,7 @@ CIMEROOT/src/components/stub_comps/sglc/cime_config The land ice stub model, **sglc**, does not output any files in the RUNDIR nor does it have any namelist settings. -XML variables and component descriptions specific to sglc. +XML variables and component descriptions specific to sglc. .. literalinclude:: ../../../src/components/stub_comps/sglc/cime_config/config_component.xml @@ -29,11 +29,6 @@ CIMEROOT/src/components/xcpl_comps/xglc/cime_config The land ice dead model, **xglc**, does not output any files in the RUNDIR nor does it have any namelist settings. -XML variables and component descriptions specific to xglc. +XML variables and component descriptions specific to xglc. .. literalinclude:: ../../../src/components/xcpl_comps/xglc/cime_config/config_component.xml - - - - - diff --git a/doc/source/xml_files/ocean.rst b/doc/source/xml_files/ocean.rst index 2c21990cc3b..40f1d69583b 100644 --- a/doc/source/xml_files/ocean.rst +++ b/doc/source/xml_files/ocean.rst @@ -4,7 +4,7 @@ CIME Ocean Data and Stub XML Files ################################## -Ocean component XML files for data, stub, and dead components. +Ocean component XML files for data, stub, and dead components. .. toctree:: :maxdepth: 1 @@ -19,7 +19,7 @@ XML specification for archiving docn output files. .. literalinclude:: ../../../src/components/data_comps/docn/cime_config/config_archive.xml -XML variables and component descriptions specific to docn. +XML variables and component descriptions specific to docn. .. literalinclude:: ../../../src/components/data_comps/docn/cime_config/config_component.xml @@ -35,7 +35,7 @@ CIMEROOT/src/components/stub_comps/socn/cime_config The ocean stub model, **socn**, does not output any files in the RUNDIR nor does it have any namelist settings. -XML variables and component descriptions specific to socn. +XML variables and component descriptions specific to socn. .. literalinclude:: ../../../src/components/stub_comps/socn/cime_config/config_component.xml @@ -47,11 +47,6 @@ CIMEROOT/src/components/xcpl_comps/xocn/cime_config The ocean dead model, **xocn**, does not output any files in the RUNDIR nor does it have any namelist settings. -XML variables and component descriptions specific to xocn. +XML variables and component descriptions specific to xocn. .. 
literalinclude:: ../../../src/components/xcpl_comps/xocn/cime_config/config_component.xml - - - - - diff --git a/doc/source/xml_files/river.rst b/doc/source/xml_files/river.rst index 116b1c42993..84543d59879 100644 --- a/doc/source/xml_files/river.rst +++ b/doc/source/xml_files/river.rst @@ -4,7 +4,7 @@ CIME River Runoff Data and Stub XML Files ######################################### -River runoff component XML files for data, stub, and dead components. +River runoff component XML files for data, stub, and dead components. .. toctree:: :maxdepth: 1 @@ -19,11 +19,11 @@ XML specification for archiving drof output files. .. literalinclude:: ../../../src/components/data_comps/drof/cime_config/config_archive.xml -XML variables and component descriptions specific to drof. +XML variables and component descriptions specific to drof. .. literalinclude:: ../../../src/components/data_comps/drof/cime_config/config_component.xml -XML variables and component descriptions specific to drof. +XML variables and component descriptions specific to drof. .. literalinclude:: ../../../src/components/data_comps/drof/cime_config/config_component.xml @@ -39,7 +39,7 @@ CIMEROOT/src/components/stub_comps/srof/cime_config The river runoff stub model, **srof**, does not output any files in the RUNDIR nor does it have any namelist settings. -XML variables and component descriptions specific to srof. +XML variables and component descriptions specific to srof. .. literalinclude:: ../../../src/components/stub_comps/srof/cime_config/config_component.xml @@ -54,8 +54,3 @@ does it have any namelist settings. XML variables and component descriptions specific to xrof. .. literalinclude:: ../../../src/components/xcpl_comps/xrof/cime_config/config_component.xml - - - - - diff --git a/doc/source/xml_files/seaice.rst b/doc/source/xml_files/seaice.rst index 2492493b1e1..7c9d9e04e98 100644 --- a/doc/source/xml_files/seaice.rst +++ b/doc/source/xml_files/seaice.rst @@ -4,7 +4,7 @@ CIME Sea Ice Data and Stub XML Files #################################### -Sea Ice component XML files for data, stub, and dead components. +Sea Ice component XML files for data, stub, and dead components. .. toctree:: :maxdepth: 1 @@ -19,11 +19,11 @@ XML specification for archiving dice output files. .. literalinclude:: ../../../src/components/data_comps/dice/cime_config/config_archive.xml -XML variables and component descriptions specific to dice. +XML variables and component descriptions specific to dice. .. literalinclude:: ../../../src/components/data_comps/dice/cime_config/config_component.xml -XML variables and component descriptions specific to dice. +XML variables and component descriptions specific to dice. .. literalinclude:: ../../../src/components/data_comps/dice/cime_config/config_component.xml @@ -39,7 +39,7 @@ CIMEROOT/src/components/stub_comps/sice/cime_config The sea ice stub model, **sice**, does not output any files in the RUNDIR nor does it have any namelist settings. -XML variables and component descriptions specific to sice. +XML variables and component descriptions specific to sice. .. literalinclude:: ../../../src/components/stub_comps/sice/cime_config/config_component.xml @@ -51,11 +51,6 @@ CIMEROOT/src/components/xcpl_comps/xice/cime_config The sea ice dead model, **xice**, does not output any files in the RUNDIR nor does it have any namelist settings. -XML variables and component descriptions specific to satm. +XML variables and component descriptions specific to xice. ..
literalinclude:: ../../../src/components/xcpl_comps/xice/cime_config/config_component.xml - - - - - diff --git a/doc/source/xml_files/wave.rst b/doc/source/xml_files/wave.rst index fa6e91f3865..e9d54792c1e 100644 --- a/doc/source/xml_files/wave.rst +++ b/doc/source/xml_files/wave.rst @@ -4,7 +4,7 @@ CIME Wave Data and Stub XML Files ################################# -Wave component XML files for data, stub, and dead components. +Wave component XML files for data, stub, and dead components. .. toctree:: :maxdepth: 1 @@ -19,11 +19,11 @@ XML specification for archiving dwav output files. .. literalinclude:: ../../../src/components/data_comps/dwav/cime_config/config_archive.xml -XML variables and component descriptions specific to dwav. +XML variables and component descriptions specific to dwav. .. literalinclude:: ../../../src/components/data_comps/dwav/cime_config/config_component.xml -XML variables and component descriptions specific to dwav. +XML variables and component descriptions specific to dwav. .. literalinclude:: ../../../src/components/data_comps/dwav/cime_config/config_component.xml @@ -39,7 +39,7 @@ CIMEROOT/src/components/stub_comps/swav/cime_config The wave stub model, **swav**, does not output any files in the RUNDIR nor does it have any namelist settings. -XML variables and component descriptions specific to swav. +XML variables and component descriptions specific to swav. .. literalinclude:: ../../../src/components/stub_comps/swav/cime_config/config_component.xml @@ -51,11 +51,6 @@ CIMEROOT/src/components/xcpl_comps/xwav/cime_config The wave dead model, **xwav**, does not output any files in the RUNDIR nor does it have any namelist settings. -XML variables and component descriptions specific to xwav. +XML variables and component descriptions specific to xwav. .. literalinclude:: ../../../src/components/xcpl_comps/xwav/cime_config/config_component.xml - - - - - diff --git a/doc/tools_autodoc.cfg b/doc/tools_autodoc.cfg index ca1300b63a0..9472086e724 100644 --- a/doc/tools_autodoc.cfg +++ b/doc/tools_autodoc.cfg @@ -6,9 +6,9 @@ exclude_prefix: JENKINS_ [scripts] scripts_dir: ../scripts -exclude_files: +exclude_files: exclude_ext: ~ pyc -exclude_prefix: +exclude_prefix: [templates] templates_dir: ../config/cesm/machines @@ -18,4 +18,4 @@ exclude_prefix: Depends [doc] doc_dir: ./source/Tools_user -index_template: index.rst.template \ No newline at end of file +index_template: index.rst.template diff --git a/doc/tools_autodoc.py b/doc/tools_autodoc.py index b9427a7e990..04a08ba2001 100755 --- a/doc/tools_autodoc.py +++ b/doc/tools_autodoc.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -"""script to auto generate rst documentation for cime/scripts/Tools +"""script to auto generate rst documentation for cime/scripts/Tools user facing utilities. """ @@ -11,8 +11,11 @@ if sys.hexversion < 0x02070000: print(70 * "*") print("ERROR: {0} requires python >= 2.7.x. ".format(sys.argv[0])) - print("It appears that you are running python {0}".format( - ".".join(str(x) for x in sys.version_info[0:3]))) + print( + "It appears that you are running python {0}".format( + ".".join(str(x) for x in sys.version_info[0:3]) + ) + ) print(70 * "*") sys.exit(1) @@ -30,11 +33,12 @@ from configparser import ConfigParser as config_parser # define rst templates -_tool_template = Template(''' +_tool_template = Template( + """ .. 
_$tool_name: #################################################### -$tool_name +$tool_name #################################################### **$tool_name** is a script in CIMEROOT/scripts/Tools. @@ -44,13 +48,15 @@ .. command-output:: ./$tool_name --help :cwd: ../../$tools_dir -''') +""" +) -_script_template = Template(''' +_script_template = Template( + """ .. _$script_name: #################################################### -$script_name +$script_name #################################################### **$script_name** is a script in CIMEROOT/scripts. @@ -60,13 +66,15 @@ .. command-output:: ./$script_name --help :cwd: ../../$scripts_dir -''') +""" +) -_tmpl_template = Template(''' +_tmpl_template = Template( + """ .. _$tmpl_name: #################################################### -$tmpl_name +$tmpl_name #################################################### **$tmpl_name** is a script template in CIMEROOT/config/cesm/machines @@ -79,29 +87,32 @@ .. command-output:: ./$tmpl_name --help :cwd: ./temp_files -''') +""" +) # ------------------------------------------------------------------------------- # User input # ------------------------------------------------------------------------------- -def commandline_options(): - """Process the command line arguments. - """ +def commandline_options(): + """Process the command line arguments.""" parser = argparse.ArgumentParser( - description='Auto generate rst documentation for cime/scripts/Tools.') + description="Auto generate rst documentation for cime/scripts/Tools." + ) - parser.add_argument('--backtrace', action='store_true', - help='show exception backtraces as extra debugging ' - 'output') + parser.add_argument( + "--backtrace", + action="store_true", + help="show exception backtraces as extra debugging " "output", + ) - parser.add_argument('--debug', action='store_true', - help='extra debugging output') + parser.add_argument("--debug", action="store_true", help="extra debugging output") - parser.add_argument('--config', nargs=1, default=['tools_autodoc.cfg'], - help='path to config file') + parser.add_argument( + "--config", nargs=1, default=["tools_autodoc.cfg"], help="path to config file" + ) options = parser.parse_args() return options @@ -111,9 +122,7 @@ def commandline_options(): # read the tools_autodoc.cfg configuration file # ------------------------------------------------------------------------------- def read_config_file(filename): - """Read the configuration file and process - - """ + """Read the configuration file and process""" print("tools_autodoc.py - Reading configuration file : {0}".format(filename)) cfg_file = os.path.abspath(filename) @@ -125,29 +134,30 @@ def read_config_file(filename): return config + # ------------------------------------------------------------------------------- # create the rst files for the Tools configuration settings # ------------------------------------------------------------------------------- def get_tools(config, doc_dir): # get the input tools dir - tools_dir = config.get('tools','tools_dir') + tools_dir = config.get("tools", "tools_dir") tools_dir = os.path.abspath(tools_dir) - + # get list of files to exclude - exclude_files = config.get('tools','exclude_files').split() + exclude_files = config.get("tools", "exclude_files").split() # get list of files to exclude - exclude_ext = config.get('tools','exclude_ext').split() + exclude_ext = config.get("tools", "exclude_ext").split() # get list of files to exclude - exclude_prefix = config.get('tools','exclude_prefix').split() + 
exclude_prefix = config.get("tools", "exclude_prefix").split() # get a list of all files in the tools_dir all_files = next(os.walk(tools_dir))[2] tools_files = list() - # exclude files + # exclude files for f in all_files: f = f.strip() include = True @@ -163,10 +173,10 @@ def get_tools(config, doc_dir): if include: tools_files.append(f) - tools_dir = config.get('tools','tools_dir') + tools_dir = config.get("tools", "tools_dir") for f in tools_files: - tool_file = os.path.join(doc_dir, '{0}.rst'.format(f)) - with open(tool_file,'w') as tf: + tool_file = os.path.join(doc_dir, "{0}.rst".format(f)) + with open(tool_file, "w") as tf: contents = _tool_template.substitute(tool_name=f, tools_dir=tools_dir) tf.write(contents) @@ -179,23 +189,23 @@ def get_tools(config, doc_dir): def get_scripts(config, doc_dir): # get the input scripts dir - scripts_dir = config.get('scripts','scripts_dir') + scripts_dir = config.get("scripts", "scripts_dir") scripts_dir = os.path.abspath(scripts_dir) # get list of files to exclude - exclude_files = config.get('scripts','exclude_files').split() + exclude_files = config.get("scripts", "exclude_files").split() # get list of files to exclude - exclude_ext = config.get('scripts','exclude_ext').split() + exclude_ext = config.get("scripts", "exclude_ext").split() # get list of files to exclude - exclude_prefix = config.get('scripts','exclude_prefix').split() + exclude_prefix = config.get("scripts", "exclude_prefix").split() # get a list of all files in the scripts_dir all_files = next(os.walk(scripts_dir))[2] scripts_files = list() - # exclude files + # exclude files for f in all_files: f = f.strip() include = True @@ -211,39 +221,42 @@ def get_scripts(config, doc_dir): if include: scripts_files.append(f) - scripts_dir = config.get('scripts','scripts_dir') + scripts_dir = config.get("scripts", "scripts_dir") for f in scripts_files: - script_file = os.path.join(doc_dir, '{0}.rst'.format(f)) - with open(script_file,'w') as tf: - contents = _script_template.substitute(script_name=f, scripts_dir=scripts_dir) + script_file = os.path.join(doc_dir, "{0}.rst".format(f)) + with open(script_file, "w") as tf: + contents = _script_template.substitute( + script_name=f, scripts_dir=scripts_dir + ) tf.write(contents) return scripts_files + # ------------------------------------------------------------------------------- # get the template files and substitute the {{...}} patterns so they can be # run with the --help command # ------------------------------------------------------------------------------- def get_templates(config, doc_dir): - # get the input template dir - templates_dir = config.get('templates','templates_dir') + # get the input template dir + templates_dir = config.get("templates", "templates_dir") templates_dir = os.path.abspath(templates_dir) # get list of files to exclude - exclude_files = config.get('templates','exclude_files').split() + exclude_files = config.get("templates", "exclude_files").split() # get list of files to exclude - exclude_ext = config.get('templates','exclude_ext').split() + exclude_ext = config.get("templates", "exclude_ext").split() # get list of files to exclude - exclude_prefix = config.get('templates','exclude_prefix').split() + exclude_prefix = config.get("templates", "exclude_prefix").split() # get a list of all files in the templates_dir all_files = next(os.walk(templates_dir))[2] template_files = list() - # exclude files + # exclude files for f in all_files: f = f.strip() include = True @@ -261,49 +274,57 @@ def get_templates(config, 
doc_dir): # create temporary files with the {{..}} stripped out temp_files = list() - temp_dir = '{0}/temp_files'.format(doc_dir) + temp_dir = "{0}/temp_files".format(doc_dir) if not os.path.exists(temp_dir): os.makedirs(temp_dir) for fname in template_files: - with open(os.path.join(templates_dir,fname),'r') as f: + with open(os.path.join(templates_dir, fname), "r") as f: content = f.read() content = content.replace("{{ batchdirectives }}", "# {{ batchdirective }}", 1) - content = content.replace("os.chdir( '{{ caseroot }}')", "# os.chdir( '{{ caseroot }}')", 1) - content = content.replace('os.path.join("{{ cimeroot }}", "scripts", "Tools")', 'os.path.join("../../../..","scripts", "Tools")',1) + content = content.replace( + "os.chdir( '{{ caseroot }}')", "# os.chdir( '{{ caseroot }}')", 1 + ) + content = content.replace( + 'os.path.join("{{ cimeroot }}", "scripts", "Tools")', + 'os.path.join("../../../..","scripts", "Tools")', + 1, + ) # create a temporary file - tf = fname.split('.') - tfname = '.'.join(tf[1:]) - if tfname == 'st_archive': - tfname = 'case.st_archive' + tf = fname.split(".") + tfname = ".".join(tf[1:]) + if tfname == "st_archive": + tfname = "case.st_archive" tfile = os.path.join(temp_dir, tfname) - with open(tfile, 'w') as tf: + with open(tfile, "w") as tf: tf.write(content) - + temp_files.append(tfname) for f in temp_files: - tmpl_file = os.path.join(doc_dir, '{0}.rst'.format(f)) - with open(tmpl_file,'w') as tf: + tmpl_file = os.path.join(doc_dir, "{0}.rst".format(f)) + with open(tmpl_file, "w") as tf: contents = _tmpl_template.substitute(tmpl_name=f, temp_dir=temp_dir) tf.write(contents) tf.close() - exefile = os.path.join(doc_dir, 'temp_files/{0}'.format(f)) + exefile = os.path.join(doc_dir, "temp_files/{0}".format(f)) st = os.stat(exefile) os.chmod(exefile, st.st_mode | stat.S_IEXEC) return temp_files + # ------------------------------------------------------------------------------- # main # ------------------------------------------------------------------------------- + def main(options): all_files = list() config = read_config_file(options.config[0]) # get the output doc dir - doc_dir = config.get('doc','doc_dir') + doc_dir = config.get("doc", "doc_dir") doc_dir = os.path.abspath(doc_dir) # gather the files from different locations in the CIMEROOT @@ -315,21 +336,21 @@ def main(options): all_files.sort() # copy the index.rst.template to index.rst - doc_dir = config.get('doc','doc_dir') + doc_dir = config.get("doc", "doc_dir") doc_dir = os.path.abspath(doc_dir) - index_template = config.get('doc','index_template') - index_rst_file = index_template.split('.')[0:-1] - index_template = os.path.join(doc_dir,index_template) - index_rst_file = '.'.join(index_rst_file) - index_rst_file = os.path.join(doc_dir,index_rst_file) + index_template = config.get("doc", "index_template") + index_rst_file = index_template.split(".")[0:-1] + index_template = os.path.join(doc_dir, index_template) + index_rst_file = ".".join(index_rst_file) + index_rst_file = os.path.join(doc_dir, index_rst_file) shutil.copy2(index_template, index_rst_file) # open index_rst_file in append mode - with open(index_rst_file,'a') as index_rst: + with open(index_rst_file, "a") as index_rst: for f in all_files: - index_rst.write(' {0}\n'.format(f)) + index_rst.write(" {0}\n".format(f)) return 0 diff --git a/scripts/climate_reproducibility/README.md b/scripts/climate_reproducibility/README.md index 5c5cff0a697..c45862e8b8f 100644 --- a/scripts/climate_reproducibility/README.md +++ 
b/scripts/climate_reproducibility/README.md @@ -1,6 +1,6 @@ # Climate reproducibility testing -Requiring model changes to pass stringent tests before being accepted as part of E3SM’s main development +Requiring model changes to pass stringent tests before being accepted as part of E3SM’s main development branch is critical for quickly and efficiently producing a trustworthy model. Depending on their impacts on model output, code modifications can be classified into three types: @@ -9,50 +9,50 @@ impacts on model output, code modifications can be classified into three types: averaged over a sufficiently long time 3. Changes that lead to a different model climate -Only (3) impacts model climate, and changes of this type should only be implemented within the code +Only (3) impacts model climate, and changes of this type should only be implemented within the code after an in-depth demonstration of improvement. However, distinguishing between (2) and (3) requires -a comprehensive analysis of both a baseline climate and the currently produced climate. +a comprehensive analysis of both a baseline climate and the currently produced climate. Through the CMDV Software project, we've provided a set of climate reproducibility tests to determine whether or not non-bit-for-bit (nb4b) model changes are climate changing. The current tests provided are: - * **MVK** -- This tests the null hypothesis that the baseline (n) and modified (m) model Short Independent - Simulation Ensembles (SISE) represent the same climate state, based on the equality of distribution - of each variable's annual global average in the standard monthly model output between the two - simulations. The (per variable) null hypothesis uses the non-parametric, two-sample (n and m) + * **MVK** -- This tests the null hypothesis that the baseline (n) and modified (m) model Short Independent + Simulation Ensembles (SISE) represent the same climate state, based on the equality of distribution + of each variable's annual global average in the standard monthly model output between the two + simulations. The (per variable) null hypothesis uses the non-parametric, two-sample (n and m) Kolmogorov-Smirnov test as the univariate test of equality of distribution of global means. - + * **PGN** -- This tests the null hypothesis that the reference (n) and modified (m) model ensembles represent the same atmospheric state after each physics parameterization is applied within a single time-step using the two-sample (n and m) T-test for equal averages at a 95% confidence level. Ensembles are generated by repeating the simulation for many initial conditions, with each initial condition subject to multiple perturbations. - + * **TSC** -- This tests the null hypothesis that the convergence of the time stepping error for a set of key atmospheric variables is the same for a reference ensemble and a test ensemble. Both the reference and test ensemble are generated with a two-second time step, and for each variable the RMSD between each ensemble and - a truth ensemble, generated with a one-second time step, is calculated. At each + a truth ensemble, generated with a one-second time step, is calculated. At each 10 second interval during the 10 minute long simulations, the difference in the reference and test RMSDs for each variable, each ensemble member, and each domain are calculated and these ΔRMSDs should be zero for identical climates.
A one sided (due to self convergence) Student's T Test is used to test the null hypothesis that the ensemble mean ΔRMSD is statistically zero. - - - + + + ## Running the tests -These tests are built into E3SM-CIME as system tests and will be launched using the `create_test` scripts. +These tests are built into E3SM-CIME as system tests and will be launched using the `create_test` scripts. *However*, because these tests use high level statistics, they have additional python dependencies which need to be installed on your system and accessible via the compute nodes (if you're on a batch machine). -Primarily, the statistical analysis of the climates is done through [EVV](https://github.com/LIVVkit/evv4esm) -which will generate a portable test website to describe the results (pass or fail) in detail (see the extended output -section below). +Primarily, the statistical analysis of the climates is done through [EVV](https://github.com/LIVVkit/evv4esm) +which will generate a portable test website to describe the results (pass or fail) in detail (see the extended output +section below). -For E3SM supported machines, the `e3sm_simple` conda environment is provided for these tests and includes the `EVV` -conda package. You can activate the `e3sm_simple` environment in the same way as `e3sm_unified` environment: +For E3SM supported machines, the `e3sm_simple` conda environment is provided for these tests and includes the `EVV` +conda package. You can activate the `e3sm_simple` environment in the same way as the `e3sm_unified` environment: ``` source /load_latest_e3sm_simple.sh @@ -63,33 +63,33 @@ where `` is the machine-specific location of the activation script https://acme-climate.atlassian.net/wiki/spaces/EIDMG/pages/780271950/Diagnostics+and+Analysis+Quickstart#DiagnosticsandAnalysisQuickstart-Accessingmetapackagesoftwarebyactivatingacondaenvironment If you don't have access to confluence or are unable to activate this environment for whatever reason, you can install -your own `e3sm_simple` conda environment with this command (once you have anaconda/miniconda installed): +your own `e3sm_simple` conda environment with this command (once you have anaconda/miniconda installed): ``` conda create -n e3sm-simple -c conda-forge -c e3sm e3sm-simple ``` -*NOTE: If you run into problems with getting this environment working on your machine, please open an issue on E3SM's +*NOTE: If you run into problems with getting this environment working on your machine, please open an issue on E3SM's Github and tag @jhkennedy, or send Joseph H. Kennedy an email.* -After you've activated the `e3sm_simple` environment, change to the `$E3SM/cime/scripts` directory (where `$E3SM` is the -directory containing E3SM). Then to run one of the tests, you will use the `create_test` script like normal. -To run the `MVK` test and generate a baseline, you would run a command like: +After you've activated the `e3sm_simple` environment, change to the `$E3SM/cime/scripts` directory (where `$E3SM` is the +directory containing E3SM). Then to run one of the tests, you will use the `create_test` script like normal.
+To run the `MVK` test and generate a baseline, you would run a command like: ``` -./create_test MVK_PL.ne4_oQU240.FC5AV1C-L -g --baseline-root "/PATH/TO/BASELINE" +./create_test MVK_PL.ne4_oQU240.FC5AV1C-L -g --baseline-root "/PATH/TO/BASELINE" ``` -And to compare to the baseline, you would run a command like: +And to compare to the baseline, you would run a command like: ``` -./create_test MVK_PL.ne4_oQU240.FC5AV1C-L -c --baseline-root "/PATH/TO/BASELINE" +./create_test MVK_PL.ne4_oQU240.FC5AV1C-L -c --baseline-root "/PATH/TO/BASELINE" ``` -*NOTE: The MVK run a 20 member ensemble for at least 13 months (using the last 12 for the -statistical tests) and, depending on the machine, may take some fiddling to execute within a particular -queue's wallclock time limit. You may want to over-ride the requested walltime using `--walltime HH:MM:SS` -option to `create_test`.* +*NOTE: The MVK test runs a 20 member ensemble for at least 13 months (using the last 12 for the +statistical tests) and, depending on the machine, may take some fiddling to execute within a particular +queue's wallclock time limit. You may want to override the requested walltime using the `--walltime HH:MM:SS` +option to `create_test`.* The full set of commands used to run the MVK test on Cori is: @@ -113,7 +113,7 @@ source /global/project/projectdirs/acme/software/anaconda_envs/load_latest_e3sm_ ## Test pass/fail and extended output -When you launch these tests and compare to a baseline, CIME will output the location of the case directory, which will look +When you launch these tests and compare to a baseline, CIME will output the location of the case directory, which will look something like this: ``` @@ -122,8 +122,8 @@ something like this: Creating test directory /global/cscratch1/sd/${USER}/acme_scratch/cori-knl/MVK_PL.ne4_ne4.FC5AV1C-L.cori-knl_intel.C.YYYYMMDD_HHMMSS_RANDOMID ``` -Let's call that directory `$CASE_DIR`. Once all the jobs are finished, navigate to that directory and -you can `cat TestStatus` to determine if the test passed or failed by looking at the `BASELINE` status: +Let's call that directory `$CASE_DIR`. Once all the jobs are finished, navigate to that directory and +you can `cat TestStatus` to determine if the test passed or failed by looking at the `BASELINE` status: ``` cd $CASE_DIR @@ -137,28 +137,28 @@ cat TestStatus To get some basic summary statistics about the test that was run, look in the `TestStatus.log` file: ``` -2019-08-14 22:09:02: BASELINE PASS for test 'YYYYMMDD_HHMMSS_RANDOMID'.
+ Case: YYYYMMDD_HHMMSS_RANDOMID; Test status: pass; Variables analyzed: 118; Rejecting: 0; Critical value: 13; Ensembles: statistically identical + EVV results can be viewed at: /global/cscratch1/sd/${USER}/acme_scratch/cori-knl/MVK_PL.ne4_ne4.FC5AV1C-L.cori-knl_intel.C.YYYYMMDD_HHMMSS_RANDOMID/run/MVK_PL.ne4_ne4.FC5AV1C-L.cori-knl_intel.C.YYYYMMDD_HHMMSS_RANDOMID.evv/ + EVV viewing instructions can be found at: https://github.com/E3SM-Project/E3SM/blob/master/cime/scripts/climate_reproducibility/README.md#test-passfail-and-extended-output ``` -EVV reports the location of the output website where you can see the details of the analysis. For -the MVK test, you will be able to view per variable Q-Q plots, P-P plots, the K-S test statistic, and -whether it rejects or accepts the null hypothesis, as well as a description of the test itself -- you +EVV reports the location of the output website where you can see the details of the analysis. For +the MVK test, you will be able to view per variable Q-Q plots, P-P plots, the K-S test statistic, and +whether it rejects or accepts the null hypothesis, as well as a description of the test itself -- you can see an example of the output website [here](http://livvkit.github.io/evv4esm/). -To view the website, you can either tunnel the website to your local machine through ssh, or copy -the website directory to your machine and view it using EVV. +To view the website, you can either tunnel the website to your local machine through ssh, or copy +the website directory to your machine and view it using EVV. ### View via ssh For this example, we'll assume the tests were run on Cori at NERSC, but these instructions should be -easily adaptable to any E3SM supported machine. First, log into Cori via ssh and connect your local -8080 port to the 8080 port on Cori: +easily adaptable to any E3SM supported machine. First, log into Cori via ssh and connect your local +8080 port to the 8080 port on Cori: ``` -ssh -L 8080:localhost:8080 [USER]@cori.nersc.gov +ssh -L 8080:localhost:8080 [USER]@cori.nersc.gov ``` Activate the `e3sm_simple` environment: @@ -184,21 +184,21 @@ Evv will then report to you the URL where you can view the website: ``` -------------------------------------------------------------------- - ______ __ __ __ __ - | ____| \ \ / / \ \ / / - | |__ \ \ / / \ \ / / - | __| \ \/ / \ \/ / - | |____ \ / \ / - |______| \/ \/ - - Extended Verification and Validation for Earth System Models + ______ __ __ __ __ + | ____| \ \ / / \ \ / / + | |__ \ \ / / \ \ / / + | __| \ \/ / \ \/ / + | |____ \ / \ / + |______| \/ \/ + + Extended Verification and Validation for Earth System Models -------------------------------------------------------------------- Current run: 2019-08-27 14:16:49 User: kennedyj OS Type: Linux 4.12.14-150.27-default Machine: cori07 - + Serving HTTP on 0.0.0.0 port 8080 (http://0.0.0.0:8080/) @@ -209,7 +209,7 @@ View the generated website by navigating to: Exit by pressing `ctrl+c` to send a keyboard interrupt. ``` -You can now either click that link or copy-paste that link into your favorite web +You can now either click that link or copy-paste that link into your favorite web browser to view the output website. ### View a local copy @@ -220,48 +220,48 @@ easily adaptable to any E3SM supported machine. 
Install `e3sm_simple` locally and activate it: ``` conda create -n e3sm_simple -c conda-forge -c e3sm e3sm-simple conda activate e3sm_simple -``` +``` -Then, copy the website to your local machine, and view it: +Then, copy the website to your local machine, and view it: ``` # on your local machine -scp -r /global/cscratch1/sd/${USER}/acme_scratch/cori-knl/MVK_PL.ne4_ne4.FC5AV1C-L.cori-knl_intel.C.YYYYMMDD_HHMMSS_RANDOMID/run/MVK_PL.ne4_ne4.FC5AV1C-L.cori-knl_intel.C.YYYYMMDD_HHMMSS_RANDOMID.evv . +scp -r /global/cscratch1/sd/${USER}/acme_scratch/cori-knl/MVK_PL.ne4_ne4.FC5AV1C-L.cori-knl_intel.C.YYYYMMDD_HHMMSS_RANDOMID/run/MVK_PL.ne4_ne4.FC5AV1C-L.cori-knl_intel.C.YYYYMMDD_HHMMSS_RANDOMID.evv . evv -o MVK_PL.ne4_ne4.FC5AV1C-L.cori-knl_intel.C.YYYYMMDD_HHMMSS_RANDOMID.evv -s -------------------------------------------------------------------- - ______ __ __ __ __ - | ____| \ \ / / \ \ / / - | |__ \ \ / / \ \ / / - | __| \ \/ / \ \/ / - | |____ \ / \ / - |______| \/ \/ - - Extended Verification and Validation for Earth System Models + ______ __ __ __ __ + | ____| \ \ / / \ \ / / + | |__ \ \ / / \ \ / / + | __| \ \/ / \ \/ / + | |____ \ / \ / + |______| \/ \/ + + Extended Verification and Validation for Earth System Models -------------------------------------------------------------------- - + Current run: 2018-08-06 15:15:03 User: ${USER} OS Type: Linux 4.15.0-29-generic Machine: pc0101123 - - + + Serving HTTP on 0.0.0.0 port 8000 (http://0.0.0.0:8000/) - + View the generated website by navigating to: - + http://0.0.0.0:8000/MVK_PL.ne4_ne4.FC5AV1C-L.cori-knl_intel.C.YYYYMMDD_HHMMSS_RANDOMID.evv/index.html - + Exit by pressing `ctrl+c` to send a keyboard interrupt. - + ``` -You can now either click that link or copy-paste that link into your favorite web +You can now either click that link or copy-paste that link into your favorite web browser to view the output website. -**Please note:** the output website uses some JavaScript to render elements of the page (especially figures), -and opening up the `index.html` file using the `file://` protocol in a web browser will likely not work -well (most browser have stopped allowing access to "local resources" like JavaScript through the `file://` -protocol). You can view the website by either copying it to a hosted location (`~/WWW` which is hosted at -`http://users.nccs.gov/~user` on Titan, for example) or copying it to your local machine and running a -local http server (included in python!) and viewing it through an address like `http://0.0.0.0:8000/index.html`. \ No newline at end of file +**Please note:** the output website uses some JavaScript to render elements of the page (especially figures), +and opening up the `index.html` file using the `file://` protocol in a web browser will likely not work +well (most browsers have stopped allowing access to "local resources" like JavaScript through the `file://` +protocol). You can view the website by either copying it to a hosted location (`~/WWW` which is hosted at +`http://users.nccs.gov/~user` on Titan, for example) or copying it to your local machine and running a
+local http server (included in python!) and viewing it through an address like `http://0.0.0.0:8000/index.html`.
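Putting the pieces above together, a typical end-to-end session is sketched below; the activation path, baseline root, and walltime are placeholders that must be adapted to your machine:

```
# activate the provided analysis environment (machine-specific path)
source /path/to/load_latest_e3sm_simple.sh

# run create_test from the CIME scripts directory
cd $E3SM/cime/scripts

# generate a baseline, then compare a modified branch against it
./create_test MVK_PL.ne4_oQU240.FC5AV1C-L -g --baseline-root "/PATH/TO/BASELINE" --walltime 03:00:00
./create_test MVK_PL.ne4_oQU240.FC5AV1C-L -c --baseline-root "/PATH/TO/BASELINE" --walltime 03:00:00

# once the jobs finish, check the BASELINE status in the case directory
cd $CASE_DIR && cat TestStatus
```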
diff --git a/scripts/fortran_unit_testing/Examples/circle_area/tests/pFUnit/CMakeLists.txt b/scripts/fortran_unit_testing/Examples/circle_area/tests/pFUnit/CMakeLists.txt index 5e018dad2eb..00fe6689977 100644 --- a/scripts/fortran_unit_testing/Examples/circle_area/tests/pFUnit/CMakeLists.txt +++ b/scripts/fortran_unit_testing/Examples/circle_area/tests/pFUnit/CMakeLists.txt @@ -2,4 +2,3 @@ set(sources_needed circle.F90) extract_sources("${sources_needed}" "${circle_area_sources}" test_sources) create_pFUnit_test(pFunit_circle_area pFunittest_circle_area_exe "test_circle.pf" ${test_sources}) - diff --git a/scripts/fortran_unit_testing/python/printer.py b/scripts/fortran_unit_testing/python/printer.py index dcd5551f5c4..7aa74b3a648 100644 --- a/scripts/fortran_unit_testing/python/printer.py +++ b/scripts/fortran_unit_testing/python/printer.py @@ -54,7 +54,7 @@ def print(self, item, end="\n"): item - Object to be printed. end - String appended to the end. """ - self._output.write(str(item)+end) + self._output.write(str(item) + end) def comment(self, string): """Print the input str as a comment. @@ -72,7 +72,7 @@ def draw_rule(self, char="=", length=50): char - Character that the line is composed of. length - Horizontal line length. """ - self.comment(char*length) + self.comment(char * length) def print_header(self, string): """Write a string into a header, denoting a new output section.""" @@ -87,14 +87,15 @@ def print_error(self, error_message): if self._color: # ANSI sequence turns the text bright red. esc_char = chr(curses.ascii.ESC) - to_red_text = esc_char+"[1;31m" - to_default_text = esc_char+"[0m" + to_red_text = esc_char + "[1;31m" + to_default_text = esc_char + "[0m" else: to_red_text = "" to_default_text = "" - self._error.write(to_red_text+"ERROR: "+ - error_message+to_default_text+"\n") + self._error.write( + to_red_text + "ERROR: " + error_message + to_default_text + "\n" + ) class ScriptPrinter(Printer): @@ -128,7 +129,7 @@ def __init__(self, output=sys.stdout, error=sys.stderr, indent_size=2): def comment(self, string): """Write a comment (prepends "#").""" - self.print("# "+string) + self.print("# " + string) def print_header(self, string): """Write a header in a comment. @@ -147,5 +148,5 @@ def print(self, item, end="\n"): item - Object to be printed. end - String appended to the end. 
""" - new_item = (" "*self.indent_size*self.indent_level)+str(item) + new_item = (" " * self.indent_size * self.indent_level) + str(item) super(ScriptPrinter, self).print(new_item, end) diff --git a/scripts/fortran_unit_testing/python/test_xml_test_list.py b/scripts/fortran_unit_testing/python/test_xml_test_list.py index 0d3f626822e..df80572ad72 100755 --- a/scripts/fortran_unit_testing/python/test_xml_test_list.py +++ b/scripts/fortran_unit_testing/python/test_xml_test_list.py @@ -14,6 +14,7 @@ __all__ = ("TestSuitesFromXML", "TestSuitesFromXML") + class TestTestSuiteSpec(unittest.TestCase): """Tests for the TestSuiteSpec class.""" @@ -27,8 +28,7 @@ def test_absolute_path(self): def test_relative_path(self): """TestSuiteSpec works as intended on relative paths.""" spec = TestSuiteSpec("name", [None, None], ["path", "./path"]) - self.assertEqual([abspath("path"), abspath("./path")], - spec.directories) + self.assertEqual([abspath("path"), abspath("./path")], spec.directories) def test_no_path(self): """TestSuiteSpec works with no paths.""" @@ -50,15 +50,16 @@ def test_no_label(self): def test_iterate(self): """TestSuiteSpec provides an iterator over directories.""" spec = TestSuiteSpec("name", ["foo", "bar"], ["/foo", "/bar"]) - self.assertEqual([("foo", "/foo"), ("bar", "/bar")], - list(d for d in spec)) + self.assertEqual([("foo", "/foo"), ("bar", "/bar")], list(d for d in spec)) + class TestSuitesFromXML(unittest.TestCase): """Tests for the suites_from_xml function.""" - def check_spec_list(self, xml_str, names, directories, - known_paths=None, labels=None): + def check_spec_list( + self, xml_str, names, directories, known_paths=None, labels=None + ): """Check that a spec list matches input names and directories. This is used by the following tests to do the dirty work of making @@ -69,25 +70,31 @@ def check_spec_list(self, xml_str, names, directories, xml_tree = ElementTree(XML(xml_str)) spec_list = list(suites_from_xml(xml_tree, known_paths)) - self.assertEqual(len(names), len(directories), - msg="Internal test suite error: name and "+ - "directories lists are different sizes!") + self.assertEqual( + len(names), + len(directories), + msg="Internal test suite error: name and " + + "directories lists are different sizes!", + ) - self.assertEqual(len(spec_list), len(names), - msg="Wrong number of suite specs returned.") + self.assertEqual( + len(spec_list), len(names), msg="Wrong number of suite specs returned." + ) - self.assertEqual(names, - [spec.name for spec in spec_list], - msg="Wrong suite name(s).") + self.assertEqual( + names, [spec.name for spec in spec_list], msg="Wrong suite name(s)." + ) - self.assertEqual(directories, - [spec.directories for spec in spec_list], - msg="Wrong suite path(s).") + self.assertEqual( + directories, + [spec.directories for spec in spec_list], + msg="Wrong suite path(s).", + ) if labels is not None: - self.assertEqual(labels, - [spec.labels for spec in spec_list], - msg="Wrong suite label(s).") + self.assertEqual( + labels, [spec.labels for spec in spec_list], msg="Wrong suite label(s)." 
+ ) def test_no_suites(self): """suites_from_xml output returns empty list for no matches.""" @@ -123,9 +130,9 @@ def test_multiple_suites(self): """ - self.check_spec_list(xml_str, - ["suite1", "suite2"], - [["/the/path"], ["/other/path"]]) + self.check_spec_list( + xml_str, ["suite1", "suite2"], [["/the/path"], ["/other/path"]] + ) def test_path_relative_to_known(self): """suites_from_xml handles a relative_to directory attribute.""" @@ -137,10 +144,9 @@ def test_path_relative_to_known(self): """ - self.check_spec_list(xml_str, - ["suite1"], - [["/foodir/path"]], - known_paths={"foo": "/foodir"}) + self.check_spec_list( + xml_str, ["suite1"], [["/foodir/path"]], known_paths={"foo": "/foodir"} + ) def test_path_with_whitespace(self): """suites_from_xml handles a directory with whitespace added.""" @@ -166,8 +172,7 @@ def test_path_with_label(self): """ - self.check_spec_list(xml_str, ["suite1"], [["/foo"]], - labels=[["foo"]]) + self.check_spec_list(xml_str, ["suite1"], [["/foo"]], labels=[["foo"]]) if __name__ == "__main__": diff --git a/scripts/fortran_unit_testing/python/xml_test_list.py b/scripts/fortran_unit_testing/python/xml_test_list.py index ec0cf68796e..af95bb52ca9 100644 --- a/scripts/fortran_unit_testing/python/xml_test_list.py +++ b/scripts/fortran_unit_testing/python/xml_test_list.py @@ -10,6 +10,7 @@ __all__ = ("TestSuiteSpec", "suites_from_xml") + class TestSuiteSpec(object): """Specification for the location of a test suite. @@ -35,9 +36,10 @@ def __init__(self, name, labels, directories): directories - Path to the test suite. """ - assert (len(labels) == len(directories)), \ - "TestSuiteSpec: Number of spec labels and number of spec "+ \ - "directories do not match." + assert len(labels) == len(directories), ( + "TestSuiteSpec: Number of spec labels and number of spec " + + "directories do not match." + ) self.name = name self.labels = [] @@ -48,15 +50,15 @@ def __init__(self, name, labels, directories): else: self.labels.append(self.UNLABELED_STRING) - self.directories = [os.path.abspath(directory) - for directory in directories] + self.directories = [os.path.abspath(directory) for directory in directories] def __iter__(self): """Iterate over directories. Each iteration yields a (label, directory) pair. """ - return ( (l, d) for l, d in zip(self.labels, self.directories) ) + return ((l, d) for l, d in zip(self.labels, self.directories)) + def suites_from_xml(xml_tree, known_paths=None): """Generate test suite descriptions from XML. @@ -89,10 +91,10 @@ def suites_from_xml(xml_tree, known_paths=None): path = directory.text.strip() if "relative_to" in directory.keys(): relative_to_key = directory.get("relative_to") - assert relative_to_key in known_paths, \ - "suites_from_xml: Unrecognized relative_to attribute." - path = os.path.join(known_paths[relative_to_key], - path) + assert ( + relative_to_key in known_paths + ), "suites_from_xml: Unrecognized relative_to attribute." 
+ path = os.path.join(known_paths[relative_to_key], path) directories.append(path) if "label" in directory.keys(): labels.append(directory.get("label")) diff --git a/scripts/fortran_unit_testing/run_tests.py b/scripts/fortran_unit_testing/run_tests.py index 3011601591c..50628bd4e04 100755 --- a/scripts/fortran_unit_testing/run_tests.py +++ b/scripts/fortran_unit_testing/run_tests.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 from __future__ import print_function import os, sys + _CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../..") sys.path.append(os.path.join(_CIMEROOT, "scripts", "Tools")) sys.path.append(os.path.join(_CIMEROOT, "scripts", "utils", "python")) @@ -14,122 +15,151 @@ from CIME.XML.env_mach_specific import EnvMachSpecific from xml_test_list import TestSuiteSpec, suites_from_xml import socket -#================================================= + +# ================================================= # Standard library modules. -#================================================= +# ================================================= from printer import Printer from shutil import rmtree + # This violates CIME policy - move to CIME/XML directory from xml.etree.ElementTree import ElementTree logger = logging.getLogger(__name__) + def parse_command_line(args): """Command line argument parser for configure.""" - description="""Within build_directory (--build-dir), runs cmake on test + description = """Within build_directory (--build-dir), runs cmake on test specification directories (from --test-spec-dir or --xml-test-list), then builds and runs the tests defined via CMake.""" parser = argparse.ArgumentParser(description=description) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument("--build-dir", default=".", - help="""Directory where tests are built and run. Will be created if it does not exist.""" - ) + parser.add_argument( + "--build-dir", + default=".", + help="""Directory where tests are built and run. Will be created if it does not exist.""", + ) - parser.add_argument("--build-optimized", action="store_true", - help="""By default, tests are built with debug flags. + parser.add_argument( + "--build-optimized", + action="store_true", + help="""By default, tests are built with debug flags. If this option is provided, then tests are instead built - in optimized mode.""") - - parser.add_argument("--machine", - help="The machine to create build information for.") - - parser.add_argument("--machines-dir", - help="The machines directory to take build information " - "from. Overrides the CIME_MODEL environment variable, " - "and must be specified if that variable is not set.") - - parser.add_argument("--clean", action="store_true", - help="""Clean build directory before building. Removes CMake cache and -runs "make clean".""" - ) - parser.add_argument("--cmake-args", - help="""Additional arguments to pass to CMake.""" - ) - parser.add_argument("--comp-interface", - default="mct", - help="""The cime driver/cpl interface to use.""" - ) - parser.add_argument("--color", action="store_true", - default=sys.stdout.isatty(), - help="""Turn on colorized output.""" - ) - parser.add_argument("--no-color", action="store_false", - help="""Turn off colorized output.""" + in optimized mode.""", + ) + + parser.add_argument( + "--machine", help="The machine to create build information for." + ) + + parser.add_argument( + "--machines-dir", + help="The machines directory to take build information " + "from. 
Overrides the CIME_MODEL environment variable, " + "and must be specified if that variable is not set.", + ) + + parser.add_argument( + "--clean", + action="store_true", + help="""Clean build directory before building. Removes CMake cache and +runs "make clean".""", + ) + parser.add_argument( + "--cmake-args", help="""Additional arguments to pass to CMake.""" + ) + parser.add_argument( + "--comp-interface", + default="mct", + help="""The cime driver/cpl interface to use.""", + ) + parser.add_argument( + "--color", + action="store_true", + default=sys.stdout.isatty(), + help="""Turn on colorized output.""", ) - parser.add_argument("--compiler", - help="""Compiler vendor for build (supported depends on machine). - If not specified, use the default for this machine.""" - ) - parser.add_argument("--enable-genf90", action="store_true", - default=True, - help="""Use genf90.pl to regenerate out-of-date sources from .F90.in + parser.add_argument( + "--no-color", action="store_false", help="""Turn off colorized output.""" + ) + parser.add_argument( + "--compiler", + help="""Compiler vendor for build (supported depends on machine). + If not specified, use the default for this machine.""", + ) + parser.add_argument( + "--enable-genf90", + action="store_true", + default=True, + help="""Use genf90.pl to regenerate out-of-date sources from .F90.in templates. Not enabled by default because it creates in-source output, and because it -requires genf90.pl to be in the user's path.""" - ) +requires genf90.pl to be in the user's path.""", + ) - parser.add_argument("--make-j", type=int, default=8, - help="""Number of processes to use for build.""" - ) + parser.add_argument( + "--make-j", + type=int, + default=8, + help="""Number of processes to use for build.""", + ) - parser.add_argument("--use-mpi", action="store_true", - help="""If specified, run unit tests with an mpi-enabled version + parser.add_argument( + "--use-mpi", + action="store_true", + help="""If specified, run unit tests with an mpi-enabled version of pFUnit, via mpirun. (Default is to use a serial build without - mpirun.) This requires a pFUnit build with MPI support.""") + mpirun.) This requires a pFUnit build with MPI support.""", + ) - parser.add_argument("--mpilib", - help="""MPI Library to use in build. + parser.add_argument( + "--mpilib", + help="""MPI Library to use in build. If not specified, use the default for this machine/compiler. Must match an MPILIB option in config_compilers.xml. e.g., for cheyenne, can use 'mpt'. - Only relevant if --use-mpi is specified.""" + Only relevant if --use-mpi is specified.""", ) - parser.add_argument("--mpirun-command", - help="""Command to use to run an MPI executable. + parser.add_argument( + "--mpirun-command", + help="""Command to use to run an MPI executable. If not specified, uses the default for this machine. - Only relevant if --use-mpi is specified.""" + Only relevant if --use-mpi is specified.""", ) parser.add_argument( - "--test-spec-dir", default=".", + "--test-spec-dir", + default=".", help="""Location where tests are specified. - Defaults to current directory.""" - ) + Defaults to current directory.""", + ) parser.add_argument( - "-T", "--ctest-args", - help="""Additional arguments to pass to CTest.""" + "-T", "--ctest-args", help="""Additional arguments to pass to CTest.""" ) parser.add_argument( - "--use-env-compiler", action="store_true", + "--use-env-compiler", + action="store_true", default=False, help="""Always use environment settings to set compiler commands. 
This is only necessary if using a CIME build type, if the user wants to -override the command provided by Machines.""" +override the command provided by Machines.""", ) parser.add_argument( - "--use-openmp", action="store_true", + "--use-openmp", + action="store_true", help="""If specified, include OpenMP support for tests. (Default is to run without OpenMP support.) This requires a pFUnit build with - OpenMP support.""" + OpenMP support.""", ) parser.add_argument( "--xml-test-list", - help="""Path to an XML file listing directories to run tests from.""" - ) + help="""Path to an XML file listing directories to run tests from.""", + ) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) output = Printer(color=args.color) @@ -137,20 +167,49 @@ def parse_command_line(args): if args.xml_test_list is None and args.test_spec_dir is None: output.print_error( "You must specify either --test-spec-dir or --xml-test-list." - ) + ) raise Exception("Missing required argument.") if args.make_j < 1: raise Exception("--make-j must be >= 1") - return output, args.build_dir, args.build_optimized, args.clean,\ - args.cmake_args, args.compiler, args.enable_genf90, args.machine, args.machines_dir,\ - args.make_j, args.use_mpi, args.mpilib, args.mpirun_command, args.test_spec_dir, args.ctest_args,\ - args.use_openmp, args.xml_test_list, args.verbose, args.comp_interface + return ( + output, + args.build_dir, + args.build_optimized, + args.clean, + args.cmake_args, + args.compiler, + args.enable_genf90, + args.machine, + args.machines_dir, + args.make_j, + args.use_mpi, + args.mpilib, + args.mpirun_command, + args.test_spec_dir, + args.ctest_args, + args.use_openmp, + args.xml_test_list, + args.verbose, + args.comp_interface, + ) -def cmake_stage(name, test_spec_dir, build_optimized, use_mpiserial, mpirun_command, output, pfunit_path, - cmake_args=None, clean=False, verbose=False, enable_genf90=True, color=True): +def cmake_stage( + name, + test_spec_dir, + build_optimized, + use_mpiserial, + mpirun_command, + output, + pfunit_path, + cmake_args=None, + clean=False, + verbose=False, + enable_genf90=True, + color=True, +): """Run cmake in the current working directory. 
Arguments: @@ -168,7 +227,7 @@ def cmake_stage(name, test_spec_dir, build_optimized, use_mpiserial, mpirun_comm if not os.path.isfile("CMakeCache.txt"): - output.print_header("Running cmake for "+name+".") + output.print_header("Running cmake for " + name + ".") # This build_type only has limited uses, and should probably be removed, # but for now it's still needed @@ -181,13 +240,14 @@ def cmake_stage(name, test_spec_dir, build_optimized, use_mpiserial, mpirun_comm "cmake", "-C Macros.cmake", test_spec_dir, - "-DCIMEROOT="+_CIMEROOT, - "-DSRC_ROOT="+get_src_root(), - "-DCIME_CMAKE_MODULE_DIRECTORY="+os.path.abspath(os.path.join(_CIMEROOT,"src","CMake")), - "-DCMAKE_BUILD_TYPE="+build_type, - "-DPFUNIT_MPIRUN='"+mpirun_command+"'", - "-DPFUNIT_PATH="+pfunit_path - ] + "-DCIMEROOT=" + _CIMEROOT, + "-DSRC_ROOT=" + get_src_root(), + "-DCIME_CMAKE_MODULE_DIRECTORY=" + + os.path.abspath(os.path.join(_CIMEROOT, "src", "CMake")), + "-DCMAKE_BUILD_TYPE=" + build_type, + "-DPFUNIT_MPIRUN='" + mpirun_command + "'", + "-DPFUNIT_PATH=" + pfunit_path, + ] if use_mpiserial: cmake_command.append("-DUSE_MPI_SERIAL=ON") if verbose: @@ -195,10 +255,8 @@ def cmake_stage(name, test_spec_dir, build_optimized, use_mpiserial, mpirun_comm if enable_genf90: cmake_command.append("-DENABLE_GENF90=ON") - genf90_dir = os.path.join( - _CIMEROOT,"src","externals","genf90" - ) - cmake_command.append("-DCMAKE_PROGRAM_PATH="+genf90_dir) + genf90_dir = os.path.join(_CIMEROOT, "src", "externals", "genf90") + cmake_command.append("-DCMAKE_PROGRAM_PATH=" + genf90_dir) if not color: cmake_command.append("-DUSE_COLOR=OFF") @@ -208,6 +266,7 @@ def cmake_stage(name, test_spec_dir, build_optimized, use_mpiserial, mpirun_comm run_cmd_no_fail(" ".join(cmake_command), combine_output=True) + def make_stage(name, output, make_j, clean=False, verbose=True): """Run make in the current working directory. @@ -215,18 +274,19 @@ def make_stage(name, output, make_j, clean=False, verbose=True): name - Name for output messages. make_j (int) - number of processes to use for make """ - output.print_header("Running make for "+name+".") + output.print_header("Running make for " + name + ".") if clean: run_cmd_no_fail("make clean") - make_command = ["make","-j",str(make_j)] + make_command = ["make", "-j", str(make_j)] if verbose: make_command.append("VERBOSE=1") run_cmd_no_fail(" ".join(make_command), combine_output=True) + def find_pfunit(compilerobj, mpilib, use_openmp): """Find the pfunit installation we'll be using, and print its path @@ -237,32 +297,53 @@ def find_pfunit(compilerobj, mpilib, use_openmp): - mpilib: String giving the mpi library we're using - use_openmp: Boolean """ - attrs = {"MPILIB": mpilib, - "compile_threaded": "TRUE" if use_openmp else "FALSE" - } - - pfunit_path = compilerobj.get_optional_compiler_node("PFUNIT_PATH", attributes=attrs) - expect(pfunit_path is not None, - """PFUNIT_PATH not found for this machine and compiler, with MPILIB={} and compile_threaded={}. -You must specify PFUNIT_PATH in config_compilers.xml, with attributes MPILIB and compile_threaded.""".format(mpilib, attrs['compile_threaded'])) + attrs = {"MPILIB": mpilib, "compile_threaded": "TRUE" if use_openmp else "FALSE"} + + pfunit_path = compilerobj.get_optional_compiler_node( + "PFUNIT_PATH", attributes=attrs + ) + expect( + pfunit_path is not None, + """PFUNIT_PATH not found for this machine and compiler, with MPILIB={} and compile_threaded={}. 
+You must specify PFUNIT_PATH in config_compilers.xml, with attributes MPILIB and compile_threaded.""".format( + mpilib, attrs["compile_threaded"] + ), + ) logger.info("Using PFUNIT_PATH: {}".format(compilerobj.text(pfunit_path))) return compilerobj.text(pfunit_path) -#================================================= + +# ================================================= # Iterate over input suite specs, building the tests. -#================================================= +# ================================================= def _main(): - output, build_dir, build_optimized, clean,\ - cmake_args, compiler, enable_genf90, machine, machines_dir,\ - make_j, use_mpi, mpilib, mpirun_command, test_spec_dir, ctest_args,\ - use_openmp, xml_test_list, verbose, comp_interface \ - = parse_command_line(sys.argv) - -#================================================= -# Find directory and file paths. -#================================================= + ( + output, + build_dir, + build_optimized, + clean, + cmake_args, + compiler, + enable_genf90, + machine, + machines_dir, + make_j, + use_mpi, + mpilib, + mpirun_command, + test_spec_dir, + ctest_args, + use_openmp, + xml_test_list, + verbose, + comp_interface, + ) = parse_command_line(sys.argv) + + # ================================================= + # Find directory and file paths. + # ================================================= suite_specs = [] # TODO: this violates cime policy of direct access to xml # should be moved to CIME/XML @@ -271,15 +352,16 @@ def _main(): test_xml_tree.parse(xml_test_list) known_paths = { "here": os.path.abspath(os.path.dirname(xml_test_list)), - } + } suite_specs.extend(suites_from_xml(test_xml_tree, known_paths)) if test_spec_dir is not None: suite_specs.append( - TestSuiteSpec("__command_line_test__", - ["__command_line_test__"], - [os.path.abspath(test_spec_dir)]) + TestSuiteSpec( + "__command_line_test__", + ["__command_line_test__"], + [os.path.abspath(test_spec_dir)], ) - + ) if machines_dir is not None: machines_file = os.path.join(machines_dir, "config_machines.xml") @@ -299,13 +381,16 @@ def _main(): pwd_contents = os.listdir(os.getcwd()) # Clear CMake cache. for file_ in pwd_contents: - if file_ in ("Macros.cmake", "env_mach_specific.xml") \ - or file_.startswith('Depends') or file_.startswith(".env_mach_specific"): + if ( + file_ in ("Macros.cmake", "env_mach_specific.xml") + or file_.startswith("Depends") + or file_.startswith(".env_mach_specific") + ): os.remove(file_) - #================================================= + # ================================================= # Functions to perform various stages of build. 
- #================================================= + # ================================================= if not use_mpi: mpilib = "mpi-serial" @@ -327,47 +412,70 @@ def _main(): # Create the environment, and the Macros.cmake file # # - configure(machobj, build_dir, ["CMake"], compiler, mpilib, debug, - comp_interface, os_, unit_testing=True) + configure( + machobj, + build_dir, + ["CMake"], + compiler, + mpilib, + debug, + comp_interface, + os_, + unit_testing=True, + ) machspecific = EnvMachSpecific(build_dir, unit_testing=True) fake_case = FakeCase(compiler, mpilib, debug, comp_interface) machspecific.load_env(fake_case) - cmake_args = "{}-DOS={} -DCOMPILER={} -DDEBUG={} -DMPILIB={} -Dcompile_threaded={}".format( - "" if not cmake_args else " ", os_, compiler, stringify_bool(debug), mpilib, stringify_bool(use_openmp)) + cmake_args = ( + "{}-DOS={} -DCOMPILER={} -DDEBUG={} -DMPILIB={} -Dcompile_threaded={}".format( + "" if not cmake_args else " ", + os_, + compiler, + stringify_bool(debug), + mpilib, + stringify_bool(use_openmp), + ) + ) os.environ["UNIT_TEST_HOST"] = socket.gethostname() if "NETCDF_PATH" in os.environ and not "NETCDF" in os.environ: # The CMake Netcdf find utility that we use (from pio2) seems to key off # of the environment variable NETCDF, but not NETCDF_PATH - logger.info("Setting NETCDF environment variable: {}".format(os.environ["NETCDF_PATH"])) + logger.info( + "Setting NETCDF environment variable: {}".format(os.environ["NETCDF_PATH"]) + ) os.environ["NETCDF"] = os.environ["NETCDF_PATH"] if "NETCDFROOT" in os.environ and not "NETCDF" in os.environ: # The CMake Netcdf find utility that we use (from pio2) seems to key off # of the environment variable NETCDF, but not NETCDFROOT - logger.info("Setting NETCDF environment variable: {}".format(os.environ["NETCDFROOT"])) + logger.info( + "Setting NETCDF environment variable: {}".format(os.environ["NETCDFROOT"]) + ) os.environ["NETCDF"] = os.environ["NETCDFROOT"] if not use_mpi: mpirun_command = "" elif mpirun_command is None: mpi_attribs = { - "compiler" : compiler, - "mpilib" : mpilib, - "threaded" : use_openmp, - "comp_interface" : comp_interface, - "unit_testing" : True + "compiler": compiler, + "mpilib": mpilib, + "threaded": use_openmp, + "comp_interface": comp_interface, + "unit_testing": True, } # We can get away with specifying case=None since we're using exe_only=True - mpirun_command, _, _, _ = machspecific.get_mpirun(None, mpi_attribs, None, exe_only=True) + mpirun_command, _, _, _ = machspecific.get_mpirun( + None, mpi_attribs, None, exe_only=True + ) mpirun_command = machspecific.get_resolved_value(mpirun_command) logger.info("mpirun command is '{}'".format(mpirun_command)) -#================================================= -# Run tests. -#================================================= + # ================================================= + # Run tests. 
+ # ================================================= for spec in suite_specs: os.chdir(build_dir) @@ -379,29 +487,38 @@ def _main(): os.mkdir(spec.name) for label, directory in spec: - os.chdir(os.path.join(build_dir,spec.name)) + os.chdir(os.path.join(build_dir, spec.name)) if not os.path.isdir(label): os.mkdir(label) os.chdir(label) - name = spec.name+"/"+label + name = spec.name + "/" + label if not os.path.islink("Macros.cmake"): - os.symlink(os.path.join(build_dir,"Macros.cmake"), "Macros.cmake") + os.symlink(os.path.join(build_dir, "Macros.cmake"), "Macros.cmake") use_mpiserial = not use_mpi - cmake_stage(name, directory, build_optimized, use_mpiserial, mpirun_command, output, pfunit_path, verbose=verbose, - enable_genf90=enable_genf90, cmake_args=cmake_args) + cmake_stage( + name, + directory, + build_optimized, + use_mpiserial, + mpirun_command, + output, + pfunit_path, + verbose=verbose, + enable_genf90=enable_genf90, + cmake_args=cmake_args, + ) make_stage(name, output, make_j, clean=clean, verbose=verbose) - for spec in suite_specs: - os.chdir(os.path.join(build_dir,spec.name)) + os.chdir(os.path.join(build_dir, spec.name)) for label, directory in spec: - name = spec.name+"/"+label + name = spec.name + "/" + label - output.print_header("Running CTest tests for "+name+".") + output.print_header("Running CTest tests for " + name + ".") ctest_command = ["ctest", "--output-on-failure"] @@ -412,8 +529,11 @@ def _main(): ctest_command.extend(ctest_args.split(" ")) logger.info("Running '{}'".format(" ".join(ctest_command))) - output = run_cmd_no_fail(" ".join(ctest_command), from_dir=label, combine_output=True) + output = run_cmd_no_fail( + " ".join(ctest_command), from_dir=label, combine_output=True + ) logger.info(output) + if __name__ == "__main__": _main() diff --git a/scripts/query_config b/scripts/query_config index 682d9d986b7..8438430d5e8 100755 --- a/scripts/query_config +++ b/scripts/query_config @@ -14,5 +14,6 @@ sys.path.insert(1, tools_path) from CIME.scripts.query_config import _main_func + if __name__ == "__main__": _main_func() diff --git a/scripts/query_testlists b/scripts/query_testlists index 4de801d3260..6ea16e3cada 100755 --- a/scripts/query_testlists +++ b/scripts/query_testlists @@ -14,5 +14,6 @@ sys.path.insert(1, tools_path) from CIME.scripts.query_testlists import _main_func + if __name__ == "__main__": _main_func() diff --git a/scripts/tests/CMakeLists.txt b/scripts/tests/CMakeLists.txt index b7d3f990747..f015bec8b42 100644 --- a/scripts/tests/CMakeLists.txt +++ b/scripts/tests/CMakeLists.txt @@ -16,7 +16,7 @@ execute_process(COMMAND ${PYTHON} "--version" OUTPUT_VARIABLE PY_VER OUTPUT_STRIP_TRAILING_WHITESPACE ERROR_STRIP_TRAILING_WHITESPACE) -MESSAGE("Python version is " ${PY_VER}) +MESSAGE("Python version is " ${PY_VER}) execute_process(COMMAND ${PYTHON} "list_tests" OUTPUT_VARIABLE STR_TESTS WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} OUTPUT_STRIP_TRAILING_WHITESPACE @@ -35,6 +35,6 @@ separate_arguments(ARG_LIST UNIX_COMMAND ${args}) foreach(ATEST ${TEST_LIST}) # this assignment prevents quotes being added to testname in add_test set(fulltest "${ATEST}") - add_test(NAME ${ATEST} COMMAND ./scripts_regression_tests.py -v ${fulltest} ${ARG_LIST} + add_test(NAME ${ATEST} COMMAND ../lib/CIME/tests/scripts_regression_tests.py -v ${fulltest} ${ARG_LIST} WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) endforeach(ATEST) diff --git a/scripts/tests/list_tests b/scripts/tests/list_tests index d28ae7199c4..6c0d339fe85 100755 --- a/scripts/tests/list_tests +++ 
b/scripts/tests/list_tests @@ -2,28 +2,15 @@ # This script will print the list of test classes in # scripts_regression_tests.py # + import unittest -DEBUG = False -#pylint: disable=protected-access +# pylint: disable=protected-access def list_tests_from(): - loader = unittest.TestLoader() - suite = loader.discover(".", pattern="scripts_regression_tests.py") - test_classes = [] - for alltests in suite: - tests = alltests._tests - if len(tests): - for atest in tests: - if DEBUG: - print(atest) - for btest in atest._tests: - btestname = btest.__str__().split() - test_classes.append(btestname[1][1:-1].split('.')[1]) - # add this explicitly, not captured by the above - test_classes.append("B_CheckCode") - for ctest in sorted(list(set(test_classes))): - print(ctest) + suite = unittest.defaultTestLoader.discover("../lib/CIME/tests") + for test in suite: + print(test._tests[0]._testMethodName) if __name__ == "__main__": # Include the directories diff --git a/scripts/tests/scripts_regression_tests.py b/scripts/tests/scripts_regression_tests.py deleted file mode 100755 index 2bc2bc15378..00000000000 --- a/scripts/tests/scripts_regression_tests.py +++ /dev/null @@ -1,3717 +0,0 @@ -#!/usr/bin/env python3 - -""" -Script containing CIME python regression test suite. This suite should be run -to confirm overall CIME correctness. -""" - -import glob, os, re, shutil, signal, sys, tempfile, \ - threading, time, logging, unittest, getpass, \ - filecmp, time, atexit - -from xml.etree.ElementTree import ParseError - -cimeroot = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..")) -sys.path.insert(0, cimeroot) -# Remove all pyc files to ensure we're testing the right things -import subprocess, argparse -subprocess.call('/bin/rm -f $(find . -name "*.pyc")', shell=True, cwd=cimeroot) -import CIME.six -from CIME.six import assertRaisesRegex -import stat as osstat - -import collections - -from CIME.utils import run_cmd, run_cmd_no_fail, get_lids, get_current_commit, \ - safe_copy, CIMEError, get_cime_root, get_src_root, Timeout, \ - import_from_file, get_model -import CIME.get_tests -import CIME.test_scheduler, CIME.wait_for_tests -from CIME.test_scheduler import TestScheduler -from CIME.XML.compilers import Compilers -from CIME.XML.env_run import EnvRun -from CIME.XML.machines import Machines -from CIME.XML.files import Files -from CIME.case import Case -from CIME.code_checker import check_code, get_all_checkable_files -from CIME.test_status import * -from CIME.provenance import get_test_success, save_test_success - -SCRIPT_DIR = CIME.utils.get_scripts_root() -TOOLS_DIR = os.path.join(get_cime_root(), "CIME", "Tools") -TEST_COMPILER = None -GLOBAL_TIMEOUT = None -TEST_MPILIB = None -MACHINE = None -FAST_ONLY = False -NO_BATCH = False -NO_CMAKE = False -TEST_ROOT = None -NO_TEARDOWN = False -NO_FORTRAN_RUN = False -TEST_RESULT = None - -os.environ["CIME_GLOBAL_WALLTIME"] = "0:05:00" - -# pragma pylint: disable=protected-access -############################################################################### -def run_cmd_assert_result(test_obj, cmd, from_dir=None, expected_stat=0, env=None, verbose=False): -############################################################################### - from_dir = os.getcwd() if from_dir is None else from_dir - stat, output, errput = run_cmd(cmd, from_dir=from_dir, env=env, verbose=verbose) - if expected_stat == 0: - expectation = "SHOULD HAVE WORKED, INSTEAD GOT STAT %s" % stat - else: - expectation = "EXPECTED STAT %s, INSTEAD GOT STAT %s" % (expected_stat, stat) - 
msg = \ -""" -COMMAND: %s -FROM_DIR: %s -%s -OUTPUT: %s -ERRPUT: %s -""" % (cmd, from_dir, expectation, output, errput) - test_obj.assertEqual(stat, expected_stat, msg=msg) - - return output - -############################################################################### -def assert_test_status(test_obj, test_name, test_status_obj, test_phase, expected_stat): -############################################################################### - test_status = test_status_obj.get_status(test_phase) - test_obj.assertEqual(test_status, expected_stat, msg="Problem with {}: for phase '{}': has status '{}', expected '{}'".format(test_name, test_phase, test_status, expected_stat)) - -############################################################################### -def verify_perms(test_obj, root_dir): -############################################################################### - for root, dirs, files in os.walk(root_dir): - for filename in files: - full_path = os.path.join(root, filename) - st = os.stat(full_path) - test_obj.assertTrue(st.st_mode & osstat.S_IWGRP, msg="file {} is not group writeable".format(full_path)) - test_obj.assertTrue(st.st_mode & osstat.S_IRGRP, msg="file {} is not group readable".format(full_path)) - test_obj.assertTrue(st.st_mode & osstat.S_IROTH, msg="file {} is not world readable".format(full_path)) - - for dirname in dirs: - full_path = os.path.join(root, dirname) - st = os.stat(full_path) - - test_obj.assertTrue(st.st_mode & osstat.S_IWGRP, msg="dir {} is not group writable".format(full_path)) - test_obj.assertTrue(st.st_mode & osstat.S_IRGRP, msg="dir {} is not group readable".format(full_path)) - test_obj.assertTrue(st.st_mode & osstat.S_IXGRP, msg="dir {} is not group executable".format(full_path)) - test_obj.assertTrue(st.st_mode & osstat.S_IROTH, msg="dir {} is not world readable".format(full_path)) - test_obj.assertTrue(st.st_mode & osstat.S_IXOTH, msg="dir {} is not world executable".format(full_path)) - -############################################################################### -def get_casedir(test_obj, case_fragment, all_cases): -############################################################################### - potential_matches = [item for item in all_cases if case_fragment in item] - test_obj.assertTrue(len(potential_matches) == 1, "Ambiguous casedir selection for {}, found {} among {}".format(case_fragment, potential_matches, all_cases)) - return potential_matches[0] - -############################################################################### -class A_RunUnitTests(unittest.TestCase): -############################################################################### - - def test_resolve_variable_name(self): - files = Files() - machinefile = files.get_value("MACHINES_SPEC_FILE") - self.assertTrue(os.path.isfile(machinefile), - msg="Path did not resolve to existing file %s" % machinefile) - - def test_unittests(self): - # Finds all files contained in CIME/tests or its subdirectories that - # match the pattern 'test*.py', and runs the unit tests found there - # (i.e., tests defined using python's unittest module). - # - # This is analogous to running: - # python3 -m unittest discover -s CIME/tests -t . - # from cime/scripts/lib - # - # Yes, that means we have a bunch of unit tests run from this one unit - # test. 
- - before_model = get_model() - testsuite = unittest.defaultTestLoader.discover( - start_dir = os.path.join(LIB_DIR,"CIME","tests"), - pattern = 'test*.py', - top_level_dir = LIB_DIR) - - testrunner = unittest.TextTestRunner(buffer=False) - - # Disable logging; otherwise log messages written by code under test - # clutter the unit test output - log_lvl = logging.getLogger().getEffectiveLevel() - logging.disable(logging.CRITICAL) - try: - results = testrunner.run(testsuite) - finally: - logging.getLogger().setLevel(log_lvl) - - self.assertTrue(results.wasSuccessful()) - self.assertEqual(get_model(), before_model) - - def test_lib_doctests(self): - # Find and run all the doctests in the lib directory tree - skip_list = ["six.py", "CIME/SystemTests/mvk.py", "CIME/SystemTests/pgn.py"] - for root, _, files in os.walk(LIB_DIR): - for file_ in files: - filepath = os.path.join(root, file_)[len(LIB_DIR)+1:] - if filepath.endswith(".py") and filepath not in skip_list: - with open(os.path.join(root, file_)) as fd: - content = fd.read() - if '>>>' in content: - print("Running doctests for {}".format(filepath)) - run_cmd_assert_result(self, 'PYTHONPATH={}:$PYTHONPATH python3 -m doctest {} 2>&1'.format(LIB_DIR, filepath), from_dir=LIB_DIR) - else: - print("{} has no doctests".format(filepath)) - -############################################################################### -class C_TestGridGeneration(unittest.TestCase): -############################################################################### - @classmethod - def setUpClass(cls): - cls._do_teardown = [] - cls._testroot = os.path.join(TEST_ROOT, "TestGridGeneration") - cls._testdirs = [] - - def test_gen_domain(self): - if get_model() != "e3sm": - self.skipTest("Skipping gen_domain test. Depends on E3SM tools") - cime_root = get_cime_root() - inputdata = MACHINE.get_value("DIN_LOC_ROOT") - - tool_name = "test_gen_domain" - tool_location = os.path.join(cime_root, "tools", "mapping", "gen_domain_files", "test_gen_domain.sh") - args = "--cime_root={} --inputdata_root={}".format(cime_root, inputdata) - - cls = self.__class__ - test_dir = os.path.join(cls._testroot, tool_name) - cls._testdirs.append(test_dir) - os.makedirs(test_dir) - run_cmd_assert_result(self, "{} {}".format(tool_location, args), from_dir=test_dir) - cls._do_teardown.append(test_dir) - - @classmethod - def tearDownClass(cls): - do_teardown = len(cls._do_teardown) > 0 and sys.exc_info() == (None, None, None) and not NO_TEARDOWN - teardown_root = True - for tfile in cls._testdirs: - if tfile not in cls._do_teardown: - print("Detected failed test or user request no teardown") - print("Leaving case directory : %s"%tfile) - teardown_root = False - elif do_teardown: - shutil.rmtree(tfile) - - if teardown_root and do_teardown: - shutil.rmtree(cls._testroot) - -############################################################################### -def make_fake_teststatus(path, testname, status, phase): -############################################################################### - expect(phase in CORE_PHASES, "Bad phase '%s'" % phase) - with TestStatus(test_dir=path, test_name=testname) as ts: - for core_phase in CORE_PHASES: - if core_phase == phase: - ts.set_status(core_phase, status, comments=("time=42" if phase == RUN_PHASE else "")) - break - else: - ts.set_status(core_phase, TEST_PASS_STATUS, comments=("time=42" if phase == RUN_PHASE else "")) - -############################################################################### -def parse_test_status(line): 
-############################################################################### - status, test = line.split()[0:2] - return test, status - -############################################################################### -def kill_subprocesses(name=None, sig=signal.SIGKILL, expected_num_killed=None, tester=None): -############################################################################### - # Kill all subprocesses - proc_ids = CIME.utils.find_proc_id(proc_name=name, children_only=True) - if (expected_num_killed is not None): - tester.assertEqual(len(proc_ids), expected_num_killed, - msg="Expected to find %d processes to kill, found %d" % (expected_num_killed, len(proc_ids))) - for proc_id in proc_ids: - try: - os.kill(proc_id, sig) - except OSError: - pass - -############################################################################### -def kill_python_subprocesses(sig=signal.SIGKILL, expected_num_killed=None, tester=None): -############################################################################### - kill_subprocesses("[Pp]ython", sig, expected_num_killed, tester) - -########################################################################### -def assert_dashboard_has_build(tester, build_name, expected_count=1): -########################################################################### - # Do not test E3SM dashboard if model is CESM - if get_model() == "e3sm": - time.sleep(10) # Give chance for cdash to update - - wget_file = tempfile.mktemp() - - run_cmd_no_fail("wget https://my.cdash.org/api/v1/index.php?project=ACME_test --no-check-certificate -O %s" % wget_file) - - raw_text = open(wget_file, "r").read() - os.remove(wget_file) - - num_found = raw_text.count(build_name) - tester.assertEqual(num_found, expected_count, - msg="Dashboard did not have expected num occurances of build name '%s'. 
Expected %s, found %s" % (build_name, expected_count, num_found)) - -############################################################################### -def setup_proxy(): -############################################################################### - if ("http_proxy" not in os.environ): - proxy = MACHINE.get_value("PROXY") - if (proxy is not None): - os.environ["http_proxy"] = proxy - return True - - return False - -############################################################################### -class N_TestUnitTest(unittest.TestCase): -############################################################################### - @classmethod - def setUpClass(cls): - cls._do_teardown = [] - cls._testroot = os.path.join(TEST_ROOT, 'TestUnitTests') - cls._testdirs = [] - - def _has_unit_test_support(self): - if TEST_COMPILER is None: - default_compiler = MACHINE.get_default_compiler() - compiler = Compilers(MACHINE, compiler=default_compiler) - else: - compiler = Compilers(MACHINE, compiler=TEST_COMPILER) - attrs = {'MPILIB': 'mpi-serial', 'compile_threaded': 'FALSE'} - pfunit_path = compiler.get_optional_compiler_node("PFUNIT_PATH", - attributes=attrs) - if pfunit_path is None: - return False - else: - return True - - def test_a_unit_test(self): - cls = self.__class__ - if not self._has_unit_test_support(): - self.skipTest("Skipping TestUnitTest - PFUNIT_PATH not found for the default compiler on this machine") - test_dir = os.path.join(cls._testroot,"unit_tester_test") - cls._testdirs.append(test_dir) - os.makedirs(test_dir) - unit_test_tool = os.path.abspath(os.path.join(get_cime_root(),"scripts","fortran_unit_testing","run_tests.py")) - test_spec_dir = os.path.join(os.path.dirname(unit_test_tool),"Examples", "interpolate_1d", "tests") - args = "--build-dir {} --test-spec-dir {}".format(test_dir, test_spec_dir) - args += " --machine {}".format(MACHINE.get_machine_name()) - run_cmd_no_fail("{} {}".format(unit_test_tool, args)) - cls._do_teardown.append(test_dir) - - def test_b_cime_f90_unit_tests(self): - cls = self.__class__ - if (FAST_ONLY): - self.skipTest("Skipping slow test") - - if not self._has_unit_test_support(): - self.skipTest("Skipping TestUnitTest - PFUNIT_PATH not found for the default compiler on this machine") - - test_dir = os.path.join(cls._testroot,"driver_f90_tests") - cls._testdirs.append(test_dir) - os.makedirs(test_dir) - test_spec_dir = get_cime_root() - unit_test_tool = os.path.abspath(os.path.join(test_spec_dir,"scripts","fortran_unit_testing","run_tests.py")) - args = "--build-dir {} --test-spec-dir {}".format(test_dir, test_spec_dir) - args += " --machine {}".format(MACHINE.get_machine_name()) - run_cmd_no_fail("{} {}".format(unit_test_tool, args)) - cls._do_teardown.append(test_dir) - - @classmethod - def tearDownClass(cls): - do_teardown = len(cls._do_teardown) > 0 and sys.exc_info() == (None, None, None) and not NO_TEARDOWN - - teardown_root = True - for tfile in cls._testdirs: - if tfile not in cls._do_teardown: - print("Detected failed test or user request no teardown") - print("Leaving case directory : %s"%tfile) - teardown_root = False - elif do_teardown: - shutil.rmtree(tfile) - - if teardown_root and do_teardown: - shutil.rmtree(cls._testroot) - -############################################################################### -class J_TestCreateNewcase(unittest.TestCase): -############################################################################### - @classmethod - def setUpClass(cls): - cls._testdirs = [] - cls._do_teardown = [] - cls._testroot = 
os.path.join(TEST_ROOT, 'TestCreateNewcase') - cls._root_dir = os.getcwd() - - cimeroot = CIME.utils.get_cime_root() - sys.path.insert(0, os.path.join(cimeroot, "Tools")) - - def tearDown(self): - cls = self.__class__ - os.chdir(cls._root_dir) - - def test_a_createnewcase(self): - cls = self.__class__ - - testdir = os.path.join(cls._testroot, 'testcreatenewcase') - if os.path.exists(testdir): - shutil.rmtree(testdir) - args = " --case %s --compset X --output-root %s --handle-preexisting-dirs=r" % (testdir, cls._testroot) - if get_model() == "cesm": - args += " --run-unsupported" - if TEST_COMPILER is not None: - args = args + " --compiler %s"%TEST_COMPILER - if TEST_MPILIB is not None: - args = args + " --mpilib %s"%TEST_MPILIB - if CIME.utils.get_cime_default_driver() == "nuopc": - args += " --res f19_g17 " - else: - args += " --res f19_g16 " - - cls._testdirs.append(testdir) - run_cmd_assert_result(self, "./create_newcase %s"%(args), from_dir=SCRIPT_DIR) - self.assertTrue(os.path.exists(testdir)) - self.assertTrue(os.path.exists(os.path.join(testdir, "case.setup"))) - - run_cmd_assert_result(self, "./case.setup", from_dir=testdir) - run_cmd_assert_result(self, "./case.build", from_dir=testdir) - - with Case(testdir, read_only=False) as case: - ntasks = case.get_value("NTASKS_ATM") - case.set_value("NTASKS_ATM", ntasks+1) - - # this should fail with a locked file issue - run_cmd_assert_result(self, "./case.build", - from_dir=testdir, expected_stat=1) - - run_cmd_assert_result(self, "./case.setup --reset", from_dir=testdir) - run_cmd_assert_result(self, "./case.build", from_dir=testdir) - with Case(testdir, read_only=False) as case: - case.set_value("CHARGE_ACCOUNT", "fred") - - # this should not fail with a locked file issue - run_cmd_assert_result(self, "./case.build", from_dir=testdir) - - run_cmd_assert_result(self, "./case.st_archive --test-all", from_dir=testdir) - - # Trying to set values outside of context manager should fail - case = Case(testdir, read_only=False) - with self.assertRaises(CIMEError): - case.set_value("NTASKS_ATM", 42) - - # Trying to read_xml with pending changes should fail - with self.assertRaises(CIMEError): - with Case(testdir, read_only=False) as case: - case.set_value("CHARGE_ACCOUNT", "fouc") - case.read_xml() - - cls._do_teardown.append(testdir) - - def test_aa_no_flush_on_instantiate(self): - testdir = os.path.join(self.__class__._testroot, 'testcreatenewcase') - with Case(testdir, read_only=False) as case: - for env_file in case._files: - self.assertFalse(env_file.needsrewrite, msg="Instantiating a case should not trigger a flush call") - - with Case(testdir, read_only=False) as case: - case.set_value("HIST_OPTION","nyears") - runfile = case.get_env('run') - self.assertTrue(runfile.needsrewrite, msg="Expected flush call not triggered") - for env_file in case._files: - if env_file != runfile: - self.assertFalse(env_file.needsrewrite, msg="Unexpected flush triggered for file {}" - .format(env_file.filename)) - # Flush the file - runfile.write() - # set it again to the same value - case.set_value("HIST_OPTION","nyears") - # now the file should not need to be flushed - for env_file in case._files: - self.assertFalse(env_file.needsrewrite, msg="Unexpected flush triggered for file {}" - .format(env_file.filename)) - - # Check once more with a new instance - with Case(testdir, read_only=False) as case: - case.set_value("HIST_OPTION","nyears") - for env_file in case._files: - self.assertFalse(env_file.needsrewrite, msg="Unexpected flush triggered for file {}" - 
.format(env_file.filename)) - - def test_b_user_mods(self): - cls = self.__class__ - - testdir = os.path.join(cls._testroot, 'testusermods') - if os.path.exists(testdir): - shutil.rmtree(testdir) - - cls._testdirs.append(testdir) - - user_mods_dir = os.path.join(CIME.utils.get_cime_root(), "scripts", "tests", "user_mods_test1") - args = " --case %s --compset X --user-mods-dir %s --output-root %s --handle-preexisting-dirs=r"% (testdir, user_mods_dir, cls._testroot) - if get_model() == "cesm": - args += " --run-unsupported" - if TEST_COMPILER is not None: - args = args + " --compiler %s"%TEST_COMPILER - if TEST_MPILIB is not None: - args = args + " --mpilib %s"%TEST_MPILIB - if CIME.utils.get_cime_default_driver() == "nuopc": - args += " --res f19_g17 " - else: - args += " --res f19_g16 " - - run_cmd_assert_result(self, "%s/create_newcase %s " - % (SCRIPT_DIR, args),from_dir=SCRIPT_DIR) - - self.assertTrue(os.path.isfile(os.path.join(testdir,"SourceMods","src.drv","somefile.F90")), msg="User_mods SourceMod missing") - - with open(os.path.join(testdir,"user_nl_cpl"),"r") as fd: - contents = fd.read() - self.assertTrue("a different cpl test option" in contents, msg="User_mods contents of user_nl_cpl missing") - self.assertTrue("a cpl namelist option" in contents, msg="User_mods contents of user_nl_cpl missing") - cls._do_teardown.append(testdir) - - def test_c_create_clone_keepexe(self): - cls = self.__class__ - - testdir = os.path.join(cls._testroot, 'test_create_clone_keepexe') - if os.path.exists(testdir): - shutil.rmtree(testdir) - prevtestdir = cls._testdirs[0] - user_mods_dir = os.path.join(CIME.utils.get_cime_root(), "scripts", "tests", "user_modes_test3") - - cmd = "%s/create_clone --clone %s --case %s --keepexe --user-mods-dir %s" \ - % (SCRIPT_DIR, prevtestdir, testdir, user_mods_dir) - run_cmd_assert_result(self, cmd, from_dir=SCRIPT_DIR, expected_stat=1) - cls._do_teardown.append(testdir) - - def test_d_create_clone_new_user(self): - cls = self.__class__ - - testdir = os.path.join(cls._testroot, 'test_create_clone_new_user') - if os.path.exists(testdir): - shutil.rmtree(testdir) - prevtestdir = cls._testdirs[0] - cls._testdirs.append(testdir) - # change the USER and CIME_OUTPUT_ROOT to nonsense values - # this is intended as a test of whether create_clone is independent of user - run_cmd_assert_result(self, "./xmlchange USER=this_is_not_a_user", - from_dir=prevtestdir) - - fakeoutputroot = cls._testroot.replace(os.environ.get("USER"), "this_is_not_a_user") - run_cmd_assert_result(self, "./xmlchange CIME_OUTPUT_ROOT=%s"%fakeoutputroot, - from_dir=prevtestdir) - - # this test should pass (user name is replaced) - run_cmd_assert_result(self, "%s/create_clone --clone %s --case %s " % - (SCRIPT_DIR, prevtestdir, testdir),from_dir=SCRIPT_DIR) - - shutil.rmtree(testdir) - # this test should pass - run_cmd_assert_result(self, "%s/create_clone --clone %s --case %s --cime-output-root %s" % - (SCRIPT_DIR, prevtestdir, testdir, cls._testroot),from_dir=SCRIPT_DIR) - - cls._do_teardown.append(testdir) - - def test_dd_create_clone_not_writable(self): - cls = self.__class__ - - testdir = os.path.join(cls._testroot, 'test_create_clone_not_writable') - if os.path.exists(testdir): - shutil.rmtree(testdir) - prevtestdir = cls._testdirs[0] - cls._testdirs.append(testdir) - - with Case(prevtestdir, read_only=False) as case1: - case2 = case1.create_clone(testdir) - with self.assertRaises(CIMEError): - case2.set_value("CHARGE_ACCOUNT", "fouc") - cls._do_teardown.append(testdir) - - def 
test_e_xmlquery(self): - # Set script and script path - xmlquery = "./xmlquery" - cls = self.__class__ - casedir = cls._testdirs[0] - - # Check for environment - self.assertTrue(os.path.isdir(SCRIPT_DIR)) - self.assertTrue(os.path.isdir(TOOLS_DIR)) - self.assertTrue(os.path.isfile(os.path.join(casedir,xmlquery))) - - # Test command line options - with Case(casedir, read_only=True) as case: - STOP_N = case.get_value("STOP_N") - COMP_CLASSES = case.get_values("COMP_CLASSES") - BUILD_COMPLETE = case.get_value("BUILD_COMPLETE") - cmd = xmlquery + " STOP_N --value" - output = run_cmd_no_fail(cmd, from_dir=casedir) - self.assertTrue(output == str(STOP_N), msg="%s != %s"%(output, STOP_N)) - cmd = xmlquery + " BUILD_COMPLETE --value" - output = run_cmd_no_fail(cmd, from_dir=casedir) - self.assertTrue(output == "TRUE", msg="%s != %s"%(output, BUILD_COMPLETE)) - # we expect DOCN_MODE to be undefined in this X compset - # this test assures that we do not try to resolve this as a compvar - cmd = xmlquery + " DOCN_MODE --value" - _, output, error = run_cmd(cmd, from_dir=casedir) - self.assertTrue(error == "ERROR: No results found for variable DOCN_MODE", - msg="unexpected result for DOCN_MODE, output {}, error {}". - format(output, error)) - - for comp in COMP_CLASSES: - caseresult = case.get_value("NTASKS_%s"%comp) - cmd = xmlquery + " NTASKS_%s --value"%comp - output = run_cmd_no_fail(cmd, from_dir=casedir) - self.assertTrue(output == str(caseresult), msg="%s != %s"%(output, caseresult)) - cmd = xmlquery + " NTASKS --subgroup %s --value"%comp - output = run_cmd_no_fail(cmd, from_dir=casedir) - self.assertTrue(output == str(caseresult), msg="%s != %s"%(output, caseresult)) - if MACHINE.has_batch_system(): - JOB_QUEUE = case.get_value("JOB_QUEUE", subgroup="case.run") - cmd = xmlquery + " JOB_QUEUE --subgroup case.run --value" - output = run_cmd_no_fail(cmd, from_dir=casedir) - self.assertTrue(output == JOB_QUEUE, msg="%s != %s"%(output, JOB_QUEUE)) - - cmd = xmlquery + " --listall" - run_cmd_no_fail(cmd, from_dir=casedir) - - cls._do_teardown.append(cls._testroot) - - def test_f_createnewcase_with_user_compset(self): - cls = self.__class__ - - testdir = os.path.join(cls._testroot, 'testcreatenewcase_with_user_compset') - if os.path.exists(testdir): - shutil.rmtree(testdir) - - cls._testdirs.append(testdir) - - if get_model() == "cesm": - if CIME.utils.get_cime_default_driver() == "nuopc": - pesfile = os.path.join(get_src_root(),"components","cmeps","cime_config","config_pes.xml") - else: - pesfile = os.path.join(get_src_root(),"components","cpl7","driver","cime_config","config_pes.xml") - else: - pesfile = os.path.join(get_src_root(),"driver-mct","cime_config","config_pes.xml") - - args = "--case %s --compset 2000_SATM_XLND_SICE_SOCN_XROF_XGLC_SWAV --pesfile %s --res f19_g16 --output-root %s --handle-preexisting-dirs=r" % (testdir, pesfile, cls._testroot) - if get_model() == "cesm": - args += " --run-unsupported" - if TEST_COMPILER is not None: - args += " --compiler %s"%TEST_COMPILER - if TEST_MPILIB is not None: - args = args + " --mpilib %s"%TEST_MPILIB - - run_cmd_assert_result(self, "%s/create_newcase %s"%(SCRIPT_DIR, args), from_dir=SCRIPT_DIR) - run_cmd_assert_result(self, "./case.setup", from_dir=testdir) - run_cmd_assert_result(self, "./case.build", from_dir=testdir) - - cls._do_teardown.append(testdir) - - def test_g_createnewcase_with_user_compset_and_env_mach_pes(self): - cls = self.__class__ - - testdir = os.path.join(cls._testroot, 'testcreatenewcase_with_user_compset_and_env_mach_pes') 
- if os.path.exists(testdir): - shutil.rmtree(testdir) - previous_testdir = cls._testdirs[-1] - cls._testdirs.append(testdir) - - pesfile = os.path.join(previous_testdir,"env_mach_pes.xml") - args = "--case %s --compset 2000_SATM_XLND_SICE_SOCN_XROF_XGLC_SWAV --pesfile %s --res f19_g16 --output-root %s --handle-preexisting-dirs=r" % (testdir, pesfile, cls._testroot) - if get_model() == "cesm": - args += " --run-unsupported" - if TEST_COMPILER is not None: - args += " --compiler %s"%TEST_COMPILER - if TEST_MPILIB is not None: - args += " --mpilib %s"%TEST_MPILIB - - run_cmd_assert_result(self, "%s/create_newcase %s"%(SCRIPT_DIR, args), from_dir=SCRIPT_DIR) - run_cmd_assert_result(self, "diff env_mach_pes.xml %s"%(previous_testdir), from_dir=testdir) - # this line should cause the diff to fail (I assume no machine is going to default to 17 tasks) - run_cmd_assert_result(self, "./xmlchange NTASKS=17", from_dir=testdir) - run_cmd_assert_result(self, "diff env_mach_pes.xml %s"%(previous_testdir), from_dir=testdir, - expected_stat=1) - - cls._do_teardown.append(testdir) - - def test_h_primary_component(self): - cls = self.__class__ - - testdir = os.path.join(cls._testroot, 'testprimarycomponent') - if os.path.exists(testdir): - shutil.rmtree(testdir) - - cls._testdirs.append(testdir) - args = " --case CreateNewcaseTest --script-root %s --compset X --output-root %s --handle-preexisting-dirs u" % (testdir, cls._testroot) - if get_model() == "cesm": - args += " --run-unsupported" - if TEST_COMPILER is not None: - args += " --compiler %s"%TEST_COMPILER - if TEST_MPILIB is not None: - args += " --mpilib %s"%TEST_MPILIB - if CIME.utils.get_cime_default_driver() == "nuopc": - args += " --res f19_g17 " - else: - args += " --res f19_g16 " - - run_cmd_assert_result(self, "%s/create_newcase %s" % (SCRIPT_DIR, args), from_dir=SCRIPT_DIR) - self.assertTrue(os.path.exists(testdir)) - self.assertTrue(os.path.exists(os.path.join(testdir, "case.setup"))) - - with Case(testdir, read_only=False) as case: - case._compsetname = case.get_value("COMPSET") - case.set_comp_classes(case.get_values("COMP_CLASSES")) - primary = case._find_primary_component() - self.assertEqual(primary, "drv", msg="primary component test expected drv but got %s"%primary) - # now we are going to corrupt the case so that we can do more primary_component testing - case.set_valid_values("COMP_GLC","%s,fred"%case.get_value("COMP_GLC")) - case.set_value("COMP_GLC","fred") - primary = case._find_primary_component() - self.assertEqual(primary, "fred", msg="primary component test expected fred but got %s"%primary) - case.set_valid_values("COMP_ICE","%s,wilma"%case.get_value("COMP_ICE")) - case.set_value("COMP_ICE","wilma") - primary = case._find_primary_component() - self.assertEqual(primary, "wilma", msg="primary component test expected wilma but got %s"%primary) - - case.set_valid_values("COMP_OCN","%s,bambam,docn"%case.get_value("COMP_OCN")) - case.set_value("COMP_OCN","bambam") - primary = case._find_primary_component() - self.assertEqual(primary, "bambam", msg="primary component test expected bambam but got %s"%primary) - - case.set_valid_values("COMP_LND","%s,barney"%case.get_value("COMP_LND")) - case.set_value("COMP_LND","barney") - primary = case._find_primary_component() - # This is a "J" compset - self.assertEqual(primary, "allactive", msg="primary component test expected allactive but got %s"%primary) - case.set_value("COMP_OCN","docn") - case.set_valid_values("COMP_LND","%s,barney"%case.get_value("COMP_LND")) - 
-    def test_h_primary_component(self):
-        cls = self.__class__
-
-        testdir = os.path.join(cls._testroot, 'testprimarycomponent')
-        if os.path.exists(testdir):
-            shutil.rmtree(testdir)
-
-        cls._testdirs.append(testdir)
-        args = " --case CreateNewcaseTest --script-root %s --compset X --output-root %s --handle-preexisting-dirs u" % (testdir, cls._testroot)
-        if get_model() == "cesm":
-            args += " --run-unsupported"
-        if TEST_COMPILER is not None:
-            args += " --compiler %s" % TEST_COMPILER
-        if TEST_MPILIB is not None:
-            args += " --mpilib %s" % TEST_MPILIB
-        if CIME.utils.get_cime_default_driver() == "nuopc":
-            args += " --res f19_g17 "
-        else:
-            args += " --res f19_g16 "
-
-        run_cmd_assert_result(self, "%s/create_newcase %s" % (SCRIPT_DIR, args), from_dir=SCRIPT_DIR)
-        self.assertTrue(os.path.exists(testdir))
-        self.assertTrue(os.path.exists(os.path.join(testdir, "case.setup")))
-
-        with Case(testdir, read_only=False) as case:
-            case._compsetname = case.get_value("COMPSET")
-            case.set_comp_classes(case.get_values("COMP_CLASSES"))
-            primary = case._find_primary_component()
-            self.assertEqual(primary, "drv", msg="primary component test expected drv but got %s" % primary)
-            # now we are going to corrupt the case so that we can do more primary_component testing
-            case.set_valid_values("COMP_GLC", "%s,fred" % case.get_value("COMP_GLC"))
-            case.set_value("COMP_GLC", "fred")
-            primary = case._find_primary_component()
-            self.assertEqual(primary, "fred", msg="primary component test expected fred but got %s" % primary)
-            case.set_valid_values("COMP_ICE", "%s,wilma" % case.get_value("COMP_ICE"))
-            case.set_value("COMP_ICE", "wilma")
-            primary = case._find_primary_component()
-            self.assertEqual(primary, "wilma", msg="primary component test expected wilma but got %s" % primary)
-
-            case.set_valid_values("COMP_OCN", "%s,bambam,docn" % case.get_value("COMP_OCN"))
-            case.set_value("COMP_OCN", "bambam")
-            primary = case._find_primary_component()
-            self.assertEqual(primary, "bambam", msg="primary component test expected bambam but got %s" % primary)
-
-            case.set_valid_values("COMP_LND", "%s,barney" % case.get_value("COMP_LND"))
-            case.set_value("COMP_LND", "barney")
-            primary = case._find_primary_component()
-            # This is a "J" compset
-            self.assertEqual(primary, "allactive", msg="primary component test expected allactive but got %s" % primary)
-            case.set_value("COMP_OCN", "docn")
-            case.set_valid_values("COMP_LND", "%s,barney" % case.get_value("COMP_LND"))
-            case.set_value("COMP_LND", "barney")
-            primary = case._find_primary_component()
-            self.assertEqual(primary, "barney", msg="primary component test expected barney but got %s" % primary)
-            case.set_valid_values("COMP_ATM", "%s,wilma" % case.get_value("COMP_ATM"))
-            case.set_value("COMP_ATM", "wilma")
-            primary = case._find_primary_component()
-            self.assertEqual(primary, "wilma", msg="primary component test expected wilma but got %s" % primary)
-            # this is an "E" compset
-            case._compsetname = case._compsetname.replace("XOCN", "DOCN%SOM")
-            primary = case._find_primary_component()
-            self.assertEqual(primary, "allactive", msg="primary component test expected allactive but got %s" % primary)
-            # finally a "B" compset
-            case.set_value("COMP_OCN", "bambam")
-            primary = case._find_primary_component()
-            self.assertEqual(primary, "allactive", msg="primary component test expected allactive but got %s" % primary)
-
-        cls._do_teardown.append(testdir)
-
-    def test_j_createnewcase_user_compset_vs_alias(self):
-        """
-        Create a compset using the alias and another compset using the full compset name
-        and make sure they are the same by comparing the namelist files in CaseDocs.
-        Ignore the modelio files and clean the directory names out first.
-        """
-        cls = self.__class__
-
-        testdir1 = os.path.join(cls._testroot, 'testcreatenewcase_user_compset')
-        if os.path.exists(testdir1):
-            shutil.rmtree(testdir1)
-        cls._testdirs.append(testdir1)
-
-        args = ' --case CreateNewcaseTest --script-root {} --compset 2000_DATM%NYF_SLND_SICE_DOCN%SOMAQP_SROF_SGLC_SWAV --output-root {} --handle-preexisting-dirs u'.format(testdir1, cls._testroot)
-        if CIME.utils.get_cime_default_driver() == "nuopc":
-            args += " --res f19_g17 "
-        else:
-            args += " --res f19_g16 "
-        if get_model() == "cesm":
-            args += " --run-unsupported"
-        if TEST_COMPILER is not None:
-            args += " --compiler %s" % TEST_COMPILER
-        if TEST_MPILIB is not None:
-            args += " --mpilib %s" % TEST_MPILIB
-
-        run_cmd_assert_result(self, "{}/create_newcase {}".format(SCRIPT_DIR, args), from_dir=SCRIPT_DIR)
-        run_cmd_assert_result(self, "./case.setup", from_dir=testdir1)
-        run_cmd_assert_result(self, "./preview_namelists", from_dir=testdir1)
-
-        dir1 = os.path.join(testdir1, "CaseDocs")
-        dir2 = os.path.join(testdir1, "CleanCaseDocs")
-        os.mkdir(dir2)
-        for _file in os.listdir(dir1):
-            if "modelio" in _file:
-                continue
-            with open(os.path.join(dir1, _file), "r") as fi:
-                file_text = fi.read()
-                file_text = file_text.replace(os.path.basename(testdir1), "PATH")
-                file_text = re.sub(r"logfile =.*", "", file_text)
-            with open(os.path.join(dir2, _file), "w") as fo:
-                fo.write(file_text)
-        cleancasedocs1 = dir2
-
-        testdir2 = os.path.join(cls._testroot, 'testcreatenewcase_alias_compset')
-        if os.path.exists(testdir2):
-            shutil.rmtree(testdir2)
-        cls._testdirs.append(testdir2)
-        args = ' --case CreateNewcaseTest --script-root {} --compset ADSOMAQP --output-root {} --handle-preexisting-dirs u'.format(testdir2, cls._testroot)
-        if CIME.utils.get_cime_default_driver() == "nuopc":
-            args += " --res f19_g17 "
-        else:
-            args += " --res f19_g16 "
-        if get_model() == "cesm":
-            args += " --run-unsupported"
-        if TEST_COMPILER is not None:
-            args += " --compiler %s" % TEST_COMPILER
-        if TEST_MPILIB is not None:
-            args += " --mpilib %s" % TEST_MPILIB
-
-        run_cmd_assert_result(self, "{}/create_newcase {}".format(SCRIPT_DIR, args), from_dir=SCRIPT_DIR)
-        run_cmd_assert_result(self, "./case.setup", from_dir=testdir2)
-        run_cmd_assert_result(self, "./preview_namelists", from_dir=testdir2)
-
-        dir1 = os.path.join(testdir2, "CaseDocs")
-        dir2 = os.path.join(testdir2, "CleanCaseDocs")
-        os.mkdir(dir2)
-        for _file in os.listdir(dir1):
-            if "modelio" in _file:
-                continue
-            with open(os.path.join(dir1, _file), "r") as fi:
-                file_text = fi.read()
-                file_text = file_text.replace(os.path.basename(testdir2), "PATH")
-                file_text = re.sub(r"logfile =.*", "", file_text)
-            with open(os.path.join(dir2, _file), "w") as fo:
-                fo.write(file_text)
-
-        cleancasedocs2 = dir2
-        dcmp = filecmp.dircmp(cleancasedocs1, cleancasedocs2)
-        self.assertTrue(len(dcmp.diff_files) == 0, "CaseDocs differ {}".format(dcmp.diff_files))
-
-        cls._do_teardown.append(testdir1)
-        cls._do_teardown.append(testdir2)
-
-    def test_k_append_config(self):
-        machlist_before = MACHINE.list_available_machines()
-        self.assertEqual(len(machlist_before) > 1, True, msg="Problem reading machine list")
-
-        newmachfile = os.path.join(get_cime_root(), "config",
-                                   "xml_schemas", "config_machines_template.xml")
-        MACHINE.read(newmachfile)
-        machlist_after = MACHINE.list_available_machines()
-
-        self.assertEqual(len(machlist_after) - len(machlist_before), 1, msg="Not able to append config_machines.xml {} {}".format(len(machlist_after), len(machlist_before)))
-        self.assertEqual("mymachine" in machlist_after, True, msg="Not able to append config_machines.xml")
-
-    def test_ka_createnewcase_extra_machines_dir(self):
-        # Test that we pick up changes in both config_machines.xml and
-        # config_compilers.xml in a directory specified with the --extra-machines-dir
-        # argument to create_newcase.
-        cls = self.__class__
-        casename = 'testcreatenewcase_extra_machines_dir'
-
-        # Setup: stage some xml files in a temporary directory
-        extra_machines_dir = os.path.join(cls._testroot, '{}_machine_config'.format(casename))
-        os.makedirs(extra_machines_dir)
-        cls._do_teardown.append(extra_machines_dir)
-        newmachfile = os.path.join(get_cime_root(), "config",
-                                   "xml_schemas", "config_machines_template.xml")
-        safe_copy(newmachfile, os.path.join(extra_machines_dir, "config_machines.xml"))
-        os.environ["CIME_NO_CMAKE_MACRO"] = "ON"
-        config_compilers_text = """\
-<?xml version="1.0" encoding="UTF-8"?>
-<config_compilers version="2.0">
-<compiler>
-  <NETCDF_PATH>/my/netcdf/path</NETCDF_PATH>
-</compiler>
-</config_compilers>
-"""
-        config_compilers_path = os.path.join(extra_machines_dir, "config_compilers.xml")
-        with open(config_compilers_path, "w") as config_compilers:
-            config_compilers.write(config_compilers_text)
-
-        # Create the case
-        testdir = os.path.join(cls._testroot, casename)
-        if os.path.exists(testdir):
-            shutil.rmtree(testdir)
-        # In the following, note that 'mymachine' is the machine name defined in
-        # config_machines_template.xml
-        args = (" --case {testdir} --compset X --mach mymachine"
-                " --output-root {testroot} --non-local"
-                " --extra-machines-dir {extra_machines_dir}".format(
-                    testdir=testdir, testroot=cls._testroot,
-                    extra_machines_dir=extra_machines_dir))
-        if get_model() == "cesm":
-            args += " --run-unsupported"
-
-        if CIME.utils.get_cime_default_driver() == "nuopc":
-            args += " --res f19_g17 "
-        else:
-            args += " --res f19_g16 "
-        run_cmd_assert_result(self, "./create_newcase {}".format(args),
-                              from_dir=SCRIPT_DIR)
-        cls._do_teardown.append(testdir)
-
-        # Run case.setup
-        run_cmd_assert_result(self, "./case.setup",
-                              from_dir=testdir)
-
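The Macros check that follows, shown standalone: read the generated Macros.make and verify that the NETCDF_PATH entry staged through --extra-machines-dir survived into the build macros. A minimal sketch; macros_file_name is a placeholder path inside the case directory.

import re

macros_file_name = "Macros.make"  # placeholder
with open(macros_file_name) as macros_file:
    macros_contents = macros_file.read()

# the setting may be rendered with extra text around it, so search by regex
expected_re = re.compile("NETCDF_PATH.*/my/netcdf/path")
assert expected_re.search(macros_contents) is not None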
-        # Make sure Macros file contains expected text
-        if get_model() != "e3sm":
-            macros_file_name = os.path.join(testdir, "Macros.make")
-            self.assertTrue(os.path.isfile(macros_file_name))
-            with open(macros_file_name) as macros_file:
-                macros_contents = macros_file.read()
-            expected_re = re.compile("NETCDF_PATH.*/my/netcdf/path")
-            self.assertTrue(expected_re.search(macros_contents))
-        del os.environ["CIME_NO_CMAKE_MACRO"]
-
-    def test_m_createnewcase_alternate_drivers(self):
-        # Test that case.setup runs for nuopc and moab drivers
-        cls = self.__class__
-        model = get_model()
-        for driver in ("nuopc", "moab"):
-            if not os.path.exists(os.path.join(get_cime_root(), "src", "drivers", driver)):
-                self.skipTest("Skipping driver test for {}, driver not found".format(driver))
-            if ((model == 'cesm' and driver == 'moab') or
-                    (model == 'e3sm' and driver == 'nuopc')):
-                continue
-
-            testdir = os.path.join(cls._testroot, 'testcreatenewcase.{}'.format(driver))
-            if os.path.exists(testdir):
-                shutil.rmtree(testdir)
-            args = " --driver {} --case {} --compset X --res f19_g16 --output-root {} --handle-preexisting-dirs=r".format(driver, testdir, cls._testroot)
-            if model == "cesm":
-                args += " --run-unsupported"
-            if TEST_COMPILER is not None:
-                args += " --compiler %s" % TEST_COMPILER
-            if TEST_MPILIB is not None:
-                args += " --mpilib %s" % TEST_MPILIB
-
-            cls._testdirs.append(testdir)
-            run_cmd_assert_result(self, "./create_newcase %s" % (args), from_dir=SCRIPT_DIR)
-            self.assertTrue(os.path.exists(testdir))
-            self.assertTrue(os.path.exists(os.path.join(testdir, "case.setup")))
-
-            run_cmd_assert_result(self, "./case.setup", from_dir=testdir)
-            with Case(testdir, read_only=False) as case:
-                comp_interface = case.get_value("COMP_INTERFACE")
-                self.assertTrue(driver == comp_interface, msg="%s != %s" % (driver, comp_interface))
-
-            cls._do_teardown.append(testdir)
-
-    def test_n_createnewcase_bad_compset(self):
-        cls = self.__class__
-        model = get_model()
-
-        testdir = os.path.join(cls._testroot, 'testcreatenewcase_bad_compset')
-        if os.path.exists(testdir):
-            shutil.rmtree(testdir)
-        args = " --case %s --compset InvalidCompsetName --output-root %s --handle-preexisting-dirs=r " % (testdir, cls._testroot)
-        if model == "cesm":
-            args += " --run-unsupported"
-        if TEST_COMPILER is not None:
-            args += " --compiler %s" % TEST_COMPILER
-        if TEST_MPILIB is not None:
-            args += " --mpilib %s" % TEST_MPILIB
-        if CIME.utils.get_cime_default_driver() == "nuopc":
-            args += " --res f19_g17 "
-        else:
-            args += " --res f19_g16 "
-
-        run_cmd_assert_result(self, "./create_newcase %s" % (args),
-                              from_dir=SCRIPT_DIR, expected_stat=1)
-        self.assertFalse(os.path.exists(testdir))
-
-    @classmethod
-    def tearDownClass(cls):
-        do_teardown = len(cls._do_teardown) > 0 and sys.exc_info() == (None, None, None) and not NO_TEARDOWN
-        rmtestroot = True
-        for tfile in cls._testdirs:
-            if tfile not in cls._do_teardown:
-                print("Detected failed test or user request no teardown")
-                print("Leaving case directory : %s" % tfile)
-                rmtestroot = False
-            elif do_teardown:
-                try:
-                    print("Attempt to remove directory {}".format(tfile))
-                    shutil.rmtree(tfile)
-                except BaseException:
-                    print("Could not remove directory {}".format(tfile))
-        if rmtestroot and do_teardown:
-            shutil.rmtree(cls._testroot)
-
-###############################################################################
-class M_TestWaitForTests(unittest.TestCase):
-###############################################################################
-
-    ###########################################################################
-    def setUp(self):
-    ###########################################################################
-        self._testroot = os.path.join(TEST_ROOT, "TestWaitForTests")
-        self._timestamp = CIME.utils.get_timestamp()
-
-        # basic tests
-        self._testdir_all_pass = os.path.join(self._testroot, 'scripts_regression_tests.testdir_all_pass')
-        self._testdir_with_fail = os.path.join(self._testroot, 'scripts_regression_tests.testdir_with_fail')
-        self._testdir_unfinished = os.path.join(self._testroot, 'scripts_regression_tests.testdir_unfinished')
-        self._testdir_unfinished2 = os.path.join(self._testroot, 'scripts_regression_tests.testdir_unfinished2')
-
-        # live tests
-        self._testdir_teststatus1 = os.path.join(self._testroot, 'scripts_regression_tests.testdir_teststatus1')
-        self._testdir_teststatus2 = os.path.join(self._testroot, 'scripts_regression_tests.testdir_teststatus2')
-
-        self._testdirs = [self._testdir_all_pass, self._testdir_with_fail, self._testdir_unfinished, self._testdir_unfinished2,
-                          self._testdir_teststatus1, self._testdir_teststatus2]
-        basic_tests = self._testdirs[:self._testdirs.index(self._testdir_teststatus1)]
-
-        for testdir in self._testdirs:
-            if os.path.exists(testdir):
-                shutil.rmtree(testdir)
-            os.makedirs(testdir)
-
-        for r in range(10):
-            for testdir in basic_tests:
-                os.makedirs(os.path.join(testdir, str(r)))
-                make_fake_teststatus(os.path.join(testdir, str(r)), "Test_%d" % r, TEST_PASS_STATUS, RUN_PHASE)
-
-        make_fake_teststatus(os.path.join(self._testdir_with_fail, "5"), "Test_5", TEST_FAIL_STATUS, RUN_PHASE)
-        make_fake_teststatus(os.path.join(self._testdir_unfinished, "5"), "Test_5", TEST_PEND_STATUS, RUN_PHASE)
-        make_fake_teststatus(os.path.join(self._testdir_unfinished2, "5"), "Test_5", TEST_PASS_STATUS, SUBMIT_PHASE)
-
-        integration_tests = self._testdirs[len(basic_tests):]
-        for integration_test in integration_tests:
-            os.makedirs(os.path.join(integration_test, "0"))
-            make_fake_teststatus(os.path.join(integration_test, "0"), "Test_0", TEST_PASS_STATUS, CORE_PHASES[0])
-
-        # Set up proxy if possible
-        self._unset_proxy = setup_proxy()
-
-        self._thread_error = None
-
-    ###########################################################################
-    def tearDown(self):
-    ###########################################################################
-        do_teardown = sys.exc_info() == (None, None, None) and not NO_TEARDOWN
-
-        if do_teardown:
-            for testdir in self._testdirs:
-                shutil.rmtree(testdir)
-
-        kill_subprocesses()
-
-        if (self._unset_proxy):
-            del os.environ["http_proxy"]
-
-    ###########################################################################
-    def simple_test(self, testdir, expected_results, extra_args="", build_name=None):
-    ###########################################################################
-        # Need these flags to test dashboard if e3sm
-        if get_model() == "e3sm" and build_name is not None:
-            extra_args += " -b %s" % build_name
-
-        expected_stat = 0
-        for expected_result in expected_results:
-            if not (expected_result == "PASS" or (expected_result == "PEND" and "-n" in extra_args)):
-                expected_stat = CIME.utils.TESTS_FAILED_ERR_CODE
-
-        output = run_cmd_assert_result(self, "%s/wait_for_tests -p ACME_test */TestStatus %s" % (TOOLS_DIR, extra_args),
-                                       from_dir=testdir, expected_stat=expected_stat)
-
-        lines = [line for line in output.splitlines() if (line.startswith("PASS") or line.startswith("FAIL") or line.startswith("PEND"))]
-        self.assertEqual(len(lines), len(expected_results))
-        for idx, line in enumerate(lines):
-            testname, status = parse_test_status(line)
-            self.assertEqual(status, expected_results[idx])
-            self.assertEqual(testname, "Test_%d" % idx)
-
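The threaded_test wrapper defined next follows a standard unittest idiom: an assertion raised off the main thread is invisible to the test runner, so the worker catches AssertionError and stashes it for the main thread to check. A self-contained sketch of the same pattern:

import threading

class Harness:
    def __init__(self):
        self._thread_error = None

    def blocking_check(self):
        # stands in for simple_test, which blocks on wait_for_tests
        raise AssertionError("demonstration failure")

    def threaded_check(self):
        try:
            self.blocking_check()
        except AssertionError as e:
            self._thread_error = str(e)  # recorded for the main thread to assert on

harness = Harness()
run_thread = threading.Thread(target=harness.threaded_check)
run_thread.daemon = True  # do not block interpreter exit
run_thread.start()
run_thread.join(timeout=10)
assert harness._thread_error == "demonstration failure"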
-    ###########################################################################
-    def threaded_test(self, testdir, expected_results, extra_args="", build_name=None):
-    ###########################################################################
-        try:
-            self.simple_test(testdir, expected_results, extra_args, build_name)
-        except AssertionError as e:
-            self._thread_error = str(e)
-
-    ###########################################################################
-    def test_wait_for_test_all_pass(self):
-    ###########################################################################
-        self.simple_test(self._testdir_all_pass, ["PASS"] * 10)
-
-    ###########################################################################
-    def test_wait_for_test_with_fail(self):
-    ###########################################################################
-        expected_results = ["FAIL" if item == 5 else "PASS" for item in range(10)]
-        self.simple_test(self._testdir_with_fail, expected_results)
-
-    ###########################################################################
-    def test_wait_for_test_no_wait(self):
-    ###########################################################################
-        expected_results = ["PEND" if item == 5 else "PASS" for item in range(10)]
-        self.simple_test(self._testdir_unfinished, expected_results, "-n")
-
-    ###########################################################################
-    def test_wait_for_test_timeout(self):
-    ###########################################################################
-        expected_results = ["PEND" if item == 5 else "PASS" for item in range(10)]
-        self.simple_test(self._testdir_unfinished, expected_results, "--timeout=3")
-
-    ###########################################################################
-    def test_wait_for_test_wait_for_pend(self):
-    ###########################################################################
-        run_thread = threading.Thread(target=self.threaded_test, args=(self._testdir_unfinished, ["PASS"] * 10))
-        run_thread.daemon = True
-        run_thread.start()
-
-        time.sleep(5) # Kinda hacky
-
-        self.assertTrue(run_thread.is_alive(), msg="wait_for_tests should have waited")
-
-        with TestStatus(test_dir=os.path.join(self._testdir_unfinished, "5")) as ts:
-            ts.set_status(RUN_PHASE, TEST_PASS_STATUS)
-
-        run_thread.join(timeout=10)
-
-        self.assertFalse(run_thread.is_alive(), msg="wait_for_tests should have finished")
-
-        self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error)
-
-    ###########################################################################
-    def test_wait_for_test_wait_for_missing_run_phase(self):
-    ###########################################################################
-        run_thread = threading.Thread(target=self.threaded_test, args=(self._testdir_unfinished2, ["PASS"] * 10))
-        run_thread.daemon = True
-        run_thread.start()
-
-        time.sleep(5) # Kinda hacky
-
-        self.assertTrue(run_thread.is_alive(), msg="wait_for_tests should have waited")
-
-        with TestStatus(test_dir=os.path.join(self._testdir_unfinished2, "5")) as ts:
-            ts.set_status(RUN_PHASE, TEST_PASS_STATUS)
-
-        run_thread.join(timeout=10)
-
-        self.assertFalse(run_thread.is_alive(), msg="wait_for_tests should have finished")
-
-        self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error)
-
-    ###########################################################################
-    def test_wait_for_test_wait_kill(self):
-    ###########################################################################
-        expected_results = ["PEND" if item == 5 else "PASS" for item in range(10)]
-        run_thread = threading.Thread(target=self.threaded_test, args=(self._testdir_unfinished, expected_results))
-        run_thread.daemon = True
-        run_thread.start()
-
-        time.sleep(5)
-
-        self.assertTrue(run_thread.is_alive(), msg="wait_for_tests should have waited")
-
-        kill_python_subprocesses(signal.SIGTERM, expected_num_killed=1, tester=self)
-
-        run_thread.join(timeout=10)
-
-        self.assertFalse(run_thread.is_alive(), msg="wait_for_tests should have finished")
-
-        self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error)
-
-    ###########################################################################
-    def test_wait_for_test_cdash_pass(self):
-    ###########################################################################
-        expected_results = ["PASS"] * 10
-        build_name = "regression_test_pass_" + self._timestamp
-        run_thread = threading.Thread(target=self.threaded_test,
-                                      args=(self._testdir_all_pass, expected_results, "", build_name))
-        run_thread.daemon = True
-        run_thread.start()
-
-        run_thread.join(timeout=10)
-
-        self.assertFalse(run_thread.is_alive(), msg="wait_for_tests should have finished")
-
-        self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error)
-
-        assert_dashboard_has_build(self, build_name)
-
-    ###########################################################################
-    def test_wait_for_test_cdash_kill(self):
-    ###########################################################################
-        expected_results = ["PEND" if item == 5 else "PASS" for item in range(10)]
-        build_name = "regression_test_kill_" + self._timestamp
-        run_thread = threading.Thread(target=self.threaded_test,
-                                      args=(self._testdir_unfinished, expected_results, "", build_name))
-        run_thread.daemon = True
-        run_thread.start()
-
-        time.sleep(5)
-
-        self.assertTrue(run_thread.is_alive(), msg="wait_for_tests should have waited")
-
-        kill_python_subprocesses(signal.SIGTERM, expected_num_killed=1, tester=self)
-
-        run_thread.join(timeout=10)
-
-        self.assertFalse(run_thread.is_alive(), msg="wait_for_tests should have finished")
-        self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error)
-
-        assert_dashboard_has_build(self, build_name)
-
-        if get_model() == "e3sm":
-            cdash_result_dir = os.path.join(self._testdir_unfinished, "Testing")
-            tag_file = os.path.join(cdash_result_dir, "TAG")
-            self.assertTrue(os.path.isdir(cdash_result_dir))
-            self.assertTrue(os.path.isfile(tag_file))
-
-            tag = open(tag_file, "r").readlines()[0].strip()
-            xml_file = os.path.join(cdash_result_dir, tag, "Test.xml")
-            self.assertTrue(os.path.isfile(xml_file))
-
-            xml_contents = open(xml_file, "r").read()
-            self.assertTrue(r'Test_0Test_1Test_2Test_3Test_4Test_5Test_6Test_7Test_8Test_9'
-                            in xml_contents)
-            self.assertTrue(r'Test_5' in xml_contents)
-
-            # TODO: Any further checking of xml output worth doing?
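The CTest bookkeeping inspected at the end of the test above, in isolation: CTest writes a Testing/TAG file whose first line names the timestamped subdirectory that holds Test.xml for that run. A minimal sketch; cdash_result_dir is a placeholder path.

import os

cdash_result_dir = "/path/to/case/Testing"  # placeholder
with open(os.path.join(cdash_result_dir, "TAG")) as tag_fd:
    tag = tag_fd.readlines()[0].strip()  # first line: the result subdirectory name

xml_file = os.path.join(cdash_result_dir, tag, "Test.xml")
assert os.path.isfile(xml_file)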
-
-    ###########################################################################
-    def live_test_impl(self, testdir, expected_results, last_phase, last_status):
-    ###########################################################################
-        run_thread = threading.Thread(target=self.threaded_test, args=(testdir, expected_results))
-        run_thread.daemon = True
-        run_thread.start()
-
-        time.sleep(5)
-
-        self.assertTrue(run_thread.is_alive(), msg="wait_for_tests should have waited")
-
-        for core_phase in CORE_PHASES[1:]:
-            with TestStatus(test_dir=os.path.join(self._testdir_teststatus1, "0")) as ts:
-                ts.set_status(core_phase, last_status if core_phase == last_phase else TEST_PASS_STATUS)
-
-            time.sleep(5)
-
-            if core_phase != last_phase:
-                self.assertTrue(run_thread.is_alive(), msg="wait_for_tests should have waited after passing phase {}".format(core_phase))
-            else:
-                run_thread.join(timeout=10)
-                self.assertFalse(run_thread.is_alive(), msg="wait_for_tests should have finished after phase {}".format(core_phase))
-                break
-
-        self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error)
-
-    ###########################################################################
-    def test_wait_for_test_test_status_integration_pass(self):
-    ###########################################################################
-        self.live_test_impl(self._testdir_teststatus1, ["PASS"], RUN_PHASE, TEST_PASS_STATUS)
-
-    ###########################################################################
-    def test_wait_for_test_test_status_integration_submit_fail(self):
-    ###########################################################################
-        self.live_test_impl(self._testdir_teststatus1, ["FAIL"], SUBMIT_PHASE, TEST_FAIL_STATUS)
-
-###############################################################################
-class TestCreateTestCommon(unittest.TestCase):
-###############################################################################
-
-    ###########################################################################
-    def setUp(self):
-    ###########################################################################
-        self._thread_error = None
-        self._unset_proxy = setup_proxy()
-        self._machine = MACHINE.get_machine_name()
-        self._compiler = MACHINE.get_default_compiler() if TEST_COMPILER is None else TEST_COMPILER
-        self._baseline_name = "fake_testing_only_%s" % CIME.utils.get_timestamp()
-        self._baseline_area = os.path.join(TEST_ROOT, "baselines")
-        self._testroot = TEST_ROOT
-        self._hasbatch = MACHINE.has_batch_system() and not NO_BATCH
-        self._do_teardown = not NO_TEARDOWN
-        self._root_dir = os.getcwd()
-
-    ###########################################################################
-    def tearDown(self):
-    ###########################################################################
-        kill_subprocesses()
-
-        os.chdir(self._root_dir)
-
-        if (self._unset_proxy):
-            del os.environ["http_proxy"]
-
-        files_to_clean = []
-
-        baselines = os.path.join(self._baseline_area, self._baseline_name)
-        if (os.path.isdir(baselines)):
-            files_to_clean.append(baselines)
-
-        for test_id in ["master", self._baseline_name]:
-            for leftover in glob.glob(os.path.join(self._testroot, "*%s*" % test_id)):
-                files_to_clean.append(leftover)
-
-        do_teardown = self._do_teardown and sys.exc_info() == (None, None, None)
-        if (not do_teardown and files_to_clean):
-            print("Detected failed test or user request no teardown")
-            print("Leaving files:")
-            for file_to_clean in files_to_clean:
-                print(" " + file_to_clean)
-        else:
-            # For batch machines we need to avoid a race condition as the batch
-            # system finishes I/O for the case.
-            if self._hasbatch:
-                time.sleep(5)
-
-            for file_to_clean in files_to_clean:
-                if (os.path.isdir(file_to_clean)):
-                    shutil.rmtree(file_to_clean)
-                else:
-                    os.remove(file_to_clean)
-
-    ###########################################################################
-    def _create_test(self, extra_args, test_id=None, run_errors=False, env_changes=""):
-    ###########################################################################
-        """
-        Convenience wrapper around create_test. Returns list of full paths to created cases. If multiple cases,
-        the order of the returned list is not guaranteed to match the order of the arguments.
-        """
-        # All-stub compsets are not supported with the nuopc driver
-        driver = CIME.utils.get_cime_default_driver()
-        if driver == 'nuopc' and 'cime_developer' in extra_args:
-            extra_args.append(" ^SMS_Ln3.T42_T42.S ^PRE.f19_f19.ADESP_TEST ^PRE.f19_f19.ADESP ^DAE.ww3a.ADWAV")
-
-        test_id = "{}-{}".format(self._baseline_name, CIME.utils.get_timestamp()) if test_id is None else test_id
-        extra_args.append("-t {}".format(test_id))
-        extra_args.append("--baseline-root {}".format(self._baseline_area))
-        if NO_BATCH:
-            extra_args.append("--no-batch")
-        if TEST_COMPILER and ([extra_arg for extra_arg in extra_args if "--compiler" in extra_arg] == []):
-            extra_args.append("--compiler={}".format(TEST_COMPILER))
-        if TEST_MPILIB and ([extra_arg for extra_arg in extra_args if "--mpilib" in extra_arg] == []):
-            extra_args.append("--mpilib={}".format(TEST_MPILIB))
-        extra_args.append("--test-root={0} --output-root={0}".format(self._testroot))
-
-        full_run = (set(extra_args) & set(["-n", "--namelist-only", "--no-setup", "--no-build", "--no-run"])) == set()
-        if full_run and not NO_BATCH:
-            extra_args.append("--wait")
-
-        expected_stat = 0 if not run_errors else CIME.utils.TESTS_FAILED_ERR_CODE
-
-        output = run_cmd_assert_result(self, "{} {}/create_test {}".format(env_changes, SCRIPT_DIR, " ".join(extra_args)),
-                                       expected_stat=expected_stat)
-        cases = []
-        for line in output.splitlines():
-            if "Case dir:" in line:
-                casedir = line.split()[-1]
-                self.assertTrue(os.path.isdir(casedir), msg="Missing casedir {}".format(casedir))
-                cases.append(casedir)
-
-        self.assertTrue(len(cases) > 0, "create_test made no cases")
-
-        return cases[0] if len(cases) == 1 else cases
-
-    ###########################################################################
-    def _wait_for_tests(self, test_id, expect_works=True, always_wait=False):
-    ###########################################################################
-        if self._hasbatch or always_wait:
-            timeout_arg = "--timeout={}".format(GLOBAL_TIMEOUT) if GLOBAL_TIMEOUT is not None else ""
-            expected_stat = 0 if expect_works else CIME.utils.TESTS_FAILED_ERR_CODE
-            run_cmd_assert_result(self, "{}/wait_for_tests {} *{}/TestStatus".format(TOOLS_DIR, timeout_arg, test_id),
-                                  from_dir=self._testroot, expected_stat=expected_stat)
-
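The first scheduler test below builds its test list with the "^" exclusion convention: a name prefixed with "^" is removed from the expanded suite. A simplified stand-in for CIME.get_tests.get_full_test_names, not the real implementation:

def expand_test_names(requested, suites):
    included, excluded = [], []
    for name in requested:
        if name.startswith("^"):
            excluded.append(name[1:])          # exclusion entry
        elif name in suites:
            included.extend(suites[name])      # suite alias expands to its members
        else:
            included.append(name)              # literal test name
    return [test for test in included if test not in excluded]

suites = {"cime_test_only": ["TESTRUNPASS_P1.f19_g16_rx1.A",
                             "TESTMEMLEAKFAIL_P1.f09_g16.X"]}
print(expand_test_names(["cime_test_only", "^TESTMEMLEAKFAIL_P1.f09_g16.X"], suites))
# ['TESTRUNPASS_P1.f19_g16_rx1.A']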
-###############################################################################
-class O_TestTestScheduler(TestCreateTestCommon):
-###############################################################################
-
-    ###########################################################################
-    def test_a_phases(self):
-    ###########################################################################
-        # exclude the MEMLEAK tests here.
-        tests = CIME.get_tests.get_full_test_names(["cime_test_only",
-                                                    "^TESTMEMLEAKFAIL_P1.f09_g16.X",
-                                                    "^TESTMEMLEAKPASS_P1.f09_g16.X",
-                                                    "^TESTRUNSTARCFAIL_P1.f19_g16_rx1.A",
-                                                    "^TESTTESTDIFF_P1.f19_g16_rx1.A",
-                                                    "^TESTBUILDFAILEXC_P1.f19_g16_rx1.A",
-                                                    "^TESTRUNFAILEXC_P1.f19_g16_rx1.A"],
-                                                   self._machine, self._compiler)
-        self.assertEqual(len(tests), 3)
-        ct = TestScheduler(tests, test_root=self._testroot, output_root=self._testroot,
-                           compiler=self._compiler, mpilib=TEST_MPILIB)
-
-        build_fail_test = [item for item in tests if "TESTBUILDFAIL" in item][0]
-        run_fail_test = [item for item in tests if "TESTRUNFAIL" in item][0]
-        pass_test = [item for item in tests if "TESTRUNPASS" in item][0]
-
-        self.assertTrue("BUILDFAIL" in build_fail_test, msg="Wrong test '%s'" % build_fail_test)
-        self.assertTrue("RUNFAIL" in run_fail_test, msg="Wrong test '%s'" % run_fail_test)
-        self.assertTrue("RUNPASS" in pass_test, msg="Wrong test '%s'" % pass_test)
-
-        for idx, phase in enumerate(ct._phases):
-            for test in ct._tests:
-                if (phase == CIME.test_scheduler.TEST_START):
-                    continue
-                elif (phase == MODEL_BUILD_PHASE):
-                    ct._update_test_status(test, phase, TEST_PEND_STATUS)
-
-                    if (test == build_fail_test):
-                        ct._update_test_status(test, phase, TEST_FAIL_STATUS)
-                        self.assertTrue(ct._is_broken(test))
-                        self.assertFalse(ct._work_remains(test))
-                    else:
-                        ct._update_test_status(test, phase, TEST_PASS_STATUS)
-                        self.assertFalse(ct._is_broken(test))
-                        self.assertTrue(ct._work_remains(test))
-
-                elif (phase == RUN_PHASE):
-                    if (test == build_fail_test):
-                        with self.assertRaises(CIMEError):
-                            ct._update_test_status(test, phase, TEST_PEND_STATUS)
-                    else:
-                        ct._update_test_status(test, phase, TEST_PEND_STATUS)
-                        self.assertFalse(ct._work_remains(test))
-
-                        if (test == run_fail_test):
-                            ct._update_test_status(test, phase, TEST_FAIL_STATUS)
-                            self.assertTrue(ct._is_broken(test))
-                        else:
-                            ct._update_test_status(test, phase, TEST_PASS_STATUS)
-                            self.assertFalse(ct._is_broken(test))
-
-                        self.assertFalse(ct._work_remains(test))
-
-                else:
-                    with self.assertRaises(CIMEError):
-                        ct._update_test_status(test, ct._phases[idx+1], TEST_PEND_STATUS)
-
-                    with self.assertRaises(CIMEError):
-                        ct._update_test_status(test, phase, TEST_PASS_STATUS)
-
-                    ct._update_test_status(test, phase, TEST_PEND_STATUS)
-                    self.assertFalse(ct._is_broken(test))
-                    self.assertTrue(ct._work_remains(test))
-
-                    with self.assertRaises(CIMEError):
-                        ct._update_test_status(test, phase, TEST_PEND_STATUS)
-
-                    ct._update_test_status(test, phase, TEST_PASS_STATUS)
-
-                    with self.assertRaises(CIMEError):
-                        ct._update_test_status(test, phase, TEST_FAIL_STATUS)
-
-                    self.assertFalse(ct._is_broken(test))
-                    self.assertTrue(ct._work_remains(test))
-
-    ###########################################################################
-    def test_b_full(self):
-    ###########################################################################
-        tests = CIME.get_tests.get_full_test_names(["cime_test_only"], self._machine, self._compiler)
-        test_id = "%s-%s" % (self._baseline_name, CIME.utils.get_timestamp())
-        ct = TestScheduler(tests, test_id=test_id, no_batch=NO_BATCH, test_root=self._testroot,
-                           output_root=self._testroot, compiler=self._compiler, mpilib=TEST_MPILIB)
-
-        build_fail_test = [item for item in tests if "TESTBUILDFAIL_" in item][0]
-        build_fail_exc_test = [item for item in tests if "TESTBUILDFAILEXC" in item][0]
-        run_fail_test = [item for item in tests if "TESTRUNFAIL_" in item][0]
-        run_fail_exc_test = [item for item in tests if "TESTRUNFAILEXC" in item][0]
-        pass_test = [item for item in tests if "TESTRUNPASS" in item][0]
-        test_diff_test = [item for item in tests if "TESTTESTDIFF" in item][0]
-        mem_fail_test = [item for item in tests if "TESTMEMLEAKFAIL" in item][0]
-        mem_pass_test = [item for item in tests if "TESTMEMLEAKPASS" in item][0]
-        st_arch_fail_test = [item for item in tests if "TESTRUNSTARCFAIL" in item][0]
-
-        log_lvl = logging.getLogger().getEffectiveLevel()
-        logging.disable(logging.CRITICAL)
-        try:
-            ct.run_tests()
-        finally:
-            logging.getLogger().setLevel(log_lvl)
-
-        self._wait_for_tests(test_id, expect_works=False)
-
-        test_statuses = glob.glob("%s/*%s/TestStatus" % (self._testroot, test_id))
-        self.assertEqual(len(tests), len(test_statuses))
-
-        for test_status in test_statuses:
-            ts = TestStatus(test_dir=os.path.dirname(test_status))
-            test_name = ts.get_name()
-            log_files = glob.glob("%s/%s*%s/TestStatus.log" % (self._testroot, test_name, test_id))
-            self.assertEqual(len(log_files), 1, "Expected exactly one TestStatus.log file, found %d" % len(log_files))
-            log_file = log_files[0]
-            if (test_name == build_fail_test):
-                assert_test_status(self, test_name, ts, MODEL_BUILD_PHASE, TEST_FAIL_STATUS)
-                data = open(log_file, "r").read()
-                self.assertTrue("Intentional fail for testing infrastructure" in data,
-                                "Broken test did not report build error:\n%s" % data)
-            elif (test_name == build_fail_exc_test):
-                data = open(log_file, "r").read()
-                assert_test_status(self, test_name, ts, SHAREDLIB_BUILD_PHASE, TEST_FAIL_STATUS)
-                self.assertTrue("Exception from init" in data,
-                                "Broken test did not report build error:\n%s" % data)
-            elif (test_name == run_fail_test):
-                assert_test_status(self, test_name, ts, RUN_PHASE, TEST_FAIL_STATUS)
-            elif (test_name == run_fail_exc_test):
-                assert_test_status(self, test_name, ts, RUN_PHASE, TEST_FAIL_STATUS)
-                data = open(log_file, "r").read()
-                self.assertTrue("Exception from run_phase" in data,
-                                "Broken test did not report run error:\n%s" % data)
-            elif (test_name == mem_fail_test):
-                assert_test_status(self, test_name, ts, MEMLEAK_PHASE, TEST_FAIL_STATUS)
-                assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS)
-            elif (test_name == test_diff_test):
-                assert_test_status(self, test_name, ts, "COMPARE_base_rest", TEST_FAIL_STATUS)
-                assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS)
-            elif test_name == st_arch_fail_test:
-                assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS)
-                assert_test_status(self, test_name, ts, STARCHIVE_PHASE, TEST_FAIL_STATUS)
-            else:
-                self.assertTrue(test_name in [pass_test, mem_pass_test])
-                assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS)
-                if (test_name == mem_pass_test):
-                    assert_test_status(self, test_name, ts, MEMLEAK_PHASE, TEST_PASS_STATUS)
-
-    ###########################################################################
-    def test_c_use_existing(self):
-    ###########################################################################
-        tests = CIME.get_tests.get_full_test_names(["TESTBUILDFAIL_P1.f19_g16_rx1.A", "TESTRUNFAIL_P1.f19_g16_rx1.A", "TESTRUNPASS_P1.f19_g16_rx1.A"],
-                                                   self._machine, self._compiler)
-        test_id = "%s-%s" % (self._baseline_name, CIME.utils.get_timestamp())
-        ct = TestScheduler(tests, test_id=test_id, no_batch=NO_BATCH, test_root=self._testroot,
-                           output_root=self._testroot, compiler=self._compiler, mpilib=TEST_MPILIB)
-
-        build_fail_test = [item for item in tests if "TESTBUILDFAIL" in item][0]
-        run_fail_test = [item for item in tests if "TESTRUNFAIL" in item][0]
-        pass_test = [item for item in tests if "TESTRUNPASS" in item][0]
-
-        log_lvl = logging.getLogger().getEffectiveLevel()
-        logging.disable(logging.CRITICAL)
-        try:
-            ct.run_tests()
-        finally:
-            logging.getLogger().setLevel(log_lvl)
-
-        test_statuses = glob.glob("%s/*%s/TestStatus" % (self._testroot, test_id))
-        self.assertEqual(len(tests), len(test_statuses))
-
-        self._wait_for_tests(test_id, expect_works=False)
-
-        for test_status in test_statuses:
-            casedir = os.path.dirname(test_status)
-            ts = TestStatus(test_dir=casedir)
-            test_name = ts.get_name()
-            if test_name == build_fail_test:
-                assert_test_status(self, test_name, ts, MODEL_BUILD_PHASE, TEST_FAIL_STATUS)
-                with TestStatus(test_dir=casedir) as ts:
-                    ts.set_status(MODEL_BUILD_PHASE, TEST_PEND_STATUS)
-            elif test_name == run_fail_test:
-                assert_test_status(self, test_name, ts, RUN_PHASE, TEST_FAIL_STATUS)
-                with TestStatus(test_dir=casedir) as ts:
-                    ts.set_status(SUBMIT_PHASE, TEST_PEND_STATUS)
-            else:
-                self.assertTrue(test_name == pass_test)
-                assert_test_status(self, test_name, ts, MODEL_BUILD_PHASE, TEST_PASS_STATUS)
-                assert_test_status(self, test_name, ts, SUBMIT_PHASE, TEST_PASS_STATUS)
-                assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS)
-
-        os.environ["TESTBUILDFAIL_PASS"] = "True"
-        os.environ["TESTRUNFAIL_PASS"] = "True"
-        ct2 = TestScheduler(tests, test_id=test_id, no_batch=NO_BATCH, use_existing=True,
-                            test_root=self._testroot, output_root=self._testroot, compiler=self._compiler,
-                            mpilib=TEST_MPILIB)
-
-        log_lvl = logging.getLogger().getEffectiveLevel()
-        logging.disable(logging.CRITICAL)
-        try:
-            ct2.run_tests()
-        finally:
-            logging.getLogger().setLevel(log_lvl)
-
-        self._wait_for_tests(test_id)
-
-        for test_status in test_statuses:
-            ts = TestStatus(test_dir=os.path.dirname(test_status))
-            test_name = ts.get_name()
-            assert_test_status(self, test_name, ts, MODEL_BUILD_PHASE, TEST_PASS_STATUS)
-            assert_test_status(self, test_name, ts, SUBMIT_PHASE, TEST_PASS_STATUS)
-            assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS)
-
-        del os.environ["TESTBUILDFAIL_PASS"]
-        del os.environ["TESTRUNFAIL_PASS"]
-
-        # test that passed tests are not re-run
-        ct2 = TestScheduler(tests, test_id=test_id, no_batch=NO_BATCH, use_existing=True,
-                            test_root=self._testroot, output_root=self._testroot, compiler=self._compiler,
-                            mpilib=TEST_MPILIB)
-
-        log_lvl = logging.getLogger().getEffectiveLevel()
-        logging.disable(logging.CRITICAL)
-        try:
-            ct2.run_tests()
-        finally:
-            logging.getLogger().setLevel(log_lvl)
-
-        self._wait_for_tests(test_id)
-
-        for test_status in test_statuses:
-            ts = TestStatus(test_dir=os.path.dirname(test_status))
-            test_name = ts.get_name()
-            assert_test_status(self, test_name, ts, MODEL_BUILD_PHASE, TEST_PASS_STATUS)
-            assert_test_status(self, test_name, ts, SUBMIT_PHASE, TEST_PASS_STATUS)
-            assert_test_status(self, test_name, ts, RUN_PHASE, TEST_PASS_STATUS)
-
-    ###########################################################################
-    def test_d_retry(self):
-    ###########################################################################
-        args = ["TESTBUILDFAIL_P1.f19_g16_rx1.A", "TESTRUNFAILRESET_P1.f19_g16_rx1.A", "TESTRUNPASS_P1.f19_g16_rx1.A", "--retry=1"]
-
-        self._create_test(args)
-
-###############################################################################
-class P_TestJenkinsGenericJob(TestCreateTestCommon):
-###############################################################################
-
-    ###########################################################################
-    def setUp(self):
-    ###########################################################################
-        if get_model() != "e3sm":
-            self.skipTest("Skipping Jenkins tests. E3SM feature")
-        TestCreateTestCommon.setUp(self)
-
-        # Need to run in a subdir in order to not have CTest clash. Name it
-        # such that it should be cleaned up by the parent tearDown
-        self._testdir = os.path.join(self._testroot, "jenkins_test_%s" % self._baseline_name)
-        os.makedirs(self._testdir)
-
-        # Change root to avoid clashing with other jenkins_generic_jobs
-        self._jenkins_root = os.path.join(self._testdir, "J")
-
-    ###########################################################################
-    def tearDown(self):
-    ###########################################################################
-        TestCreateTestCommon.tearDown(self)
-
-        if "TESTRUNDIFF_ALTERNATE" in os.environ:
-            del os.environ["TESTRUNDIFF_ALTERNATE"]
-
-    ###########################################################################
-    def simple_test(self, expect_works, extra_args, build_name=None):
-    ###########################################################################
-        if NO_BATCH:
-            extra_args += " --no-batch"
-
-        # Need these flags to test dashboard if e3sm
-        if get_model() == "e3sm" and build_name is not None:
-            extra_args += " -p ACME_test --submit-to-cdash --cdash-build-group=Nightly -c %s" % build_name
-
-        run_cmd_assert_result(self, "%s/jenkins_generic_job -r %s %s -B %s" % (TOOLS_DIR, self._testdir, extra_args, self._baseline_area),
-                              from_dir=self._testdir, expected_stat=(0 if expect_works else CIME.utils.TESTS_FAILED_ERR_CODE))
-
-    ###########################################################################
-    def threaded_test(self, expect_works, extra_args, build_name=None):
-    ###########################################################################
-        try:
-            self.simple_test(expect_works, extra_args, build_name)
-        except AssertionError as e:
-            self._thread_error = str(e)
-
-    ###########################################################################
-    def assert_num_leftovers(self, suite):
-    ###########################################################################
-        num_tests_in_tiny = len(CIME.get_tests.get_test_suite(suite))
-
-        jenkins_dirs = glob.glob("%s/*%s*/" % (self._jenkins_root, self._baseline_name.capitalize())) # case dirs
-        # scratch_dirs = glob.glob("%s/*%s*/" % (self._testroot, test_id)) # blr/run dirs
-
-        self.assertEqual(num_tests_in_tiny, len(jenkins_dirs),
-                         msg="Wrong number of leftover directories in %s, expected %d, see %s" % \
-                             (self._jenkins_root, num_tests_in_tiny, jenkins_dirs))
-
-        # JGF: Can't test this at the moment due to root change flag given to jenkins_generic_job
-        # self.assertEqual(num_tests_in_tiny + 1, len(scratch_dirs),
-        #                  msg="Wrong number of leftover directories in %s, expected %d, see %s" % \
-        #                      (self._testroot, num_tests_in_tiny, scratch_dirs))
-
-    ###########################################################################
-    def test_jenkins_generic_job(self):
-    ###########################################################################
-        # Generate fresh baselines so that this test is not impacted by
-        # unresolved diffs
-        self.simple_test(True, "-t cime_test_only_pass -g -b %s" % self._baseline_name)
-        self.assert_num_leftovers("cime_test_only_pass")
-
-        build_name = "jenkins_generic_job_pass_%s" % CIME.utils.get_timestamp()
-        self.simple_test(True, "-t cime_test_only_pass -b %s" % self._baseline_name, build_name=build_name)
-        self.assert_num_leftovers("cime_test_only_pass") # jenkins_generic_job should have automatically cleaned up leftovers from prior run
-        assert_dashboard_has_build(self, build_name)
-
-    ###########################################################################
-    def test_jenkins_generic_job_kill(self):
-    ###########################################################################
-        build_name = "jenkins_generic_job_kill_%s" % CIME.utils.get_timestamp()
-        run_thread = threading.Thread(target=self.threaded_test, args=(False, " -t cime_test_only_slow_pass -b master", build_name))
-        run_thread.daemon = True
-        run_thread.start()
-
-        time.sleep(120)
-
-        kill_subprocesses(sig=signal.SIGTERM)
-
-        run_thread.join(timeout=30)
-
-        self.assertFalse(run_thread.is_alive(), msg="jenkins_generic_job should have finished")
-        self.assertTrue(self._thread_error is None, msg="Thread had failure: %s" % self._thread_error)
-        assert_dashboard_has_build(self, build_name)
-
-    ###########################################################################
-    def test_jenkins_generic_job_realistic_dash(self):
-    ###########################################################################
-        # The actual quality of the cdash results for this test can only
-        # be inspected manually
-
-        # Generate fresh baselines so that this test is not impacted by
-        # unresolved diffs
-        self.simple_test(False, "-t cime_test_all -g -b %s" % self._baseline_name)
-        self.assert_num_leftovers("cime_test_all")
-
-        # Should create a diff
-        os.environ["TESTRUNDIFF_ALTERNATE"] = "True"
-
-        # Should create a nml diff
-        # Modify namelist
-        fake_nl = """
- &fake_nml
-   fake_item = 'fake'
-   fake = .true.
-/"""
-        baseline_glob = glob.glob(os.path.join(self._baseline_area, self._baseline_name, "TESTRUNPASS*"))
-        self.assertEqual(len(baseline_glob), 1, msg="Expected one match, got:\n%s" % "\n".join(baseline_glob))
-
-        for baseline_dir in baseline_glob:
-            nl_path = os.path.join(baseline_dir, "CaseDocs", "datm_in")
-            self.assertTrue(os.path.isfile(nl_path), msg="Missing file %s" % nl_path)
-
-            os.chmod(nl_path, osstat.S_IRUSR | osstat.S_IWUSR)
-            with open(nl_path, "a") as nl_file:
-                nl_file.write(fake_nl)
-
-        build_name = "jenkins_generic_job_mixed_%s" % CIME.utils.get_timestamp()
-        self.simple_test(False, "-t cime_test_all -b %s" % self._baseline_name, build_name=build_name)
-        self.assert_num_leftovers("cime_test_all") # jenkins_generic_job should have automatically cleaned up leftovers from prior run
-        assert_dashboard_has_build(self, build_name)
-
-###############################################################################
-class M_TestCimePerformance(TestCreateTestCommon):
-###############################################################################
-
-    ###########################################################################
-    def test_cime_case_ctrl_performance(self):
-    ###########################################################################
-        ts = time.time()
-
-        num_repeat = 5
-        for _ in range(num_repeat):
-            self._create_test(["cime_tiny", "--no-build"])
-
-        elapsed = time.time() - ts
-
-        print("Perf test result: {:0.2f}".format(elapsed))
-
-###############################################################################
-class T_TestRunRestart(TestCreateTestCommon):
-###############################################################################
-
-    ###########################################################################
-    def test_run_restart(self):
-    ###########################################################################
-        if (NO_FORTRAN_RUN):
-            self.skipTest("Skipping fortran test")
-        driver = CIME.utils.get_cime_default_driver()
-        if driver == "mct":
-            walltime = "00:15:00"
-        else:
-            walltime = "00:30:00"
-
-        casedir = self._create_test(["--walltime " + walltime, "NODEFAIL_P1.f09_g16.X"], test_id=self._baseline_name)
-        rundir = run_cmd_no_fail("./xmlquery RUNDIR --value", from_dir=casedir)
-        fail_sentinel = os.path.join(rundir, "FAIL_SENTINEL")
-        self.assertTrue(os.path.exists(fail_sentinel), msg="Missing %s" % fail_sentinel)
-
-        self.assertEqual(open(fail_sentinel, "r").read().count("FAIL"), 3)
-
-    ###########################################################################
-    def test_run_restart_too_many_fails(self):
-    ###########################################################################
-        if (NO_FORTRAN_RUN):
-            self.skipTest("Skipping fortran test")
-        driver = CIME.utils.get_cime_default_driver()
-        if driver == "mct":
-            walltime = "00:15:00"
-        else:
-            walltime = "00:30:00"
-
-        casedir = self._create_test(["--walltime " + walltime, "NODEFAIL_P1.f09_g16.X"], test_id=self._baseline_name, env_changes="NODEFAIL_NUM_FAILS=5", run_errors=True)
-        rundir = run_cmd_no_fail("./xmlquery RUNDIR --value", from_dir=casedir)
-        fail_sentinel = os.path.join(rundir, "FAIL_SENTINEL")
-        self.assertTrue(os.path.exists(fail_sentinel), msg="Missing %s" % fail_sentinel)
-
-        self.assertEqual(open(fail_sentinel, "r").read().count("FAIL"), 4)
-
-###############################################################################
-class Q_TestBlessTestResults(TestCreateTestCommon):
-###############################################################################
-
-    ###########################################################################
-    def setUp(self):
-    ###########################################################################
-        TestCreateTestCommon.setUp(self)
-
-        # Set a restrictive umask so we can test that SharedAreas used for
-        # recording baselines are working
-        restrictive_mask = 0o027
-        self._orig_umask = os.umask(restrictive_mask)
-
-    ###########################################################################
-    def tearDown(self):
-    ###########################################################################
-        TestCreateTestCommon.tearDown(self)
-
-        if "TESTRUNDIFF_ALTERNATE" in os.environ:
-            del os.environ["TESTRUNDIFF_ALTERNATE"]
-
-        os.umask(self._orig_umask)
-
-    ###############################################################################
-    def test_bless_test_results(self):
-    ###############################################################################
-        if (NO_FORTRAN_RUN):
-            self.skipTest("Skipping fortran test")
-        # Test resubmit scenario if Machine has a batch system
-        if MACHINE.has_batch_system():
-            test_names = ["TESTRUNDIFFRESUBMIT_Mmpi-serial.f19_g16_rx1.A", "TESTRUNDIFF_Mmpi-serial.f19_g16_rx1.A"]
-        else:
-            test_names = ["TESTRUNDIFF_P1.f19_g16_rx1.A"]
-
-        # Generate some baselines
-        for test_name in test_names:
-            if get_model() == "e3sm":
-                genargs = ["-g", "-o", "-b", self._baseline_name, test_name]
-                compargs = ["-c", "-b", self._baseline_name, test_name]
-            else:
-                genargs = ["-g", self._baseline_name, "-o", test_name,
-                           "--baseline-root ", self._baseline_area]
-                compargs = ["-c", self._baseline_name, test_name,
-                            "--baseline-root ", self._baseline_area]
-
-            self._create_test(genargs)
-            # Hist compare should pass
-            self._create_test(compargs)
-            # Change behavior
-            os.environ["TESTRUNDIFF_ALTERNATE"] = "True"
-
-            # Hist compare should now fail
-            test_id = "%s-%s" % (self._baseline_name, CIME.utils.get_timestamp())
-            self._create_test(compargs, test_id=test_id, run_errors=True)
-
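The detection step that follows greps the compare_test_results output for a BASELINE failure belonging to the expected test. A standalone sketch of the regex idiom; the sample output line is illustrative, not the tool's exact format.

import re

test_name = "TESTRUNDIFF_P1.f19_g16_rx1.A"
output = "FAIL TESTRUNDIFF_P1.f19_g16_rx1.A.C.20220101_000000 BASELINE\n"  # illustrative

# match the test name plus any non-space suffix (the test-id decoration)
expected_pattern = re.compile(r'FAIL %s[^\s]* BASELINE' % test_name)
assert expected_pattern.search(output) is not None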
-            # compare_test_results should detect the fail
-            cpr_cmd = "{}/compare_test_results --test-root {} -t {} 2>&1" \
-                .format(TOOLS_DIR, self._testroot, test_id)
-            output = run_cmd_assert_result(self, cpr_cmd, expected_stat=CIME.utils.TESTS_FAILED_ERR_CODE)
-
-            # use regex
-            expected_pattern = re.compile(r'FAIL %s[^\s]* BASELINE' % test_name)
-            the_match = expected_pattern.search(output)
-            self.assertNotEqual(the_match, None,
-                                msg="Cmd '%s' failed to display failed test %s in output:\n%s" % (cpr_cmd, test_name, output))
-            # Bless
-            run_cmd_no_fail("{}/bless_test_results --test-root {} --hist-only --force -t {}"
-                            .format(TOOLS_DIR, self._testroot, test_id))
-            # Hist compare should now pass again
-            self._create_test(compargs)
-            verify_perms(self, self._baseline_area)
-            if "TESTRUNDIFF_ALTERNATE" in os.environ:
-                del os.environ["TESTRUNDIFF_ALTERNATE"]
-
-    ###############################################################################
-    def test_rebless_namelist(self):
-    ###############################################################################
-        # Generate some namelist baselines
-        if (NO_FORTRAN_RUN):
-            self.skipTest("Skipping fortran test")
-        test_to_change = "TESTRUNPASS_P1.f19_g16_rx1.A"
-        if get_model() == "e3sm":
-            genargs = ["-g", "-o", "-b", self._baseline_name, "cime_test_only_pass"]
-            compargs = ["-c", "-b", self._baseline_name, "cime_test_only_pass"]
-        else:
-            genargs = ["-g", self._baseline_name, "-o", "cime_test_only_pass"]
-            compargs = ["-c", self._baseline_name, "cime_test_only_pass"]
-
-        self._create_test(genargs)
-
-        # Basic namelist compare
-        test_id = "%s-%s" % (self._baseline_name, CIME.utils.get_timestamp())
-        cases = self._create_test(compargs, test_id=test_id)
-        casedir = get_casedir(self, test_to_change, cases)
-
-        # Check standalone case.cmpgen_namelists
-        run_cmd_assert_result(self, "./case.cmpgen_namelists", from_dir=casedir)
-
-        # compare_test_results should pass
-        cpr_cmd = "{}/compare_test_results --test-root {} -n -t {} 2>&1" \
-            .format(TOOLS_DIR, self._testroot, test_id)
-        output = run_cmd_assert_result(self, cpr_cmd)
-
-        # use regex
-        expected_pattern = re.compile(r'PASS %s[^\s]* NLCOMP' % test_to_change)
-        the_match = expected_pattern.search(output)
-        self.assertNotEqual(the_match, None,
-                            msg="Cmd '%s' failed to display passed test in output:\n%s" % (cpr_cmd, output))
-
-        # Modify namelist
-        fake_nl = """
- &fake_nml
-   fake_item = 'fake'
-   fake = .true.
-/"""
-        baseline_area = self._baseline_area
-        baseline_glob = glob.glob(os.path.join(baseline_area, self._baseline_name, "TEST*"))
-        self.assertEqual(len(baseline_glob), 3, msg="Expected three matches, got:\n%s" % "\n".join(baseline_glob))
-
-        for baseline_dir in baseline_glob:
-            nl_path = os.path.join(baseline_dir, "CaseDocs", "datm_in")
-            self.assertTrue(os.path.isfile(nl_path), msg="Missing file %s" % nl_path)
-
-            os.chmod(nl_path, osstat.S_IRUSR | osstat.S_IWUSR)
-            with open(nl_path, "a") as nl_file:
-                nl_file.write(fake_nl)
-
-        # Basic namelist compare should now fail
-        test_id = "%s-%s" % (self._baseline_name, CIME.utils.get_timestamp())
-        self._create_test(compargs, test_id=test_id, run_errors=True)
-        casedir = get_casedir(self, test_to_change, cases)
-
-        # Unless namelists are explicitly ignored
-        test_id2 = "%s-%s" % (self._baseline_name, CIME.utils.get_timestamp())
-        self._create_test(compargs + ["--ignore-namelists"], test_id=test_id2)
-
-        run_cmd_assert_result(self, "./case.cmpgen_namelists", from_dir=casedir, expected_stat=100)
-
-        # preview namelists should work
-        run_cmd_assert_result(self, "./preview_namelists", from_dir=casedir)
-
-        # This should still fail
-        run_cmd_assert_result(self, "./case.cmpgen_namelists", from_dir=casedir, expected_stat=100)
-
-        # compare_test_results should fail
-        cpr_cmd = "{}/compare_test_results --test-root {} -n -t {} 2>&1" \
-            .format(TOOLS_DIR, self._testroot, test_id)
-        output = run_cmd_assert_result(self, cpr_cmd, expected_stat=CIME.utils.TESTS_FAILED_ERR_CODE)
-
-        # use regex
-        expected_pattern = re.compile(r'FAIL %s[^\s]* NLCOMP' % test_to_change)
-        the_match = expected_pattern.search(output)
-        self.assertNotEqual(the_match, None,
-                            msg="Cmd '%s' failed to display failed test in output:\n%s" % (cpr_cmd, output))
-
-        # Bless
-        new_test_id = "%s-%s" % (self._baseline_name, CIME.utils.get_timestamp())
-        run_cmd_no_fail("{}/bless_test_results --test-root {} -n --force -t {} --new-test-root={} --new-test-id={}"
-                        .format(TOOLS_DIR, self._testroot, test_id, self._testroot, new_test_id))
-
-        # Basic namelist compare should now pass again
-        self._create_test(compargs)
-
-        verify_perms(self, self._baseline_area)
-
-class X_TestQueryConfig(unittest.TestCase):
-    def test_query_compsets(self):
-        run_cmd_no_fail("{}/query_config --compsets".format(SCRIPT_DIR))
-
-    def test_query_components(self):
-        run_cmd_no_fail("{}/query_config --components".format(SCRIPT_DIR))
-
-    def test_query_grids(self):
-        run_cmd_no_fail("{}/query_config --grids".format(SCRIPT_DIR))
-
-    def test_query_machines(self):
-        run_cmd_no_fail("{}/query_config --machines".format(SCRIPT_DIR))
-
-###############################################################################
-class Y_TestUserConcurrentMods(TestCreateTestCommon):
-###############################################################################
-
-    ###########################################################################
-    def test_user_concurrent_mods(self):
-    ###########################################################################
-        # Put this inside any test that's slow
-        if (FAST_ONLY):
-            self.skipTest("Skipping slow test")
-
-        casedir = self._create_test(["--walltime=0:30:00", "TESTRUNUSERXMLCHANGE_Mmpi-serial.f19_g16.X"], test_id=self._baseline_name)
-
-        with Timeout(3000):
-            while True:
-                with open(os.path.join(casedir, "CaseStatus"), "r") as fd:
-                    self._wait_for_tests(self._baseline_name)
-                    contents = fd.read()
-                    if contents.count("model execution success") == 2:
-                        break
-
-                time.sleep(5)
-
-        rundir = run_cmd_no_fail("./xmlquery RUNDIR --value", from_dir=casedir)
-        if CIME.utils.get_cime_default_driver() == 'nuopc':
-            chk_file = "nuopc.runconfig"
-        else:
-            chk_file = "drv_in"
-        with open(os.path.join(rundir, chk_file), "r") as fd:
-            contents = fd.read()
-            self.assertTrue("stop_n = 6" in contents)
-
-###############################################################################
-class Z_FullSystemTest(TestCreateTestCommon):
-###############################################################################
-
-    ###########################################################################
-    def test_full_system(self):
-    ###########################################################################
-        # Put this inside any test that's slow
-        if (FAST_ONLY):
-            self.skipTest("Skipping slow test")
-
-        driver = CIME.utils.get_cime_default_driver()
-        if driver == "mct":
-            cases = self._create_test(["--walltime=0:15:00", "cime_developer"], test_id=self._baseline_name)
-        else:
-            cases = self._create_test(["--walltime=0:30:00", "cime_developer"], test_id=self._baseline_name)
-
-        run_cmd_assert_result(self, "%s/cs.status.%s" % (self._testroot, self._baseline_name),
-                              from_dir=self._testroot)
-
-        # Ensure that we can get test times
-        for case_dir in cases:
-            test_status = os.path.join(case_dir, "TestStatus")
-            test_time = CIME.wait_for_tests.get_test_time(os.path.dirname(test_status))
-            self.assertIs(type(test_time), int, msg="get time did not return int for %s" % test_status)
-            self.assertTrue(test_time > 0, msg="test time was zero for %s" % test_status)
-
-        # Test that re-running works
-        skip_tests = None
-        if CIME.utils.get_cime_default_driver() == 'nuopc':
-            skip_tests = ["SMS_Ln3.T42_T42.S", "PRE.f19_f19.ADESP_TEST", "PRE.f19_f19.ADESP", "DAE.ww3a.ADWAV"]
-        tests = CIME.get_tests.get_test_suite("cime_developer", machine=self._machine, compiler=self._compiler, skip_tests=skip_tests)
-
-        for test in tests:
-            casedir = get_casedir(self, test, cases)
-
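The race-condition fix described in the comment just below, shown in isolation: force the MEMLEAK phase back to PEND through the TestStatus context manager, which persists the change to the TestStatus file on exit, so that wait_for_tests treats the re-submitted job as unfinished. A minimal sketch, assuming the CIME.test_status module layout used by this test file; the casedir path is a placeholder.

from CIME.test_status import TestStatus, MEMLEAK_PHASE, TEST_PEND_STATUS

with TestStatus(test_dir="/path/to/casedir") as ts:  # placeholder path
    # persisted on context exit; RUN must stay non-PEND so the system test
    # knows it is being re-run and resets the relevant case settings
    ts.set_status(MEMLEAK_PHASE, TEST_PEND_STATUS)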
- if self._hasbatch: - with TestStatus(test_dir=casedir) as ts: - ts.set_status(MEMLEAK_PHASE, TEST_PEND_STATUS) - - run_cmd_assert_result(self, "./case.submit --skip-preview-namelist", from_dir=casedir) - - self._wait_for_tests(self._baseline_name) - -############################################################################### -class K_TestCimeCase(TestCreateTestCommon): -############################################################################### - - ########################################################################### - def test_cime_case(self): - ########################################################################### - casedir = self._create_test(["--no-build", "TESTRUNPASS_P1.f19_g16_rx1.A"], test_id=self._baseline_name) - - self.assertEqual(type(MACHINE.get_value("MAX_TASKS_PER_NODE")), int) - self.assertTrue(type(MACHINE.get_value("PROJECT_REQUIRED")) in [type(None) , bool]) - - with Case(casedir, read_only=False) as case: - build_complete = case.get_value("BUILD_COMPLETE") - self.assertFalse(build_complete, - msg="Build complete had wrong value '%s'" % - build_complete) - - case.set_value("BUILD_COMPLETE", True) - build_complete = case.get_value("BUILD_COMPLETE") - self.assertTrue(build_complete, - msg="Build complete had wrong value '%s'" % - build_complete) - - case.flush() - - build_complete = run_cmd_no_fail("./xmlquery BUILD_COMPLETE --value", - from_dir=casedir) - self.assertEqual(build_complete, "TRUE", - msg="Build complete had wrong value '%s'" % - build_complete) - - # Test some test properties - self.assertEqual(case.get_value("TESTCASE"), "TESTRUNPASS") - - def _batch_test_fixture(self, testcase_name): - if not MACHINE.has_batch_system() or NO_BATCH: - self.skipTest("Skipping testing user prerequisites without batch systems") - testdir = os.path.join(self._testroot, testcase_name) - if os.path.exists(testdir): - shutil.rmtree(testdir) - args = "--case {name} --script-root {testdir} --compset X --res f19_g16 --handle-preexisting-dirs=r --output-root {testdir}".format(name=testcase_name, testdir=testdir) - if get_model() == "cesm": - args += " --run-unsupported" - - run_cmd_assert_result(self, "{}/create_newcase {}".format(SCRIPT_DIR, args), - from_dir=SCRIPT_DIR) - run_cmd_assert_result(self, "./case.setup", from_dir=testdir) - - return testdir - - ########################################################################### - def test_cime_case_prereq(self): - ########################################################################### - testcase_name = 'prereq_test' - testdir = self._batch_test_fixture(testcase_name) - with Case(testdir, read_only=False) as case: - if case.get_value("depend_string") is None: - self.skipTest("Skipping prereq test, depend_string was not provided for this batch system") - job_name = "case.run" - prereq_name = 'prereq_test' - batch_commands = case.submit_jobs(prereq=prereq_name, job=job_name, skip_pnl=True, dry_run=True) - self.assertTrue(isinstance(batch_commands, collections.Sequence), "case.submit_jobs did not return a sequence for a dry run") - self.assertTrue(len(batch_commands) > 0, "case.submit_jobs did not return any job submission string") - # The first element in the internal sequence should just be the job name - # The second one (batch_cmd_index) should be the actual batch submission command - batch_cmd_index = 1 - # The prerequisite should be applied to all jobs, though we're only expecting one - for batch_cmd in batch_commands: - self.assertTrue(isinstance(batch_cmd, collections.Sequence), "case.submit_jobs did 
not return a sequence of sequences") - self.assertTrue(len(batch_cmd) > batch_cmd_index, "case.submit_jobs returned internal sequences with length <= {}".format(batch_cmd_index)) - self.assertTrue(isinstance(batch_cmd[1], CIME.six.string_types), "case.submit_jobs returned internal sequences without the batch command string as the second parameter: {}".format(batch_cmd[1])) - batch_cmd_args = batch_cmd[1] - - jobid_ident = "jobid" - dep_str_fmt = case.get_env('batch').get_value('depend_string', subgroup=None) - self.assertTrue(jobid_ident in dep_str_fmt, "dependency string doesn't include the jobid identifier {}".format(jobid_ident)) - dep_str = dep_str_fmt[:dep_str_fmt.index(jobid_ident)] - - prereq_substr = None - while dep_str in batch_cmd_args: - dep_id_pos = batch_cmd_args.find(dep_str) + len(dep_str) - batch_cmd_args = batch_cmd_args[dep_id_pos:] - prereq_substr = batch_cmd_args[:len(prereq_name)] - if prereq_substr == prereq_name: - break - - self.assertTrue(prereq_name in prereq_substr, "Dependencies added, but not the user specified one") - - ########################################################################### - def test_cime_case_allow_failed_prereq(self): - ########################################################################### - testcase_name = 'allow_failed_prereq_test' - testdir = self._batch_test_fixture(testcase_name) - with Case(testdir, read_only=False) as case: - depend_allow = case.get_value("depend_allow_string") - if depend_allow is None: - self.skipTest("Skipping allow_failed_prereq test, depend_allow_string was not provided for this batch system") - job_name = "case.run" - prereq_name = "prereq_allow_fail_test" - depend_allow = depend_allow.replace("jobid", prereq_name) - batch_commands = case.submit_jobs(prereq=prereq_name, allow_fail=True, job=job_name, skip_pnl=True, dry_run=True) - self.assertTrue(isinstance(batch_commands, collections.Sequence), "case.submit_jobs did not return a sequence for a dry run") - num_submissions = 1 - if case.get_value("DOUT_S"): - num_submissions = 2 - self.assertTrue(len(batch_commands) == num_submissions, "case.submit_jobs did not return any job submission strings") - self.assertTrue(depend_allow in batch_commands[0][1]) - - ########################################################################### - def test_cime_case_resubmit_immediate(self): - ########################################################################### - testcase_name = 'resubmit_immediate_test' - testdir = self._batch_test_fixture(testcase_name) - with Case(testdir, read_only=False) as case: - depend_string = case.get_value("depend_string") - if depend_string is None: - self.skipTest("Skipping resubmit_immediate test, depend_string was not provided for this batch system") - depend_string = re.sub('jobid.*$','',depend_string) - job_name = "case.run" - num_submissions = 6 - case.set_value("RESUBMIT", num_submissions - 1) - batch_commands = case.submit_jobs(job=job_name, skip_pnl=True, dry_run=True, resubmit_immediate=True) - self.assertTrue(isinstance(batch_commands, collections.Sequence), "case.submit_jobs did not return a sequence for a dry run") - if case.get_value("DOUT_S"): - num_submissions = 12 - self.assertTrue(len(batch_commands) == num_submissions, "case.submit_jobs did not return {} submitted jobs".format(num_submissions)) - for i, cmd in enumerate(batch_commands): - if i > 0: - self.assertTrue(depend_string in cmd[1]) - - ########################################################################### - def 
test_cime_case_st_archive_resubmit(self): - ########################################################################### - testcase_name = "st_archive_resubmit_test" - testdir = self._batch_test_fixture(testcase_name) - with Case(testdir, read_only=False) as case: - case.case_setup(clean=False, test_mode=False, reset=True) - orig_resubmit = 2 - case.set_value("RESUBMIT", orig_resubmit) - case.case_st_archive(resubmit=False) - new_resubmit = case.get_value("RESUBMIT") - self.assertTrue(orig_resubmit == new_resubmit, "st_archive resubmitted when told not to") - case.case_st_archive(resubmit=True) - new_resubmit = case.get_value("RESUBMIT") - self.assertTrue((orig_resubmit - 1) == new_resubmit, "st_archive did not resubmit when told to") - - ########################################################################### - def test_cime_case_build_threaded_1(self): - ########################################################################### - casedir = self._create_test(["--no-build", "TESTRUNPASS_P1x1.f19_g16_rx1.A"], test_id=self._baseline_name) - - with Case(casedir, read_only=False) as case: - build_threaded = case.get_value("SMP_PRESENT") - self.assertFalse(build_threaded) - - build_threaded = case.get_build_threaded() - self.assertFalse(build_threaded) - - case.set_value("FORCE_BUILD_SMP", True) - - build_threaded = case.get_build_threaded() - self.assertTrue(build_threaded) - - ########################################################################### - def test_cime_case_build_threaded_2(self): - ########################################################################### - casedir = self._create_test(["--no-build", "TESTRUNPASS_P1x2.f19_g16_rx1.A"], test_id=self._baseline_name) - - with Case(casedir, read_only=False) as case: - build_threaded = case.get_value("SMP_PRESENT") - self.assertTrue(build_threaded) - - build_threaded = case.get_build_threaded() - self.assertTrue(build_threaded) - - ########################################################################### - def test_cime_case_mpi_serial(self): - ########################################################################### - casedir = self._create_test(["--no-build", "TESTRUNPASS_Mmpi-serial_P10.f19_g16_rx1.A"], test_id=self._baseline_name) - - with Case(casedir, read_only=True) as case: - - # Serial cases should not be using pnetcdf - self.assertEqual(case.get_value("CPL_PIO_TYPENAME"), "netcdf") - - # Serial cases should be using 1 task - self.assertEqual(case.get_value("TOTALPES"), 1) - - self.assertEqual(case.get_value("NTASKS_CPL"), 1) - - ########################################################################### - def test_cime_case_force_pecount(self): - ########################################################################### - casedir = self._create_test(["--no-build", "--force-procs=16", "--force-threads=8", "TESTRUNPASS.f19_g16_rx1.A"], test_id=self._baseline_name) - - with Case(casedir, read_only=True) as case: - self.assertEqual(case.get_value("NTASKS_CPL"), 16) - - self.assertEqual(case.get_value("NTHRDS_CPL"), 8) - - ########################################################################### - def test_cime_case_xmlchange_append(self): - ########################################################################### - casedir = self._create_test(["--no-build", "TESTRUNPASS_P1x1.f19_g16_rx1.A"], test_id=self._baseline_name) - - run_cmd_assert_result(self, "./xmlchange --id PIO_CONFIG_OPTS --val='-opt1'", from_dir=casedir) - result = run_cmd_assert_result(self, "./xmlquery --value PIO_CONFIG_OPTS", from_dir=casedir) - 
self.assertEqual(result, "-opt1") - - run_cmd_assert_result(self, "./xmlchange --id PIO_CONFIG_OPTS --val='-opt2' --append", from_dir=casedir) - result = run_cmd_assert_result(self, "./xmlquery --value PIO_CONFIG_OPTS", from_dir=casedir) - self.assertEqual(result, "-opt1 -opt2") - - ########################################################################### - def test_cime_case_test_walltime_mgmt_1(self): - ########################################################################### - if get_model() != "e3sm": - self.skipTest("Skipping walltime test. Depends on E3SM batch settings") - - test_name = "ERS.f19_g16_rx1.A" - casedir = self._create_test(["--no-setup", "--machine=blues", test_name], test_id=self._baseline_name, - env_changes="unset CIME_GLOBAL_WALLTIME &&") - - result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir) - self.assertEqual(result, "00:10:00") - - result = run_cmd_assert_result(self, "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir) - self.assertEqual(result, "biggpu") - - ########################################################################### - def test_cime_case_test_walltime_mgmt_2(self): - ########################################################################### - if get_model() != "e3sm": - self.skipTest("Skipping walltime test. Depends on E3SM batch settings") - - test_name = "ERS_P64.f19_g16_rx1.A" - casedir = self._create_test(["--no-setup", "--machine=blues", test_name], test_id=self._baseline_name, - env_changes="unset CIME_GLOBAL_WALLTIME &&") - - result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir) - self.assertEqual(result, "01:00:00") - - result = run_cmd_assert_result(self, "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir) - self.assertEqual(result, "biggpu") - - ########################################################################### - def test_cime_case_test_walltime_mgmt_3(self): - ########################################################################### - if get_model() != "e3sm": - self.skipTest("Skipping walltime test. Depends on E3SM batch settings") - - test_name = "ERS_P64.f19_g16_rx1.A" - casedir = self._create_test(["--no-setup", "--machine=blues", "--walltime=0:10:00", test_name], test_id=self._baseline_name, - env_changes="unset CIME_GLOBAL_WALLTIME &&") - - result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir) - self.assertEqual(result, "00:10:00") - - result = run_cmd_assert_result(self, "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir) - self.assertEqual(result, "biggpu") # Not smart enough to select faster queue - - ########################################################################### - def test_cime_case_test_walltime_mgmt_4(self): - ########################################################################### - if get_model() != "e3sm": - self.skipTest("Skipping walltime test. 
Depends on E3SM batch settings") - - test_name = "ERS_P1.f19_g16_rx1.A" - casedir = self._create_test(["--no-setup", "--machine=blues", "--walltime=2:00:00", test_name], test_id=self._baseline_name, - env_changes="unset CIME_GLOBAL_WALLTIME &&") - - result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir) - self.assertEqual(result, "01:00:00") - - result = run_cmd_assert_result(self, "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir) - self.assertEqual(result, "biggpu") - - ########################################################################### - def test_cime_case_test_walltime_mgmt_5(self): - ########################################################################### - if get_model() != "e3sm": - self.skipTest("Skipping walltime test. Depends on E3SM batch settings") - - test_name = "ERS_P1.f19_g16_rx1.A" - casedir = self._create_test(["--no-setup", "--machine=blues", test_name], test_id=self._baseline_name, - env_changes="unset CIME_GLOBAL_WALLTIME &&") - - run_cmd_assert_result(self, "./xmlchange JOB_QUEUE=slartibartfast --subgroup=case.test", from_dir=casedir, expected_stat=1) - - run_cmd_assert_result(self, "./xmlchange JOB_QUEUE=slartibartfast --force --subgroup=case.test", from_dir=casedir) - - result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir) - self.assertEqual(result, "01:00:00") - - result = run_cmd_assert_result(self, "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir) - self.assertEqual(result, "slartibartfast") - - ########################################################################### - def test_cime_case_test_walltime_mgmt_6(self): - ########################################################################### - if not self._hasbatch: - self.skipTest("Skipping walltime test. Depends on batch system") - - test_name = "ERS_P1.f19_g16_rx1.A" - casedir = self._create_test(["--no-build", test_name], test_id=self._baseline_name, - env_changes="unset CIME_GLOBAL_WALLTIME &&") - - run_cmd_assert_result(self, "./xmlchange JOB_WALLCLOCK_TIME=421:32:11 --subgroup=case.test", from_dir=casedir) - - run_cmd_assert_result(self, "./case.setup --reset", from_dir=casedir) - - result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir) - with Case(casedir) as case: - walltime_format = case.get_value("walltime_format", subgroup=None) - if walltime_format is not None and walltime_format.count(":") == 1: - self.assertEqual(result, "421:32") - else: - self.assertEqual(result, "421:32:11") - - ########################################################################### - def test_cime_case_test_walltime_mgmt_7(self): - ########################################################################### - if not self._hasbatch: - self.skipTest("Skipping walltime test. 
Depends on batch system") - - test_name = "ERS_P1.f19_g16_rx1.A" - casedir = self._create_test(["--no-build", "--walltime=01:00:00", test_name], test_id=self._baseline_name, - env_changes="unset CIME_GLOBAL_WALLTIME &&") - - run_cmd_assert_result(self, "./xmlchange JOB_WALLCLOCK_TIME=421:32:11 --subgroup=case.test", from_dir=casedir) - - run_cmd_assert_result(self, "./case.setup --reset", from_dir=casedir) - - result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir) - with Case(casedir) as case: - walltime_format = case.get_value("walltime_format", subgroup=None) - if walltime_format is not None and walltime_format.count(":") == 1: - self.assertEqual(result, "421:32") - else: - self.assertEqual(result, "421:32:11") - - ########################################################################### - def test_cime_case_test_walltime_mgmt_8(self): - ########################################################################### - if get_model() != "e3sm": - self.skipTest("Skipping walltime test. Depends on E3SM batch settings") - - test_name = "SMS_P25600.f19_g16_rx1.A" - machine, compiler = "theta", "gnu" - casedir = self._create_test(["--no-setup", "--machine={}".format(machine), "--compiler={}".format(compiler), "--project e3sm", test_name], test_id=self._baseline_name, - env_changes="unset CIME_GLOBAL_WALLTIME &&") - - result = run_cmd_assert_result(self, "./xmlquery JOB_WALLCLOCK_TIME --subgroup=case.test --value", from_dir=casedir) - self.assertEqual(result, "09:00:00") - - result = run_cmd_assert_result(self, "./xmlquery JOB_QUEUE --subgroup=case.test --value", from_dir=casedir) - self.assertEqual(result, "default") - - ########################################################################### - def test_cime_case_test_custom_project(self): - ########################################################################### - test_name = "ERS_P1.f19_g16_rx1.A" - # have to use a machine both models know and one that doesn't put PROJECT in any key paths - if get_model() == "e3sm": - machine = "mappy" - else: - machine = "melvin" - compiler = "gnu" - casedir = self._create_test(["--no-setup", "--machine={}".format(machine), "--compiler={}".format(compiler), "--project=testproj", test_name, "--mpilib=mpi-serial"], - test_id=self._baseline_name, - env_changes="unset CIME_GLOBAL_WALLTIME &&") - - result = run_cmd_assert_result(self, "./xmlquery --value PROJECT --subgroup=case.test", from_dir=casedir) - self.assertEqual(result, "testproj") - - ########################################################################### - def test_create_test_longname(self): - ########################################################################### - self._create_test(["SMS.f19_g16.2000_SATM_XLND_SICE_SOCN_XROF_XGLC_SWAV", "--no-build"]) - - ########################################################################### - def test_env_loading(self): - ########################################################################### - if self._machine != "mappy": - self.skipTest("Skipping env load test - Only works on mappy") - - casedir = self._create_test(["--no-build", "TESTRUNPASS.f19_g16_rx1.A"], test_id=self._baseline_name) - - with Case(casedir, read_only=True) as case: - env_mach = case.get_env("mach_specific") - orig_env = dict(os.environ) - - env_mach.load_env(case) - module_env = dict(os.environ) - - os.environ.clear() - os.environ.update(orig_env) - - env_mach.load_env(case, force_method="generic") - generic_env = dict(os.environ) - - os.environ.clear() - 
os.environ.update(orig_env) - - problems = "" - for mkey, mval in module_env.items(): - if mkey not in generic_env: - if not mkey.startswith("PS") and mkey != "OLDPWD": - problems += "Generic missing key: {}\n".format(mkey) - elif mval != generic_env[mkey] and mkey not in ["_", "SHLVL", "PWD"] and not mkey.endswith("()"): - problems += "Value mismatch for key {}: {} != {}\n".format(mkey, repr(mval), repr(generic_env[mkey])) - - for gkey in generic_env.keys(): - if gkey not in module_env: - problems += "Modules missing key: {}\n".format(gkey) - - self.assertEqual(problems, "", msg=problems) - - ########################################################################### - def test_case_submit_interface(self): - ########################################################################### - # the current directory may not exist, so make sure we are in a real directory - os.chdir(os.getenv("HOME")) - sys.path.append(TOOLS_DIR) - case_submit_path = os.path.join(TOOLS_DIR, "case.submit") - - module = import_from_file("case.submit", case_submit_path) - - sys.argv = ["case.submit", "--batch-args", "'random_arguments_here.%j'", - "--mail-type", "fail", "--mail-user", "'random_arguments_here.%j'"] - module._main_func(None, True) - - ########################################################################### - def test_xml_caching(self): - ########################################################################### - casedir = self._create_test(["--no-build", "TESTRUNPASS.f19_g16_rx1.A"], test_id=self._baseline_name) - - active = os.path.join(casedir, "env_run.xml") - backup = os.path.join(casedir, "env_run.xml.bak") - - safe_copy(active, backup) - - with Case(casedir, read_only=False) as case: - env_run = EnvRun(casedir, read_only=True) - self.assertEqual(case.get_value("RUN_TYPE"), "startup") - case.set_value("RUN_TYPE", "branch") - self.assertEqual(case.get_value("RUN_TYPE"), "branch") - self.assertEqual(env_run.get_value("RUN_TYPE"), "branch") - - with Case(casedir) as case: - self.assertEqual(case.get_value("RUN_TYPE"), "branch") - - time.sleep(0.2) - safe_copy(backup, active) - - with Case(casedir, read_only=False) as case: - self.assertEqual(case.get_value("RUN_TYPE"), "startup") - case.set_value("RUN_TYPE", "branch") - - with Case(casedir, read_only=False) as case: - self.assertEqual(case.get_value("RUN_TYPE"), "branch") - time.sleep(0.2) - safe_copy(backup, active) - case.read_xml() # Manual re-sync - self.assertEqual(case.get_value("RUN_TYPE"), "startup") - case.set_value("RUN_TYPE", "branch") - self.assertEqual(case.get_value("RUN_TYPE"), "branch") - - with Case(casedir) as case: - self.assertEqual(case.get_value("RUN_TYPE"), "branch") - time.sleep(0.2) - safe_copy(backup, active) - env_run = EnvRun(casedir, read_only=True) - self.assertEqual(env_run.get_value("RUN_TYPE"), "startup") - - with Case(casedir, read_only=False) as case: - self.assertEqual(case.get_value("RUN_TYPE"), "startup") - case.set_value("RUN_TYPE", "branch") - - # behind the back detection. 
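# ("Behind the back" here means env_run.xml is replaced on disk while a
#  writeable Case object still holds it in memory; when that Case flushes,
#  it notices the newer on-disk timestamp and raises CIMEError instead of
#  silently overwriting the external change, which the assertRaises below
#  verifies.)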
- with self.assertRaises(CIMEError): - with Case(casedir, read_only=False) as case: - case.set_value("RUN_TYPE", "startup") - time.sleep(0.2) - safe_copy(backup, active) - - with Case(casedir, read_only=False) as case: - case.set_value("RUN_TYPE", "branch") - - # If there are no modifications within CIME, the files should not be written - # and therefore no timestamp check - with Case(casedir) as case: - time.sleep(0.2) - safe_copy(backup, active) - - ########################################################################### - def test_configure(self): - ########################################################################### - testname = "SMS.f09_g16.X" - casedir = self._create_test([testname, "--no-build"], test_id=self._baseline_name) - - manual_config_dir = os.path.join(casedir, "manual_config") - os.mkdir(manual_config_dir) - - run_cmd_no_fail("{} --machine={} --compiler={}".format(os.path.join(get_cime_root(), "tools", "configure"), self._machine, self._compiler), from_dir=manual_config_dir) - - with open(os.path.join(casedir, "env_mach_specific.xml"), "r") as fd: - case_env_contents = fd.read() - - with open(os.path.join(manual_config_dir, "env_mach_specific.xml"), "r") as fd: - man_env_contents = fd.read() - - self.assertEqual(case_env_contents, man_env_contents) - - ########################################################################### - def test_self_build_cprnc(self): - ########################################################################### - if NO_FORTRAN_RUN: - self.skipTest("Skipping fortran test") - if TEST_COMPILER and "gpu" in TEST_COMPILER: - self.skipTest("Skipping cprnc test for gpu compiler") - - testname = "ERS_Ln7.f19_g16_rx1.A" - casedir = self._create_test([testname, "--no-build"], test_id=self._baseline_name) - - run_cmd_assert_result(self, "./xmlchange CCSM_CPRNC=this_is_a_broken_cprnc", - from_dir=casedir) - run_cmd_assert_result(self, "./case.build", from_dir=casedir) - run_cmd_assert_result(self, "./case.submit", from_dir=casedir) - - self._wait_for_tests(self._baseline_name, always_wait=True) - - ########################################################################### - def test_case_clean(self): - ########################################################################### - testname = "ERS_Ln7.f19_g16_rx1.A" - casedir = self._create_test([testname, "--no-build"], test_id=self._baseline_name) - - run_cmd_assert_result(self, "./case.setup --clean", from_dir=casedir) - run_cmd_assert_result(self, "./case.setup --clean", from_dir=casedir) - run_cmd_assert_result(self, "./case.setup", from_dir=casedir) - -############################################################################### -class X_TestSingleSubmit(TestCreateTestCommon): -############################################################################### - - ########################################################################### - def test_single_submit(self): - ########################################################################### - # Skip unless on a batch system and users did not select no-batch - if (not self._hasbatch): - self.skipTest("Skipping single submit. Not valid without batch") - if get_model() != "e3sm": - self.skipTest("Skipping single submit. E3SM experimental feature") - if self._machine not in ["sandiatoss3"]: - self.skipTest("Skipping single submit. 
Only works on sandiatoss3") - - # Keep small enough for now that we don't have to worry about load balancing - self._create_test(["--single-submit", "SMS_Ln9_P8.f45_g37_rx1.A", "SMS_Ln9_P8.f19_g16_rx1.A"], - env_changes="unset CIME_GLOBAL_WALLTIME &&") - -############################################################################### -class G_TestBuildSystem(TestCreateTestCommon): -############################################################################### - - ########################################################################### - def test_clean_rebuild(self): - ########################################################################### - casedir = self._create_test(["--no-run", "SMS.f19_g16_rx1.A"], test_id=self._baseline_name) - - # Clean a component and a sharedlib - run_cmd_assert_result(self, "./case.build --clean atm", from_dir=casedir) - run_cmd_assert_result(self, "./case.build --clean gptl", from_dir=casedir) - - # Repeating should not be an error - run_cmd_assert_result(self, "./case.build --clean atm", from_dir=casedir) - run_cmd_assert_result(self, "./case.build --clean gptl", from_dir=casedir) - - run_cmd_assert_result(self, "./case.build", from_dir=casedir) - -############################################################################### -class L_TestSaveTimings(TestCreateTestCommon): -############################################################################### - - ########################################################################### - def simple_test(self, manual_timing=False): - ########################################################################### - if (NO_FORTRAN_RUN): - self.skipTest("Skipping fortran test") - timing_flag = "" if manual_timing else "--save-timing" - driver = CIME.utils.get_cime_default_driver() - if driver == "mct": - walltime="00:15:00" - else: - walltime="00:30:00" - self._create_test(["SMS_Ln9_P1.f19_g16_rx1.A", timing_flag, "--walltime="+walltime], test_id=self._baseline_name) - - statuses = glob.glob("%s/*%s/TestStatus" % (self._testroot, self._baseline_name)) - self.assertEqual(len(statuses), 1, msg="Should have had exactly one match, found %s" % statuses) - casedir = os.path.dirname(statuses[0]) - - with Case(casedir, read_only=True) as case: - lids = get_lids(case) - timing_dir = case.get_value("SAVE_TIMING_DIR") - casename = case.get_value("CASE") - - self.assertEqual(len(lids), 1, msg="Expected one LID, found %s" % lids) - - if manual_timing: - run_cmd_assert_result(self, "cd %s && %s/save_provenance postrun" % (casedir, TOOLS_DIR)) - if get_model() == "e3sm": - provenance_glob = os.path.join(timing_dir, "performance_archive", getpass.getuser(), casename, lids[0] + "*") - provenance_dirs = glob.glob(provenance_glob) - self.assertEqual(len(provenance_dirs), 1, msg="wrong number of provenance dirs, expected 1, got {}, looked for {}".format(provenance_dirs, provenance_glob)) - verify_perms(self, ''.join(provenance_dirs)) - - ########################################################################### - def test_save_timings(self): - ########################################################################### - self.simple_test() - - ########################################################################### - def test_save_timings_manual(self): - ########################################################################### - self.simple_test(manual_timing=True) - - ########################################################################### - def _record_success(self, test_name, test_success, commit, exp_last_pass, 
exp_trans_fail, baseline_dir): - ########################################################################### - save_test_success(baseline_dir, None, test_name, test_success, force_commit_test=commit) - was_success, last_pass, trans_fail = get_test_success(baseline_dir, None, test_name, testing=True) - self.assertEqual(test_success, was_success, msg="Broken was_success {} {}".format(test_name, commit)) - self.assertEqual(last_pass, exp_last_pass, msg="Broken last_pass {} {}".format(test_name, commit)) - self.assertEqual(trans_fail, exp_trans_fail, msg="Broken trans_fail {} {}".format(test_name, commit)) - if test_success: - self.assertEqual(exp_last_pass, commit, msg="Should never") - - ########################################################################### - def test_success_recording(self): - ########################################################################### - if get_model() != "e3sm": - self.skipTest("Skipping success recording tests. E3SM feature") - - fake_test1 = "faketest1" - fake_test2 = "faketest2" - baseline_dir = os.path.join(self._baseline_area, self._baseline_name) - - # Test initial state - was_success, last_pass, trans_fail = get_test_success(baseline_dir, None, fake_test1, testing=True) - self.assertFalse(was_success, msg="Broken initial was_success") - self.assertEqual(last_pass, None, msg="Broken initial last_pass") - self.assertEqual(trans_fail, None, msg="Broken initial trans_fail") - - # Test first result (test1 fails, test2 passes) - # test_name , success, commit , expP , expTF, baseline) - self._record_success(fake_test1, False , "AAA" , None , "AAA", baseline_dir) - self._record_success(fake_test2, True , "AAA" , "AAA", None , baseline_dir) - - # Test second result matches first (no transition) (test1 fails, test2 passes) - # test_name , success, commit , expP , expTF, baseline) - self._record_success(fake_test1, False , "BBB" , None , "AAA", baseline_dir) - self._record_success(fake_test2, True , "BBB" , "BBB", None , baseline_dir) - - # Test transition to new state (first real transition) (test1 passes, test2 fails) - # test_name , success, commit , expP , expTF, baseline) - self._record_success(fake_test1, True , "CCC" , "CCC", "AAA", baseline_dir) - self._record_success(fake_test2, False , "CCC" , "BBB", "CCC", baseline_dir) - - # Test transition to new state (second real transition) (test1 fails, test2 passes) - # test_name , success, commit , expP , expTF, baseline) - self._record_success(fake_test1, False , "DDD" , "CCC", "DDD", baseline_dir) - self._record_success(fake_test2, True , "DDD" , "DDD", "CCC", baseline_dir) - - # Test final repeat (test1 fails, test2 passes) - # test_name , success, commit , expP , expTF, baseline) - self._record_success(fake_test1, False , "EEE" , "CCC", "DDD", baseline_dir) - self._record_success(fake_test2, True , "EEE" , "EEE", "CCC", baseline_dir) - - # Test final transition (test1 passes, test2 fails) - # test_name , success, commit , expP , expTF, baseline) - self._record_success(fake_test1, True , "FFF" , "FFF", "DDD", baseline_dir) - self._record_success(fake_test2, False , "FFF" , "EEE", "FFF", baseline_dir) - -# Machinery for Macros generation tests. 
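A minimal sketch of how the machinery below is meant to compose (MockMachines, get_macros, _wrap_config_compilers_xml and MakefileTester are all defined next; the <compiler>/<SUPPORTS_CXX> element names follow the H_TestMakeMacros tests further down):

    # MockMachines stands in for a real Machines object, so no machine
    # configuration files are needed; get_macros drives the Compilers
    # macro writer end to end and returns the generated Makefile text.
    maker = Compilers(MockMachines("mymachine", "SomeOS"), version=2.0)
    xml = _wrap_config_compilers_xml(
        "<compiler><SUPPORTS_CXX>FALSE</SUPPORTS_CXX></compiler>")
    makefile_text = get_macros(maker, xml, "Makefile")
    # MakefileTester resolves variables by actually running gmake on the
    # generated Macros file (test_case is the running unittest.TestCase):
    MakefileTester(test_case, makefile_text).assert_variable_equals(
        "SUPPORTS_CXX", "FALSE")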
- -class MockMachines(object): - - """A mock version of the Machines object to simplify testing.""" - - def __init__(self, name, os_): - """Store the name.""" - self.name = name - self.os = os_ - - def get_machine_name(self): - """Return the name we were given.""" - return self.name - - def get_value(self, var_name): - """Allow the operating system to be queried.""" - assert var_name == "OS", "Build asked for a value not " \ - "implemented in the testing infrastructure." - return self.os - - def is_valid_compiler(self, _): # pylint:disable=no-self-use - """Assume all compilers are valid.""" - return True - - def is_valid_MPIlib(self, _): - """Assume all MPILIB settings are valid.""" - return True - -# pragma pylint: disable=unused-argument - def get_default_MPIlib(self, attributes=None): - return "mpich2" - - def get_default_compiler(self): - return "intel" - - -def get_macros(macro_maker, build_xml, build_system): - """Generate build system ("Macros" file) output from config_compilers XML. - - Arguments: - macro_maker - The underlying Build object. - build_xml - A string containing the XML to operate on. - build_system - Either "Makefile" or "CMake", depending on desired output. - - The return value is a string containing the build system output. - """ - # Build.write_macros expects file-like objects as input, so - # we need to wrap the strings in StringIO objects. - xml = CIME.six.StringIO(str(build_xml)) - output = CIME.six.StringIO() - output_format = None - if build_system == "Makefile": - output_format = "make" - elif build_system == "CMake": - output_format = "cmake" - else: - output_format = build_system - - macro_maker.write_macros_file(macros_file=output, - output_format=output_format, xml=xml) - return str(output.getvalue()) - - -def _wrap_config_compilers_xml(inner_string): - """Utility function to create a config_compilers XML string. - - Pass this function a string containing <compiler> elements, and it will add - the necessary header/footer to the file. - """ - _xml_template = """<?xml version="1.0" encoding="UTF-8"?> -<config_compilers version="2.0"> -{} -</config_compilers> -""" - - return _xml_template.format(inner_string) - - -class MakefileTester(object): - - """Helper class for checking Makefile output. - - Public methods: - __init__ - query_var - assert_variable_equals - assert_variable_matches - """ -# Note that the following is a Makefile and the echo line must begin with a tab - _makefile_template = """ -include Macros -query: -\techo '$({})' > query.out -""" - - def __init__(self, parent, make_string): - """Constructor for Makefile test helper class. - - Arguments: - parent - The TestCase object that is using this item. - make_string - Makefile contents to test. - """ - self.parent = parent - self.make_string = make_string - - def query_var(self, var_name, env, var): - """Request the value of a variable in the Makefile, as a string. - - Arguments: - var_name - Name of the variable to query. - env - A dict containing extra environment variables to set when calling - make. - var - A dict containing extra make variables to set when calling make. - (The distinction between env and var actually matters only for - CMake, though.) - """ - if env is None: - env = dict() - if var is None: - var = dict() - - # Write the Makefile strings to temporary files. 
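# (Illustrative: for var_name "FFLAGS" the generated Makefile is effectively
#      include Macros
#      query:
#          echo '$(FFLAGS)' > query.out
#  so the gmake "query" invocation below resolves FFLAGS through the freshly
#  written Macros file and leaves the value in query.out to be read back.)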
- temp_dir = tempfile.mkdtemp() - macros_file_name = os.path.join(temp_dir, "Macros") - makefile_name = os.path.join(temp_dir, "Makefile") - output_name = os.path.join(temp_dir, "query.out") - - with open(macros_file_name, "w") as macros_file: - macros_file.write(self.make_string) - with open(makefile_name, "w") as makefile: - makefile.write(self._makefile_template.format(var_name)) - - environment = os.environ.copy() - environment.update(env) - environment.update(var) - gmake_exe = MACHINE.get_value("GMAKE") - if gmake_exe is None: - gmake_exe = "gmake" - run_cmd_assert_result(self.parent, "%s query --directory=%s 2>&1" % (gmake_exe, temp_dir), env=environment) - - with open(output_name, "r") as output: - query_result = output.read().strip() - - # Clean up the Makefiles. - shutil.rmtree(temp_dir) - - return query_result - - def assert_variable_equals(self, var_name, value, env=None, var=None): - """Assert that a variable in the Makefile has a given value. - - Arguments: - var_name - Name of variable to check. - value - The string that the variable value should be equal to. - env - Optional. Dict of environment variables to set when calling make. - var - Optional. Dict of make variables to set when calling make. - """ - self.parent.assertEqual(self.query_var(var_name, env, var), value) - - def assert_variable_matches(self, var_name, regex, env=None, var=None): - """Assert that a variable in the Makefile matches a regex. - - Arguments: - var_name - Name of variable to check. - regex - The regex to match. - env - Optional. Dict of environment variables to set when calling make. - var - Optional. Dict of make variables to set when calling make. - """ - self.parent.assertRegexpMatches(self.query_var(var_name, env, var), regex) - - -class CMakeTester(object): - - """Helper class for checking CMake output. - - Public methods: - __init__ - query_var - assert_variable_equals - assert_variable_matches - """ - - _cmakelists_template = """ -include(./Macros.cmake) -file(WRITE query.out "${{{}}}") -""" - - def __init__(self, parent, cmake_string): - """Constructor for CMake test helper class. - - Arguments: - parent - The TestCase object that is using this item. - cmake_string - CMake contents to test. - """ - self.parent = parent - self.cmake_string = cmake_string - - def query_var(self, var_name, env, var): - """Request the value of a variable in Macros.cmake, as a string. - - Arguments: - var_name - Name of the variable to query. - env - A dict containing extra environment variables to set when calling - cmake. - var - A dict containing extra CMake variables to set when calling cmake. - """ - if env is None: - env = dict() - if var is None: - var = dict() - - # Write the CMake strings to temporary files. - temp_dir = tempfile.mkdtemp() - macros_file_name = os.path.join(temp_dir, "Macros.cmake") - cmakelists_name = os.path.join(temp_dir, "CMakeLists.txt") - output_name = os.path.join(temp_dir, "query.out") - - with open(macros_file_name, "w") as macros_file: - for key in var: - macros_file.write("set({} {})\n".format(key, var[key])) - macros_file.write(self.cmake_string) - with open(cmakelists_name, "w") as cmakelists: - cmakelists.write(self._cmakelists_template.format(var_name)) - - environment = os.environ.copy() - environment.update(env) - os_ = MACHINE.get_value("OS") - # cmake will not work on cray systems without this flag - if os_ == "CNL": - cmake_args = "-DCMAKE_SYSTEM_NAME=Catamount" - else: - cmake_args = "" - - run_cmd_assert_result(self.parent, "cmake %s . 
2>&1" % cmake_args, from_dir=temp_dir, env=environment) - - with open(output_name, "r") as output: - query_result = output.read().strip() - - # Clean up the CMake files. - shutil.rmtree(temp_dir) - - return query_result - - def assert_variable_equals(self, var_name, value, env=None, var=None): - """Assert that a variable in the CMakeLists has a given value. - - Arguments: - var_name - Name of variable to check. - value - The string that the variable value should be equal to. - env - Optional. Dict of environment variables to set when calling cmake. - var - Optional. Dict of CMake variables to set when calling cmake. - """ - self.parent.assertEqual(self.query_var(var_name, env, var), value) - - def assert_variable_matches(self, var_name, regex, env=None, var=None): - """Assert that a variable in the CMkeLists matches a regex. - - Arguments: - var_name - Name of variable to check. - regex - The regex to match. - env - Optional. Dict of environment variables to set when calling cmake. - var - Optional. Dict of CMake variables to set when calling cmake. - """ - self.parent.assertRegexpMatches(self.query_var(var_name, env, var), regex) - - -############################################################################### -class G_TestMacrosBasic(unittest.TestCase): -############################################################################### - - """Basic infrastructure tests. - - This class contains tests that do not actually depend on the output of the - macro file conversion. This includes basic smoke testing and tests of - error-handling in the routine. - """ - - def test_script_is_callable(self): - """The test script can be called on valid output without dying.""" - # This is really more a smoke test of this script than anything else. - maker = Compilers(MockMachines("mymachine", "SomeOS"), version=2.0) - test_xml = _wrap_config_compilers_xml("FALSE") - get_macros(maker, test_xml, "Makefile") - - def test_script_rejects_bad_xml(self): - """The macro writer rejects input that's not valid XML.""" - maker = Compilers(MockMachines("mymachine", "SomeOS"), version=2.0) - with self.assertRaises(ParseError): - get_macros(maker, "This is not valid XML.", "Makefile") - - def test_script_rejects_bad_build_system(self): - """The macro writer rejects a bad build system string.""" - maker = Compilers(MockMachines("mymachine", "SomeOS"), version=2.0) - bad_string = "argle-bargle." - with assertRaisesRegex(self, - CIMEError, - "Unrecognized build system provided to write_macros: " + bad_string): - get_macros(maker, "This string is irrelevant.", bad_string) - - -############################################################################### -class H_TestMakeMacros(unittest.TestCase): -############################################################################### - - """Makefile macros tests. - - This class contains tests of the Makefile output of Build. - - Aside from the usual setUp and test methods, this class has a utility method - (xml_to_tester) that converts XML input directly to a MakefileTester object. 
- """ - def setUp(self): - self.test_os = "SomeOS" - self.test_machine = "mymachine" - self.test_compiler = MACHINE.get_default_compiler() if TEST_COMPILER is None else TEST_COMPILER - self.test_mpilib = MACHINE.get_default_MPIlib(attributes={"compiler":self.test_compiler}) if TEST_MPILIB is None else TEST_MPILIB - - self._maker = Compilers(MockMachines(self.test_machine, self.test_os), version=2.0) - - def xml_to_tester(self, xml_string): - """Helper that directly converts an XML string to a MakefileTester.""" - test_xml = _wrap_config_compilers_xml(xml_string) - return MakefileTester(self, get_macros(self._maker, test_xml, "Makefile")) - - def test_generic_item(self): - """The macro writer can write out a single generic item.""" - xml_string = "FALSE" - tester = self.xml_to_tester(xml_string) - tester.assert_variable_equals("SUPPORTS_CXX", "FALSE") - - def test_machine_specific_item(self): - """The macro writer can pick out a machine-specific item.""" - xml1 = """TRUE""".format(self.test_machine) - xml2 = """FALSE""" - tester = self.xml_to_tester(xml1+xml2) - tester.assert_variable_equals("SUPPORTS_CXX", "TRUE") - # Do this a second time, but with elements in the reverse order, to - # ensure that the code is not "cheating" by taking the first match. - tester = self.xml_to_tester(xml2+xml1) - tester.assert_variable_equals("SUPPORTS_CXX", "TRUE") - - def test_ignore_non_match(self): - """The macro writer ignores an entry with the wrong machine name.""" - xml1 = """TRUE""" - xml2 = """FALSE""" - tester = self.xml_to_tester(xml1+xml2) - tester.assert_variable_equals("SUPPORTS_CXX", "FALSE") - # Again, double-check that we don't just get lucky with the order. - tester = self.xml_to_tester(xml2+xml1) - tester.assert_variable_equals("SUPPORTS_CXX", "FALSE") - - def test_os_specific_item(self): - """The macro writer can pick out an OS-specific item.""" - xml1 = """TRUE""".format(self.test_os) - xml2 = """FALSE""" - tester = self.xml_to_tester(xml1+xml2) - tester.assert_variable_equals("SUPPORTS_CXX", "TRUE") - tester = self.xml_to_tester(xml2+xml1) - tester.assert_variable_equals("SUPPORTS_CXX", "TRUE") - - def test_mach_other_compiler(self): - """The macro writer compiler-specific logic works as expected.""" - xml1 = """a b c""".format(self.test_compiler) - xml2 = """x y z""".format(self.test_machine) - xml3 = """x y z""".format(self.test_machine,self.test_compiler) - xml4 = """x y z""".format(self.test_machine,self.test_compiler) - tester = self.xml_to_tester(xml1) - tester.assert_variable_equals("CFLAGS", "a b c",var={"COMPILER":self.test_compiler}) - tester = self.xml_to_tester(xml1+xml2) - tester.assert_variable_equals("CFLAGS", "a b c",var={"COMPILER":self.test_compiler}) - tester = self.xml_to_tester(xml2+xml1) - tester.assert_variable_equals("CFLAGS", "a b c",var={"COMPILER":self.test_compiler}) - tester = self.xml_to_tester(xml1+xml3) - tester.assert_variable_equals("CFLAGS", "a b c x y z",var={"COMPILER":self.test_compiler}) - tester = self.xml_to_tester(xml1+xml4) - tester.assert_variable_equals("CFLAGS", "x y z",var={"COMPILER":self.test_compiler}) - tester = self.xml_to_tester(xml4+xml1) - tester.assert_variable_equals("CFLAGS", "x y z",var={"COMPILER":self.test_compiler}) - - def test_mach_beats_os(self): - """The macro writer chooses machine-specific over os-specific matches.""" - xml1 = """FALSE""".format(self.test_os) - xml2 = """TRUE""".format(self.test_machine) - tester = self.xml_to_tester(xml1+xml2) - tester.assert_variable_equals("SUPPORTS_CXX", "TRUE") - tester = 
self.xml_to_tester(xml2+xml1) - tester.assert_variable_equals("SUPPORTS_CXX", "TRUE") - - def test_mach_and_os_beats_mach(self): - """The macro writer chooses the most-specific match possible.""" - xml1 = """FALSE""".format(self.test_machine) - xml2 = """TRUE""" - xml2 = xml2.format(self.test_machine, self.test_os) - tester = self.xml_to_tester(xml1+xml2) - tester.assert_variable_equals("SUPPORTS_CXX", "TRUE") - tester = self.xml_to_tester(xml2+xml1) - tester.assert_variable_equals("SUPPORTS_CXX", "TRUE") - - def test_build_time_attribute(self): - """The macro writer writes conditionals for build-time choices.""" - xml1 = """/path/to/mpich""" - xml2 = """/path/to/openmpi""" - xml3 = """/path/to/default""" - tester = self.xml_to_tester(xml1+xml2+xml3) - tester.assert_variable_equals("MPI_PATH", "/path/to/default") - tester.assert_variable_equals("MPI_PATH", "/path/to/mpich", var={"MPILIB": "mpich"}) - tester.assert_variable_equals("MPI_PATH", "/path/to/openmpi", var={"MPILIB": "openmpi"}) - tester = self.xml_to_tester(xml3+xml2+xml1) - tester.assert_variable_equals("MPI_PATH", "/path/to/default") - tester.assert_variable_equals("MPI_PATH", "/path/to/mpich", var={"MPILIB": "mpich"}) - tester.assert_variable_equals("MPI_PATH", "/path/to/openmpi", var={"MPILIB": "openmpi"}) - - def test_reject_duplicate_defaults(self): - """The macro writer dies if given many defaults.""" - xml1 = """/path/to/default""" - xml2 = """/path/to/other_default""" - with assertRaisesRegex(self, - CIMEError, - "Variable MPI_PATH is set ambiguously in config_compilers.xml."): - self.xml_to_tester(xml1+xml2) - - def test_reject_duplicates(self): - """The macro writer dies if given many matches for a given configuration.""" - xml1 = """/path/to/mpich""" - xml2 = """/path/to/mpich2""" - with assertRaisesRegex(self, - CIMEError, - "Variable MPI_PATH is set ambiguously in config_compilers.xml."): - self.xml_to_tester(xml1+xml2) - - def test_reject_ambiguous(self): - """The macro writer dies if given an ambiguous set of matches.""" - xml1 = """/path/to/mpich""" - xml2 = """/path/to/mpi-debug""" - with assertRaisesRegex(self, - CIMEError, - "Variable MPI_PATH is set ambiguously in config_compilers.xml."): - self.xml_to_tester(xml1+xml2) - - def test_compiler_changeable_at_build_time(self): - """The macro writer writes information for multiple compilers.""" - xml1 = """FALSE""" - xml2 = """TRUE""" - tester = self.xml_to_tester(xml1+xml2) - tester.assert_variable_equals("SUPPORTS_CXX", "TRUE", var={"COMPILER": "gnu"}) - tester.assert_variable_equals("SUPPORTS_CXX", "FALSE") - - def test_base_flags(self): - """Test that we get "base" compiler flags.""" - xml1 = """-O2""" - tester = self.xml_to_tester(xml1) - tester.assert_variable_equals("FFLAGS", "-O2") - - def test_machine_specific_base_flags(self): - """Test selection among base compiler flag sets based on machine.""" - xml1 = """-O2""" - xml2 = """-O3""".format(self.test_machine) - tester = self.xml_to_tester(xml1+xml2) - tester.assert_variable_equals("FFLAGS", "-O3") - - def test_build_time_base_flags(self): - """Test selection of base flags based on build-time attributes.""" - xml1 = """-O2""" - xml2 = """-O3""" - tester = self.xml_to_tester(xml1+xml2) - tester.assert_variable_equals("FFLAGS", "-O2") - tester.assert_variable_equals("FFLAGS", "-O3", var={"DEBUG": "TRUE"}) - - def test_build_time_base_flags_same_parent(self): - """Test selection of base flags in the same parent element.""" - xml1 = """-O2""" - xml2 = """-O3""" - tester = self.xml_to_tester(""+xml1+xml2+"") - 
tester.assert_variable_equals("FFLAGS", "-O2") - tester.assert_variable_equals("FFLAGS", "-O3", var={"DEBUG": "TRUE"}) - # Check for order independence here, too. - tester = self.xml_to_tester(""+xml2+xml1+"") - tester.assert_variable_equals("FFLAGS", "-O2") - tester.assert_variable_equals("FFLAGS", "-O3", var={"DEBUG": "TRUE"}) - - def test_append_flags(self): - """Test appending flags to a list.""" - xml1 = """-delicious""" - xml2 = """-cake""" - tester = self.xml_to_tester(xml1+xml2) - tester.assert_variable_equals("FFLAGS", "-delicious -cake") - # Order independence, as usual. - tester = self.xml_to_tester(xml2+xml1) - tester.assert_variable_equals("FFLAGS", "-delicious -cake") - - def test_machine_specific_append_flags(self): - """Test appending flags that are either more or less machine-specific.""" - xml1 = """-delicious""" - xml2 = """-cake""".format(self.test_machine) - tester = self.xml_to_tester(xml1+xml2) - tester.assert_variable_matches("FFLAGS", "^(-delicious -cake|-cake -delicious)$") - - def test_machine_specific_base_keeps_append_flags(self): - """Test that machine-specific base flags don't override default append flags.""" - xml1 = """-delicious""" - xml2 = """-cake""".format(self.test_machine) - tester = self.xml_to_tester(xml1+xml2) - tester.assert_variable_equals("FFLAGS", "-cake -delicious") - tester = self.xml_to_tester(xml2+xml1) - tester.assert_variable_equals("FFLAGS", "-cake -delicious") - - def test_machine_specific_base_and_append_flags(self): - """Test that machine-specific base flags coexist with machine-specific append flags.""" - xml1 = """-delicious""".format(self.test_machine) - xml2 = """-cake""".format(self.test_machine) - tester = self.xml_to_tester(xml1+xml2) - tester.assert_variable_equals("FFLAGS", "-cake -delicious") - tester = self.xml_to_tester(xml2+xml1) - tester.assert_variable_equals("FFLAGS", "-cake -delicious") - - def test_append_flags_without_base(self): - """Test appending flags to a value set before Macros is included.""" - xml1 = """-cake""" - tester = self.xml_to_tester(xml1) - tester.assert_variable_equals("FFLAGS", "-delicious -cake", var={"FFLAGS": "-delicious"}) - - def test_build_time_append_flags(self): - """Test build_time selection of compiler flags.""" - xml1 = """-cake""" - xml2 = """-and-pie""" - tester = self.xml_to_tester(xml1+xml2) - tester.assert_variable_equals("FFLAGS", "-cake") - tester.assert_variable_matches("FFLAGS", "^(-cake -and-pie|-and-pie -cake)$", var={"DEBUG": "TRUE"}) - - def test_environment_variable_insertion(self): - """Test that ENV{..} inserts environment variables.""" - # DO it again with $ENV{} style - xml1 = """-L$ENV{NETCDF} -lnetcdf""" - tester = self.xml_to_tester(xml1) - tester.assert_variable_equals("LDFLAGS", "-L/path/to/netcdf -lnetcdf", - env={"NETCDF": "/path/to/netcdf"}) - - def test_shell_command_insertion(self): - """Test that $SHELL insert shell command output.""" - xml1 = """-O$SHELL{echo 2} -fast""" - tester = self.xml_to_tester(xml1) - tester.assert_variable_equals("FFLAGS", "-O2 -fast") - - def test_multiple_shell_commands(self): - """Test that more than one $SHELL element can be used.""" - xml1 = """-O$SHELL{echo 2} -$SHELL{echo fast}""" - tester = self.xml_to_tester(xml1) - tester.assert_variable_equals("FFLAGS", "-O2 -fast") - - def test_env_and_shell_command(self): - """Test that $ENV works inside $SHELL elements.""" - xml1 = """-O$SHELL{echo $ENV{OPT_LEVEL}} -fast""" - tester = self.xml_to_tester(xml1) - tester.assert_variable_equals("FFLAGS", "-O2 -fast", env={"OPT_LEVEL": 
"2"}) - - def test_config_variable_insertion(self): - """Test that $VAR insert variables from config_compilers.""" - # Construct an absurd chain of references just to sure that we don't - # pass by accident, e.g. outputting things in the right order just due - # to good luck in a hash somewhere. - xml1 = """stuff-${MPI_PATH}-stuff""" - xml2 = """${MPICC}""" - xml3 = """${MPICXX}""" - xml4 = """${MPIFC}""" - xml5 = """mpicc""" - tester = self.xml_to_tester(""+xml1+xml2+xml3+xml4+xml5+"") - tester.assert_variable_equals("MPI_LIB_NAME", "stuff-mpicc-stuff") - - def test_config_reject_self_references(self): - """Test that $VAR self-references are rejected.""" - # This is a special case of the next test, which also checks circular - # references. - xml1 = """${MPI_LIB_NAME}""" - err_msg = r".* has bad \$VAR references. Check for circular references or variables that are used in a \$VAR but not actually defined." - with assertRaisesRegex(self,CIMEError, err_msg): - self.xml_to_tester(""+xml1+"") - - def test_config_reject_cyclical_references(self): - """Test that cyclical $VAR references are rejected.""" - xml1 = """${MPI_PATH}""" - xml2 = """${MPI_LIB_NAME}""" - err_msg = r".* has bad \$VAR references. Check for circular references or variables that are used in a \$VAR but not actually defined." - with assertRaisesRegex(self,CIMEError, err_msg): - self.xml_to_tester(""+xml1+xml2+"") - - def test_variable_insertion_with_machine_specific_setting(self): - """Test that machine-specific $VAR dependencies are correct.""" - xml1 = """something""" - xml2 = """$MPI_PATH""".format(self.test_machine) - xml3 = """${MPI_LIB_NAME}""" - err_msg = r".* has bad \$VAR references. Check for circular references or variables that are used in a \$VAR but not actually defined." 
- with assertRaisesRegex(self,CIMEError, err_msg): - self.xml_to_tester(xml1+xml2+xml3) - - def test_override_with_machine_and_new_attributes(self): - """Test that overrides with machine-specific settings with added attributes work correctly.""" - xml1 = """ - - icc - mpicxx - mpif90 - mpicc -""".format(self.test_compiler) - xml2 = """ - - mpifoo - mpiffoo - mpifouc - -""".format(self.test_compiler, self.test_machine, self.test_mpilib) - - tester = self.xml_to_tester(xml1+xml2) - - tester.assert_variable_equals("SCC", "icc", var={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib}) - tester.assert_variable_equals("MPICXX", "mpifoo", var={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib}) - tester.assert_variable_equals("MPIFC", "mpiffoo", var={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib}) - tester.assert_variable_equals("MPICC", "mpicc", var={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib}) - - tester = self.xml_to_tester(xml2+xml1) - - tester.assert_variable_equals("SCC", "icc", var={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib}) - tester.assert_variable_equals("MPICXX", "mpifoo", var={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib}) - tester.assert_variable_equals("MPIFC", "mpiffoo", var={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib}) - tester.assert_variable_equals("MPICC", "mpicc", var={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib}) - - def test_override_with_machine_and_same_attributes(self): - """Test that machine-specific conditional overrides with the same attribute work correctly.""" - xml1 = """ - - mpifc -""".format(self.test_compiler, self.test_mpilib) - xml2 = """ - - mpif90 - -""".format(self.test_machine, self.test_compiler, self.test_mpilib) - - tester = self.xml_to_tester(xml1+xml2) - - tester.assert_variable_equals("MPIFC", "mpif90", var={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib}) - - tester = self.xml_to_tester(xml2+xml1) - - tester.assert_variable_equals("MPIFC", "mpif90", var={"COMPILER":self.test_compiler, "MPILIB":self.test_mpilib}) - - def test_appends_not_overriden(self): - """Test that machine-specific base value changes don't interfere with appends.""" - xml1=""" - - - -base1 - -debug1 - -""".format(self.test_compiler) - - xml2=""" - - - -base2 - -debug2 - -""".format(self.test_machine, self.test_compiler) - - tester = self.xml_to_tester(xml1+xml2) - - tester.assert_variable_equals("FFLAGS", "-base2", var={"COMPILER": self.test_compiler}) - tester.assert_variable_equals("FFLAGS", "-base2 -debug2", var={"COMPILER": self.test_compiler, "DEBUG": "TRUE"}) - tester.assert_variable_equals("FFLAGS", "-base2 -debug1", var={"COMPILER": self.test_compiler, "DEBUG": "FALSE"}) - - tester = self.xml_to_tester(xml2+xml1) - - tester.assert_variable_equals("FFLAGS", "-base2", var={"COMPILER": self.test_compiler}) - tester.assert_variable_equals("FFLAGS", "-base2 -debug2", var={"COMPILER": self.test_compiler, "DEBUG": "TRUE"}) - tester.assert_variable_equals("FFLAGS", "-base2 -debug1", var={"COMPILER": self.test_compiler, "DEBUG": "FALSE"}) - - def test_multilevel_specificity(self): - """Check that settings with multiple levels of machine-specificity can be resolved.""" - xml1=""" - - mpifc -""" - - xml2=""" - - mpif03 -""".format(self.test_os, self.test_mpilib) - - xml3=""" - - mpif90 -""".format(self.test_machine) - - # To verify order-independence, test every possible ordering of blocks. 
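# (Equivalent sketch: the six hand-written orderings below are exactly the
#  3! permutations of the three blocks, i.e.
#      import itertools
#      testers = [self.xml_to_tester("".join(p))
#                 for p in itertools.permutations((xml1, xml2, xml3))]
#  written out explicitly for readability.)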
- testers = [] - testers.append(self.xml_to_tester(xml1+xml2+xml3)) - testers.append(self.xml_to_tester(xml1+xml3+xml2)) - testers.append(self.xml_to_tester(xml2+xml1+xml3)) - testers.append(self.xml_to_tester(xml2+xml3+xml1)) - testers.append(self.xml_to_tester(xml3+xml1+xml2)) - testers.append(self.xml_to_tester(xml3+xml2+xml1)) - - for tester in testers: - tester.assert_variable_equals("MPIFC", "mpif90", var={"COMPILER": self.test_compiler, "MPILIB": self.test_mpilib, "DEBUG": "TRUE"}) - tester.assert_variable_equals("MPIFC", "mpif03", var={"COMPILER": self.test_compiler, "MPILIB": self.test_mpilib, "DEBUG": "FALSE"}) - tester.assert_variable_equals("MPIFC", "mpifc", var={"COMPILER": self.test_compiler, "MPILIB": "NON_MATCHING_MPI", "DEBUG": "FALSE"}) - - def test_remove_dependency_issues(self): - """Check that overridden settings don't cause inter-variable dependencies.""" - xml1=""" - - ${SFC} -""" - - xml2=""" -""".format(self.test_machine) + """ - ${MPIFC} - mpif90 -""" - - tester = self.xml_to_tester(xml1+xml2) - tester.assert_variable_equals("SFC", "mpif90") - tester.assert_variable_equals("MPIFC", "mpif90") - - tester = self.xml_to_tester(xml2+xml1) - tester.assert_variable_equals("SFC", "mpif90") - tester.assert_variable_equals("MPIFC", "mpif90") - - -############################################################################### -class I_TestCMakeMacros(H_TestMakeMacros): -############################################################################### - - """CMake macros tests. - - This class contains tests of the CMake output of Build. - - This class simply inherits all of the methods of TestMakeOutput, but changes - the definition of xml_to_tester to create a CMakeTester instead. - """ - - def xml_to_tester(self, xml_string): - """Helper that directly converts an XML string to a MakefileTester.""" - test_xml = _wrap_config_compilers_xml(xml_string) - if (NO_CMAKE): - self.skipTest("Skipping cmake test") - else: - return CMakeTester(self, get_macros(self._maker, test_xml, "CMake")) - -############################################################################### -class S_TestManageAndQuery(unittest.TestCase): - """Tests various scripts to manage and query xml files""" - - def setUp(self): - if get_model() == "e3sm": - self.skipTest("Skipping XML test management tests. E3SM does not use this.") - - def _run_and_assert_query_testlist(self, extra_args=""): - """Ensure that query_testlist runs successfully with the given extra arguments""" - files = Files() - testlist_drv = files.get_value("TESTS_SPEC_FILE", {"component":"drv"}) - - run_cmd_assert_result(self, "{}/query_testlists --xml-testlist {} {}".format( - SCRIPT_DIR, testlist_drv, extra_args)) - - def test_query_testlists_runs(self): - """Make sure that query_testlists runs successfully - - This simply makes sure that query_testlists doesn't generate any errors - when it runs. This helps ensure that changes in other utilities don't - break query_testlists. 
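
Stepping back to the $VAR rejection tests near the top of this removed block: a variable that refers to itself, or any longer reference cycle, must raise CIMEError with the "bad $VAR references" message. A hedged illustration of the underlying idea (find_bad_var_refs and its regex are hypothetical, not CIME's implementation):

    import re

    def find_bad_var_refs(definitions):
        # definitions maps a variable name to its raw value string.
        ref_re = re.compile(r"\$\{?(\w+)\}?")

        def resolves(name, seen):
            if name not in definitions:
                return False  # used in a $VAR but never defined
            if name in seen:
                return False  # self-reference or longer cycle
            return all(resolves(ref, seen | {name})
                       for ref in ref_re.findall(definitions[name]))

        return sorted(v for v in definitions if not resolves(v, set()))

    print(find_bad_var_refs({"MPI_LIB_NAME": "${MPI_LIB_NAME}"}))
    print(find_bad_var_refs({"MPI_PATH": "${MPI_LIB_NAME}",
                             "MPI_LIB_NAME": "${MPI_PATH}"}))

Both calls flag every variable involved, matching the tests' view of a self-reference as the one-node case of a cycle.
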
- """ - self._run_and_assert_query_testlist(extra_args="--show-options") - - def test_query_testlists_define_testtypes_runs(self): - """Make sure that query_testlists runs successfully with the --define-testtypes argument""" - self._run_and_assert_query_testlist(extra_args="--define-testtypes") - - def test_query_testlists_count_runs(self): - """Make sure that query_testlists runs successfully with the --count argument""" - self._run_and_assert_query_testlist(extra_args="--count") - - def test_query_testlists_list_runs(self): - """Make sure that query_testlists runs successfully with the --list argument""" - self._run_and_assert_query_testlist(extra_args="--list categories") - -############################################################################### -class B_CheckCode(unittest.TestCase): -############################################################################### - # Tests are generated in the main loop below - longMessage = True - - all_results = None - -def make_pylint_test(pyfile, all_files): - def test(self): - if B_CheckCode.all_results is None: - B_CheckCode.all_results = check_code(all_files) - #pylint: disable=unsubscriptable-object - try: - result = B_CheckCode.all_results[pyfile] - except KeyError: - # Missing key == no issues found - self.assertTrue(True) - else: - self.assertTrue(result == "", msg=f"{pyfile}\n{result}") - - return test - -def check_for_pylint(): - #pylint: disable=import-error - from distutils.spawn import find_executable - pylint = find_executable("pylint") - if pylint is not None: - output = run_cmd_no_fail("pylint --version") - pylintver = re.search(r"pylint\s+(\d+)[.](\d+)[.](\d+)", output) - major = int(pylintver.group(1)) - minor = int(pylintver.group(2)) - if pylint is None or major < 1 or (major == 1 and minor < 5): - print("pylint version 1.5 or newer not found, pylint tests skipped") - return False - return True - -def write_provenance_info(): - curr_commit = get_current_commit(repo=LIB_DIR) - logging.info("\nTesting commit %s" % curr_commit) - cime_model = get_model() - logging.info("Using cime_model = %s" % cime_model) - logging.info("Testing machine = %s" % MACHINE.get_machine_name()) - if TEST_COMPILER is not None: - logging.info("Testing compiler = %s"% TEST_COMPILER) - if TEST_MPILIB is not None: - logging.info("Testing mpilib = %s"% TEST_MPILIB) - logging.info("Test root: %s" % TEST_ROOT) - logging.info("Test driver: %s" % CIME.utils.get_cime_default_driver()) - logging.info("Python version {}\n".format(sys.version)) - -def cleanup(): - # if the TEST_ROOT directory exists and is empty, remove it - if os.path.exists(TEST_ROOT) and TEST_RESULT.result.wasSuccessful(): - testreporter = os.path.join(TEST_ROOT,"testreporter") - files = os.listdir(TEST_ROOT) - if len(files)==1 and os.path.isfile(testreporter): - os.unlink(testreporter) - if not os.listdir(TEST_ROOT): - print("All pass, removing directory:", TEST_ROOT) - os.rmdir(TEST_ROOT) - -def _main_func(description): - global MACHINE - global NO_CMAKE - global FAST_ONLY - global NO_BATCH - global TEST_COMPILER - global TEST_MPILIB - global TEST_ROOT - global GLOBAL_TIMEOUT - global NO_TEARDOWN - global NO_FORTRAN_RUN - global TEST_RESULT - config = CIME.utils.get_cime_config() - - help_str = \ -""" -{0} [TEST] [TEST] -OR -{0} --help - -\033[1mEXAMPLES:\033[0m - \033[1;32m# Run the full suite \033[0m - > {0} - - \033[1;32m# Run all code checker tests \033[0m - > {0} B_CheckCode - - \033[1;32m# Run test test_wait_for_test_all_pass from class M_TestWaitForTests \033[0m - > {0} 
M_TestWaitForTests.test_wait_for_test_all_pass -""".format(os.path.basename(sys.argv[0])) - - parser = argparse.ArgumentParser(usage=help_str, - description=description, - formatter_class=argparse.ArgumentDefaultsHelpFormatter) - - parser.add_argument("--fast", action="store_true", - help="Skip full system tests, which saves a lot of time") - - parser.add_argument("--no-batch", action="store_true", - help="Do not submit jobs to batch system, run locally." - " If false, will default to machine setting.") - - parser.add_argument("--no-fortran-run", action="store_true", - help="Do not run any fortran jobs. Implies --fast" - " Used for github actions") - - parser.add_argument("--no-cmake", action="store_true", - help="Do not run cmake tests") - - parser.add_argument("--no-teardown", action="store_true", - help="Do not delete directories left behind by testing") - - parser.add_argument("--machine", - help="Select a specific machine setting for cime") - - parser.add_argument("--compiler", - help="Select a specific compiler setting for cime") - - parser.add_argument( "--mpilib", - help="Select a specific compiler setting for cime") - - parser.add_argument( "--test-root", - help="Select a specific test root for all cases created by the testing") - - parser.add_argument("--timeout", type=int, - help="Select a specific timeout for all tests") - - ns, args = parser.parse_known_args() - - # Now set the sys.argv to the unittest_args (leaving sys.argv[0] alone) - sys.argv[1:] = args - - FAST_ONLY = ns.fast - NO_BATCH = ns.no_batch - NO_CMAKE = ns.no_cmake - GLOBAL_TIMEOUT = ns.timeout - NO_TEARDOWN = ns.no_teardown - NO_FORTRAN_RUN = ns.no_fortran_run - if NO_FORTRAN_RUN: - FAST_ONLY = True - - os.chdir(os.path.dirname(__file__)) - - if ns.machine is not None: - MACHINE = Machines(machine=ns.machine) - os.environ["CIME_MACHINE"] = ns.machine - elif "CIME_MACHINE" in os.environ: - mach_name = os.environ["CIME_MACHINE"] - MACHINE = Machines(machine=mach_name) - elif config.has_option("create_test", "MACHINE"): - MACHINE = Machines(machine=config.get("create_test", "MACHINE")) - elif config.has_option("main", "MACHINE"): - MACHINE = Machines(machine=config.get("main", "MACHINE")) - else: - MACHINE = Machines() - - if ns.compiler is not None: - TEST_COMPILER = ns.compiler - elif config.has_option("create_test", "COMPILER"): - TEST_COMPILER = config.get("create_test", "COMPILER") - elif config.has_option("main", "COMPILER"): - TEST_COMPILER = config.get("main", "COMPILER") - - if ns.mpilib is not None: - TEST_MPILIB = ns.mpilib - elif config.has_option("create_test", "MPILIB"): - TEST_MPILIB = config.get("create_test", "MPILIB") - elif config.has_option("main", "MPILIB"): - TEST_MPILIB = config.get("main", "MPILIB") - - if ns.test_root is not None: - TEST_ROOT = ns.test_root - elif config.has_option("create_test", "TEST_ROOT"): - TEST_ROOT = config.get("create_test", "TEST_ROOT") - else: - TEST_ROOT = os.path.join(MACHINE.get_value("CIME_OUTPUT_ROOT"), - "scripts_regression_test.%s"% CIME.utils.get_timestamp()) - - args = lambda: None # just something to set attrs on - for log_param in ["debug", "silent", "verbose"]: - flag = "--%s" % log_param - if flag in sys.argv: - sys.argv.remove(flag) - setattr(args, log_param, True) - else: - setattr(args, log_param, False) - - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, None) - - write_provenance_info() - atexit.register(cleanup) - - # Find all python files in repo and create a pylint test for each - if check_for_pylint(): - files_to_test = 
get_all_checkable_files() - - for file_to_test in files_to_test: - pylint_test = make_pylint_test(file_to_test, files_to_test) - testname = "test_pylint_%s" % file_to_test.replace("/", "_").replace(".", "_") - expect(not hasattr(B_CheckCode, testname), "Repeat %s" % testname) - setattr(B_CheckCode, testname, pylint_test) - - try: - TEST_RESULT = unittest.main(verbosity=2, catchbreak=True, exit=False) - except CIMEError as e: - if e.__str__() != "False": - print("Detected failures, leaving directory:", TEST_ROOT) - raise - else: - # Implements same behavior as unittesst.main - # https://github.com/python/cpython/blob/b6d68aa08baebb753534a26d537ac3c0d2c21c79/Lib/unittest/main.py#L272-L273 - sys.exit(not TEST_RESULT.result.wasSuccessful()) - -if (__name__ == "__main__"): - _main_func(__doc__) diff --git a/scripts/tests/user_mods_test3/shell_commands b/scripts/tests/user_mods_test3/shell_commands index 7adf34ebb44..476661b9b56 100755 --- a/scripts/tests/user_mods_test3/shell_commands +++ b/scripts/tests/user_mods_test3/shell_commands @@ -5,4 +5,4 @@ then ./xmlchange PIO_VERSION=2 else ./xmlchange PIO_VERSION=1 -fi \ No newline at end of file +fi diff --git a/setup.cfg b/setup.cfg index 8423bccd5d2..a32390e0b21 100644 --- a/setup.cfg +++ b/setup.cfg @@ -13,3 +13,14 @@ console_scripts = create_test = CIME.scripts.create_test:_main_func query_config = CIME.scripts.query_config:_main_func query_testlists = CIME.scripts.query_testlists:_main_func + +[tool:pytest] +junit_family=xunit2 +addopts = --cov=CIME --cov-report term-missing --cov-report html:test_coverage/html --cov-report xml:test_coverage/coverage.xml -s +python_files = test_*.py +testpaths = + scripts/lib/CIME/tests + +[coverage:report] +omit = + scripts/lib/CIME/tests/* diff --git a/src/share/README b/src/share/README index dff6073505b..e3efa6552d0 100644 --- a/src/share/README +++ b/src/share/README @@ -28,4 +28,3 @@ Current subsets ("libraries") of shared code only include: util - very generic, general-purpose code that is likely to be useful to all CIME components. CIME components may be explicitly required to use some parts of this code, for example the physical constants module. - diff --git a/src/share/timing/ChangeLog b/src/share/timing/ChangeLog index c06f7d70fc9..619565f0224 100644 --- a/src/share/timing/ChangeLog +++ b/src/share/timing/ChangeLog @@ -7,10 +7,10 @@ timing_180911: Moved detail to end of timer name when specify planned move of the prefix logic into gptl.c) [Patrick Worley] timing_180910: Removed addition of double quotes to timer names in - perf_mod.F90 and added this as an output option in + perf_mod.F90 and added this as an output option in gptl.c (so internally the names do not have the quotes) [Patrick Worley] -timing_180822: Fixed perf_mod.F90 bug that prevents PAPI derived events +timing_180822: Fixed perf_mod.F90 bug that prevents PAPI derived events from being recognized. [Patrick Worley] timing_180731: Refactored implementation of append/write modes; @@ -19,12 +19,12 @@ timing_180731: Refactored implementation of append/write modes; timing_180730: Added support for setting GPTLmaxthreads. Cleaned up white space. Added SEQUENTIAL to fortran open, to avoid problems on some systems. Added timing overhead measurement to perf_mod. Fixed errors in - f_wrappers.c in definition of gptlpr_query_append and + f_wrappers.c in definition of gptlpr_query_append and gptlpr_XXX_write. 
[Patrick Worley (some from Jim Rosinksi)] timing_180403: Added GPTLstartstop_val(f) to gptl.h, to provide explicit typing and eliminate compile-time warning for some compilers. - Also do not define the CPP tokens HAVE_COMM_F2C and + Also do not define the CPP tokens HAVE_COMM_F2C and HAVE_GETTIMEOFDAY in private.h if they have already been defined, also eliminating compile-time warnings. [Patrick Worley] diff --git a/src/share/timing/GPTLutil.c b/src/share/timing/GPTLutil.c index f882834d2a1..d9a1a93866a 100644 --- a/src/share/timing/GPTLutil.c +++ b/src/share/timing/GPTLutil.c @@ -79,4 +79,3 @@ void *GPTLallocate (const int nbytes) return ptr; } - diff --git a/src/share/timing/gptl.c b/src/share/timing/gptl.c index c6d50500bc4..1eeacccfd25 100644 --- a/src/share/timing/gptl.c +++ b/src/share/timing/gptl.c @@ -347,7 +347,7 @@ int GPTLsetoption (const int option, /* option */ printf ("%s: boolean dopr_quotes = %d\n", thisfunc, val); return 0; case GPTLprint_mode: - print_mode = (PRMode) val; + print_mode = (PRMode) val; if (verbose) printf ("%s: print_mode = %s\n", thisfunc, modestr (print_mode)); return 0; @@ -679,10 +679,10 @@ int GPTLprefix_set (const char *prefixname) /* prefix string */ /* ** Note: if in a parallel region with only one active thread, e.g. ** thread 0, this will NOT be identified as a serial regions. - ** If want GPTLprefix_set to apply to all threads, will need to - ** "fire up" the idle threads in some sort of parallel loop. + ** If want GPTLprefix_set to apply to all threads, will need to + ** "fire up" the idle threads in some sort of parallel loop. ** It is not safe to just test omp_in_parallel and - ** omp_get_thread_num == 1 unless add a thread barrier, and this + ** omp_get_thread_num == 1 unless add a thread barrier, and this ** barrier would apply to all calls, so would be a performance bottleneck. */ @@ -690,7 +690,7 @@ int GPTLprefix_set (const char *prefixname) /* prefix string */ prefix_len_nt = len_prefix; ptr_prefix = prefix_nt; - + } else { if ((t = get_thread_num ()) < 0) @@ -707,7 +707,7 @@ int GPTLprefix_set (const char *prefixname) /* prefix string */ } /* -** GPTLprefix_setf: define prefix for subsequent timer names when +** GPTLprefix_setf: define prefix for subsequent timer names when ** the string may not be null terminated ** ** Input arguments: @@ -744,10 +744,10 @@ int GPTLprefix_setf (const char *prefixname, const int prefixlen) /* prefix str /* ** Note: if in a parallel region with only one active thread, e.g. ** thread 0, this will NOT be identified as a serial regions. - ** If want GPTLprefix_setf to apply to all threads, will need to - ** "fire up" the idle threads in some sort of parallel loop. + ** If want GPTLprefix_setf to apply to all threads, will need to + ** "fire up" the idle threads in some sort of parallel loop. ** It is not safe to just test omp_in_parallel and - ** omp_get_thread_num == 1 unless add a thread barrier, and this + ** omp_get_thread_num == 1 unless add a thread barrier, and this ** barrier would apply to all calls, so would be a performance bottleneck. */ @@ -755,7 +755,7 @@ int GPTLprefix_setf (const char *prefixname, const int prefixlen) /* prefix str prefix_len_nt = len_prefix; ptr_prefix = prefix_nt; - + } else { if ((t = get_thread_num ()) < 0) @@ -804,10 +804,10 @@ int GPTLprefix_unset () /* ** Note: if in a parallel region with only one active thread, e.g. ** thread 0, this will NOT be identified as a serial regions. 
- ** If want GPTLprefix_unset to apply to all threads, will need to - ** "fire up" the idle threads in some sort of parallel loop. + ** If want GPTLprefix_unset to apply to all threads, will need to + ** "fire up" the idle threads in some sort of parallel loop. ** It is not safe to just test omp_in_parallel and - ** omp_get_thread_num == 1 unless add a thread barrier, and this + ** omp_get_thread_num == 1 unless add a thread barrier, and this ** barrier would apply to all calls, so would be a performance bottleneck. */ @@ -815,7 +815,7 @@ int GPTLprefix_unset () prefix_len_nt = 0; ptr_prefix = prefix_nt; - + } else { if ((t = get_thread_num ()) < 0) @@ -962,7 +962,7 @@ int GPTLstart (const char *timername) /* timer name */ /* ** If prefix string is defined, prepend it to timername - ** and assign the name pointer to the new string. + ** and assign the name pointer to the new string. ** Otherwise assign the name pointer to the original string. */ @@ -1234,7 +1234,7 @@ int GPTLstartf (const char *timername, const int namelen) /* timer name and l /* ** If prefix string is defined, prepend it to timername - ** and assign the name pointer to the new string. + ** and assign the name pointer to the new string. ** Otherwise assign the name pointer to the original string. */ @@ -1470,7 +1470,7 @@ int GPTLstartf_handle (const char *name, /* timer name */ ** add_prefix: add prefix string to timer name ** ** Input arguments: -** new_name: new name +** new_name: new name ** timername: timer name ** namelen: length of timer name ** t: thread id @@ -1488,7 +1488,7 @@ static int add_prefix (char *new_name, const char *timername, const int namelen, for (c = 0; c < numchars; c++) { new_name[c] = prefix_nt[c]; } - + /* add thread-specific prefix */ numchars = MIN (prefix_len[t], MAX_CHARS-prefix_len_nt); for (c = 0; c < numchars; c++) { @@ -1510,7 +1510,7 @@ static int add_prefix (char *new_name, const char *timername, const int namelen, /* ** update_ll_hash: Update linked list and hash table. -** Called by GPTLstart(f), GPTLstart_instr, +** Called by GPTLstart(f), GPTLstart_instr, ** and GPTLstart(f)_handle. ** ** Input arguments: @@ -1790,7 +1790,7 @@ int GPTLstop (const char *timername) /* timer name */ /* ** If prefix string is defined, prepend it to timername - ** and assign the name pointer to the new string. + ** and assign the name pointer to the new string. ** Otherwise assign the name pointer to the original string. */ @@ -2049,7 +2049,7 @@ int GPTLstopf (const char *timername, const int namelen) /* timer name and lengt /* ** If prefix string is defined, prepend it to timername - ** and assign the name pointer to the new string. + ** and assign the name pointer to the new string. ** Otherwise assign the name pointer to the original string. */ @@ -3001,7 +3001,7 @@ int construct_tree (Timer *timerst, Method method) return 0; } -/* +/* ** modestr: Return a pointer to a string that represents the mode ** ** Input arguments: @@ -3256,7 +3256,7 @@ static void printstats (const Timer *timer, /* Pad to length of longest name */ - extraspace = max_name_len[t] - strlen (timer->name); + extraspace = max_name_len[t] - strlen (timer->name); for (i = 0; i < extraspace; ++i) fprintf (fp, " "); @@ -4308,7 +4308,7 @@ int GPTLquery (const char *timername, /* ** If prefix string is defined, prepend it to timername - ** and assign the name pointer to the new string. + ** and assign the name pointer to the new string. ** Otherwise assign the name pointer to the original string. 
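
The add_prefix hunk above is whitespace-only, but the routine's bounded concatenation is worth restating: the shared prefix, then the per-thread prefix, then the timer name are copied in order, each clamped so the result never exceeds MAX_CHARS. A hedged Python rendering of that rule (not the C code; the limit's value here is made up):

    MAX_CHARS = 63  # hypothetical stand-in for GPTL's compile-time limit

    def add_prefix(shared_prefix, thread_prefix, timer_name):
        # Copy each piece in order, truncating so the total stays bounded.
        name = shared_prefix[:MAX_CHARS]
        name += thread_prefix[:MAX_CHARS - len(name)]
        name += timer_name[:MAX_CHARS - len(name)]
        return name

    print(add_prefix("run:", "t0:", "dyn_core"))  # -> run:t0:dyn_core
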
*/ @@ -4376,7 +4376,7 @@ int GPTLquerycounters (const char *timername, /* ** If prefix string is defined, prepend it to timername - ** and assign the name pointer to the new string. + ** and assign the name pointer to the new string. ** Otherwise assign the name pointer to the original string. */ @@ -4443,7 +4443,7 @@ int GPTLget_wallclock (const char *timername, /* ** If prefix string is defined, prepend it to timername - ** and assign the name pointer to the new string. + ** and assign the name pointer to the new string. ** Otherwise assign the name pointer to the original string. */ @@ -4515,7 +4515,7 @@ int GPTLstartstop_vals (const char *timername, /* timer name */ /* ** If prefix string is defined, prepend it to timername - ** and assign the name pointer to the new string. + ** and assign the name pointer to the new string. ** Otherwise assign the name pointer to the original string. */ if ((prefix_len[t] > 0) || (prefix_len_nt > 0)){ @@ -4531,7 +4531,7 @@ int GPTLstartstop_vals (const char *timername, /* timer name */ if (ptr) { /* - ** The timer already exists. If add_count is > 0, then increment the + ** The timer already exists. If add_count is > 0, then increment the ** count and update the time stamp. Then let control jump to the point where ** wallclock settings are adjusted. */ @@ -4553,7 +4553,7 @@ int GPTLstartstop_vals (const char *timername, /* timer name */ /* ** If add_count >= 0, then set count to desired value. - ** Otherwise, assume add_count == 0 and set count to 0. + ** Otherwise, assume add_count == 0 and set count to 0. */ if (add_count >= 0){ ptr->count = add_count; @@ -4568,7 +4568,7 @@ int GPTLstartstop_vals (const char *timername, /* timer name */ ptr->wall.min = add_time; ptr->wall.latest_is_min = 1; - /* + /* ** Minor mod: Subtract the overhead of the above start/stop call, before ** adding user input */ @@ -4582,12 +4582,12 @@ int GPTLstartstop_vals (const char *timername, /* timer name */ /* Update accum with user input */ ptr->wall.accum += add_time; - /* + /* ** Update latest with user input: - ** If add_count > 0 and old count > 0 (new count > add_count), + ** If add_count > 0 and old count > 0 (new count > add_count), ** assume new event time is the average (add_time/add_count). - ** If add_count > 0 and old count = 0 (new count == add_count), - ** assume new event time is the augmented average + ** If add_count > 0 and old count = 0 (new count == add_count), + ** assume new event time is the augmented average ** ((latest value + add_time)/add_count). ** If add_count == 0, new event time is latest value + add_time. */ @@ -4606,8 +4606,8 @@ int GPTLstartstop_vals (const char *timername, /* timer name */ /* Update min with user input */ if ((ptr->count <= 1) || (add_count == ptr->count)) { - /* - ** still recording walltime for first occurrence, + /* + ** still recording walltime for first occurrence, ** so assign latest estimate to min and prev_min */ ptr->wall.min = ptr->wall.latest; @@ -4616,15 +4616,15 @@ int GPTLstartstop_vals (const char *timername, /* timer name */ if (add_count > 0){ /* check whether latest is the new min */ if (ptr->wall.latest < ptr->wall.min){ - ptr->wall.prev_min = ptr->wall.min; + ptr->wall.prev_min = ptr->wall.min; ptr->wall.min = ptr->wall.latest; ptr->wall.latest_is_min = 1; } else { ptr->wall.latest_is_min = 0; } } else { - /* - ** still recording walltime for latest occurrence, + /* + ** still recording walltime for latest occurrence, ** so check whether updated latest is the new min. 
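
The comment just above in GPTLstartstop_vals (and its twin in GPTLstartstop_valsf below) spells out three cases for updating the "latest" event time. A compact, hedged restatement of that documented arithmetic as a Python sketch, not the C implementation:

    def updated_latest(latest, add_time, add_count, old_count):
        if add_count > 0 and old_count > 0:
            return add_time / add_count             # plain average
        if add_count > 0:                           # old_count == 0
            return (latest + add_time) / add_count  # augmented average
        return latest + add_time                    # add_count == 0: accumulate

    print(updated_latest(0.0, 6.0, 3, 2))  # -> 2.0
    print(updated_latest(1.0, 5.0, 3, 0))  # -> 2.0
    print(updated_latest(1.0, 5.0, 0, 4))  # -> 6.0
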
*/ if (ptr->wall.latest_is_min == 1){ @@ -4685,7 +4685,7 @@ int GPTLstartstop_valsf (const char *timername, /* timer name */ /* ** If prefix string is defined, prepend it to timername - ** and assign the name pointer to the new string. + ** and assign the name pointer to the new string. ** Otherwise assign the name pointer to the original string. */ if ((prefix_len[t] > 0) || (prefix_len_nt > 0)){ @@ -4701,7 +4701,7 @@ int GPTLstartstop_valsf (const char *timername, /* timer name */ if (ptr) { /* - ** The timer already exists. If add_count is > 0, then increment the + ** The timer already exists. If add_count is > 0, then increment the ** count and update the time stamp. Then let control jump to the point where ** wallclock settings are adjusted. */ @@ -4723,7 +4723,7 @@ int GPTLstartstop_valsf (const char *timername, /* timer name */ /* ** If add_count >= 0, then set count to desired value. - ** Otherwise, assume add_count == 0 and set count to 0. + ** Otherwise, assume add_count == 0 and set count to 0. */ if (add_count >= 0){ ptr->count = add_count; @@ -4738,7 +4738,7 @@ int GPTLstartstop_valsf (const char *timername, /* timer name */ ptr->wall.min = add_time; ptr->wall.latest_is_min = 1; - /* + /* ** Minor mod: Subtract the overhead of the above start/stop call, before ** adding user input */ @@ -4752,12 +4752,12 @@ int GPTLstartstop_valsf (const char *timername, /* timer name */ /* Update accum with user input */ ptr->wall.accum += add_time; - /* + /* ** Update latest with user input: - ** If add_count > 0 and old count > 0 (new count > add_count), + ** If add_count > 0 and old count > 0 (new count > add_count), ** assume new event time is the average (add_time/add_count). - ** If add_count > 0 and old count = 0 (new count == add_count), - ** assume new event time is the augmented average + ** If add_count > 0 and old count = 0 (new count == add_count), + ** assume new event time is the augmented average ** ((latest value + add_time)/add_count). ** If add_count == 0, new event time is latest value + add_time. */ @@ -4776,8 +4776,8 @@ int GPTLstartstop_valsf (const char *timername, /* timer name */ /* Update min with user input */ if ((ptr->count <= 1) || (add_count == ptr->count)) { - /* - ** still recording walltime for first occurrence, + /* + ** still recording walltime for first occurrence, ** so assign latest estimate to min and prev_min */ ptr->wall.min = ptr->wall.latest; @@ -4786,15 +4786,15 @@ int GPTLstartstop_valsf (const char *timername, /* timer name */ if (add_count > 0){ /* check whether latest is the new min */ if (ptr->wall.latest < ptr->wall.min){ - ptr->wall.prev_min = ptr->wall.min; + ptr->wall.prev_min = ptr->wall.min; ptr->wall.min = ptr->wall.latest; ptr->wall.latest_is_min = 1; } else { ptr->wall.latest_is_min = 0; } } else { - /* - ** still recording walltime for latest occurrence, + /* + ** still recording walltime for latest occurrence, ** so check whether updated latest is the new min. */ if (ptr->wall.latest_is_min == 1){ @@ -4855,7 +4855,7 @@ int GPTLget_eventvalue (const char *timername, /* ** If prefix string is defined, prepend it to timername - ** and assign the name pointer to the new string. + ** and assign the name pointer to the new string. ** Otherwise assign the name pointer to the original string. */ @@ -5478,7 +5478,7 @@ Timer *GPTLgetentry (const char *timername) /* ** If prefix string is defined, prepend it to timername - ** and assign the name pointer to the new string. + ** and assign the name pointer to the new string. 
** Otherwise assign the name pointer to the original string. */ @@ -5683,7 +5683,7 @@ static void print_threadmapping (FILE *fp) static int serial_region () { - /* + /* ** This test is more robust than 'omp_in_parallel', which is true ** in a parallel region when only one thread is active, which may ** not be thread 0. Other active thread teams also will not be @@ -5946,7 +5946,7 @@ static void print_threadmapping (FILE *fp) /* ** serial_region: determine whether in a serial or parallel region -** +** ** Not currently implemented (or even defined) when using PTHREADS/ ** It is an error if this is ever called. ** diff --git a/src/share/timing/gptl_papi.c b/src/share/timing/gptl_papi.c index 941316918be..1f701cb8976 100644 --- a/src/share/timing/gptl_papi.c +++ b/src/share/timing/gptl_papi.c @@ -1323,4 +1323,3 @@ int GPTLevent_code_to_name (int code, char *name) } #endif /* HAVE_PAPI */ - diff --git a/tools/configure b/tools/configure index 8d472fa8542..cd790c13f19 100755 --- a/tools/configure +++ b/tools/configure @@ -29,48 +29,68 @@ from CIME.XML.machines import Machines logger = logging.getLogger(__name__) + def parse_command_line(args): """Command line argument parser for configure.""" description = __doc__ parser = argparse.ArgumentParser(description=description) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument("--machine", - help="The machine to create build information for.") - parser.add_argument("--machines-dir", - help="The machines directory to take build information " - "from. Overrides the CIME_MODEL environment variable, " - "and must be specified if that variable is not set.") - parser.add_argument("--macros-format", action='append', - choices=['Makefile', 'CMake'], - help="The format of Macros file to generate. If " - "'Makefile' is passed in, a file called 'Macros.make' " - "is generated. If 'CMake' is passed in, a file called " - "'Macros.cmake' is generated. This option can be " - "specified multiple times to generate multiple files. " - "If not used at all, Macros generation is skipped. " - "Note that Depends files are currently always in " - "Makefile format, regardless of this option.") - parser.add_argument("--output-dir", default=os.getcwd(), - help="The directory to write files to. If not " - "specified, defaults to the current working directory.") - - parser.add_argument("--compiler", "-compiler", - help="Specify a compiler. " - "To see list of supported compilers for each machine, use the utility query_config in this directory") - - parser.add_argument("--mpilib", "-mpilib", - help="Specify the mpilib. " - "To see list of supported mpilibs for each machine, use the utility query_config in this directory. " - "The default is the first listing in MPILIBS in config_machines.xml") - - parser.add_argument("--clean", action="store_true", - help="Remove old Macros and env files before attempting to create new ones") - - parser.add_argument("--comp-interface", - default="mct", - help="""The cime driver/cpl interface to use.""" - ) + parser.add_argument( + "--machine", help="The machine to create build information for." + ) + parser.add_argument( + "--machines-dir", + help="The machines directory to take build information " + "from. Overrides the CIME_MODEL environment variable, " + "and must be specified if that variable is not set.", + ) + parser.add_argument( + "--macros-format", + action="append", + choices=["Makefile", "CMake"], + help="The format of Macros file to generate. 
If " + "'Makefile' is passed in, a file called 'Macros.make' " + "is generated. If 'CMake' is passed in, a file called " + "'Macros.cmake' is generated. This option can be " + "specified multiple times to generate multiple files. " + "If not used at all, Macros generation is skipped. " + "Note that Depends files are currently always in " + "Makefile format, regardless of this option.", + ) + parser.add_argument( + "--output-dir", + default=os.getcwd(), + help="The directory to write files to. If not " + "specified, defaults to the current working directory.", + ) + + parser.add_argument( + "--compiler", + "-compiler", + help="Specify a compiler. " + "To see list of supported compilers for each machine, use the utility query_config in this directory", + ) + + parser.add_argument( + "--mpilib", + "-mpilib", + help="Specify the mpilib. " + "To see list of supported mpilibs for each machine, use the utility query_config in this directory. " + "The default is the first listing in MPILIBS in config_machines.xml", + ) + + parser.add_argument( + "--clean", + action="store_true", + help="Remove old Macros and env files before attempting to create new ones", + ) + + parser.add_argument( + "--comp-interface", + default="mct", + help="""The cime driver/cpl interface to use.""", + ) argcnt = len(args) args = parser.parse_args() @@ -85,20 +105,25 @@ def parse_command_line(args): if model is not None: machobj = Machines(machine=args.machine) else: - expect(False, "Either --mach-dir or the CIME_MODEL environment " - "variable must be specified!") + expect( + False, + "Either --mach-dir or the CIME_MODEL environment " + "variable must be specified!", + ) - opts['machobj'] = machobj + opts["machobj"] = machobj if args.macros_format is None: - opts['macros_format'] = [] + opts["macros_format"] = [] else: - opts['macros_format'] = args.macros_format + opts["macros_format"] = args.macros_format - expect(os.path.isdir(args.output_dir), - "Output directory '%s' does not exist." % args.output_dir) + expect( + os.path.isdir(args.output_dir), + "Output directory '%s' does not exist." % args.output_dir, + ) - opts['output_dir'] = args.output_dir + opts["output_dir"] = args.output_dir # Set compiler. 
if args.compiler is not None: @@ -108,23 +133,31 @@ def parse_command_line(args): else: compiler = machobj.get_default_compiler() os.environ["COMPILER"] = compiler - expect(opts['machobj'].is_valid_compiler(compiler), - "Invalid compiler vendor given in COMPILER environment variable: %s" - % compiler) - opts['compiler'] = compiler - opts['os'] = machobj.get_value('OS') - opts['comp_interface'] = args.comp_interface + expect( + opts["machobj"].is_valid_compiler(compiler), + "Invalid compiler vendor given in COMPILER environment variable: %s" % compiler, + ) + opts["compiler"] = compiler + opts["os"] = machobj.get_value("OS") + opts["comp_interface"] = args.comp_interface if args.clean: - files = ["Macros.make", "Macros.cmake", "env_mach_specific.xml", ".env_mach_specific.sh", - ".env_mach_specific.csh", "Depends.%s"%compiler, "Depends.%s"%args.machine, - "Depends.%s.%s"%(args.machine,compiler)] + files = [ + "Macros.make", + "Macros.cmake", + "env_mach_specific.xml", + ".env_mach_specific.sh", + ".env_mach_specific.csh", + "Depends.%s" % compiler, + "Depends.%s" % args.machine, + "Depends.%s.%s" % (args.machine, compiler), + ] for file_ in files: if os.path.isfile(file_): - logger.warn("Removing file %s"%file_) + logger.warn("Removing file %s" % file_) os.remove(file_) if argcnt == 2: - opts['clean_only'] = True + opts["clean_only"] = True return opts # Set MPI library. @@ -133,32 +166,45 @@ def parse_command_line(args): elif "MPILIB" in os.environ: mpilib = os.environ["MPILIB"] else: - mpilib = machobj.get_default_MPIlib(attributes={"compiler":compiler}) + mpilib = machobj.get_default_MPIlib(attributes={"compiler": compiler}) os.environ["MPILIB"] = mpilib - expect(opts['machobj'].is_valid_MPIlib(mpilib, attributes={"compiler":compiler}), - "Invalid MPI library name given in MPILIB environment variable: %s" % - mpilib) - opts['mpilib'] = mpilib + expect( + opts["machobj"].is_valid_MPIlib(mpilib, attributes={"compiler": compiler}), + "Invalid MPI library name given in MPILIB environment variable: %s" % mpilib, + ) + opts["mpilib"] = mpilib # Set DEBUG flag. if "DEBUG" in os.environ: - expect(os.environ["DEBUG"].lower() in ('true', 'false'), - "Invalid DEBUG environment variable value (must be 'TRUE' or " - "'FALSE'): %s" % os.environ["DEBUG"]) + expect( + os.environ["DEBUG"].lower() in ("true", "false"), + "Invalid DEBUG environment variable value (must be 'TRUE' or " + "'FALSE'): %s" % os.environ["DEBUG"], + ) debug = os.environ["DEBUG"].lower() == "true" else: debug = False os.environ["DEBUG"] = "FALSE" - opts['debug'] = debug + opts["debug"] = debug return opts + def _main(): opts = parse_command_line(sys.argv) if "clean_only" not in opts or not opts["clean_only"]: - configure(opts['machobj'], opts['output_dir'], opts['macros_format'], - opts['compiler'], opts['mpilib'], opts['debug'], opts['comp_interface'], opts['os']) + configure( + opts["machobj"], + opts["output_dir"], + opts["macros_format"], + opts["compiler"], + opts["mpilib"], + opts["debug"], + opts["comp_interface"], + opts["os"], + ) + if __name__ == "__main__": _main() diff --git a/tools/cprnc/compare_vars_mod.F90.in b/tools/cprnc/compare_vars_mod.F90.in index 4ea08d781c0..83dc131856a 100644 --- a/tools/cprnc/compare_vars_mod.F90.in +++ b/tools/cprnc/compare_vars_mod.F90.in @@ -222,6 +222,7 @@ contains ! 
TYPE real,int,double subroutine compare_var_{TYPE}(n,file, vid, idiff, ifilldiff, ifail, tindex) + use, intrinsic :: ieee_arithmetic, only: ieee_is_nan integer, intent(in) :: n type(file_t) :: file(2) integer, intent(in) :: vid(2) @@ -357,8 +358,18 @@ contains s2 = count(mask(:,1)) vdiff = abs(buf(:,1)-buf(:,2)) rms = sqrt(sum(vdiff**2,mask(:,1))/real(s2)) - diffcnt = count((vdiff>0) .and. mask(:,1)) - + diffcnt = 0 +#if {ITYPE}==TYPEDOUBLE || {ITYPE}==TYPEREAL + ! Count the NaN values only if they differ between files + do i=1,s1 + if(mask(i,1)) then + if(ieee_is_nan(buf(i,1)) .neqv. ieee_is_nan(buf(i,2))) then + diffcnt = diffcnt + 1 + endif + endif + enddo +#endif + diffcnt = diffcnt + count(vdiff>0 .and. mask(:,1)) ! Compute normalized rms difference; normalize using the avg abs field ! values. Note that this differs from the definition of normalized rms ! difference found in some references (e.g., normalizing by [max - min], which diff --git a/tools/cprnc/run_tests b/tools/cprnc/run_tests index 9b505420078..98d1a22f3bd 100755 --- a/tools/cprnc/run_tests +++ b/tools/cprnc/run_tests @@ -51,6 +51,8 @@ my %tests = ('copy.nc' => {control => 'control.nc'}, 'copy_char.nc' => {control => 'control_char.nc', extra_args => '-m'}, + + 'diffs_in_nans.nc' => {control => 'control_floatDoubleNan.nc'}, ); #---------------------------------------------------------------------- diff --git a/tools/cprnc/test_inputs/README b/tools/cprnc/test_inputs/README index b5dda7c8998..f1bdfbd94e6 100644 --- a/tools/cprnc/test_inputs/README +++ b/tools/cprnc/test_inputs/README @@ -176,3 +176,13 @@ Note: This file just has a character variable, in order to test what is output for character variables (which cannot be analyzed). - copy_char.nc: identical to control_char.nc + +--- FILES COMPARED AGAINST control_floatDoubleNan.nc --- + +Note: This file has a float variable, a double variable, and a double +variable with a NaN value. Its initial purpose is for testing +comparisons involving NaNs. + +- diffs_in_nans.nc: a float and double variable each have a NaN where + the control file does not, and another double variable has a NaN + only where the control file also has a NaN diff --git a/tools/cprnc/test_inputs/control_floatDoubleNan.nc b/tools/cprnc/test_inputs/control_floatDoubleNan.nc new file mode 100644 index 00000000000..1e99d3281e6 Binary files /dev/null and b/tools/cprnc/test_inputs/control_floatDoubleNan.nc differ diff --git a/tools/cprnc/test_inputs/diffs_in_nans.nc b/tools/cprnc/test_inputs/diffs_in_nans.nc new file mode 100644 index 00000000000..732bf56896f Binary files /dev/null and b/tools/cprnc/test_inputs/diffs_in_nans.nc differ diff --git a/tools/load_balancing_tool/layouts.py b/tools/load_balancing_tool/layouts.py index 6e53b6a20bb..7b4e0b32d79 100644 --- a/tools/load_balancing_tool/layouts.py +++ b/tools/load_balancing_tool/layouts.py @@ -1,6 +1,7 @@ import optimize_model import pulp + class IceLndAtmOcn(optimize_model.OptimizeModel): """ Optimized the problem based on the Layout @@ -43,7 +44,7 @@ class IceLndAtmOcn(optimize_model.OptimizeModel): """ def get_required_components(self): - return ['LND', 'ICE', 'ATM', 'OCN'] + return ["LND", "ICE", "ATM", "OCN"] def optimize(self): """ @@ -52,16 +53,24 @@ def optimize(self): set state STATE_SOLVED_OK if solved, otherwise STATE_SOLVED_BAD """ - assert self.state != self.STATE_UNDEFINED,\ - "set_data() must be called before optimize()!" 
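
Per the cprnc hunks above (compare_vars_mod.F90.in plus the run_tests and README updates), a NaN now counts as a difference only when it appears on one side; a NaN present in both files is not flagged, which is exactly what the diffs_in_nans.nc fixture exercises. The rule as a hedged Python sketch (nan_aware_diff_count is hypothetical, not cprnc code):

    import math

    def nan_aware_diff_count(a, b):
        count = 0
        for x, y in zip(a, b):
            if math.isnan(x) != math.isnan(y):
                count += 1                  # NaN on exactly one side
            elif not math.isnan(x) and abs(x - y) > 0:
                count += 1                  # ordinary numeric difference
        return count

    nan = float("nan")
    print(nan_aware_diff_count([1.0, nan, nan], [1.0, 2.0, nan]))  # -> 1
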
- self.atm = self.models['ATM'] - self.lnd = self.models['LND'] - self.ice = self.models['ICE'] - self.ocn = self.models['OCN'] - self.real_variables = ['TotalTime', 'T1', 'Tice', 'Tlnd', 'Tatm', - 'Tocn'] - self.integer_variables = ['NBice', 'NBlnd', 'NBatm', 'NBocn', - 'Nice', 'Nlnd', 'Natm', 'Nocn'] + assert ( + self.state != self.STATE_UNDEFINED + ), "set_data() must be called before optimize()!" + self.atm = self.models["ATM"] + self.lnd = self.models["LND"] + self.ice = self.models["ICE"] + self.ocn = self.models["OCN"] + self.real_variables = ["TotalTime", "T1", "Tice", "Tlnd", "Tatm", "Tocn"] + self.integer_variables = [ + "NBice", + "NBlnd", + "NBatm", + "NBocn", + "Nice", + "Nlnd", + "Natm", + "Nocn", + ] self.X = {} X = self.X self.prob = pulp.LpProblem("Minimize ACME time cost", pulp.LpMinimize) @@ -71,32 +80,54 @@ def optimize(self): for iv in self.integer_variables: X[iv] = pulp.LpVariable(iv, lowBound=1, cat=pulp.LpInteger) - # cost function - self.prob += X['TotalTime'] + self.prob += X["TotalTime"] - #constraints + # constraints self.constraints = [] # Layout-dependent constraints. Choosing another layout to model # will require editing these constraints - self.constraints.append([X['Tice'] - X['T1'] <= 0, "Tice - T1 == 0"]) - self.constraints.append([X['Tlnd'] - X['T1'] <= 0, "Tlnd - T1 == 0"]) - self.constraints.append([X['T1'] + X['Tatm'] - X['TotalTime'] <= 0, - "T1 + Tatm - TotalTime <= 0"]) - self.constraints.append([X['Tocn'] - X['TotalTime'] <= 0, - "Tocn - TotalTime == 0"]) - self.constraints.append([X['Nice'] + X['Nlnd'] - X['Natm'] == 0, - "Nice + Nlnd - Natm == 0"]) - self.constraints.append([X['Natm'] + X['Nocn'] == self.maxtasks, - "Natm + Nocn <= %d" % (self.maxtasks)]) - self.constraints.append([self.atm.blocksize * X['NBatm'] - X['Natm'] == 0, - "Natm = %d * NBatm" % self.atm.blocksize]) - self.constraints.append([self.ice.blocksize * X['NBice'] - X['Nice'] == 0, - "Nice = %d * NBice" % self.ice.blocksize]) - self.constraints.append([self.lnd.blocksize * X['NBlnd'] - X['Nlnd'] == 0, - "Nlnd = %d * NBlnd" % self.lnd.blocksize]) - self.constraints.append([self.ocn.blocksize * X['NBocn'] - X['Nocn'] == 0, - "Nocn = %d * NBocn" % self.ocn.blocksize]) + self.constraints.append([X["Tice"] - X["T1"] <= 0, "Tice - T1 == 0"]) + self.constraints.append([X["Tlnd"] - X["T1"] <= 0, "Tlnd - T1 == 0"]) + self.constraints.append( + [X["T1"] + X["Tatm"] - X["TotalTime"] <= 0, "T1 + Tatm - TotalTime <= 0"] + ) + self.constraints.append( + [X["Tocn"] - X["TotalTime"] <= 0, "Tocn - TotalTime == 0"] + ) + self.constraints.append( + [X["Nice"] + X["Nlnd"] - X["Natm"] == 0, "Nice + Nlnd - Natm == 0"] + ) + self.constraints.append( + [ + X["Natm"] + X["Nocn"] == self.maxtasks, + "Natm + Nocn <= %d" % (self.maxtasks), + ] + ) + self.constraints.append( + [ + self.atm.blocksize * X["NBatm"] - X["Natm"] == 0, + "Natm = %d * NBatm" % self.atm.blocksize, + ] + ) + self.constraints.append( + [ + self.ice.blocksize * X["NBice"] - X["Nice"] == 0, + "Nice = %d * NBice" % self.ice.blocksize, + ] + ) + self.constraints.append( + [ + self.lnd.blocksize * X["NBlnd"] - X["Nlnd"] == 0, + "Nlnd = %d * NBlnd" % self.lnd.blocksize, + ] + ) + self.constraints.append( + [ + self.ocn.blocksize * X["NBocn"] - X["Nocn"] == 0, + "Nocn = %d * NBocn" % self.ocn.blocksize, + ] + ) # These are the constraints based on the timing data. # They should be the same no matter what the layout of the components. @@ -115,46 +146,67 @@ def get_solution(self): """ Return a dictionary of the solution variables. 
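
The IceLndAtmOcn constraints above follow a single pulp pattern: one linear objective to minimize, linear couplings among the task counts, and blocksize multiples enforced through integer variables. A self-contained toy of the same pattern, with two hypothetical components and made-up linear cost models (not the tool's real model), assuming the default CBC solver that ships with pulp is available:

    import pulp

    prob = pulp.LpProblem("toy_layout", pulp.LpMinimize)
    t = pulp.LpVariable("TotalTime", lowBound=0)
    natm = pulp.LpVariable("Natm", lowBound=1, cat=pulp.LpInteger)
    nocn = pulp.LpVariable("Nocn", lowBound=1, cat=pulp.LpInteger)

    prob += t                        # objective: total time per model day
    prob += natm + nocn == 64        # every available task is assigned
    prob += 100 - natm - t <= 0      # toy ATM cost model: t >= 100 - Natm
    prob += 80 - 2 * nocn - t <= 0   # toy OCN cost model: t >= 80 - 2*Nocn

    prob.solve()
    print(pulp.LpStatus[prob.status], natm.varValue, nocn.varValue, t.varValue)

The real layouts additionally tie component times to models fitted from timing data and write the solution out as a PE layout file via write_pe_template.
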
""" - assert self.state == self.STATE_SOLVED_OK,\ - "solver failed, no solution available" - return {'NBLOCKS_ICE':self.X['NBice'].varValue, - 'NBLOCKS_LND':self.X['NBlnd'].varValue, - 'NBLOCKS_ATM':self.X['NBatm'].varValue, - 'NBLOCKS_OCN':self.X['NBocn'].varValue, - 'NTASKS_ICE':self.X['Nice'].varValue, - 'NTASKS_LND':self.X['Nlnd'].varValue, - 'NTASKS_ATM':self.X['Natm'].varValue, - 'NTASKS_OCN':self.X['Nocn'].varValue, - 'NTASKS_TOTAL':self.maxtasks, - 'COST_ICE':self.X['Tice'].varValue, - 'COST_LND':self.X['Tlnd'].varValue, - 'COST_ATM':self.X['Tatm'].varValue, - 'COST_OCN':self.X['Tocn'].varValue, - 'COST_TOTAL':self.X['TotalTime'].varValue} + assert ( + self.state == self.STATE_SOLVED_OK + ), "solver failed, no solution available" + return { + "NBLOCKS_ICE": self.X["NBice"].varValue, + "NBLOCKS_LND": self.X["NBlnd"].varValue, + "NBLOCKS_ATM": self.X["NBatm"].varValue, + "NBLOCKS_OCN": self.X["NBocn"].varValue, + "NTASKS_ICE": self.X["Nice"].varValue, + "NTASKS_LND": self.X["Nlnd"].varValue, + "NTASKS_ATM": self.X["Natm"].varValue, + "NTASKS_OCN": self.X["Nocn"].varValue, + "NTASKS_TOTAL": self.maxtasks, + "COST_ICE": self.X["Tice"].varValue, + "COST_LND": self.X["Tlnd"].varValue, + "COST_ATM": self.X["Tatm"].varValue, + "COST_OCN": self.X["Tocn"].varValue, + "COST_TOTAL": self.X["TotalTime"].varValue, + } def write_pe_file(self, pefilename): """ Write out a pe_file that can be used to implement the optimized layout """ - assert self.state == self.STATE_SOLVED_OK,\ - "solver failed, no solution available" - natm = int(self.X['Natm'].varValue) - nlnd = int(self.X['Nlnd'].varValue) - nice = int(self.X['Nice'].varValue) - nocn = int(self.X['Nocn'].varValue) - ntasks = {'atm':natm, 'lnd':nlnd, 'rof':1, 'ice':nice, - 'ocn':nocn, 'glc':1, 'wav':1, 'cpl':1} - roots = {'atm':0, 'lnd':nice, 'rof':0, 'ice':0, - 'ocn':natm, 'glc':0, 'wav':0, 'cpl':0} + assert ( + self.state == self.STATE_SOLVED_OK + ), "solver failed, no solution available" + natm = int(self.X["Natm"].varValue) + nlnd = int(self.X["Nlnd"].varValue) + nice = int(self.X["Nice"].varValue) + nocn = int(self.X["Nocn"].varValue) + ntasks = { + "atm": natm, + "lnd": nlnd, + "rof": 1, + "ice": nice, + "ocn": nocn, + "glc": 1, + "wav": 1, + "cpl": 1, + } + roots = { + "atm": 0, + "lnd": nice, + "rof": 0, + "ice": 0, + "ocn": natm, + "glc": 0, + "wav": 0, + "cpl": 0, + } nthrds = {} - for c in ['atm', 'lnd', 'rof', 'ice', 'ocn', 'glc', 'wav', 'cpl']: + for c in ["atm", "lnd", "rof", "ice", "ocn", "glc", "wav", "cpl"]: if c.upper() in self.models: nthrds[c] = self.models[c.upper()].nthrds else: nthrds[c] = 1 self.write_pe_template(pefilename, ntasks, nthrds, roots) + class IceLndWavAtmOcn(optimize_model.OptimizeModel): """ Optimized the problem based on the Layout @@ -201,9 +253,8 @@ class IceLndWavAtmOcn(optimize_model.OptimizeModel): def __init__(self): self.models = {} - def get_required_components(self): - return ['LND', 'ICE', 'WAV', 'ATM', 'OCN'] + return ["LND", "ICE", "WAV", "ATM", "OCN"] def optimize(self): """ @@ -212,17 +263,35 @@ def optimize(self): set state STATE_SOLVED_OK if solved, otherwise STATE_SOLVED_BAD """ - assert self.state != self.STATE_UNDEFINED,\ - "set_data() must be called before optimize()!" 
- self.atm = self.models['ATM'] - self.lnd = self.models['LND'] - self.ice = self.models['ICE'] - self.ocn = self.models['OCN'] - self.wav = self.models['WAV'] - self.real_variables = ['TotalTime', 'T1', 'Tice', 'Tlnd', 'Tatm', - 'Tocn', 'Twav'] - self.integer_variables = ['NBice', 'NBlnd', 'NBatm', 'NBocn', 'NBwav', - 'Nice', 'Nlnd', 'Natm', 'Nocn', 'Nwav'] + assert ( + self.state != self.STATE_UNDEFINED + ), "set_data() must be called before optimize()!" + self.atm = self.models["ATM"] + self.lnd = self.models["LND"] + self.ice = self.models["ICE"] + self.ocn = self.models["OCN"] + self.wav = self.models["WAV"] + self.real_variables = [ + "TotalTime", + "T1", + "Tice", + "Tlnd", + "Tatm", + "Tocn", + "Twav", + ] + self.integer_variables = [ + "NBice", + "NBlnd", + "NBatm", + "NBocn", + "NBwav", + "Nice", + "Nlnd", + "Natm", + "Nocn", + "Nwav", + ] self.X = {} X = self.X self.prob = pulp.LpProblem("Minimize ACME time cost", pulp.LpMinimize) @@ -232,35 +301,64 @@ def optimize(self): for iv in self.integer_variables: X[iv] = pulp.LpVariable(iv, lowBound=1, cat=pulp.LpInteger) - # cost function - self.prob += X['TotalTime'] + self.prob += X["TotalTime"] - #constraints + # constraints self.constraints = [] # Layout-dependent constraints. Choosing another layout to model # will require editing these constraints - self.constraints.append([X['Tice'] - X['T1'] <= 0, "Tice - T1 == 0"]) - self.constraints.append([X['Tlnd'] - X['T1'] <= 0, "Tlnd - T1 == 0"]) - self.constraints.append([X['Twav'] - X['T1'] <= 0, "Twav - T1 == 0"]) - self.constraints.append([X['T1'] + X['Tatm'] - X['TotalTime'] <= 0, - "T1 + Tatm - TotalTime <= 0"]) - self.constraints.append([X['Tocn'] - X['TotalTime'] <= 0, - "Tocn - TotalTime == 0"]) - self.constraints.append([X['Nice'] + X['Nlnd'] + X['Nwav'] - X['Natm'] == 0, - "Nice + Nlnd + Nwav - Natm == 0"]) - self.constraints.append([X['Natm'] + X['Nocn'] == self.maxtasks, - "Natm + Nocn <= %d" % (self.maxtasks)]) - self.constraints.append([self.atm.blocksize * X['NBatm'] - X['Natm'] == 0, - "Natm = %d * NBatm" % self.atm.blocksize]) - self.constraints.append([self.ice.blocksize * X['NBice'] - X['Nice'] == 0, - "Nice = %d * NBice" % self.ice.blocksize]) - self.constraints.append([self.lnd.blocksize * X['NBlnd'] - X['Nlnd'] == 0, - "Nlnd = %d * NBlnd" % self.lnd.blocksize]) - self.constraints.append([self.ocn.blocksize * X['NBocn'] - X['Nocn'] == 0, - "Nocn = %d * NBocn" % self.ocn.blocksize]) - self.constraints.append([self.wav.blocksize * X['NBwav'] - X['Nwav'] == 0, - "Nwav = %d * NBwav" % self.wav.blocksize]) + self.constraints.append([X["Tice"] - X["T1"] <= 0, "Tice - T1 == 0"]) + self.constraints.append([X["Tlnd"] - X["T1"] <= 0, "Tlnd - T1 == 0"]) + self.constraints.append([X["Twav"] - X["T1"] <= 0, "Twav - T1 == 0"]) + self.constraints.append( + [X["T1"] + X["Tatm"] - X["TotalTime"] <= 0, "T1 + Tatm - TotalTime <= 0"] + ) + self.constraints.append( + [X["Tocn"] - X["TotalTime"] <= 0, "Tocn - TotalTime == 0"] + ) + self.constraints.append( + [ + X["Nice"] + X["Nlnd"] + X["Nwav"] - X["Natm"] == 0, + "Nice + Nlnd + Nwav - Natm == 0", + ] + ) + self.constraints.append( + [ + X["Natm"] + X["Nocn"] == self.maxtasks, + "Natm + Nocn <= %d" % (self.maxtasks), + ] + ) + self.constraints.append( + [ + self.atm.blocksize * X["NBatm"] - X["Natm"] == 0, + "Natm = %d * NBatm" % self.atm.blocksize, + ] + ) + self.constraints.append( + [ + self.ice.blocksize * X["NBice"] - X["Nice"] == 0, + "Nice = %d * NBice" % self.ice.blocksize, + ] + ) + self.constraints.append( + [ + 
self.lnd.blocksize * X["NBlnd"] - X["Nlnd"] == 0, + "Nlnd = %d * NBlnd" % self.lnd.blocksize, + ] + ) + self.constraints.append( + [ + self.ocn.blocksize * X["NBocn"] - X["Nocn"] == 0, + "Nocn = %d * NBocn" % self.ocn.blocksize, + ] + ) + self.constraints.append( + [ + self.wav.blocksize * X["NBwav"] - X["Nwav"] == 0, + "Nwav = %d * NBwav" % self.wav.blocksize, + ] + ) # These are the constraints based on the timing data. # They should be the same no matter what the layout of the components. @@ -279,45 +377,65 @@ def get_solution(self): """ Return a dictionary of the solution variables. """ - assert self.state == self.STATE_SOLVED_OK,\ - "solver failed, no solution available" - return {'NBLOCKS_ICE':self.X['NBice'].varValue, - 'NBLOCKS_LND':self.X['NBlnd'].varValue, - 'NBLOCKS_WAV':self.X['NBwav'].varValue, - 'NBLOCKS_ATM':self.X['NBatm'].varValue, - 'NBLOCKS_OCN':self.X['NBocn'].varValue, - 'NTASKS_ICE':self.X['Nice'].varValue, - 'NTASKS_LND':self.X['Nlnd'].varValue, - 'NTASKS_WAV':self.X['Nwav'].varValue, - 'NTASKS_ATM':self.X['Natm'].varValue, - 'NTASKS_OCN':self.X['Nocn'].varValue, - 'NTASKS_TOTAL':self.maxtasks, - 'COST_ICE':self.X['Tice'].varValue, - 'COST_LND':self.X['Tlnd'].varValue, - 'COST_WAV':self.X['Twav'].varValue, - 'COST_ATM':self.X['Tatm'].varValue, - 'COST_OCN':self.X['Tocn'].varValue, - 'COST_TOTAL':self.X['TotalTime'].varValue} + assert ( + self.state == self.STATE_SOLVED_OK + ), "solver failed, no solution available" + return { + "NBLOCKS_ICE": self.X["NBice"].varValue, + "NBLOCKS_LND": self.X["NBlnd"].varValue, + "NBLOCKS_WAV": self.X["NBwav"].varValue, + "NBLOCKS_ATM": self.X["NBatm"].varValue, + "NBLOCKS_OCN": self.X["NBocn"].varValue, + "NTASKS_ICE": self.X["Nice"].varValue, + "NTASKS_LND": self.X["Nlnd"].varValue, + "NTASKS_WAV": self.X["Nwav"].varValue, + "NTASKS_ATM": self.X["Natm"].varValue, + "NTASKS_OCN": self.X["Nocn"].varValue, + "NTASKS_TOTAL": self.maxtasks, + "COST_ICE": self.X["Tice"].varValue, + "COST_LND": self.X["Tlnd"].varValue, + "COST_WAV": self.X["Twav"].varValue, + "COST_ATM": self.X["Tatm"].varValue, + "COST_OCN": self.X["Tocn"].varValue, + "COST_TOTAL": self.X["TotalTime"].varValue, + } def write_pe_file(self, pefilename): """ Write out a pe_file that can be used to implement the optimized layout """ - assert self.state == self.STATE_SOLVED_OK,\ - "solver failed, no solution available" - natm = int(self.X['Natm'].varValue) - nlnd = int(self.X['Nlnd'].varValue) - nice = int(self.X['Nice'].varValue) - nocn = int(self.X['Nocn'].varValue) - nwav = int(self.X['Nwav'].varValue) - - ntasks = {'atm':natm, 'lnd':nlnd, 'rof':1, 'ice':nice, - 'ocn':nocn, 'glc':1, 'wav':nwav, 'cpl':1} - roots = {'atm':0, 'lnd':0, 'rof':0, 'ice':nlnd, - 'ocn':natm, 'glc':0, 'wav':nlnd+nice, 'cpl':0} + assert ( + self.state == self.STATE_SOLVED_OK + ), "solver failed, no solution available" + natm = int(self.X["Natm"].varValue) + nlnd = int(self.X["Nlnd"].varValue) + nice = int(self.X["Nice"].varValue) + nocn = int(self.X["Nocn"].varValue) + nwav = int(self.X["Nwav"].varValue) + + ntasks = { + "atm": natm, + "lnd": nlnd, + "rof": 1, + "ice": nice, + "ocn": nocn, + "glc": 1, + "wav": nwav, + "cpl": 1, + } + roots = { + "atm": 0, + "lnd": 0, + "rof": 0, + "ice": nlnd, + "ocn": natm, + "glc": 0, + "wav": nlnd + nice, + "cpl": 0, + } nthrds = {} - for c in ['atm', 'lnd', 'rof', 'ice', 'ocn', 'glc', 'wav', 'cpl']: + for c in ["atm", "lnd", "rof", "ice", "ocn", "glc", "wav", "cpl"]: if c.upper() in self.models: nthrds[c] = self.models[c.upper()].nthrds else: diff --git 
a/tools/load_balancing_tool/load_balancing_solve.py b/tools/load_balancing_tool/load_balancing_solve.py index bef12a3c1d5..2b95d930e69 100755 --- a/tools/load_balancing_tool/load_balancing_solve.py +++ b/tools/load_balancing_tool/load_balancing_solve.py @@ -18,82 +18,104 @@ try: from Tools.standard_script_setup import * -except ImportError, e: - print "Error importing Tools.standard_script_setup" - print "May need to add cime/scripts to PYTHONPATH\n" +except ImportError as e: + print("Error importing Tools.standard_script_setup") + print("May need to add cime/scripts to PYTHONPATH\n") raise ImportError(e) from CIME.utils import expect from CIME.XML.machines import Machines + logger = logging.getLogger(__name__) # These values can be overridden on the command line DEFAULT_TESTID = "lbt" DEFAULT_BLOCKSIZE = 1 DEFAULT_LAYOUT = "IceLndAtmOcn" -COMPONENT_LIST = ['ATM', 'ICE', 'CPL', 'LND', 'WAV', 'ROF', 'OCN', 'GLC', 'ESP'] +COMPONENT_LIST = ["ATM", "ICE", "CPL", "LND", "WAV", "ROF", "OCN", "GLC", "ESP"] ############################################################################### def parse_command_line(args, description): -############################################################################### + ############################################################################### help_str = """ Solve a Mixed Integer Linear Program to find a PE layout that minimizes the wall-clock time per model day. """ - parser = argparse.ArgumentParser(usage=help_str, - description=description, - formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser = argparse.ArgumentParser( + usage=help_str, + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) CIME.utils.setup_standard_logging_options(parser) - parser.add_argument('--test-id', default=DEFAULT_TESTID, - help='test-id to use for all timing runs') - - parser.add_argument("-r", "--test-root", - help="Where test cases were created." - " Will default to output root as defined in the config_machines file") - - parser.add_argument('--timing-dir', help='alternative to using casename ' - 'to find timing data, instead read all files in' - ' this directory') - - parser.add_argument('--blocksize', - help='default minimum size of blocks to assign to all ' - 'components. Components can be assigned different ' - 'blocksizes using --blocksize_XXX. Default 1', type=int) + parser.add_argument( + "--test-id", default=DEFAULT_TESTID, help="test-id to use for all timing runs" + ) + + parser.add_argument( + "-r", + "--test-root", + help="Where test cases were created." + " Will default to output root as defined in the config_machines file", + ) + + parser.add_argument( + "--timing-dir", + help="alternative to using casename " + "to find timing data, instead read all files in" + " this directory", + ) + + parser.add_argument( + "--blocksize", + help="default minimum size of blocks to assign to all " + "components. Components can be assigned different " + "blocksizes using --blocksize_XXX. Default 1", + type=int, + ) for c in COMPONENT_LIST: - parser.add_argument('--blocksize-%s' % c.lower(), - help='minimum blocksize for component %s, if ' - 'different from --blocksize', type=int) - - parser.add_argument('--total-tasks', type=int, - help='Number of pes available for assignment') - - parser.add_argument("--layout", - help="name of layout to solve (default selected internally)") - - parser.add_argument("--graph-models", action="store_true", - help="plot cost v. ntasks models. 
requires matplotlib") - - parser.add_argument("--print-models", action="store_true", - help="print all costs and ntasks") + parser.add_argument( + "--blocksize-%s" % c.lower(), + help="minimum blocksize for component %s, if " "different from --blocksize", + type=int, + ) + + parser.add_argument( + "--total-tasks", type=int, help="Number of pes available for assignment" + ) + + parser.add_argument( + "--layout", help="name of layout to solve (default selected internally)" + ) + + parser.add_argument( + "--graph-models", + action="store_true", + help="plot cost v. ntasks models. requires matplotlib", + ) + + parser.add_argument( + "--print-models", action="store_true", help="print all costs and ntasks" + ) parser.add_argument("--pe-output", help="write pe layout to file") - parser.add_argument('--json-output', help="write MILP data to .json file") + parser.add_argument("--json-output", help="write MILP data to .json file") - parser.add_argument('--json-input', help="solve using data from .json file") + parser.add_argument("--json-input", help="solve using data from .json file") - args = CIME.utils.parse_args_and_handle_standard_logging_options(args, - parser) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) if args.total_tasks is None and args.json_input is None: - expect(args.total_tasks is not None or args.json_input is not None, - "--total-tasks or --json-input option must be set") + expect( + args.total_tasks is not None or args.json_input is not None, + "--total-tasks or --json-input option must be set", + ) blocksizes = {} for c in COMPONENT_LIST: - attrib = 'blocksize_%s' % c.lower() + attrib = "blocksize_%s" % c.lower() if getattr(args, attrib) is not None: blocksizes[c] = getattr(args, attrib) elif args.blocksize is not None: @@ -103,10 +125,19 @@ def parse_command_line(args, description): machobj = Machines() test_root = machobj.get_value("CIME_OUTPUT_ROOT") - return (args.test_id, test_root, args.timing_dir, blocksizes, - args.total_tasks, args.layout, args.graph_models, - args.print_models, args.pe_output, args.json_output, - args.json_input) + return ( + args.test_id, + test_root, + args.timing_dir, + blocksizes, + args.total_tasks, + args.layout, + args.graph_models, + args.print_models, + args.pe_output, + args.json_output, + args.json_input, + ) def _locate_timing_files(test_root, test_id, timing_dir): @@ -119,7 +150,7 @@ def _locate_timing_files(test_root, test_id, timing_dir): # Add command-line timing directory if it exists if timing_dir is not None: - logger.info('found directory ' + timing_dir) + logger.info("found directory " + timing_dir) timing_dirs.append(timing_dir) else: # Add script_dir/casename_prefix_*/timing @@ -127,7 +158,7 @@ def _locate_timing_files(test_root, test_id, timing_dir): if fn.endswith(test_id): fn = os.path.join(test_root, fn, "timing") if os.path.isdir(fn): - print "found {}".format(fn) + print("found {}".format(fn)) timing_cases_tmp.append(fn) timing_dirs = sorted(timing_cases_tmp) @@ -136,12 +167,13 @@ def _locate_timing_files(test_root, test_id, timing_dir): full_fn = None for fn in os.listdir(td): full_fn = os.path.join(td, fn) - if full_fn.find('.gz') < 0: + if full_fn.find(".gz") < 0: timing_files.append(full_fn) if full_fn is None: logger.warning("WARNING: no timing files found in directory %s", (td)) return timing_files + def _parse_timing_files(timing_files): """ Parse every file in list for timing information and return data dict @@ -149,34 +181,48 @@ def _parse_timing_files(timing_files): data = {} for 
timing_file in timing_files: timing = _read_timing_file(timing_file) - logger.debug('ntasks: %s' % "; ".join([str(k) + ":" + - str(timing[k]['ntasks']) - for k in timing.keys()])) - logger.debug('cost: %s' % "; ".join([str(k) + ":" + - str(timing[k]['cost']) - for k in timing.keys()])) + logger.debug( + "ntasks: %s" + % "; ".join( + [str(k) + ":" + str(timing[k]["ntasks"]) for k in timing.keys()] + ) + ) + logger.debug( + "cost: %s" + % "; ".join([str(k) + ":" + str(timing[k]["cost"]) for k in timing.keys()]) + ) for key in timing: if key not in data: - data[key] = {'cost':[], 'ntasks':[], 'nthrds':[]} - - if timing[key]['ntasks'] in data[key]['ntasks']: - logger.warning('WARNING: duplicate timing run data in %s ' - 'for %s ntasks=%d.', timing_file, key, - timing[key]['ntasks']) - index = data[key]['ntasks'].index(timing[key]['ntasks']) - logger.warning('Existing value: cost=%s. Ignoring new value: ' - 'cost=%s', data[key]['cost'][index], - timing[key]['cost']) - elif 'name' in data[key] and data[key]['name'] != timing[key]['name']: - expect(False, "Timing files have inconsistant model components {} has {} vs {}" - .format(key, data[key]['name'], timing[key]['name'])) + data[key] = {"cost": [], "ntasks": [], "nthrds": []} + + if timing[key]["ntasks"] in data[key]["ntasks"]: + logger.warning( + "WARNING: duplicate timing run data in %s " "for %s ntasks=%d.", + timing_file, + key, + timing[key]["ntasks"], + ) + index = data[key]["ntasks"].index(timing[key]["ntasks"]) + logger.warning( + "Existing value: cost=%s. Ignoring new value: " "cost=%s", + data[key]["cost"][index], + timing[key]["cost"], + ) + elif "name" in data[key] and data[key]["name"] != timing[key]["name"]: + expect( + False, + "Timing files have inconsistent model components {} has {} vs {}".format( + key, data[key]["name"], timing[key]["name"] + ), + ) else: - data[key]['name'] = timing[key]['name'] - data[key]['cost'].append(timing[key]['cost']) - data[key]['ntasks'].append(timing[key]['ntasks']) - data[key]['nthrds'].append(timing[key]['nthrds']) + data[key]["name"] = timing[key]["name"] + data[key]["cost"].append(timing[key]["cost"]) + data[key]["ntasks"].append(timing[key]["ntasks"]) + data[key]["nthrds"].append(timing[key]["nthrds"]) return data + def _set_blocksizes(data, blocksizes): """ Set blocksizes according to command line arguments.
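+ Precedence: --blocksize-XXX for that component, then --blocksize, then any + value already present in the data, then DEFAULT_BLOCKSIZE.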
@@ -186,9 +232,10 @@ def _set_blocksizes(data, blocksizes): for key in COMPONENT_LIST: if key in data: if key in blocksizes: - data[key]['blocksize'] = blocksizes[key] - elif 'blocksize' not in data[key]: - data[key]['blocksize'] = DEFAULT_BLOCKSIZE + data[key]["blocksize"] = blocksizes[key] + elif "blocksize" not in data[key]: + data[key]["blocksize"] = DEFAULT_BLOCKSIZE + def _read_timing_file(filename): """ @@ -201,19 +248,19 @@ def _read_timing_file(filename): } """ - logger.info('Reading timing file %s', filename) + logger.info("Reading timing file %s", filename) try: timing_file = open(filename, "r") timing_lines = timing_file.readlines() timing_file.close() - except Exception, e: + except Exception as e: logger.critical("Unable to open file %s", filename) raise e models = {} for line in timing_lines: # Get number of tasks and thrds # atm = xatm 8 0 8 x 1 1 (1 ) - #(\w+) = (\w+) \s+ \d+ \s+ \d+ \s+ (\d+)\s+ x\s+(\d+) + # (\w+) = (\w+) \s+ \d+ \s+ \d+ \s+ (\d+)\s+ x\s+(\d+) m = re.search(r"(\w+) = (\w+)\s+\d+\s+\d+\s+(\d+)\s+x\s+(\d+)", line) if m: component = m.groups()[0].upper() @@ -221,75 +268,90 @@ def _read_timing_file(filename): ntasks = int(m.groups()[2]) nthrds = int(m.groups()[3]) if component in models: - models[component]['ntasks'] = ntasks - models[component]['nthrds'] = nthrds - models[component]['name'] = name + models[component]["ntasks"] = ntasks + models[component]["nthrds"] = nthrds + models[component]["name"] = name else: - models[component] = {'name':name,'ntasks':ntasks, 'nthrds':nthrds} + models[component] = {"name": name, "ntasks": ntasks, "nthrds": nthrds} continue # get cost # ATM Run Time: 17.433 seconds 1.743 seconds/mday - #(\w+)Run Time: \s \d+.\d+ seconds \s+(\d+.\d+) seconds/mday - m = re.search(r"(\w+) Run Time:\s+(\d+\.\d+) seconds \s+(\d+\.\d+)" - " seconds/mday", line) + # (\w+)Run Time: \s \d+.\d+ seconds \s+(\d+.\d+) seconds/mday + m = re.search( + r"(\w+) Run Time:\s+(\d+\.\d+) seconds \s+(\d+\.\d+)" " seconds/mday", line + ) if m: component = m.groups()[0] cost = float(m.groups()[1]) if component != "TOT": if component in models: - models[component]['cost'] = cost + models[component]["cost"] = cost else: - models[component] = {'cost':cost} + models[component] = {"cost": cost} return models + ################################################################################ -def load_balancing_solve(test_id, test_root, timing_dir, blocksizes, total_tasks, layout, graph_models, print_models, pe_output, json_output, json_input): -################################################################################ +def load_balancing_solve( + test_id, + test_root, + timing_dir, + blocksizes, + total_tasks, + layout, + graph_models, + print_models, + pe_output, + json_output, + json_input, +): + ################################################################################ if json_input is not None: # All data is read from given json file with open(json_input, "r") as jsonfile: try: data = json.load(jsonfile) - except ValueError, e: + except ValueError as e: logger.critical("Unable to parse json file %s", jsonfile) raise e # layout, totaltasks, blocksizes may already be set by json file # but can be overriden by options if layout is not None: - data['layout'] = layout + data["layout"] = layout if total_tasks is not None: - data['totaltasks'] = total_tasks + data["totaltasks"] = total_tasks else: # find and parse timing files - timing_files = _locate_timing_files(test_root, - test_id, - timing_dir) + timing_files = _locate_timing_files(test_root, test_id, 
timing_dir) expect(len(timing_files) > 0, "No timing data found") data = _parse_timing_files(timing_files) - data['totaltasks'] = total_tasks + data["totaltasks"] = total_tasks if layout is None: # try to determine layout automatically - if 'ATM' in data and 'OCN' in data and 'WAV' in data: - aname = data['ATM']['name'] - oname = data['OCN']['name'] - wname = data['WAV']['name'] - if aname not in ('DATM', 'XATM', 'SATM') and \ - oname not in ('DOCN', 'XOCN', 'SOCN'): - if wname in ('DWAV', 'XWAV', 'SWAV'): - data['layout'] = "IceLndAtmOcn" + if "ATM" in data and "OCN" in data and "WAV" in data: + aname = data["ATM"]["name"] + oname = data["OCN"]["name"] + wname = data["WAV"]["name"] + if aname not in ("DATM", "XATM", "SATM") and oname not in ( + "DOCN", + "XOCN", + "SOCN", + ): + if wname in ("DWAV", "XWAV", "SWAV"): + data["layout"] = "IceLndAtmOcn" else: - data['layout'] = "IceLndWavAtmOcn" + data["layout"] = "IceLndWavAtmOcn" - logger.info("Using layout = {}".format(data['layout'])) + logger.info("Using layout = {}".format(data["layout"])) else: expect(False, "Could not automatically determine layout") else: - data['layout'] = layout + data["layout"] = layout _set_blocksizes(data, blocksizes) @@ -311,14 +373,15 @@ def load_balancing_solve(test_id, test_root, timing_dir, blocksizes, total_tasks else: opt.write_timings(fd=None, level=logging.DEBUG) - logger.info("Solving Mixed Integer Linear Program using PuLP interface to " - "COIN-CBC") + logger.info( + "Solving Mixed Integer Linear Program using PuLP interface to " "COIN-CBC" + ) status = opt.optimize() logger.info("PuLP solver status: " + opt.get_state_string(status)) solution = opt.get_solution() for k in sorted(solution): - if k[0] == 'N': + if k[0] == "N": logger.info("%s = %d", k, solution[k]) else: logger.info("%s = %f", k, solution[k]) @@ -328,12 +391,40 @@ def load_balancing_solve(test_id, test_root, timing_dir, blocksizes, total_tasks return 0 + ############################################################################### def _main_func(description): -############################################################################### - test_id, test_root, timing_dir, blocksizes, total_tasks, layout, graph_models, print_models, pe_output, json_output, json_input = parse_command_line(sys.argv, description) + ############################################################################### + ( + test_id, + test_root, + timing_dir, + blocksizes, + total_tasks, + layout, + graph_models, + print_models, + pe_output, + json_output, + json_input, + ) = parse_command_line(sys.argv, description) + + sys.exit( + load_balancing_solve( + test_id, + test_root, + timing_dir, + blocksizes, + total_tasks, + layout, + graph_models, + print_models, + pe_output, + json_output, + json_input, + ) + ) - sys.exit(load_balancing_solve(test_id, test_root, timing_dir, blocksizes, total_tasks, layout, graph_models, print_models, pe_output, json_output, json_input)) ############################################################################### diff --git a/tools/load_balancing_tool/load_balancing_submit.py b/tools/load_balancing_tool/load_balancing_submit.py index a023e306ae9..98cf2391b59 100755 --- a/tools/load_balancing_tool/load_balancing_submit.py +++ b/tools/load_balancing_tool/load_balancing_submit.py @@ -9,9 +9,9 @@ try: from Tools.standard_script_setup import * -except ImportError, e: - print 'Error importing Tools.standard_script_setup' - print 'May need to add cime/scripts to PYTHONPATH\n' +except ImportError as e: + print("Error importing 
Tools.standard_script_setup") + print("May need to add cime/scripts to PYTHONPATH\n") raise ImportError(e) from CIME.utils import expect, get_full_test_name @@ -26,19 +26,19 @@ # Default CIME variables, these can be overridden using the # --extra-options-file option CIME_DEFAULTS = { - 'STOP_OPTION':'ndays', - 'STOP_N':'10', - 'REST_OPTION':'never', - 'DOUT_S':'FALSE', - 'COMP_RUN_BARRIERS':'TRUE', - 'TIMER_LEVEL':'9' + "STOP_OPTION": "ndays", + "STOP_N": "10", + "REST_OPTION": "never", + "DOUT_S": "FALSE", + "COMP_RUN_BARRIERS": "TRUE", + "TIMER_LEVEL": "9", } -DEFAULT_TESTID = 'lbt' +DEFAULT_TESTID = "lbt" ############################################################################### def parse_command_line(args, description): -############################################################################### + ############################################################################### help_str = """ Requires a pes xml file listing the timing runs you will submit and their corresponding pe layouts. Use the 'pesize' tag to name each run. @@ -163,55 +163,82 @@ def parse_command_line(args, description):
""" - parser = argparse.ArgumentParser(usage=help_str, - description=description, - formatter_class=argparse.ArgumentDefaultsHelpFormatter) + parser = argparse.ArgumentParser( + usage=help_str, + description=description, + formatter_class=argparse.ArgumentDefaultsHelpFormatter, + ) CIME.utils.setup_standard_logging_options(parser) # Required arguments - parser.add_argument('--compset', - help='Specify compset', required=True) - parser.add_argument('--res', - help='Specify resolution', required=True) - parser.add_argument('--pesfile', required=True) + parser.add_argument("--compset", help="Specify compset", required=True) + parser.add_argument("--res", help="Specify resolution", required=True) + parser.add_argument("--pesfile", required=True) # Optional pass-through arguments to create_newcase - parser.add_argument('--compiler', help='Choose compiler to build with') + parser.add_argument("--compiler", help="Choose compiler to build with") - parser.add_argument('--project', help='Specify project id') + parser.add_argument("--project", help="Specify project id") - parser.add_argument('--machine', help='machine name') + parser.add_argument("--machine", help="machine name") - parser.add_argument('--mpilib', help='mpi library name') + parser.add_argument("--mpilib", help="mpi library name") - parser.add_argument("-r", "--test-root", - help="Where test cases will be created." - " Will default to output root as defined in the config_machines file") + parser.add_argument( + "-r", + "--test-root", + help="Where test cases will be created." + " Will default to output root as defined in the config_machines file", + ) - parser.add_argument('--extra-options-file', - help='file listing options to be run using xmlchange') - parser.add_argument('--test-id', default=DEFAULT_TESTID, - help='test-id to use for all timing runs') - parser.add_argument('--force-purge', action='store_true') + parser.add_argument( + "--extra-options-file", help="file listing options to be run using xmlchange" + ) + parser.add_argument( + "--test-id", default=DEFAULT_TESTID, help="test-id to use for all timing runs" + ) + parser.add_argument("--force-purge", action="store_true") args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - return (args.compset, args.res, args.pesfile, args.mpilib, - args.compiler, args.project, args.machine, args.extra_options_file, - args.test_id, args.force_purge, args.test_root) + return ( + args.compset, + args.res, + args.pesfile, + args.mpilib, + args.compiler, + args.project, + args.machine, + args.extra_options_file, + args.test_id, + args.force_purge, + args.test_root, + ) + ################################################################################ -def load_balancing_submit(compset, res, pesfile, mpilib, compiler, project, machine, - extra_options_file, test_id, force_purge, test_root): -################################################################################ +def load_balancing_submit( + compset, + res, + pesfile, + mpilib, + compiler, + project, + machine, + extra_options_file, + test_id, + force_purge, + test_root, +): + ################################################################################ # Read in list of pes from given file - expect(os.access(pesfile, os.R_OK), 'ERROR: File %s not found', pesfile) + expect(os.access(pesfile, os.R_OK), "ERROR: File %s not found", pesfile) - logger.info('Reading XML file %s. Searching for pesize entries:', pesfile) + logger.info("Reading XML file %s. 
Searching for pesize entries:", pesfile) try: pesobj = Pes(pesfile) except ParseError: - expect(False, 'ERROR: File %s not parseable', pesfile) + expect(False, "ERROR: File %s not parseable", pesfile) pesize_list = [] grid_nodes = pesobj.get_children("grid") @@ -220,58 +247,72 @@ def load_balancing_submit(compset, res, pesfile, mpilib, compiler, project, mach for mnode in mach_nodes: pes_nodes = pesobj.get_children("pes", root=mnode) for pnode in pes_nodes: - pesize = pesobj.get(pnode, 'pesize') + pesize = pesobj.get(pnode, "pesize") if not pesize: - logger.critical('No pesize for pes node in file %s', pesfile) + logger.critical("No pesize for pes node in file %s", pesfile) if pesize in pesize_list: - logger.critical('pesize %s duplicated in file %s', pesize, pesfile) + logger.critical("pesize %s duplicated in file %s", pesize, pesfile) pesize_list.append(pesize) - expect(pesize_list, 'ERROR: No grid entries found in pes file {}'.format(pesfile)) + expect(pesize_list, "ERROR: No grid entries found in pes file {}".format(pesfile)) machobj = Machines(machine=machine) if test_root is None: test_root = machobj.get_value("CIME_OUTPUT_ROOT") if machine is None: machine = machobj.get_machine_name() - print "machine is {}".format(machine) + print("machine is {}".format(machine)) if compiler is None: compiler = machobj.get_default_compiler() - print "compiler is {}".format(compiler) + print("compiler is {}".format(compiler)) if mpilib is None: - mpilib = machobj.get_default_MPIlib({"compiler":compiler}) - - - + mpilib = machobj.get_default_MPIlib({"compiler": compiler}) test_names = [] for i in xrange(len(pesize_list)): - test_names.append(get_full_test_name("PFS_I{}".format(i),grid=res, compset=compset, - machine=machine, compiler=compiler)) + test_names.append( + get_full_test_name( + "PFS_I{}".format(i), + grid=res, + compset=compset, + machine=machine, + compiler=compiler, + ) + ) casedir = os.path.join(test_root, test_names[-1] + "." + test_id) - print "casedir is {}".format(casedir) + print("casedir is {}".format(casedir)) if os.path.isdir(casedir): if force_purge: - logger.info('Removing directory %s', casedir) + logger.info("Removing directory %s", casedir) shutil.rmtree(casedir) else: - expect(False, - "casedir {} already exists, use the --force-purge option, --test-root or" - " --test-id options".format(casedir)) - - tests = TestScheduler(test_names, no_setup = True, - compiler=compiler, machine_name=machine, mpilib=mpilib, - test_root=test_root, test_id=test_id, project=project) + expect( + False, + "casedir {} already exists, use the --force-purge option, --test-root or" + " --test-id options".format(casedir), + ) + + tests = TestScheduler( + test_names, + no_setup=True, + compiler=compiler, + machine_name=machine, + mpilib=mpilib, + test_root=test_root, + test_id=test_id, + project=project, + ) success = tests.run_tests(wait=True) expect(success, "Error in creating cases") testnames = [] for test in tests.get_testnames(): - testname = os.path.join(test_root, test + "." + test_id) - testnames.append( testname) + testname = os.path.join(test_root, test + "." 
+ test_id) + testnames.append(testname) logger.info("test is {}".format(testname)) with Case(testname) as case: - pes_ntasks, pes_nthrds, pes_rootpe, _, _, _ = \ - pesobj.find_pes_layout('any', 'any', 'any', pesize_opts=pesize_list.pop(0)) + pes_ntasks, pes_nthrds, pes_rootpe, _, _, _ = pesobj.find_pes_layout( + "any", "any", "any", pesize_opts=pesize_list.pop(0) + ) for key in pes_ntasks: case.set_value(key, pes_ntasks[key]) for key in pes_nthrds: @@ -281,41 +322,75 @@ def load_balancing_submit(compset, res, pesfile, mpilib, compiler, project, mach if extra_options_file is not None: try: - extras = open(extra_options_file, 'r') + extras = open(extra_options_file, "r") for line in extras.readlines(): - split = line.split('=') + split = line.split("=") if len(split) == 2: - logger.info('setting %s=%s', split[0], split[1]) + logger.info("setting %s=%s", split[0], split[1]) case.set_value(split[0], split[1]) else: - logger.debug('ignoring line in {}: {}'.format( - extra_options_file, line)) + logger.debug( + "ignoring line in {}: {}".format( + extra_options_file, line + ) + ) extras.close() except IOError: - expect(False, "ERROR: Could not read file {}".format(extra_options_file)) - - - tests = TestScheduler(test_names, use_existing=True, test_root=test_root, test_id=test_id) + expect( + False, + "ERROR: Could not read file {}".format(extra_options_file), + ) + + tests = TestScheduler( + test_names, use_existing=True, test_root=test_root, test_id=test_id + ) success = tests.run_tests(wait=False) expect(success, "Error in running cases") # need to fix - logger.info('Timing jobs submitted. After jobs completed, run to optimize ' - 'pe layout:\n load_balancing_solve --test-id {} --test-root {}'. - format(test_id, test_root)) + logger.info( + "Timing jobs submitted. After jobs completed, run to optimize " + "pe layout:\n load_balancing_solve --test-id {} --test-root {}".format( + test_id, test_root + ) + ) + ############################################################################### def _main_func(description): -############################################################################### - compset, res, pesfile, mpilib, compiler, project, machine, extra_options_file, casename_prefix, \ - force_purge, test_root = parse_command_line(sys.argv, description) + ############################################################################### + ( + compset, + res, + pesfile, + mpilib, + compiler, + project, + machine, + extra_options_file, + casename_prefix, + force_purge, + test_root, + ) = parse_command_line(sys.argv, description) + + sys.exit( + load_balancing_submit( + compset, + res, + pesfile, + mpilib, + compiler, + project, + machine, + extra_options_file, + casename_prefix, + force_purge, + test_root, + ) + ) - sys.exit(load_balancing_submit(compset, res, pesfile, mpilib, - compiler, project, machine, - extra_options_file, casename_prefix, - force_purge, test_root)) ############################################################################### -if __name__ == '__main__': +if __name__ == "__main__": _main_func(__doc__) diff --git a/tools/load_balancing_tool/optimize_model.py b/tools/load_balancing_tool/optimize_model.py index 99413129594..f07025ea9be 100644 --- a/tools/load_balancing_tool/optimize_model.py +++ b/tools/load_balancing_tool/optimize_model.py @@ -9,38 +9,41 @@ import operator import importlib from CIME.utils import expect + try: import pulp -except ImportError, e: - sys.stderr.write("pulp library not installed or located. 
" - "Try pip install [--user] pulp\n") +except ImportError as e: + sys.stderr.write( + "pulp library not installed or located. " "Try pip install [--user] pulp\n" + ) raise e logger = logging.getLogger(__name__) + def solver_factory(data): """ load data either from a json file or dictionary """ - expect(data.has_key('totaltasks'),"totaltasks not found in data") + expect(data.has_key("totaltasks"), "totaltasks not found in data") - layout = data['layout'] - sp = layout.rsplit('.', 1) + layout = data["layout"] + sp = layout.rsplit(".", 1) try: if len(sp) > 1: layout_module = importlib.import_module(sp[0]) layout = sp[1] else: import layouts + layout_module = layouts except ImportError: - expect(False,"cannot import %s\n") + expect(False, "cannot import %s\n") try: solverclass = getattr(layout_module, layout) except KeyError: - expect(False, "layout class %s not found in %s\n", - layout, layout_module) + expect(False, "layout class %s not found in %s\n", layout, layout_module) solver = solverclass() @@ -50,34 +53,42 @@ def solver_factory(data): solver.set_data(data) return solver + class ModelData: """ Convert dictionary data entry into usable object """ + def __init__(self, name, model_dict): self.name = name - self.blocksize = model_dict['blocksize'] - self.nthrds = model_dict['nthrds'][0] - ntasks = copy.deepcopy(model_dict['ntasks']) - cost = copy.deepcopy(model_dict['cost']) - assert len(ntasks) == len(cost), "ntasks data not same length as cost for %s" % name + self.blocksize = model_dict["blocksize"] + self.nthrds = model_dict["nthrds"][0] + ntasks = copy.deepcopy(model_dict["ntasks"]) + cost = copy.deepcopy(model_dict["cost"]) + assert len(ntasks) == len(cost), ( + "ntasks data not same length as cost for %s" % name + ) # sort smallest ntasks to largest - tup = zip(*sorted(zip(cost, ntasks), - key=operator.itemgetter(1))) + tup = zip(*sorted(zip(cost, ntasks), key=operator.itemgetter(1))) self.cost = list(tup[0]) self.ntasks = list(tup[1]) for j in self.ntasks: if j > 1 and j % self.blocksize: - logger.warning("WARNING: %s pe %d not divisible by " - "blocksize %d. Results may be invalid\n", - name, j, self.blocksize) + logger.warning( + "WARNING: %s pe %d not divisible by " + "blocksize %d. Results may be invalid\n", + name, + j, + self.blocksize, + ) + class OptimizeModel(object): STATE_UNDEFINED = 0 STATE_UNSOLVED = 1 STATE_SOLVED_OK = 2 STATE_SOLVED_BAD = 3 - states = ['Undefined', 'Unsolved', 'Solved', 'No Solution'] + states = ["Undefined", "Unsolved", "Solved", "No Solution"] def __init__(self): self.models = {} @@ -102,13 +113,12 @@ def set_data(self, data_dict): sets state to STATE_UNSOLVED """ # get deep copy, because we need to divide ntasks by blocksize - self.maxtasks = data_dict['totaltasks'] + self.maxtasks = data_dict["totaltasks"] for key in data_dict: - if isinstance(data_dict[key], dict) and 'ntasks' in data_dict[key]: + if isinstance(data_dict[key], dict) and "ntasks" in data_dict[key]: self.models[key] = ModelData(key, data_dict[key]) - # extrapolate for n=1 and n=maxtasks for m in self.models.values(): m.extrapolated = [False] * len(m.cost) @@ -126,13 +136,15 @@ def set_data(self, data_dict): if m.cost[-2] <= 0.0: factor = 1.0 elif len(m.ntasks) > 1: - factor = (1.0 - m.cost[-1]/m.cost[-2]) / \ - (1.0 - 1. * m.ntasks[-2] / m.ntasks[-1]) + factor = (1.0 - m.cost[-1] / m.cost[-2]) / ( + 1.0 - 1.0 * m.ntasks[-2] / m.ntasks[-1] + ) else: # not much information to go on ... 
for i in range(0, len(m.cost) - 1): - slope = (m.cost[i+1] - m.cost[i]) / (1. * m.ntasks[i+1] - m.ntasks[i]) - self.constraints.append([self.X[tk] - slope * self.X[nk] >= \ - m.cost[i] - slope * m.ntasks[i], - "T%s - %f*N%s >= %f" % \ - (k.lower(), slope, k.lower(), - m.cost[i] - slope * m.ntasks[i])]) + slope = (m.cost[i + 1] - m.cost[i]) / ( + 1.0 * m.ntasks[i + 1] - m.ntasks[i] + ) + self.constraints.append( + [ + self.X[tk] - slope * self.X[nk] + >= m.cost[i] - slope * m.ntasks[i], + "T%s - %f*N%s >= %f" + % ( + k.lower(), + slope, + k.lower(), + m.cost[i] - slope * m.ntasks[i], + ), + ] + ) if slope > 0: - logger.warning("WARNING: Nonconvex cost function for model " - "%s. Review costs to ensure data is correct " - "(--graph_models or --print_models)", k) + logger.warning( + "WARNING: Nonconvex cost function for model " + "%s. Review costs to ensure data is correct " + "(--graph-models or --print-models)", + k, + ) break if slope == 0: @@ -191,8 +217,9 @@ def write_timings(self, fd=sys.stdout, level=logging.DEBUG): Can be used to check that the data provided to the model is reasonable.
Also see graph_costs() """ - assert self.state != self.STATE_UNDEFINED,\ - "set_data() must be called before write_timings()" + assert ( + self.state != self.STATE_UNDEFINED + ), "set_data() must be called before write_timings()" for k in self.models: m = self.models[k] message = "***%s***" % k @@ -204,8 +231,7 @@ def write_timings(self, fd=sys.stdout, level=logging.DEBUG): extra = "" if m.extrapolated[i]: extra = " (extrapolated)" - message = "%4d: %f%s" % \ - (m.ntasks[i], m.cost[i], extra) + message = "%4d: %f%s" % (m.ntasks[i], m.cost[i], extra) if fd is not None: fd.write(message + "\n") logger.log(level, message) @@ -218,8 +244,9 @@ def graph_costs(self): If matplotlib is not available, nothing will happen """ - assert self.state != self.STATE_UNDEFINED,\ - "set_data() must be called before graph_costs()" + assert ( + self.state != self.STATE_UNDEFINED + ), "set_data() must be called before graph_costs()" try: import matplotlib.pyplot as pyplot except ImportError: @@ -230,32 +257,35 @@ def graph_costs(self): nrows = (nplots + 1) / 2 ncols = 2 fig, ax = pyplot.subplots(nrows, ncols) - row = 0; col = 0 + row = 0 + col = 0 for k in self.models: m = self.models[k] p = ax[row, col] - p.loglog(m.ntasks, m.cost, 'k-') + p.loglog(m.ntasks, m.cost, "k-") for i in range(len(m.ntasks)): if not m.extrapolated[i]: - p.plot(m.ntasks[i], m.cost[i], 'bx') + p.plot(m.ntasks[i], m.cost[i], "bx") else: - p.plot(m.ntasks[i], m.cost[i], 'rx') + p.plot(m.ntasks[i], m.cost[i], "rx") p.set_title(m.name) - p.set_xlabel('ntasks') - p.set_ylabel('cost (s/mday)') + p.set_xlabel("ntasks") + p.set_ylabel("cost (s/mday)") p.set_xlim([1, self.maxtasks]) row += 1 if row == nrows: row = 0 col += 1 - fig.suptitle("log-log plot of Cost/mday vs ntasks for designated " - "components.\nPerfectly scalable components would have a " - "straight line. Blue 'X's designate points\nfrom data, " - "red 'X's designate extrapolated data. Areas above the " - "line plots represent\nthe feasible region. Global " - "optimality of solution depends on the convexity of " - "these line plots.\nClose graph to continue on to solve.") + fig.suptitle( + "log-log plot of Cost/mday vs ntasks for designated " + "components.\nPerfectly scalable components would have a " + "straight line. Blue 'X's designate points\nfrom data, " + "red 'X's designate extrapolated data. Areas above the " + "line plots represent\nthe feasible region. Global " + "optimality of solution depends on the convexity of " + "these line plots.\nClose graph to continue on to solve." + ) fig.tight_layout() fig.subplots_adjust(top=0.75) logger.info("close graph window to continue") @@ -289,10 +319,11 @@ def get_solution(self): Return a dictionary of the solution variables, can be overridden. 
Default implementation returns values in self.X """ - assert self.state == self.STATE_SOLVED_OK,\ - "solver failed, no solution available" + assert ( + self.state == self.STATE_SOLVED_OK + ), "solver failed, no solution available" retval = {} - if hasattr(self,'X') and isinstance(self.X, dict): + if hasattr(self, "X") and isinstance(self.X, dict): for k in self.X: retval[k] = self.X[k].varValue return retval @@ -327,28 +358,31 @@ def write_pe_template(self, pefilename, ntasks, nthrds, roots): from distutils.spawn import find_executable from xml.etree import ElementTree as ET from CIME.utils import run_cmd + logger.info("Writing pe node info to %s", pefilename) - root = ET.Element('config_pes') - grid = ET.SubElement(root, 'grid') - grid.set('name', 'any') - mach = ET.SubElement(grid, 'mach') - mach.set('name', 'any') - pes = ET.SubElement(mach, 'pes') - pes.set('compset', 'any') - pes.set('pesize', '') - ntasks_node = ET.SubElement(pes, 'ntasks') + root = ET.Element("config_pes") + grid = ET.SubElement(root, "grid") + grid.set("name", "any") + mach = ET.SubElement(grid, "mach") + mach.set("name", "any") + pes = ET.SubElement(mach, "pes") + pes.set("compset", "any") + pes.set("pesize", "") + ntasks_node = ET.SubElement(pes, "ntasks") for k in ntasks: - node = ET.SubElement(ntasks_node, 'ntasks_' + k) + node = ET.SubElement(ntasks_node, "ntasks_" + k) node.text = str(ntasks[k]) - nthrds_node = ET.SubElement(pes, 'nthrds') + nthrds_node = ET.SubElement(pes, "nthrds") for k in nthrds: - node = ET.SubElement(nthrds_node, 'nthrds_' + k) + node = ET.SubElement(nthrds_node, "nthrds_" + k) node.text = str(nthrds[k]) - rootpe_node = ET.SubElement(pes, 'rootpe') + rootpe_node = ET.SubElement(pes, "rootpe") for k in roots: - node = ET.SubElement(rootpe_node, 'rootpe_' + k) + node = ET.SubElement(rootpe_node, "rootpe_" + k) node.text = str(roots[k]) xmllint = find_executable("xmllint") if xmllint is not None: - run_cmd("%s --format --output %s -" % (xmllint, pefilename), - input_str=ET.tostring(root)) + run_cmd( + "%s --format --output %s -" % (xmllint, pefilename), + input_str=ET.tostring(root), + ) diff --git a/tools/load_balancing_tool/tests/atm_lnd.py b/tools/load_balancing_tool/tests/atm_lnd.py index 55aacc99b68..29b147c30ea 100644 --- a/tools/load_balancing_tool/tests/atm_lnd.py +++ b/tools/load_balancing_tool/tests/atm_lnd.py @@ -1,11 +1,13 @@ import sys, logging import pulp import optimize_model + logger = logging.getLogger(__name__) + class AtmLnd(optimize_model.OptimizeModel): def get_required_components(self): - return ['ATM', 'LND', 'ROF', 'ICE', 'CPL', 'OCN'] + return ["ATM", "LND", "ROF", "ICE", "CPL", "OCN"] def optimize(self): """ @@ -19,20 +21,40 @@ def optimize(self): -- use self.set_state(lpstatus) -- Returns state """ - assert self.state != self.STATE_UNDEFINED,\ - "set_data() must be called before optimize()!" - self.atm = self.models['ATM'] - self.lnd = self.models['LND'] - self.ice = self.models['ICE'] - self.ocn = self.models['OCN'] - self.rof = self.models['ROF'] - self.cpl = self.models['CPL'] - - self.real_variables = ['TotalTime', 'Tice', 'Tlnd', 'Tatm', - 'Tocn', 'Trof', 'Tcpl'] - self.integer_variables = ['NBice', 'NBlnd', 'NBatm', 'NBocn', - 'NBrof', 'NBcpl', 'Nrof', 'Ncpl', - 'Nice', 'Nlnd', 'Natm', 'Nocn', 'N1'] + assert ( + self.state != self.STATE_UNDEFINED + ), "set_data() must be called before optimize()!" 
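+ # Layout summary: ATM, ROF and CPL run in sequence on a shared block of + # N1 tasks, concurrently with LND, ICE and OCN on the remaining + # maxtasks - N1 tasks, so TotalTime is bounded below by each branch's + # summed cost and minimizing it balances the two branches.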
+ self.atm = self.models["ATM"] + self.lnd = self.models["LND"] + self.ice = self.models["ICE"] + self.ocn = self.models["OCN"] + self.rof = self.models["ROF"] + self.cpl = self.models["CPL"] + + self.real_variables = [ + "TotalTime", + "Tice", + "Tlnd", + "Tatm", + "Tocn", + "Trof", + "Tcpl", + ] + self.integer_variables = [ + "NBice", + "NBlnd", + "NBatm", + "NBocn", + "NBrof", + "NBcpl", + "Nrof", + "Ncpl", + "Nice", + "Nlnd", + "Natm", + "Nocn", + "N1", + ] self.X = {} X = self.X self.prob = pulp.LpProblem("Minimize ACME time cost", pulp.LpMinimize) @@ -42,43 +64,76 @@ def optimize(self): for iv in self.integer_variables: X[iv] = pulp.LpVariable(iv, lowBound=1, cat=pulp.LpInteger) - # cost function - self.prob += X['TotalTime'] + self.prob += X["TotalTime"] - #constraints + # constraints self.constraints = [] # Layout-dependent constraints. Choosing another layout to model # will require editing these constraints - self.constraints.append([X['Tatm'] + X['Trof'] + X['Tcpl'] - X['TotalTime'] <= 0, "Tatm + Trof + Tcpl - TotalTime <= 0"]) - self.constraints.append([X['Tlnd'] + X['Tice'] + X['Tocn'] - X['TotalTime'] <= 0, "Tlnd + Tice + Tocn - TotalTime <= 0"]) - - self.constraints.append([X['Natm'] - X['N1'] == 0, - "Natm - N1 <= 0"]) - self.constraints.append([X['Nrof'] - X['N1'] == 0, - "Nrof - N1 <= 0"]) - self.constraints.append([X['Ncpl'] - X['N1'] == 0, - "Ncpl - N1 <= 0"]) - - self.constraints.append([X['Nlnd'] + X['N1'] == self.maxtasks, - "Nlnd + N1 <= MAXN"]) - self.constraints.append([X['Nice'] + X['N1'] == self.maxtasks, - "Nice + N1 <= MAXN"]) - self.constraints.append([X['Nocn'] + X['N1'] == self.maxtasks, - "Nocn + N1 <= MAXN"]) - - self.constraints.append([self.atm.blocksize * X['NBatm'] - X['Natm'] == 0, - "Natm = %d * NBatm" % self.atm.blocksize]) - self.constraints.append([self.ice.blocksize * X['NBice'] - X['Nice'] == 0, - "Nice = %d * NBice" % self.ice.blocksize]) - self.constraints.append([self.lnd.blocksize * X['NBlnd'] - X['Nlnd'] == 0, - "Nlnd = %d * NBlnd" % self.lnd.blocksize]) - self.constraints.append([self.ocn.blocksize * X['NBocn'] - X['Nocn'] == 0, - "Nocn = %d * NBocn" % self.ocn.blocksize]) - self.constraints.append([self.rof.blocksize * X['NBrof'] - X['Nrof'] == 0, - "Nrof = %d * NBrof" % self.rof.blocksize]) - self.constraints.append([self.cpl.blocksize * X['NBcpl'] - X['Ncpl'] == 0, - "Ncpl = %d * NBcpl" % self.cpl.blocksize]) + self.constraints.append( + [ + X["Tatm"] + X["Trof"] + X["Tcpl"] - X["TotalTime"] <= 0, + "Tatm + Trof + Tcpl - TotalTime <= 0", + ] + ) + self.constraints.append( + [ + X["Tlnd"] + X["Tice"] + X["Tocn"] - X["TotalTime"] <= 0, + "Tlnd + Tice + Tocn - TotalTime <= 0", + ] + ) + + self.constraints.append([X["Natm"] - X["N1"] == 0, "Natm - N1 == 0"]) + self.constraints.append([X["Nrof"] - X["N1"] == 0, "Nrof - N1 == 0"]) + self.constraints.append([X["Ncpl"] - X["N1"] == 0, "Ncpl - N1 == 0"]) + + self.constraints.append( + [X["Nlnd"] + X["N1"] == self.maxtasks, "Nlnd + N1 == MAXN"] + ) + self.constraints.append( + [X["Nice"] + X["N1"] == self.maxtasks, "Nice + N1 == MAXN"] + ) + self.constraints.append( + [X["Nocn"] + X["N1"] == self.maxtasks, "Nocn + N1 == MAXN"] + ) +
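+ # Blocksize coupling: each NBxxx counts whole blocks (integer, >= 1), so + # every Nxxx below is constrained to a positive multiple of its + # component's blocksize.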
NBlnd" % self.lnd.blocksize, + ] + ) + self.constraints.append( + [ + self.ocn.blocksize * X["NBocn"] - X["Nocn"] == 0, + "Nocn = %d * NBocn" % self.ocn.blocksize, + ] + ) + self.constraints.append( + [ + self.rof.blocksize * X["NBrof"] - X["Nrof"] == 0, + "Nrof = %d * NBrof" % self.rof.blocksize, + ] + ) + self.constraints.append( + [ + self.cpl.blocksize * X["NBcpl"] - X["Ncpl"] == 0, + "Ncpl = %d * NBcpl" % self.cpl.blocksize, + ] + ) # These are the constraints based on the timing data. # They should be the same no matter what the layout of the components. @@ -92,26 +147,41 @@ def optimize(self): self.prob.solve() self.set_state(self.prob.status) return self.state - def write_pe_file(self, pefilename): """ - Write out a pe_file that can be used to implement the + Write out a pe_file that can be used to implement the optimized layout """ - natm = self.X['Natm'].varValue - nlnd = self.X['Nlnd'].varValue - nice = self.X['Nice'].varValue - nocn = self.X['Nocn'].varValue - ncpl = self.X['Ncpl'].varValue - nrof = self.X['Nrof'].varValue + natm = self.X["Natm"].varValue + nlnd = self.X["Nlnd"].varValue + nice = self.X["Nice"].varValue + nocn = self.X["Nocn"].varValue + ncpl = self.X["Ncpl"].varValue + nrof = self.X["Nrof"].varValue npart = max(natm, nrof, ncpl) - ntasks = {'atm':natm, 'lnd':nldn, 'rof':nrof, 'ice':nice, - 'ocn':nocn, 'glc':1, 'wav':1, 'cpl':ncpl} - roots = {'atm':0, 'lnd':npart, 'rof':0, 'ice':npart, - 'ocn':npart, 'glc':0, 'wav':0, 'cpl':0} + ntasks = { + "atm": natm, + "lnd": nldn, + "rof": nrof, + "ice": nice, + "ocn": nocn, + "glc": 1, + "wav": 1, + "cpl": ncpl, + } + roots = { + "atm": 0, + "lnd": npart, + "rof": 0, + "ice": npart, + "ocn": npart, + "glc": 0, + "wav": 0, + "cpl": 0, + } nthrds = {} - for c in ['atm', 'lnd', 'rof', 'ice', 'ocn', 'glc', 'wav', 'cpl']: + for c in ["atm", "lnd", "rof", "ice", "ocn", "glc", "wav", "cpl"]: nthrds[c] = self.models[c.upper()].nthrds - + self.write_pe_template(pefilename, ntasks, nthrds, roots) diff --git a/tools/load_balancing_tool/tests/example.json b/tools/load_balancing_tool/tests/example.json index 606d01a9ee2..543080af402 100644 --- a/tools/load_balancing_tool/tests/example.json +++ b/tools/load_balancing_tool/tests/example.json @@ -1,13 +1,13 @@ -{ +{ "description" : "Optimize using data available from original load balancing tool. 
The original tool solved the problem using a different model, so we do not expect exact replication: (Original solution: NTASKS_ATM: 1006 NTASKS_ICE: 889 NTASKS_LND: 117 NTASKS_OCN: 18 TOTAL_COST: 28.749 s/mday)", "layout" : "IceLndAtmOcn", "totaltasks" : 1024, "ATM" : { - "ntasks" : [32,64,128,256,512], + "ntasks" : [32,64,128,256,512], "blocksize" : 8, "nthrds" : [1], "cost" : [427.471, 223.332, 119.580, 66.182, 37.769] - }, + }, "OCN" : { "ntasks" : [32,64,128,256,512], "blocksize" : 8, diff --git a/tools/load_balancing_tool/tests/load_balancing_test.py b/tools/load_balancing_tool/tests/load_balancing_test.py index ba28f40fa04..b7866b4689d 100755 --- a/tools/load_balancing_tool/tests/load_balancing_test.py +++ b/tools/load_balancing_tool/tests/load_balancing_test.py @@ -14,60 +14,58 @@ """ try: from Tools.standard_script_setup import * -except ImportError, e: - print 'Error importing Tools.standard_script_setup' - print 'May need to add cime/scripts to PYTHONPATH\n' +except ImportError as e: + print("Error importing Tools.standard_script_setup") + print("May need to add cime/scripts to PYTHONPATH\n") raise ImportError(e) try: import optimize_model -except ImportError, e: - print 'Error importing optimize_model' - print 'May need to add cime/tools/load_balancing_tool to PYTHONPATH\n' +except ImportError as e: + print("Error importing optimize_model") + print("May need to add cime/tools/load_balancing_tool to PYTHONPATH\n") raise ImportError(e) - from CIME.utils import run_cmd_no_fail, get_full_test_name from CIME.XML.machines import Machines from CIME.XML import pes import unittest, json, tempfile, sys, re, copy -SCRIPT_DIR = CIME.utils.get_scripts_root() +SCRIPT_DIR = CIME.utils.get_scripts_root() MACHINE = Machines() CODE_DIR = os.path.join(SCRIPT_DIR, "..", "tools", "load_balancing_tool") -TEST_DIR = os.path.join(SCRIPT_DIR, "..", "tools", "load_balancing_tool", - "tests") +TEST_DIR = os.path.join(SCRIPT_DIR, "..", "tools", "load_balancing_tool", "tests") X_OPTIONS = """ STOP_N=1 """ JSON_DICT = { - "description" : "Optimize using data available from original load balancing tool. The original tool solved the problem using a different model, so we do not expect exact replication: (Original solution: NTASKS_ATM: 1006 NTASKS_ICE: 889 NTASKS_LND: 117 NTASKS_OCN: 18 TOTAL_COST: 28.749 s/mday)", - "layout" : "IceLndAtmOcn", - "totaltasks" : 1024, - "ATM" : { - "ntasks" : [32,64,128,256,512], - "blocksize" : 8, - "nthrds" : [1], - "cost" : [427.471, 223.332, 119.580, 66.182, 37.769] - }, - "OCN" : { - "ntasks" : [32,64,128,256,512], - "blocksize" : 8, - "nthrds" : [1], - "cost" : [ 15.745, 7.782, 4.383, 3.181, 2.651] - }, - "LND" : { - "ntasks" : [32,64,128,256,512], - "blocksize" : 8, - "nthrds" : [1], - "cost" : [ 4.356, 2.191, 1.191, 0.705, 0.560] - }, - "ICE" : { - "ntasks" : [32,64,160,320,640], - "blocksize" : 8, - "nthrds" : [1], - "cost" : [8.018, 4.921, 2.368, 1.557, 1.429] - } + "description": "Optimize using data available from original load balancing tool. 
The original tool solved the problem using a different model, so we do not expect exact replication: (Original solution: NTASKS_ATM: 1006 NTASKS_ICE: 889 NTASKS_LND: 117 NTASKS_OCN: 18 TOTAL_COST: 28.749 s/mday)", + "layout": "IceLndAtmOcn", + "totaltasks": 1024, + "ATM": { + "ntasks": [32, 64, 128, 256, 512], + "blocksize": 8, + "nthrds": [1], + "cost": [427.471, 223.332, 119.580, 66.182, 37.769], + }, + "OCN": { + "ntasks": [32, 64, 128, 256, 512], + "blocksize": 8, + "nthrds": [1], + "cost": [15.745, 7.782, 4.383, 3.181, 2.651], + }, + "LND": { + "ntasks": [32, 64, 128, 256, 512], + "blocksize": 8, + "nthrds": [1], + "cost": [4.356, 2.191, 1.191, 0.705, 0.560], + }, + "ICE": { + "ntasks": [32, 64, 160, 320, 640], + "blocksize": 8, + "nthrds": [1], + "cost": [8.018, 4.921, 2.368, 1.557, 1.429], + }, } PES_XML = """ @@ -152,72 +150,90 @@ ############################################################################### def _main_func(description): -############################################################################### + ############################################################################### unittest.main(verbosity=2, catchbreak=True) + ############################################################################### + class LoadBalanceTests(unittest.TestCase): def _check_solution(self, output, var, val): """ Utility function, checks output of milp solve to make sure solution value is expected """ - pattern = var + ' = (\d+)' + pattern = var + " = (\d+)" m = re.search(pattern, output) if not m: self.fail("pattern '%s' not found in output" % (pattern)) check = int(m.groups()[0]) self.assertTrue(check == val, "%s = %d, expected %d" % (var, check, val)) - def test_pulp(self): try: import pulp - except ImportError, e: + except ImportError as e: self.fail("ERROR: pulp not found. 
Install or set PYTHONPATH") - x = pulp.LpVariable('x') - p = pulp.LpProblem('p', pulp.LpMinimize) + x = pulp.LpVariable("x") + p = pulp.LpProblem("p", pulp.LpMinimize) p.solve() self.assertTrue(p.status == 1, "ERROR: simple pulp solve failed") - def test_read_and_write_json(self): "Solve from json file, writing to new json file, solve from new file" - with tempfile.NamedTemporaryFile('w+') as jsonfile1, tempfile.NamedTemporaryFile('w+') as jsonfile2: + with tempfile.NamedTemporaryFile( + "w+" + ) as jsonfile1, tempfile.NamedTemporaryFile("w+") as jsonfile2: json.dump(JSON_DICT, jsonfile1) jsonfile1.flush() - cmd = "./load_balancing_solve.py --json-input %s --json-output %s" % (jsonfile1.name, jsonfile2.name) + cmd = "./load_balancing_solve.py --json-input %s --json-output %s" % ( + jsonfile1.name, + jsonfile2.name, + ) output = run_cmd_no_fail(cmd, from_dir=CODE_DIR) self._check_solution(output, "NTASKS_ATM", 992) cmd = "./load_balancing_solve.py --json-input %s" % jsonfile2.name output = run_cmd_no_fail(cmd, from_dir=CODE_DIR) self._check_solution(output, "NTASKS_ATM", 992) - def test_solve_from_timing_dir(self): - cmd = "./load_balancing_solve.py --timing-dir %s --total-tasks 64 --blocksize 2 --layout IceLndAtmOcn" % os.path.join(TEST_DIR, "timing") + cmd = ( + "./load_balancing_solve.py --timing-dir %s --total-tasks 64 --blocksize 2 --layout IceLndAtmOcn" + % os.path.join(TEST_DIR, "timing") + ) output = run_cmd_no_fail(cmd, from_dir=CODE_DIR) self._check_solution(output, "NTASKS_ATM", 62) def test_write_pes(self): - with tempfile.NamedTemporaryFile('w+') as jsonfile1, tempfile.NamedTemporaryFile('w+') as pes_file: + with tempfile.NamedTemporaryFile( + "w+" + ) as jsonfile1, tempfile.NamedTemporaryFile("w+") as pes_file: json.dump(JSON_DICT, jsonfile1) jsonfile1.flush() - cmd = "./load_balancing_solve.py --json-input %s --pe-output %s" % (jsonfile1.name, pes_file.name) + cmd = "./load_balancing_solve.py --json-input %s --pe-output %s" % ( + jsonfile1.name, + pes_file.name, + ) output = run_cmd_no_fail(cmd, from_dir=CODE_DIR) - self.assertTrue(os.access(pes_file.name, os.R_OK), "pesfile %s not written" % pes_file.name) + self.assertTrue( + os.access(pes_file.name, os.R_OK), + "pesfile %s not written" % pes_file.name, + ) pesobj = CIME.XML.pes.Pes(pes_file.name) - pes_ntasks, pes_nthrds, pes_rootpe, _, _, _ = \ - pesobj.find_pes_layout('any', 'any', 'any', '') - self.assertTrue(pes_ntasks['NTASKS_ATM']==992) - + pes_ntasks, pes_nthrds, pes_rootpe, _, _, _ = pesobj.find_pes_layout( + "any", "any", "any", "" + ) + self.assertTrue(pes_ntasks["NTASKS_ATM"] == 992) def test_set_blocksize_atm(self): - cmd = "./load_balancing_solve.py --timing-dir %s --total-tasks 64 --blocksize 2 --blocksize-atm 4 --layout IceLndAtmOcn" % os.path.join(TEST_DIR, "timing") + cmd = ( + "./load_balancing_solve.py --timing-dir %s --total-tasks 64 --blocksize 2 --blocksize-atm 4 --layout IceLndAtmOcn" + % os.path.join(TEST_DIR, "timing") + ) output = run_cmd_no_fail(cmd, from_dir=CODE_DIR) self._check_solution(output, "NTASKS_ATM", 60) self._check_solution(output, "NBLOCKS_ATM", 15) @@ -227,13 +243,15 @@ def test_set_blocksize_atm(self): def test_graph_models(self): try: import matplotlib - except ImportError, e: + except ImportError as e: self.skipTest("matplotlib not found") - with tempfile.NamedTemporaryFile('w+') as jsonfile: + with tempfile.NamedTemporaryFile("w+") as jsonfile: json.dump(JSON_DICT, jsonfile) jsonfile.flush() - cmd = "./load_balancing_solve.py --json-input %s --graph-models" % (jsonfile.name) + 
cmd = "./load_balancing_solve.py --json-input %s --graph-models" % ( + jsonfile.name + ) output = run_cmd_no_fail(cmd, from_dir=CODE_DIR) self._check_solution(output, "NTASKS_ATM", 992) @@ -242,56 +260,76 @@ def test_xcase_submit(self): machine = MACHINE.get_machine_name() compiler = MACHINE.get_default_compiler() - test_name = get_full_test_name("PFS_I0",grid="f19_g16", compset="X", - machine=machine, compiler=compiler) - expected_dir = os.path.join(test_root, - "{}.test_lbt".format(test_name), - "timing") + test_name = get_full_test_name( + "PFS_I0", grid="f19_g16", compset="X", machine=machine, compiler=compiler + ) + expected_dir = os.path.join( + test_root, "{}.test_lbt".format(test_name), "timing" + ) if not os.path.isdir(expected_dir): - with tempfile.NamedTemporaryFile('w+') as tfile, tempfile.NamedTemporaryFile('w+') as xfile: + with tempfile.NamedTemporaryFile( + "w+" + ) as tfile, tempfile.NamedTemporaryFile("w+") as xfile: tfile.write(PES_XML) tfile.flush() xfile.write(X_OPTIONS) xfile.flush() - cmd = "./load_balancing_submit.py --pesfile {} --res f19_g16 --compset X --test-id test_lbt --extra-options-file {} --test-root {}".format(tfile.name, xfile.name, test_root) + cmd = "./load_balancing_submit.py --pesfile {} --res f19_g16 --compset X --test-id test_lbt --extra-options-file {} --test-root {}".format( + tfile.name, xfile.name, test_root + ) if MACHINE.has_batch_system(): - sys.stdout.write("Jobs will be submitted to queue. Rerun " - "load_balancing_test.py after jobs have " - "finished.") + sys.stdout.write( + "Jobs will be submitted to queue. Rerun " + "load_balancing_test.py after jobs have " + "finished." + ) else: cmd += " --force-purge" output = run_cmd_no_fail(cmd, from_dir=CODE_DIR) - self.assertTrue(output.find("Timing jobs submitted") >= 0, - "Expected 'Timing jobs submitted' in output") + self.assertTrue( + output.find("Timing jobs submitted") >= 0, + "Expected 'Timing jobs submitted' in output", + ) if os.path.isdir(expected_dir): - cmd = "./load_balancing_solve.py --total-tasks 32 --blocksize 1 --test-id test_lbt --print-models --test-root {} --layout IceLndAtmOcn".format(test_root) + cmd = "./load_balancing_solve.py --total-tasks 32 --blocksize 1 --test-id test_lbt --print-models --test-root {} --layout IceLndAtmOcn".format( + test_root + ) output = run_cmd_no_fail(cmd, from_dir=CODE_DIR) - self.assertTrue(output.find("***ATM***") > 0, - "--print-models failed to print ATM data") + self.assertTrue( + output.find("***ATM***") > 0, "--print-models failed to print ATM data" + ) self._check_solution(output, "NTASKS_ATM", 31) def test_use_atm_lnd(self): "Solve layout atm_lnd from json file" - with tempfile.NamedTemporaryFile('w+') as jsonfile1: + with tempfile.NamedTemporaryFile("w+") as jsonfile1: atmlnd_dict = copy.deepcopy(JSON_DICT) # Fake data for ROF, CPL - atmlnd_dict['ROF'] = {"ntasks" : [32,64,128,256], - "blocksize" : 8, - "nthrds" : [1], - "cost" : [8.0, 4.0, 2.0, 1.0]} - atmlnd_dict['CPL'] = {"ntasks" : [32,64,128,256], - "blocksize" : 8, - "nthrds" : [1], - "cost" : [8.0, 4.0, 2.0, 1.0]} + atmlnd_dict["ROF"] = { + "ntasks": [32, 64, 128, 256], + "blocksize": 8, + "nthrds": [1], + "cost": [8.0, 4.0, 2.0, 1.0], + } + atmlnd_dict["CPL"] = { + "ntasks": [32, 64, 128, 256], + "blocksize": 8, + "nthrds": [1], + "cost": [8.0, 4.0, 2.0, 1.0], + } json.dump(atmlnd_dict, jsonfile1) jsonfile1.flush() - cmd = "./load_balancing_solve.py --json-input %s --print-models --layout tests.atm_lnd.AtmLnd" % (jsonfile1.name) + cmd = ( + "./load_balancing_solve.py 
--json-input %s --print-models --layout tests.atm_lnd.AtmLnd" + % (jsonfile1.name) + ) output = run_cmd_no_fail(cmd, from_dir=CODE_DIR) self._check_solution(output, "Natm", 976) - self._check_solution(output, "NBatm", 976/8) + self._check_solution(output, "NBatm", 976 / 8) + -if __name__ == '__main__': +if __name__ == "__main__": _main_func(__doc__) diff --git a/tools/load_balancing_tool/tests/timing/timing_1 b/tools/load_balancing_tool/tests/timing/timing_1 index be66400a530..1e3f13e85c7 100644 --- a/tools/load_balancing_tool/tests/timing/timing_1 +++ b/tools/load_balancing_tool/tests/timing/timing_1 @@ -12,179 +12,176 @@ stop_option : ndays, stop_n = 10 run_length : 10 days (9 for ocean) - component comp_pes root_pe tasks x threads instances (stride) - --------- ------ ------- ------ ------ --------- ------ - cpl = cpl 2 0 2 x 1 1 (1 ) - atm = xatm 2 0 2 x 1 1 (1 ) - lnd = xlnd 2 0 2 x 1 1 (1 ) - ice = xice 2 0 2 x 1 1 (1 ) - ocn = xocn 2 0 2 x 1 1 (1 ) - rof = xrof 2 0 2 x 1 1 (1 ) - glc = xglc 2 0 2 x 1 1 (1 ) - wav = xwav 2 0 2 x 1 1 (1 ) - esp = sesp 8 0 8 x 1 1 (1 ) - - total pes active : 8 - pes per node : 8 - pe count for cost estimate : 8 - - Overall Metrics: - Model Cost: 36.51 pe-hrs/simulated_year - Model Throughput: 5.26 simulated_years/day - - Init Time : 9.675 seconds - Run Time : 450.174 seconds 45.017 seconds/day - Final Time : 0.001 seconds - - Actual Ocn Init Wait Time : 0.000 seconds - Estimated Ocn Init Run Time : 0.000 seconds - Estimated Run Time Correction : 0.000 seconds - (This correction has been applied to the ocean and total run times) - -Runs Time in total seconds, seconds/model-day, and model-years/wall-day -CPL Run Time represents time in CPL pes alone, not including time associated with data exchange with other components - - TOT Run Time: 450.174 seconds 45.017 seconds/mday 5.26 myears/wday - CPL Run Time: 324.956 seconds 32.496 seconds/mday 7.28 myears/wday - ATM Run Time: 20.444 seconds 2.044 seconds/mday 115.79 myears/wday - LND Run Time: 29.597 seconds 2.960 seconds/mday 79.98 myears/wday - ICE Run Time: 45.316 seconds 4.532 seconds/mday 52.24 myears/wday - OCN Run Time: 0.383 seconds 0.038 seconds/mday 6180.48 myears/wday - ROF Run Time: 5.402 seconds 0.540 seconds/mday 438.19 myears/wday - GLC Run Time: 0.000 seconds 0.000 seconds/mday 0.00 myears/wday - WAV Run Time: 0.000 seconds 0.000 seconds/mday 0.00 myears/wday - ESP Run Time: 0.000 seconds 0.000 seconds/mday 0.00 myears/wday - CPL COMM Time: 17.674 seconds 1.767 seconds/mday 133.93 myears/wday - - ----------------- DRIVER TIMING FLOWCHART --------------------- - - NOTE: min:max driver timers (seconds/day): - CPL (pes 0 to 1) - OCN (pes 0 to 1) - LND (pes 0 to 1) - ROF (pes 0 to 1) - ICE (pes 0 to 1) - ATM (pes 0 to 1) - GLC (pes 0 to 1) - WAV (pes 0 to 1) - - CPL:CLOCK_ADVANCE 0.004: 0.005 - CPL:OCNPRE1_BARRIER 0.000: 0.000 - CPL:OCNPRE1 3.571: 3.655 - CPL:OCNPREP_BARRIER 0.000: 0.000 - CPL:OCNPREP 0.009: 0.010 - CPL:C2O_BARRIER <----> 0.000: 0.001 - CPL:C2O <----> 0.010: 0.010 - CPL:LNDPREP_BARRIER 0.003: 0.087 - CPL:LNDPREP 0.405: 0.409 - CPL:C2L_BARRIER <----> 0.013: 0.015 - CPL:C2L <----> 0.299: 0.299 - CPL:ICEPREP_BARRIER 0.000: 0.000 - CPL:ICEPREP 0.958: 0.960 - CPL:C2I_BARRIER <----> 0.025: 0.028 - CPL:C2I <----> 0.439: 0.439 - CPL:ROFPREP_BARRIER 0.000: 0.000 - CPL:ROFPREP 1.751: 1.757 - CPL:C2R_BARRIER <----> 0.032: 0.038 - CPL:C2R <----> 0.119: 0.119 - CPL:ICE_RUN_BARRIER 0.000: 0.000 - CPL:ICE_RUN 3.970: 4.532 - CPL:LND_RUN_BARRIER 0.007: 0.576 - CPL:LND_RUN 2.920: 2.960 - 
CPL:ROF_RUN_BARRIER 0.059: 0.106 - CPL:ROF_RUN 0.522: 0.540 - CPL:ATMOCNP_BARRIER 0.016: 0.026 - CPL:ATMOCNP 10.243: 10.402 - CPL:L2C_BARRIER <----> 0.158: 0.318 - CPL:L2C 2.655: 2.667 - CPL:LNDPOST_BARRIER 0.002: 0.003 - CPL:LNDPOST 0.048: 0.048 - CPL:R2C_BARRIER <----> 0.001: 0.001 - CPL:R2C <----> 0.142: 0.142 - CPL:ROFPOST_BARRIER 0.001: 0.001 - CPL:ROFPOST 5.444: 5.647 - CPL:I2C_BARRIER <----> 0.000: 0.000 - CPL:I2C <----> 0.311: 0.311 - CPL:ICEPOST_BARRIER 0.003: 0.003 - CPL:ICEPOST 0.000: 0.000 - CPL:FRACSET_BARRIER 0.000: 0.000 - CPL:FRACSET 0.368: 0.370 - CPL:ATMPREP_BARRIER 0.004: 0.006 - CPL:ATMPREP 9.441: 9.458 - CPL:C2A_BARRIER <----> 0.040: 0.057 - CPL:C2A <----> 0.082: 0.083 - CPL:OCN_RUN_BARRIER 0.000: 0.000 - CPL:OCN_RUN 0.039: 0.043 - CPL:ATM_RUN_BARRIER 0.002: 0.004 - CPL:ATM_RUN 1.518: 2.044 - CPL:A2C_BARRIER <----> 0.002: 0.537 - CPL:A2C <----> 0.096: 0.098 - CPL:ATMPOST_BARRIER 0.000: 0.002 - CPL:ATMPOST 0.000: 0.000 - CPL:O2C_BARRIER <----> 0.000: 0.000 - CPL:O2C <----> 0.003: 0.003 - CPL:OCNPOST_BARRIER 0.000: 0.000 - CPL:OCNPOST 0.000: 0.000 - CPL:HISTORY_BARRIER 0.000: 0.000 - CPL:HISTORY 0.000: 0.000 - CPL:TSTAMP_WRITE 0.000: 0.000 - CPL:TPROF_WRITE 0.001: 45.013 - CPL:RUN_LOOP_BSTOP 0.000: 0.000 + component comp_pes root_pe tasks x threads instances (stride) + --------- ------ ------- ------ ------ --------- ------ + cpl = cpl 2 0 2 x 1 1 (1 ) + atm = xatm 2 0 2 x 1 1 (1 ) + lnd = xlnd 2 0 2 x 1 1 (1 ) + ice = xice 2 0 2 x 1 1 (1 ) + ocn = xocn 2 0 2 x 1 1 (1 ) + rof = xrof 2 0 2 x 1 1 (1 ) + glc = xglc 2 0 2 x 1 1 (1 ) + wav = xwav 2 0 2 x 1 1 (1 ) + esp = sesp 8 0 8 x 1 1 (1 ) + + total pes active : 8 + pes per node : 8 + pe count for cost estimate : 8 + + Overall Metrics: + Model Cost: 36.51 pe-hrs/simulated_year + Model Throughput: 5.26 simulated_years/day + + Init Time : 9.675 seconds + Run Time : 450.174 seconds 45.017 seconds/day + Final Time : 0.001 seconds + + Actual Ocn Init Wait Time : 0.000 seconds + Estimated Ocn Init Run Time : 0.000 seconds + Estimated Run Time Correction : 0.000 seconds + (This correction has been applied to the ocean and total run times) + +Runs Time in total seconds, seconds/model-day, and model-years/wall-day +CPL Run Time represents time in CPL pes alone, not including time associated with data exchange with other components + + TOT Run Time: 450.174 seconds 45.017 seconds/mday 5.26 myears/wday + CPL Run Time: 324.956 seconds 32.496 seconds/mday 7.28 myears/wday + ATM Run Time: 20.444 seconds 2.044 seconds/mday 115.79 myears/wday + LND Run Time: 29.597 seconds 2.960 seconds/mday 79.98 myears/wday + ICE Run Time: 45.316 seconds 4.532 seconds/mday 52.24 myears/wday + OCN Run Time: 0.383 seconds 0.038 seconds/mday 6180.48 myears/wday + ROF Run Time: 5.402 seconds 0.540 seconds/mday 438.19 myears/wday + GLC Run Time: 0.000 seconds 0.000 seconds/mday 0.00 myears/wday + WAV Run Time: 0.000 seconds 0.000 seconds/mday 0.00 myears/wday + ESP Run Time: 0.000 seconds 0.000 seconds/mday 0.00 myears/wday + CPL COMM Time: 17.674 seconds 1.767 seconds/mday 133.93 myears/wday + + +---------------- DRIVER TIMING FLOWCHART --------------------- + + NOTE: min:max driver timers (seconds/day): + CPL (pes 0 to 1) + OCN (pes 0 to 1) + LND (pes 0 to 1) + ROF (pes 0 to 1) + ICE (pes 0 to 1) + ATM (pes 0 to 1) + GLC (pes 0 to 1) + WAV (pes 0 to 1) + + CPL:CLOCK_ADVANCE 0.004: 0.005 + CPL:OCNPRE1_BARRIER 0.000: 0.000 + CPL:OCNPRE1 3.571: 3.655 + CPL:OCNPREP_BARRIER 0.000: 0.000 + CPL:OCNPREP 0.009: 0.010 + CPL:C2O_BARRIER <----> 0.000: 0.001 + CPL:C2O 
<----> 0.010: 0.010 + CPL:LNDPREP_BARRIER 0.003: 0.087 + CPL:LNDPREP 0.405: 0.409 + CPL:C2L_BARRIER <----> 0.013: 0.015 + CPL:C2L <----> 0.299: 0.299 + CPL:ICEPREP_BARRIER 0.000: 0.000 + CPL:ICEPREP 0.958: 0.960 + CPL:C2I_BARRIER <----> 0.025: 0.028 + CPL:C2I <----> 0.439: 0.439 + CPL:ROFPREP_BARRIER 0.000: 0.000 + CPL:ROFPREP 1.751: 1.757 + CPL:C2R_BARRIER <----> 0.032: 0.038 + CPL:C2R <----> 0.119: 0.119 + CPL:ICE_RUN_BARRIER 0.000: 0.000 + CPL:ICE_RUN 3.970: 4.532 + CPL:LND_RUN_BARRIER 0.007: 0.576 + CPL:LND_RUN 2.920: 2.960 + CPL:ROF_RUN_BARRIER 0.059: 0.106 + CPL:ROF_RUN 0.522: 0.540 + CPL:ATMOCNP_BARRIER 0.016: 0.026 + CPL:ATMOCNP 10.243: 10.402 + CPL:L2C_BARRIER <----> 0.158: 0.318 + CPL:L2C 2.655: 2.667 + CPL:LNDPOST_BARRIER 0.002: 0.003 + CPL:LNDPOST 0.048: 0.048 + CPL:R2C_BARRIER <----> 0.001: 0.001 + CPL:R2C <----> 0.142: 0.142 + CPL:ROFPOST_BARRIER 0.001: 0.001 + CPL:ROFPOST 5.444: 5.647 + CPL:I2C_BARRIER <----> 0.000: 0.000 + CPL:I2C <----> 0.311: 0.311 + CPL:ICEPOST_BARRIER 0.003: 0.003 + CPL:ICEPOST 0.000: 0.000 + CPL:FRACSET_BARRIER 0.000: 0.000 + CPL:FRACSET 0.368: 0.370 + CPL:ATMPREP_BARRIER 0.004: 0.006 + CPL:ATMPREP 9.441: 9.458 + CPL:C2A_BARRIER <----> 0.040: 0.057 + CPL:C2A <----> 0.082: 0.083 + CPL:OCN_RUN_BARRIER 0.000: 0.000 + CPL:OCN_RUN 0.039: 0.043 + CPL:ATM_RUN_BARRIER 0.002: 0.004 + CPL:ATM_RUN 1.518: 2.044 + CPL:A2C_BARRIER <----> 0.002: 0.537 + CPL:A2C <----> 0.096: 0.098 + CPL:ATMPOST_BARRIER 0.000: 0.002 + CPL:ATMPOST 0.000: 0.000 + CPL:O2C_BARRIER <----> 0.000: 0.000 + CPL:O2C <----> 0.003: 0.003 + CPL:OCNPOST_BARRIER 0.000: 0.000 + CPL:OCNPOST 0.000: 0.000 + CPL:HISTORY_BARRIER 0.000: 0.000 + CPL:HISTORY 0.000: 0.000 + CPL:TSTAMP_WRITE 0.000: 0.000 + CPL:TPROF_WRITE 0.001: 45.013 + CPL:RUN_LOOP_BSTOP 0.000: 0.000 More info on coupler timing: - CPL:OCNPRE1 3.571: 3.655 - CPL:ocnpre1_atm2ocn 3.571: 3.655 + CPL:OCNPRE1 3.571: 3.655 + CPL:ocnpre1_atm2ocn 3.571: 3.655 - CPL:OCNPREP 0.009: 0.010 - CPL:ocnprep_avg 0.009: 0.010 + CPL:OCNPREP 0.009: 0.010 + CPL:ocnprep_avg 0.009: 0.010 - CPL:LNDPREP 0.405: 0.409 - CPL:lndprep_atm2lnd 0.105: 0.107 - CPL:lndprep_mrgx2l 0.298: 0.304 + CPL:LNDPREP 0.405: 0.409 + CPL:lndprep_atm2lnd 0.105: 0.107 + CPL:lndprep_mrgx2l 0.298: 0.304 - CPL:ICEPREP 0.958: 0.960 - CPL:iceprep_ocn2ice 0.079: 0.086 - CPL:iceprep_atm2ice 0.247: 0.255 - CPL:iceprep_mrgx2i 0.624: 0.626 + CPL:ICEPREP 0.958: 0.960 + CPL:iceprep_ocn2ice 0.079: 0.086 + CPL:iceprep_atm2ice 0.247: 0.255 + CPL:iceprep_mrgx2i 0.624: 0.626 - CPL:ROFPREP 1.751: 1.757 - CPL:rofprep_l2xavg 0.000: 0.000 - CPL:rofprep_lnd2rof 1.682: 1.692 - CPL:rofprep_mrgx2r 0.064: 0.069 + CPL:ROFPREP 1.751: 1.757 + CPL:rofprep_l2xavg 0.000: 0.000 + CPL:rofprep_lnd2rof 1.682: 1.692 + CPL:rofprep_mrgx2r 0.064: 0.069 - CPL:ATMPREP 9.441: 9.458 - CPL:atmprep_xao2atm 2.299: 2.318 - CPL:atmprep_ocn2atm 1.458: 1.474 - CPL:atmprep_alb2atm 0.955: 0.961 - CPL:atmprep_ice2atm 2.669: 2.714 - CPL:atmprep_lnd2atm 0.835: 0.850 - CPL:atmprep_mrgx2a 1.156: 1.209 + CPL:ATMPREP 9.441: 9.458 + CPL:atmprep_xao2atm 2.299: 2.318 + CPL:atmprep_ocn2atm 1.458: 1.474 + CPL:atmprep_alb2atm 0.955: 0.961 + CPL:atmprep_ice2atm 2.669: 2.714 + CPL:atmprep_lnd2atm 0.835: 0.850 + CPL:atmprep_mrgx2a 1.156: 1.209 - CPL:ATMOCNP 10.243: 10.402 - CPL:atmocnp_ice2ocn 0.204: 0.215 - CPL:atmocnp_fluxo 3.843: 3.855 - CPL:atmocnp_mrgx2o 4.861: 4.932 - CPL:atmocnp_accum 0.706: 0.713 - CPL:atmocnp_ocnalb 0.609: 0.707 + CPL:ATMOCNP 10.243: 10.402 + CPL:atmocnp_ice2ocn 0.204: 0.215 + CPL:atmocnp_fluxo 3.843: 3.855 + CPL:atmocnp_mrgx2o 
4.861: 4.932 + CPL:atmocnp_accum 0.706: 0.713 + CPL:atmocnp_ocnalb 0.609: 0.707 - CPL:OCNPOST 0.000: 0.000 + CPL:OCNPOST 0.000: 0.000 - CPL:LNDPOST 0.048: 0.048 + CPL:LNDPOST 0.048: 0.048 - CPL:rofpost_rof2lnd 0.818: 0.959 - CPL:rofpost_rof2ocn 4.625: 4.689 + CPL:rofpost_rof2lnd 0.818: 0.959 + CPL:rofpost_rof2ocn 4.625: 4.689 - CPL:ICEPOST 0.000: 0.000 - - - - CPL:ATMPOST 0.000: 0.000 + CPL:ICEPOST 0.000: 0.000 + CPL:ATMPOST 0.000: 0.000 diff --git a/tools/load_balancing_tool/tests/timing/timing_2 b/tools/load_balancing_tool/tests/timing/timing_2 index f0420a4e7cd..6bd3b388c42 100644 --- a/tools/load_balancing_tool/tests/timing/timing_2 +++ b/tools/load_balancing_tool/tests/timing/timing_2 @@ -12,179 +12,176 @@ stop_option : ndays, stop_n = 10 run_length : 10 days (9 for ocean) - component comp_pes root_pe tasks x threads instances (stride) - --------- ------ ------- ------ ------ --------- ------ - cpl = cpl 4 0 4 x 1 1 (1 ) - atm = xatm 4 0 4 x 1 1 (1 ) - lnd = xlnd 4 0 4 x 1 1 (1 ) - ice = xice 4 0 4 x 1 1 (1 ) - ocn = xocn 4 0 4 x 1 1 (1 ) - rof = xrof 4 0 4 x 1 1 (1 ) - glc = xglc 4 0 4 x 1 1 (1 ) - wav = xwav 4 0 4 x 1 1 (1 ) - esp = sesp 8 0 8 x 1 1 (1 ) - - total pes active : 8 - pes per node : 8 - pe count for cost estimate : 8 - - Overall Metrics: - Model Cost: 25.80 pe-hrs/simulated_year - Model Throughput: 7.44 simulated_years/day - - Init Time : 5.366 seconds - Run Time : 318.103 seconds 31.810 seconds/day - Final Time : 0.000 seconds - - Actual Ocn Init Wait Time : 0.006 seconds - Estimated Ocn Init Run Time : 0.000 seconds - Estimated Run Time Correction : 0.000 seconds - (This correction has been applied to the ocean and total run times) - -Runs Time in total seconds, seconds/model-day, and model-years/wall-day -CPL Run Time represents time in CPL pes alone, not including time associated with data exchange with other components - - TOT Run Time: 318.103 seconds 31.810 seconds/mday 7.44 myears/wday - CPL Run Time: 230.786 seconds 23.079 seconds/mday 10.26 myears/wday - ATM Run Time: 10.763 seconds 1.076 seconds/mday 219.93 myears/wday - LND Run Time: 15.610 seconds 1.561 seconds/mday 151.64 myears/wday - ICE Run Time: 25.715 seconds 2.571 seconds/mday 92.05 myears/wday - OCN Run Time: 0.200 seconds 0.020 seconds/mday 11835.62 myears/wday - ROF Run Time: 3.775 seconds 0.378 seconds/mday 627.05 myears/wday - GLC Run Time: 0.000 seconds 0.000 seconds/mday 0.00 myears/wday - WAV Run Time: 0.000 seconds 0.000 seconds/mday 0.00 myears/wday - ESP Run Time: 0.000 seconds 0.000 seconds/mday 0.00 myears/wday - CPL COMM Time: 20.367 seconds 2.037 seconds/mday 116.22 myears/wday - - ----------------- DRIVER TIMING FLOWCHART --------------------- - - NOTE: min:max driver timers (seconds/day): - CPL (pes 0 to 3) - OCN (pes 0 to 3) - LND (pes 0 to 3) - ROF (pes 0 to 3) - ICE (pes 0 to 3) - ATM (pes 0 to 3) - GLC (pes 0 to 3) - WAV (pes 0 to 3) - - CPL:CLOCK_ADVANCE 0.004: 0.005 - CPL:OCNPRE1_BARRIER 0.001: 0.001 - CPL:OCNPRE1 2.454: 2.531 - CPL:OCNPREP_BARRIER 0.000: 0.000 - CPL:OCNPREP 0.009: 0.010 - CPL:C2O_BARRIER <----> 0.000: 0.001 - CPL:C2O <----> 0.010: 0.010 - CPL:LNDPREP_BARRIER 0.020: 0.098 - CPL:LNDPREP 0.364: 0.385 - CPL:C2L_BARRIER <----> 0.026: 0.047 - CPL:C2L <----> 0.353: 0.361 - CPL:ICEPREP_BARRIER 0.000: 0.010 - CPL:ICEPREP 0.867: 0.903 - CPL:C2I_BARRIER <----> 0.057: 0.091 - CPL:C2I <----> 0.516: 0.529 - CPL:ROFPREP_BARRIER 0.000: 0.015 - CPL:ROFPREP 1.097: 1.122 - CPL:C2R_BARRIER <----> 0.042: 0.068 - CPL:C2R <----> 0.144: 0.146 - CPL:ICE_RUN_BARRIER 0.001: 0.004 - 
CPL:ICE_RUN 2.072: 2.571 - CPL:LND_RUN_BARRIER 0.036: 0.533 - CPL:LND_RUN 1.514: 1.561 - CPL:ROF_RUN_BARRIER 0.125: 0.177 - CPL:ROF_RUN 0.337: 0.378 - CPL:ATMOCNP_BARRIER 0.040: 0.077 - CPL:ATMOCNP 6.880: 7.078 - CPL:L2C_BARRIER <----> 0.401: 0.598 - CPL:L2C 3.004: 3.072 - CPL:LNDPOST_BARRIER 0.021: 0.027 - CPL:LNDPOST 0.033: 0.034 - CPL:R2C_BARRIER <----> 0.002: 0.004 - CPL:R2C <----> 0.153: 0.159 - CPL:ROFPOST_BARRIER 0.013: 0.018 - CPL:ROFPOST 3.295: 4.113 - CPL:I2C_BARRIER <----> 0.001: 0.002 - CPL:I2C <----> 0.339: 0.352 - CPL:ICEPOST_BARRIER 0.025: 0.038 - CPL:ICEPOST 0.000: 0.000 - CPL:FRACSET_BARRIER 0.001: 0.002 - CPL:FRACSET 0.330: 0.331 - CPL:ATMPREP_BARRIER 0.009: 0.009 - CPL:ATMPREP 6.625: 6.677 - CPL:C2A_BARRIER <----> 0.069: 0.121 - CPL:C2A <----> 0.073: 0.079 - CPL:OCN_RUN_BARRIER 0.000: 0.000 - CPL:OCN_RUN 0.020: 0.022 - CPL:ATM_RUN_BARRIER 0.004: 0.009 - CPL:ATM_RUN 0.718: 1.076 - CPL:A2C_BARRIER <----> 0.043: 0.405 - CPL:A2C <----> 0.091: 0.098 - CPL:ATMPOST_BARRIER 0.002: 0.009 - CPL:ATMPOST 0.000: 0.000 - CPL:O2C_BARRIER <----> 0.000: 0.000 - CPL:O2C <----> 0.003: 0.003 - CPL:OCNPOST_BARRIER 0.000: 0.000 - CPL:OCNPOST 0.000: 0.000 - CPL:HISTORY_BARRIER 0.000: 0.001 - CPL:HISTORY 0.000: 0.000 - CPL:TSTAMP_WRITE 0.000: 0.000 - CPL:TPROF_WRITE 0.002: 31.806 - CPL:RUN_LOOP_BSTOP 0.000: 0.000 + component comp_pes root_pe tasks x threads instances (stride) + --------- ------ ------- ------ ------ --------- ------ + cpl = cpl 4 0 4 x 1 1 (1 ) + atm = xatm 4 0 4 x 1 1 (1 ) + lnd = xlnd 4 0 4 x 1 1 (1 ) + ice = xice 4 0 4 x 1 1 (1 ) + ocn = xocn 4 0 4 x 1 1 (1 ) + rof = xrof 4 0 4 x 1 1 (1 ) + glc = xglc 4 0 4 x 1 1 (1 ) + wav = xwav 4 0 4 x 1 1 (1 ) + esp = sesp 8 0 8 x 1 1 (1 ) + + total pes active : 8 + pes per node : 8 + pe count for cost estimate : 8 + + Overall Metrics: + Model Cost: 25.80 pe-hrs/simulated_year + Model Throughput: 7.44 simulated_years/day + + Init Time : 5.366 seconds + Run Time : 318.103 seconds 31.810 seconds/day + Final Time : 0.000 seconds + + Actual Ocn Init Wait Time : 0.006 seconds + Estimated Ocn Init Run Time : 0.000 seconds + Estimated Run Time Correction : 0.000 seconds + (This correction has been applied to the ocean and total run times) + +Runs Time in total seconds, seconds/model-day, and model-years/wall-day +CPL Run Time represents time in CPL pes alone, not including time associated with data exchange with other components + + TOT Run Time: 318.103 seconds 31.810 seconds/mday 7.44 myears/wday + CPL Run Time: 230.786 seconds 23.079 seconds/mday 10.26 myears/wday + ATM Run Time: 10.763 seconds 1.076 seconds/mday 219.93 myears/wday + LND Run Time: 15.610 seconds 1.561 seconds/mday 151.64 myears/wday + ICE Run Time: 25.715 seconds 2.571 seconds/mday 92.05 myears/wday + OCN Run Time: 0.200 seconds 0.020 seconds/mday 11835.62 myears/wday + ROF Run Time: 3.775 seconds 0.378 seconds/mday 627.05 myears/wday + GLC Run Time: 0.000 seconds 0.000 seconds/mday 0.00 myears/wday + WAV Run Time: 0.000 seconds 0.000 seconds/mday 0.00 myears/wday + ESP Run Time: 0.000 seconds 0.000 seconds/mday 0.00 myears/wday + CPL COMM Time: 20.367 seconds 2.037 seconds/mday 116.22 myears/wday + + +---------------- DRIVER TIMING FLOWCHART --------------------- + + NOTE: min:max driver timers (seconds/day): + CPL (pes 0 to 3) + OCN (pes 0 to 3) + LND (pes 0 to 3) + ROF (pes 0 to 3) + ICE (pes 0 to 3) + ATM (pes 0 to 3) + GLC (pes 0 to 3) + WAV (pes 0 to 3) + + CPL:CLOCK_ADVANCE 0.004: 0.005 + CPL:OCNPRE1_BARRIER 0.001: 0.001 + CPL:OCNPRE1 2.454: 2.531 + 
CPL:OCNPREP_BARRIER 0.000: 0.000 + CPL:OCNPREP 0.009: 0.010 + CPL:C2O_BARRIER <----> 0.000: 0.001 + CPL:C2O <----> 0.010: 0.010 + CPL:LNDPREP_BARRIER 0.020: 0.098 + CPL:LNDPREP 0.364: 0.385 + CPL:C2L_BARRIER <----> 0.026: 0.047 + CPL:C2L <----> 0.353: 0.361 + CPL:ICEPREP_BARRIER 0.000: 0.010 + CPL:ICEPREP 0.867: 0.903 + CPL:C2I_BARRIER <----> 0.057: 0.091 + CPL:C2I <----> 0.516: 0.529 + CPL:ROFPREP_BARRIER 0.000: 0.015 + CPL:ROFPREP 1.097: 1.122 + CPL:C2R_BARRIER <----> 0.042: 0.068 + CPL:C2R <----> 0.144: 0.146 + CPL:ICE_RUN_BARRIER 0.001: 0.004 + CPL:ICE_RUN 2.072: 2.571 + CPL:LND_RUN_BARRIER 0.036: 0.533 + CPL:LND_RUN 1.514: 1.561 + CPL:ROF_RUN_BARRIER 0.125: 0.177 + CPL:ROF_RUN 0.337: 0.378 + CPL:ATMOCNP_BARRIER 0.040: 0.077 + CPL:ATMOCNP 6.880: 7.078 + CPL:L2C_BARRIER <----> 0.401: 0.598 + CPL:L2C 3.004: 3.072 + CPL:LNDPOST_BARRIER 0.021: 0.027 + CPL:LNDPOST 0.033: 0.034 + CPL:R2C_BARRIER <----> 0.002: 0.004 + CPL:R2C <----> 0.153: 0.159 + CPL:ROFPOST_BARRIER 0.013: 0.018 + CPL:ROFPOST 3.295: 4.113 + CPL:I2C_BARRIER <----> 0.001: 0.002 + CPL:I2C <----> 0.339: 0.352 + CPL:ICEPOST_BARRIER 0.025: 0.038 + CPL:ICEPOST 0.000: 0.000 + CPL:FRACSET_BARRIER 0.001: 0.002 + CPL:FRACSET 0.330: 0.331 + CPL:ATMPREP_BARRIER 0.009: 0.009 + CPL:ATMPREP 6.625: 6.677 + CPL:C2A_BARRIER <----> 0.069: 0.121 + CPL:C2A <----> 0.073: 0.079 + CPL:OCN_RUN_BARRIER 0.000: 0.000 + CPL:OCN_RUN 0.020: 0.022 + CPL:ATM_RUN_BARRIER 0.004: 0.009 + CPL:ATM_RUN 0.718: 1.076 + CPL:A2C_BARRIER <----> 0.043: 0.405 + CPL:A2C <----> 0.091: 0.098 + CPL:ATMPOST_BARRIER 0.002: 0.009 + CPL:ATMPOST 0.000: 0.000 + CPL:O2C_BARRIER <----> 0.000: 0.000 + CPL:O2C <----> 0.003: 0.003 + CPL:OCNPOST_BARRIER 0.000: 0.000 + CPL:OCNPOST 0.000: 0.000 + CPL:HISTORY_BARRIER 0.000: 0.001 + CPL:HISTORY 0.000: 0.000 + CPL:TSTAMP_WRITE 0.000: 0.000 + CPL:TPROF_WRITE 0.002: 31.806 + CPL:RUN_LOOP_BSTOP 0.000: 0.000 More info on coupler timing: - CPL:OCNPRE1 2.454: 2.531 - CPL:ocnpre1_atm2ocn 2.454: 2.531 + CPL:OCNPRE1 2.454: 2.531 + CPL:ocnpre1_atm2ocn 2.454: 2.531 - CPL:OCNPREP 0.009: 0.010 - CPL:ocnprep_avg 0.009: 0.010 + CPL:OCNPREP 0.009: 0.010 + CPL:ocnprep_avg 0.009: 0.010 - CPL:LNDPREP 0.364: 0.385 - CPL:lndprep_atm2lnd 0.102: 0.110 - CPL:lndprep_mrgx2l 0.262: 0.275 + CPL:LNDPREP 0.364: 0.385 + CPL:lndprep_atm2lnd 0.102: 0.110 + CPL:lndprep_mrgx2l 0.262: 0.275 - CPL:ICEPREP 0.867: 0.903 - CPL:iceprep_ocn2ice 0.084: 0.086 - CPL:iceprep_atm2ice 0.240: 0.268 - CPL:iceprep_mrgx2i 0.542: 0.553 + CPL:ICEPREP 0.867: 0.903 + CPL:iceprep_ocn2ice 0.084: 0.086 + CPL:iceprep_atm2ice 0.240: 0.268 + CPL:iceprep_mrgx2i 0.542: 0.553 - CPL:ROFPREP 1.097: 1.122 - CPL:rofprep_l2xavg 0.000: 0.000 - CPL:rofprep_lnd2rof 1.053: 1.079 - CPL:rofprep_mrgx2r 0.043: 0.043 + CPL:ROFPREP 1.097: 1.122 + CPL:rofprep_l2xavg 0.000: 0.000 + CPL:rofprep_lnd2rof 1.053: 1.079 + CPL:rofprep_mrgx2r 0.043: 0.043 - CPL:ATMPREP 6.625: 6.677 - CPL:atmprep_xao2atm 1.546: 1.563 - CPL:atmprep_ocn2atm 0.973: 0.988 - CPL:atmprep_alb2atm 0.565: 0.579 - CPL:atmprep_ice2atm 1.925: 1.954 - CPL:atmprep_lnd2atm 0.778: 0.830 - CPL:atmprep_mrgx2a 0.779: 0.829 + CPL:ATMPREP 6.625: 6.677 + CPL:atmprep_xao2atm 1.546: 1.563 + CPL:atmprep_ocn2atm 0.973: 0.988 + CPL:atmprep_alb2atm 0.565: 0.579 + CPL:atmprep_ice2atm 1.925: 1.954 + CPL:atmprep_lnd2atm 0.778: 0.830 + CPL:atmprep_mrgx2a 0.779: 0.829 - CPL:ATMOCNP 6.880: 7.078 - CPL:atmocnp_ice2ocn 0.200: 0.222 - CPL:atmocnp_fluxo 2.093: 2.228 - CPL:atmocnp_mrgx2o 3.654: 3.837 - CPL:atmocnp_accum 0.537: 0.572 - CPL:atmocnp_ocnalb 0.323: 0.418 + CPL:ATMOCNP 
6.880: 7.078 + CPL:atmocnp_ice2ocn 0.200: 0.222 + CPL:atmocnp_fluxo 2.093: 2.228 + CPL:atmocnp_mrgx2o 3.654: 3.837 + CPL:atmocnp_accum 0.537: 0.572 + CPL:atmocnp_ocnalb 0.323: 0.418 - CPL:OCNPOST 0.000: 0.000 + CPL:OCNPOST 0.000: 0.000 - CPL:LNDPOST 0.033: 0.034 + CPL:LNDPOST 0.033: 0.034 - CPL:rofpost_rof2lnd 0.716: 0.842 - CPL:rofpost_rof2ocn 2.531: 3.271 + CPL:rofpost_rof2lnd 0.716: 0.842 + CPL:rofpost_rof2ocn 2.531: 3.271 - CPL:ICEPOST 0.000: 0.000 - - - - CPL:ATMPOST 0.000: 0.000 + CPL:ICEPOST 0.000: 0.000 + CPL:ATMPOST 0.000: 0.000 diff --git a/tools/load_balancing_tool/tests/timing/timing_3 b/tools/load_balancing_tool/tests/timing/timing_3 index 66ce956e9c8..d8fc6e7e197 100644 --- a/tools/load_balancing_tool/tests/timing/timing_3 +++ b/tools/load_balancing_tool/tests/timing/timing_3 @@ -12,179 +12,176 @@ stop_option : ndays, stop_n = 10 run_length : 10 days (9 for ocean) - component comp_pes root_pe tasks x threads instances (stride) - --------- ------ ------- ------ ------ --------- ------ - cpl = cpl 8 0 8 x 1 1 (1 ) - atm = xatm 8 0 8 x 1 1 (1 ) - lnd = xlnd 8 0 8 x 1 1 (1 ) - ice = xice 8 0 8 x 1 1 (1 ) - ocn = xocn 8 0 8 x 1 1 (1 ) - rof = xrof 8 0 8 x 1 1 (1 ) - glc = xglc 8 0 8 x 1 1 (1 ) - wav = xwav 8 0 8 x 1 1 (1 ) - esp = sesp 8 0 8 x 1 1 (1 ) - - total pes active : 8 - pes per node : 8 - pe count for cost estimate : 8 - - Overall Metrics: - Model Cost: 21.61 pe-hrs/simulated_year - Model Throughput: 8.89 simulated_years/day - - Init Time : 5.442 seconds - Run Time : 266.378 seconds 26.638 seconds/day - Final Time : 0.000 seconds - - Actual Ocn Init Wait Time : 0.006 seconds - Estimated Ocn Init Run Time : 0.000 seconds - Estimated Run Time Correction : 0.000 seconds - (This correction has been applied to the ocean and total run times) - -Runs Time in total seconds, seconds/model-day, and model-years/wall-day -CPL Run Time represents time in CPL pes alone, not including time associated with data exchange with other components - - TOT Run Time: 266.378 seconds 26.638 seconds/mday 8.89 myears/wday - CPL Run Time: 192.504 seconds 19.250 seconds/mday 12.30 myears/wday - ATM Run Time: 6.064 seconds 0.606 seconds/mday 390.36 myears/wday - LND Run Time: 9.020 seconds 0.902 seconds/mday 262.43 myears/wday - ICE Run Time: 15.208 seconds 1.521 seconds/mday 155.65 myears/wday - OCN Run Time: 0.132 seconds 0.013 seconds/mday 17932.75 myears/wday - ROF Run Time: 3.640 seconds 0.364 seconds/mday 650.31 myears/wday - GLC Run Time: 0.000 seconds 0.000 seconds/mday 0.00 myears/wday - WAV Run Time: 0.000 seconds 0.000 seconds/mday 0.00 myears/wday - ESP Run Time: 0.000 seconds 0.000 seconds/mday 0.00 myears/wday - CPL COMM Time: 24.289 seconds 2.429 seconds/mday 97.46 myears/wday - - ----------------- DRIVER TIMING FLOWCHART --------------------- - - NOTE: min:max driver timers (seconds/day): - CPL (pes 0 to 7) - OCN (pes 0 to 7) - LND (pes 0 to 7) - ROF (pes 0 to 7) - ICE (pes 0 to 7) - ATM (pes 0 to 7) - GLC (pes 0 to 7) - WAV (pes 0 to 7) - - CPL:CLOCK_ADVANCE 0.005: 0.005 - CPL:OCNPRE1_BARRIER 0.001: 0.002 - CPL:OCNPRE1 2.048: 2.204 - CPL:OCNPREP_BARRIER 0.000: 0.001 - CPL:OCNPREP 0.009: 0.010 - CPL:C2O_BARRIER <----> 0.001: 0.002 - CPL:C2O <----> 0.011: 0.012 - CPL:LNDPREP_BARRIER 0.023: 0.178 - CPL:LNDPREP 0.376: 0.386 - CPL:C2L_BARRIER <----> 0.041: 0.050 - CPL:C2L <----> 0.412: 0.425 - CPL:ICEPREP_BARRIER 0.007: 0.022 - CPL:ICEPREP 0.870: 0.888 - CPL:C2I_BARRIER <----> 0.063: 0.084 - CPL:C2I <----> 0.634: 0.652 - CPL:ROFPREP_BARRIER 0.005: 0.028 - CPL:ROFPREP 0.738: 0.891 - 
CPL:C2R_BARRIER <----> 0.042: 0.196 - CPL:C2R <----> 0.167: 0.174 - CPL:ICE_RUN_BARRIER 0.004: 0.012 - CPL:ICE_RUN 1.287: 1.521 - CPL:LND_RUN_BARRIER 0.118: 0.342 - CPL:LND_RUN 0.872: 0.902 - CPL:ROF_RUN_BARRIER 0.128: 0.156 - CPL:ROF_RUN 0.345: 0.364 - CPL:ATMOCNP_BARRIER 0.058: 0.074 - CPL:ATMOCNP 5.643: 5.764 - CPL:L2C_BARRIER <----> 0.224: 0.344 - CPL:L2C 3.486: 3.592 - CPL:LNDPOST_BARRIER 0.034: 0.045 - CPL:LNDPOST 0.029: 0.030 - CPL:R2C_BARRIER <----> 0.007: 0.009 - CPL:R2C <----> 0.191: 0.196 - CPL:ROFPOST_BARRIER 0.019: 0.026 - CPL:ROFPOST 2.026: 3.835 - CPL:I2C_BARRIER <----> 0.004: 0.008 - CPL:I2C <----> 0.436: 0.444 - CPL:ICEPOST_BARRIER 0.060: 0.070 - CPL:ICEPOST 0.000: 0.000 - CPL:FRACSET_BARRIER 0.001: 0.004 - CPL:FRACSET 0.282: 0.319 - CPL:ATMPREP_BARRIER 0.012: 0.049 - CPL:ATMPREP 5.355: 5.427 - CPL:C2A_BARRIER <----> 0.055: 0.127 - CPL:C2A <----> 0.068: 0.079 - CPL:OCN_RUN_BARRIER 0.000: 0.000 - CPL:OCN_RUN 0.013: 0.015 - CPL:ATM_RUN_BARRIER 0.011: 0.022 - CPL:ATM_RUN 0.423: 0.606 - CPL:A2C_BARRIER <----> 0.068: 0.254 - CPL:A2C <----> 0.091: 0.093 - CPL:ATMPOST_BARRIER 0.005: 0.010 - CPL:ATMPOST 0.000: 0.000 - CPL:O2C_BARRIER <----> 0.000: 0.000 - CPL:O2C <----> 0.003: 0.004 - CPL:OCNPOST_BARRIER 0.000: 0.001 - CPL:OCNPOST 0.000: 0.000 - CPL:HISTORY_BARRIER 0.000: 0.001 - CPL:HISTORY 0.000: 0.000 - CPL:TSTAMP_WRITE 0.000: 0.000 - CPL:TPROF_WRITE 0.001: 0.001 - CPL:RUN_LOOP_BSTOP 0.000: 0.000 + component comp_pes root_pe tasks x threads instances (stride) + --------- ------ ------- ------ ------ --------- ------ + cpl = cpl 8 0 8 x 1 1 (1 ) + atm = xatm 8 0 8 x 1 1 (1 ) + lnd = xlnd 8 0 8 x 1 1 (1 ) + ice = xice 8 0 8 x 1 1 (1 ) + ocn = xocn 8 0 8 x 1 1 (1 ) + rof = xrof 8 0 8 x 1 1 (1 ) + glc = xglc 8 0 8 x 1 1 (1 ) + wav = xwav 8 0 8 x 1 1 (1 ) + esp = sesp 8 0 8 x 1 1 (1 ) + + total pes active : 8 + pes per node : 8 + pe count for cost estimate : 8 + + Overall Metrics: + Model Cost: 21.61 pe-hrs/simulated_year + Model Throughput: 8.89 simulated_years/day + + Init Time : 5.442 seconds + Run Time : 266.378 seconds 26.638 seconds/day + Final Time : 0.000 seconds + + Actual Ocn Init Wait Time : 0.006 seconds + Estimated Ocn Init Run Time : 0.000 seconds + Estimated Run Time Correction : 0.000 seconds + (This correction has been applied to the ocean and total run times) + +Runs Time in total seconds, seconds/model-day, and model-years/wall-day +CPL Run Time represents time in CPL pes alone, not including time associated with data exchange with other components + + TOT Run Time: 266.378 seconds 26.638 seconds/mday 8.89 myears/wday + CPL Run Time: 192.504 seconds 19.250 seconds/mday 12.30 myears/wday + ATM Run Time: 6.064 seconds 0.606 seconds/mday 390.36 myears/wday + LND Run Time: 9.020 seconds 0.902 seconds/mday 262.43 myears/wday + ICE Run Time: 15.208 seconds 1.521 seconds/mday 155.65 myears/wday + OCN Run Time: 0.132 seconds 0.013 seconds/mday 17932.75 myears/wday + ROF Run Time: 3.640 seconds 0.364 seconds/mday 650.31 myears/wday + GLC Run Time: 0.000 seconds 0.000 seconds/mday 0.00 myears/wday + WAV Run Time: 0.000 seconds 0.000 seconds/mday 0.00 myears/wday + ESP Run Time: 0.000 seconds 0.000 seconds/mday 0.00 myears/wday + CPL COMM Time: 24.289 seconds 2.429 seconds/mday 97.46 myears/wday + + +---------------- DRIVER TIMING FLOWCHART --------------------- + + NOTE: min:max driver timers (seconds/day): + CPL (pes 0 to 7) + OCN (pes 0 to 7) + LND (pes 0 to 7) + ROF (pes 0 to 7) + ICE (pes 0 to 7) + ATM (pes 0 to 7) + GLC (pes 0 to 7) + WAV (pes 0 to 7) + + 
CPL:CLOCK_ADVANCE 0.005: 0.005 + CPL:OCNPRE1_BARRIER 0.001: 0.002 + CPL:OCNPRE1 2.048: 2.204 + CPL:OCNPREP_BARRIER 0.000: 0.001 + CPL:OCNPREP 0.009: 0.010 + CPL:C2O_BARRIER <----> 0.001: 0.002 + CPL:C2O <----> 0.011: 0.012 + CPL:LNDPREP_BARRIER 0.023: 0.178 + CPL:LNDPREP 0.376: 0.386 + CPL:C2L_BARRIER <----> 0.041: 0.050 + CPL:C2L <----> 0.412: 0.425 + CPL:ICEPREP_BARRIER 0.007: 0.022 + CPL:ICEPREP 0.870: 0.888 + CPL:C2I_BARRIER <----> 0.063: 0.084 + CPL:C2I <----> 0.634: 0.652 + CPL:ROFPREP_BARRIER 0.005: 0.028 + CPL:ROFPREP 0.738: 0.891 + CPL:C2R_BARRIER <----> 0.042: 0.196 + CPL:C2R <----> 0.167: 0.174 + CPL:ICE_RUN_BARRIER 0.004: 0.012 + CPL:ICE_RUN 1.287: 1.521 + CPL:LND_RUN_BARRIER 0.118: 0.342 + CPL:LND_RUN 0.872: 0.902 + CPL:ROF_RUN_BARRIER 0.128: 0.156 + CPL:ROF_RUN 0.345: 0.364 + CPL:ATMOCNP_BARRIER 0.058: 0.074 + CPL:ATMOCNP 5.643: 5.764 + CPL:L2C_BARRIER <----> 0.224: 0.344 + CPL:L2C 3.486: 3.592 + CPL:LNDPOST_BARRIER 0.034: 0.045 + CPL:LNDPOST 0.029: 0.030 + CPL:R2C_BARRIER <----> 0.007: 0.009 + CPL:R2C <----> 0.191: 0.196 + CPL:ROFPOST_BARRIER 0.019: 0.026 + CPL:ROFPOST 2.026: 3.835 + CPL:I2C_BARRIER <----> 0.004: 0.008 + CPL:I2C <----> 0.436: 0.444 + CPL:ICEPOST_BARRIER 0.060: 0.070 + CPL:ICEPOST 0.000: 0.000 + CPL:FRACSET_BARRIER 0.001: 0.004 + CPL:FRACSET 0.282: 0.319 + CPL:ATMPREP_BARRIER 0.012: 0.049 + CPL:ATMPREP 5.355: 5.427 + CPL:C2A_BARRIER <----> 0.055: 0.127 + CPL:C2A <----> 0.068: 0.079 + CPL:OCN_RUN_BARRIER 0.000: 0.000 + CPL:OCN_RUN 0.013: 0.015 + CPL:ATM_RUN_BARRIER 0.011: 0.022 + CPL:ATM_RUN 0.423: 0.606 + CPL:A2C_BARRIER <----> 0.068: 0.254 + CPL:A2C <----> 0.091: 0.093 + CPL:ATMPOST_BARRIER 0.005: 0.010 + CPL:ATMPOST 0.000: 0.000 + CPL:O2C_BARRIER <----> 0.000: 0.000 + CPL:O2C <----> 0.003: 0.004 + CPL:OCNPOST_BARRIER 0.000: 0.001 + CPL:OCNPOST 0.000: 0.000 + CPL:HISTORY_BARRIER 0.000: 0.001 + CPL:HISTORY 0.000: 0.000 + CPL:TSTAMP_WRITE 0.000: 0.000 + CPL:TPROF_WRITE 0.001: 0.001 + CPL:RUN_LOOP_BSTOP 0.000: 0.000 More info on coupler timing: - CPL:OCNPRE1 2.048: 2.204 - CPL:ocnpre1_atm2ocn 2.047: 2.203 + CPL:OCNPRE1 2.048: 2.204 + CPL:ocnpre1_atm2ocn 2.047: 2.203 - CPL:OCNPREP 0.009: 0.010 - CPL:ocnprep_avg 0.009: 0.010 + CPL:OCNPREP 0.009: 0.010 + CPL:ocnprep_avg 0.009: 0.010 - CPL:LNDPREP 0.376: 0.386 - CPL:lndprep_atm2lnd 0.100: 0.113 - CPL:lndprep_mrgx2l 0.271: 0.275 + CPL:LNDPREP 0.376: 0.386 + CPL:lndprep_atm2lnd 0.100: 0.113 + CPL:lndprep_mrgx2l 0.271: 0.275 - CPL:ICEPREP 0.870: 0.888 - CPL:iceprep_ocn2ice 0.079: 0.090 - CPL:iceprep_atm2ice 0.235: 0.266 - CPL:iceprep_mrgx2i 0.529: 0.555 + CPL:ICEPREP 0.870: 0.888 + CPL:iceprep_ocn2ice 0.079: 0.090 + CPL:iceprep_atm2ice 0.235: 0.266 + CPL:iceprep_mrgx2i 0.529: 0.555 - CPL:ROFPREP 0.738: 0.891 - CPL:rofprep_l2xavg 0.000: 0.000 - CPL:rofprep_lnd2rof 0.712: 0.848 - CPL:rofprep_mrgx2r 0.026: 0.048 + CPL:ROFPREP 0.738: 0.891 + CPL:rofprep_l2xavg 0.000: 0.000 + CPL:rofprep_lnd2rof 0.712: 0.848 + CPL:rofprep_mrgx2r 0.026: 0.048 - CPL:ATMPREP 5.355: 5.427 - CPL:atmprep_xao2atm 1.154: 1.236 - CPL:atmprep_ocn2atm 0.769: 0.812 - CPL:atmprep_alb2atm 0.397: 0.409 - CPL:atmprep_ice2atm 1.548: 1.682 - CPL:atmprep_lnd2atm 0.730: 0.907 - CPL:atmprep_mrgx2a 0.532: 0.615 + CPL:ATMPREP 5.355: 5.427 + CPL:atmprep_xao2atm 1.154: 1.236 + CPL:atmprep_ocn2atm 0.769: 0.812 + CPL:atmprep_alb2atm 0.397: 0.409 + CPL:atmprep_ice2atm 1.548: 1.682 + CPL:atmprep_lnd2atm 0.730: 0.907 + CPL:atmprep_mrgx2a 0.532: 0.615 - CPL:ATMOCNP 5.643: 5.764 - CPL:atmocnp_ice2ocn 0.193: 0.210 - CPL:atmocnp_fluxo 1.350: 1.448 - CPL:atmocnp_mrgx2o 
3.238: 3.314 - CPL:atmocnp_accum 0.557: 0.626 - CPL:atmocnp_ocnalb 0.230: 0.282 + CPL:ATMOCNP 5.643: 5.764 + CPL:atmocnp_ice2ocn 0.193: 0.210 + CPL:atmocnp_fluxo 1.350: 1.448 + CPL:atmocnp_mrgx2o 3.238: 3.314 + CPL:atmocnp_accum 0.557: 0.626 + CPL:atmocnp_ocnalb 0.230: 0.282 - CPL:OCNPOST 0.000: 0.000 + CPL:OCNPOST 0.000: 0.000 - CPL:LNDPOST 0.029: 0.030 + CPL:LNDPOST 0.029: 0.030 - CPL:rofpost_rof2lnd 0.702: 0.837 - CPL:rofpost_rof2ocn 1.212: 3.047 + CPL:rofpost_rof2lnd 0.702: 0.837 + CPL:rofpost_rof2ocn 1.212: 3.047 - CPL:ICEPOST 0.000: 0.000 - - - - CPL:ATMPOST 0.000: 0.000 + CPL:ICEPOST 0.000: 0.000 + CPL:ATMPOST 0.000: 0.000 diff --git a/tools/mapping/check_maps/check_map.sh b/tools/mapping/check_maps/check_map.sh index 9d123872bda..16c3fab6f29 100755 --- a/tools/mapping/check_maps/check_map.sh +++ b/tools/mapping/check_maps/check_map.sh @@ -101,4 +101,3 @@ do echo "File not found: $MAP" fi done - diff --git a/tools/mapping/check_maps/src/Makefile b/tools/mapping/check_maps/src/Makefile index 36dfa5c3cbb..077a4125380 100644 --- a/tools/mapping/check_maps/src/Makefile +++ b/tools/mapping/check_maps/src/Makefile @@ -68,4 +68,3 @@ clean: .F90.o: $(ESMF_F90COMPILER) -c $(ESMF_F90COMPILEOPTS) $(ESMF_F90COMPILEPATHS) \ $(ESMF_F90COMPILEFREECPP) $(ESMF_F90COMPILECPPFLAGS) $< - diff --git a/tools/mapping/gen_domain_files/INSTALL b/tools/mapping/gen_domain_files/INSTALL index a244892d296..71d7caefcae 100644 --- a/tools/mapping/gen_domain_files/INSTALL +++ b/tools/mapping/gen_domain_files/INSTALL @@ -14,4 +14,3 @@ where [machine name] is the name of the machine you are building on. In most cases configure can figure that out on its own, but if you get an error that is the first fix to try. Also, some machines have dedicated build nodes, so you might need to SSH to another node before the 'gmake' step. - diff --git a/tools/mapping/gen_domain_files/README b/tools/mapping/gen_domain_files/README index 6cdb53a4856..0e28d5f5177 100644 --- a/tools/mapping/gen_domain_files/README +++ b/tools/mapping/gen_domain_files/README @@ -32,7 +32,7 @@ $ gen_domain -m [--fminval ] [--fmaxval ] [--set-omask] - + where: filemap = input mapping file name (character string) gridocn = output ocean grid name (NOT A FILE NAME!) @@ -119,4 +119,3 @@ NOTES While this type of manipulation has been requested by the AMWG, it is not required by the CESM model, CESM coupler, or the SCRIP map generation tool. - diff --git a/tools/mapping/gen_domain_files/src/gen_domain.F90 b/tools/mapping/gen_domain_files/src/gen_domain.F90 index 079e0d9372a..f0b97f20e4e 100644 --- a/tools/mapping/gen_domain_files/src/gen_domain.F90 +++ b/tools/mapping/gen_domain_files/src/gen_domain.F90 @@ -868,7 +868,7 @@ logical function var_exists(fid, var_name) integer, intent(in) :: fid character(len=*), intent(in) :: var_name integer :: error_code, vid - + error_code = nf_inq_varid(fid, var_name, vid) if (error_code == NF_NOERR) then var_exists = .true. 
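The var_exists helper above shows the standard netCDF-F77 idiom for probing a file: ask nf_inq_varid for a variable id and treat any status other than NF_NOERR as "not present". For comparison with the Python side of CIME, a minimal sketch of the same check follows; it assumes the netCDF4 Python package, and the file name "domain.ocn.nc" and variable name "xc" are hypothetical placeholders, not anything this patch touches.

    # Sketch only: a Python analogue of the Fortran var_exists helper above.
    # Assumes the netCDF4 package; the path and variable name are examples.
    from netCDF4 import Dataset

    def var_exists(path, var_name):
        # netCDF4 exposes a file's variables as a mapping, so a simple
        # membership test replaces the nf_inq_varid / NF_NOERR error-code check.
        with Dataset(path) as nc:
            return var_name in nc.variables

    print(var_exists("domain.ocn.nc", "xc"))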
diff --git a/tools/mapping/gen_mapping_files/README b/tools/mapping/gen_mapping_files/README index facaeb290c1..4aac6877339 100644 --- a/tools/mapping/gen_mapping_files/README +++ b/tools/mapping/gen_mapping_files/README @@ -124,4 +124,3 @@ You can also set the following env variables: (Known machines will load tools from modules) MPIEXEC ------ Name of mpirun executable (currently tools only run in serial due to module issues) - diff --git a/tools/mapping/gen_mapping_files/gen_ESMF_mapping_file/README b/tools/mapping/gen_mapping_files/gen_ESMF_mapping_file/README index 8d23575691d..6743c7c1aec 100644 --- a/tools/mapping/gen_mapping_files/gen_ESMF_mapping_file/README +++ b/tools/mapping/gen_mapping_files/gen_ESMF_mapping_file/README @@ -81,4 +81,3 @@ You can also set the following env variables: MPIEXEC ------ Name of mpirun executable (ignored if --serial, which is default on cheyenne login nodes - diff --git a/tools/mapping/gen_mapping_files/gen_ESMF_mapping_file/create_ESMF_map.sh b/tools/mapping/gen_mapping_files/gen_ESMF_mapping_file/create_ESMF_map.sh index 4e7c9d95a38..e70e6c75ed9 100755 --- a/tools/mapping/gen_mapping_files/gen_ESMF_mapping_file/create_ESMF_map.sh +++ b/tools/mapping/gen_mapping_files/gen_ESMF_mapping_file/create_ESMF_map.sh @@ -217,10 +217,7 @@ if [ $MACH == "UNSET" ]; then r+([0-9])i+([0-9])n+([0-9]) ) MACH="cheyenne" ;; - geyser* ) - MACH="dav" - ;; - caldera* ) + casper* ) MACH="dav" ;; pronghorn* ) @@ -298,14 +295,17 @@ fi case $MACH in ## cheyenne "cheyenne" ) + esmfvers=8.1.0b23 + intelvers=19.0.5 module purge - module load intel/17.0.1 esmf_libs/7.0.0 + module load intel/$intelvers esmf_libs + module use /glade/p/cesmdata/cseg/PROGS/modulefiles/esmfpkgs/intel/$intelvers if [ "$serial" == "TRUE" ]; then # No MPIEXEC if [ -z "$MPIEXEC" ]; then MPIEXEC="" fi - module load esmf-7.1.0r-ncdfio-uni-O + module load esmf-${esmfvers}-ncdfio-mpiuni-O else # MPIEXEC should be mpirun -np if [ -z "$MPIEXEC" ]; then @@ -314,22 +314,23 @@ case $MACH in fi MPIEXEC="mpirun -np $NCPUS" fi - module load esmf-7.1.0r-ncdfio-mpi-O - module load mpt/2.15f + module load esmf-${esmfvers}-ncdfio-mpt-O + module load mpt/2.22 fi # need to load module to access ncatted module load nco ;; -## geyser, caldera, or pronghorn +## casper "dav" ) + esmfvers=8.0.0 module purge - module load intel/17.0.1 esmflibs/7.1.0r + module load intel/19.1.1 esmflibs/${esmfvers} if [ "$serial" == "TRUE" ]; then # No MPIEXEC if [ -z "$MPIEXEC" ]; then MPIEXEC="" fi - module load esmf-7.1.0r-ncdfio-uni-O + module load esmf-${esmfvers}-ncdfio-uni-O else echo "ERROR: Parallel ESMF tools are not available on $MACH, use --serial" exit 1 @@ -394,7 +395,7 @@ if [ "$mapping" == "NULL" ]; then echo "ERROR: $map_type is not a valid option for --maptype" exit 9 fi -cmd="$MPIEXEC $ESMF_REGRID --ignore_unmapped -m $mapping -w $fmap -s $fsrc -d $fdst $pass_thru" +cmd="$MPIEXEC $ESMF_REGRID --ignore_unmapped --ignore_degenerate -m $mapping -w $fmap -s $fsrc -d $fdst $pass_thru" if [ $use_large == "true" ]; then cmd="$cmd --64bit_offset" diff --git a/tools/mapping/gen_mapping_files/runoff_to_ocn/INSTALL b/tools/mapping/gen_mapping_files/runoff_to_ocn/INSTALL index c4af5677228..003f424a3da 100644 --- a/tools/mapping/gen_mapping_files/runoff_to_ocn/INSTALL +++ b/tools/mapping/gen_mapping_files/runoff_to_ocn/INSTALL @@ -14,4 +14,3 @@ where [machine name] is the name of the machine you are building on. In most cases configure can figure that out on its own, but if you get an error that is the first fix to try. 
Also, some machines have dedicated build nodes, so you might need to SSH to another node before the 'gmake' step. - diff --git a/tools/mapping/gen_mapping_files/runoff_to_ocn/README b/tools/mapping/gen_mapping_files/runoff_to_ocn/README index 1b88f779104..7e750031ba9 100644 --- a/tools/mapping/gen_mapping_files/runoff_to_ocn/README +++ b/tools/mapping/gen_mapping_files/runoff_to_ocn/README @@ -156,4 +156,3 @@ A couple of useful notes: containing one integer per grid cell -- a positive number represents a region in open ocean, a negative number represents a region in a marginal sea, and a zero is land. - diff --git a/tools/mapping/map_field/INSTALL b/tools/mapping/map_field/INSTALL index a244892d296..71d7caefcae 100644 --- a/tools/mapping/map_field/INSTALL +++ b/tools/mapping/map_field/INSTALL @@ -14,4 +14,3 @@ where [machine name] is the name of the machine you are building on. In most cases configure can figure that out on its own, but if you get an error that is the first fix to try. Also, some machines have dedicated build nodes, so you might need to SSH to another node before the 'gmake' step. - diff --git a/tools/mapping/map_field/README b/tools/mapping/map_field/README index ff43aa9cf47..d961e088ffa 100644 --- a/tools/mapping/map_field/README +++ b/tools/mapping/map_field/README @@ -56,4 +56,3 @@ NOTES (a) The output file is ALWAYS CLOBBERED in the current implementation. (b) There is limited error checking at this time. - diff --git a/tools/statistical_ensemble_test/ensemble.py b/tools/statistical_ensemble_test/ensemble.py index 6ef86b73a96..fc5ff8072f1 100644 --- a/tools/statistical_ensemble_test/ensemble.py +++ b/tools/statistical_ensemble_test/ensemble.py @@ -4,135 +4,136 @@ import random from single_run import process_args_dict, single_case -#============================================================================== -# set up and submit 12-month (original) or 9-time step (uf) run. then create +# ============================================================================== +# set up and submit 12-month (original) or 9-time step (uf) run. then create # clones for a complete ensemble or a set of (3) test cases -#============================================================================== +# ============================================================================== -#generate positive random integers in [0, end-1] -#can't have any duplicates +# generate positive random integers in [0, end-1] +# can't have any duplicates def random_pick(num_pick, end): ar = range(0, end) rand_list = random.sample(ar, num_pick) - #for i in rand_list: + # for i in rand_list: # print i return rand_list -#get the pertlim corressponding to the random int + +# get the pertlim corresponding to the random int def get_pertlim_uf(rand_num): i = rand_num if i == 0: - ptlim = 0 + ptlim = 0 else: - j = 2*int((i - 1)/100) + 101 - k = (i - 1)%100 - if i%2 != 0: - ll = j + int(k/2)*18 - ippt = str(ll).zfill(3) - ptlim = "0."+ippt+"d-13" - else: - ll = j + int((k-1)/2)*18 - ippt = str(ll).zfill(3) - ptlim = "-0."+ippt+"d-13" + j = 2 * int((i - 1) / 100) + 101 + k = (i - 1) % 100 + if i % 2 != 0: + ll = j + int(k / 2) * 18 + ippt = str(ll).zfill(3) + ptlim = "0." + ippt + "d-13" + else: + ll = j + int((k - 1) / 2) * 18 + ippt = str(ll).zfill(3) + ptlim = "-0."
+ ippt + "d-13" return ptlim def main(argv): - caller = 'ensemble.py' + caller = "ensemble.py" - #directory with single_run.py and ensemble.py + # directory with single_run.py and ensemble.py stat_dir = os.path.dirname(os.path.realpath(__file__)) - print( "STATUS: stat_dir = " + stat_dir) + print("STATUS: stat_dir = " + stat_dir) - opts_dict, case_flags = process_args_dict(caller, argv) + opts_dict, case_flags = process_args_dict(caller, argv) - #default is verification mode (3 runs) - run_type = 'verify' - if opts_dict['ect'] == 'pop' : + # default is verification mode (3 runs) + run_type = "verify" + if opts_dict["ect"] == "pop": clone_count = 0 else: clone_count = 2 - uf = opts_dict['uf'] + uf = opts_dict["uf"] - #check for run_type change (i.e., if doing ensemble instead of verify) - ens_size = opts_dict['ensemble'] - if ens_size > 0: - run_type = 'ensemble' + # check for run_type change (i.e., if doing ensemble instead of verify) + ens_size = opts_dict["ensemble"] + if ens_size > 0: + run_type = "ensemble" clone_count = ens_size - 1 if ens_size > 999: - print('Error: cannot have an ensemble size greater than 999.') + print("Error: cannot have an ensemble size greater than 999.") sys.exit() - print('STATUS: ensemble size = ' + str(ens_size)) - - #generate random pertlim(s) for verify - if run_type == 'verify': - if opts_dict['ect'] == 'pop': + print("STATUS: ensemble size = " + str(ens_size)) + + # generate random pertlim(s) for verify + if run_type == "verify": + if opts_dict["ect"] == "pop": rand_ints = random_pick(1, 40) - else: #cam + else: # cam if uf: end_range = 350 else: end_range = 150 rand_ints = random_pick(3, end_range) - - #now create cases - print('STATUS: creating first case ...') - - #create first case - then clone - if run_type == 'verify': - opts_dict['pertlim'] = get_pertlim_uf(rand_ints[0]) - else: #full ensemble - opts_dict['pertlim'] = "0" - - #first case + + # now create cases + print("STATUS: creating first case ...") + + # create first case - then clone + if run_type == "verify": + opts_dict["pertlim"] = get_pertlim_uf(rand_ints[0]) + else: # full ensemble + opts_dict["pertlim"] = "0" + + # first case single_case(opts_dict, case_flags, stat_dir) - #clone? - if (clone_count > 0): + # clone? + if clone_count > 0: - #now clone - print('STATUS: cloning additional cases ...') + # now clone + print("STATUS: cloning additional cases ...") - #scripts dir + # scripts dir print("STATUS: stat_dir = " + stat_dir) ret = os.chdir(stat_dir) - ret = os.chdir('../../scripts') + ret = os.chdir("../../scripts") scripts_dir = os.getcwd() - print ("STATUS: scripts dir = " + scripts_dir) + print("STATUS: scripts dir = " + scripts_dir) - #we know case name ends in '.000' (already checked) - clone_case = opts_dict['case'] + # we know case name ends in '.000' (already checked) + clone_case = opts_dict["case"] case_pfx = clone_case[:-4] - for i in range(1, clone_count + 1): #1: clone_count - if run_type == 'verify': + for i in range(1, clone_count + 1): # 1: clone_count + if run_type == "verify": this_pertlim = get_pertlim_uf(rand_ints[i]) - else: #full ensemble + else: # full ensemble this_pertlim = get_pertlim_uf(i) - iens = '{0:03d}'.format(i) + iens = "{0:03d}".format(i) new_case = case_pfx + "." 
+ iens os.chdir(scripts_dir) - print ("STATUS: creating new cloned case: " + new_case) + print("STATUS: creating new cloned case: " + new_case) clone_args = " --keepexe --case " + new_case + " --clone " + clone_case - print (" with args: " + clone_args) + print(" with args: " + clone_args) command = scripts_dir + "/create_clone" + clone_args ret = os.system(command) - print ("STATUS: running setup for new cloned case: " + new_case) + print("STATUS: running setup for new cloned case: " + new_case) os.chdir(new_case) - command = './case.setup' + command = "./case.setup" ret = os.system(command) - #adjust perturbation - if opts_dict['ect'] == 'pop': - if run_type == 'verify': #remove old init_ts_perturb - f = open("user_nl_pop","r+") + # adjust perturbation + if opts_dict["ect"] == "pop": + if run_type == "verify": # remove old init_ts_perturb + f = open("user_nl_pop", "r+") all_lines = f.readlines() f.seek(0) for line in all_lines: @@ -144,13 +145,13 @@ def main(argv): else: text = "\ninit_ts_perturb = " + this_pertlim - #now append new pertlim + # now append new pertlim with open("user_nl_pop", "a") as f: f.write(text) else: - if run_type == 'verify': #remove old pertlim first - f = open("user_nl_cam","r+") + if run_type == "verify": # remove old pertlim first + f = open("user_nl_cam", "r+") all_lines = f.readlines() f.seek(0) for line in all_lines: @@ -162,34 +163,37 @@ def main(argv): else: text = "\npertlim = " + this_pertlim - #now append new pertlim + # now append new pertlim with open("user_nl_cam", "a") as f: f.write(text) - - #preview namelists - command = './preview_namelists' + # preview namelists + command = "./preview_namelists" ret = os.system(command) - - #submit? + + # submit? if opts_dict["ns"] == False: - command = './case.submit' + command = "./case.submit" ret = os.system(command) - #Final output + # Final output if run_type == "verify": - if opts_dict['ect'] == 'pop': - print ("STATUS: ---POP-ECT VERIFICATION CASE COMPLETE---") - print ("Set up one case using the following init_ts_perturb value:") - print (get_pertlim_uf(rand_ints[0])) + if opts_dict["ect"] == "pop": + print("STATUS: ---POP-ECT VERIFICATION CASE COMPLETE---") + print("Set up one case using the following init_ts_perturb value:") + print(get_pertlim_uf(rand_ints[0])) else: - print ("STATUS: ---CAM-ECT VERIFICATION CASES COMPLETE---") - print ("Set up three cases using the following pertlim values:") - print (get_pertlim_uf(rand_ints[0]) + ' ' + get_pertlim_uf(rand_ints[1]) + " " + get_pertlim_uf(rand_ints[2])) + print("STATUS: ---CAM-ECT VERIFICATION CASES COMPLETE---") + print("Set up three cases using the following pertlim values:") + print( + get_pertlim_uf(rand_ints[0]) + + " " + + get_pertlim_uf(rand_ints[1]) + + " " + + get_pertlim_uf(rand_ints[2]) + ) else: - print ("STATUS: --ENSEMBLE CASES COMPLETE---") - - + print("STATUS: --ENSEMBLE CASES COMPLETE---") if __name__ == "__main__": diff --git a/tools/statistical_ensemble_test/pyCECT/.gitignore b/tools/statistical_ensemble_test/pyCECT/.gitignore index 2e3097c0eb5..37fc9d40817 100644 --- a/tools/statistical_ensemble_test/pyCECT/.gitignore +++ b/tools/statistical_ensemble_test/pyCECT/.gitignore @@ -88,4 +88,3 @@ ENV/ # Rope project settings .ropeproject - diff --git a/tools/statistical_ensemble_test/pyCECT/CHANGES.rst b/tools/statistical_ensemble_test/pyCECT/CHANGES.rst index 520686f75ae..02472a02d3a 100644 --- a/tools/statistical_ensemble_test/pyCECT/CHANGES.rst +++ b/tools/statistical_ensemble_test/pyCECT/CHANGES.rst @@ -20,7 +20,7 @@ VERSION 3.2.0 
------------- - Migrated from Python 2 to Python 3. - + - Added improved documentation via ReadtheDocs. @@ -47,7 +47,7 @@ VERSION 3.0.7 - Added web_enabled mode and pbs submission script. - + VERSION 3.0.5 ------------- @@ -87,5 +87,3 @@ VERSION 1.0.0 - Initial release. - Includes CAM (atmosphere component) tools: CECT and PyEnsSum. - - diff --git a/tools/statistical_ensemble_test/pyCECT/EET.py b/tools/statistical_ensemble_test/pyCECT/EET.py index 9853de0c2eb..1fab1ba2b86 100644 --- a/tools/statistical_ensemble_test/pyCECT/EET.py +++ b/tools/statistical_ensemble_test/pyCECT/EET.py @@ -4,20 +4,20 @@ import argparse import itertools -class exhaustive_test(object): +class exhaustive_test(object): def __init__(self): super(exhaustive_test, self).__init__() def file_to_sets(self, compfile): set_dict = {} - with open(compfile, 'r') as f: + with open(compfile, "r") as f: for line in f: line.strip - key, failset = line.replace(' ', '').split(';', 1) + key, failset = line.replace(" ", "").split(";", 1) try: - failset = list(map(int, failset.split(','))) + failset = list(map(int, failset.split(","))) failset = set(failset) except: @@ -32,11 +32,11 @@ def test_combinations(self, dictionary, runsPerTest=3, nRunFails=2): passed = failed = 0 for compset in itertools.combinations(sims, runsPerTest): - # This block is slightly slower than manually + # This block is slightly slower than manually # specifying the pairs, but it generalizes # easily. failsets = [dictionary[s] for s in compset] - # The following three lines are adapted from + # The following three lines are adapted from # user doug's answer in # http://stackoverflow.com/questions/27369373/pairwise-set-intersection-in-python pairs = itertools.combinations(failsets, 2) @@ -53,14 +53,16 @@ def test_combinations(self, dictionary, runsPerTest=3, nRunFails=2): else: # print("this set passed") # print(compset) - passed +=1 + passed += 1 return passed, failed + if __name__ == "__main__": - parser = argparse.ArgumentParser(description="script to calculate all combinations of ensemble tests") - parser.add_argument("-f", dest="compfile", - help="compfile location", metavar="PATH") + parser = argparse.ArgumentParser( + description="script to calculate all combinations of ensemble tests" + ) + parser.add_argument("-f", dest="compfile", help="compfile location", metavar="PATH") args = parser.parse_args() diff --git a/tools/statistical_ensemble_test/pyCECT/README.rst b/tools/statistical_ensemble_test/pyCECT/README.rst index f1859351bed..32a55abfc45 100644 --- a/tools/statistical_ensemble_test/pyCECT/README.rst +++ b/tools/statistical_ensemble_test/pyCECT/README.rst @@ -21,7 +21,7 @@ CESM simulations.
Obtaining the code: ---------------- - + Currently, the most up-to-date development source code is available via git from the site: https://github.com/NCAR/PyCECT diff --git a/tools/statistical_ensemble_test/pyCECT/docs/conf.py b/tools/statistical_ensemble_test/pyCECT/docs/conf.py index 7b88816aae7..d5ec2d0ded5 100644 --- a/tools/statistical_ensemble_test/pyCECT/docs/conf.py +++ b/tools/statistical_ensemble_test/pyCECT/docs/conf.py @@ -12,16 +12,17 @@ # import os import sys -sys.path.insert(0, os.path.abspath('.')) -sys.path.insert(0, os.path.abspath('../')) -#sys.path.insert(0, os.path.abspath('../../')) + +sys.path.insert(0, os.path.abspath(".")) +sys.path.insert(0, os.path.abspath("../")) +# sys.path.insert(0, os.path.abspath('../../')) # -- Project information ----------------------------------------------------- -project = 'pyCECT' -copyright = u'2015-2020, University Corporation for Atmospheric Research' -author = u'Haiying Xu, Allison Baker, DOrit Hammerling, Daniel Milroy' +project = "pyCECT" +copyright = u"2015-2020, University Corporation for Atmospheric Research" +author = u"Haiying Xu, Allison Baker, Dorit Hammerling, Daniel Milroy" # -- General configuration --------------------------------------------------- @@ -29,16 +30,16 @@ # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. -#nbsphinx_execute = 'never' -#extensions = ['nbsphinx', 'sphinx.ext.autodoc', 'sphinx.ext.napoleon'] +# nbsphinx_execute = 'never' +# extensions = ['nbsphinx', 'sphinx.ext.autodoc', 'sphinx.ext.napoleon'] # Add any paths that contain templates here, relative to this directory. -#templates_path = ['_templates'] +# templates_path = ['_templates'] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] # -- Options for HTML output ------------------------------------------------- @@ -46,13 +47,11 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = 'alabaster' +html_theme = "alabaster" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css".
-#html_static_path = ['_static'] - -master_doc = 'index' - +# html_static_path = ['_static'] +master_doc = "index" diff --git a/tools/statistical_ensemble_test/pyCECT/docs/source/installation.rst b/tools/statistical_ensemble_test/pyCECT/docs/source/installation.rst index 378d1e5dc3e..cd71b36a806 100644 --- a/tools/statistical_ensemble_test/pyCECT/docs/source/installation.rst +++ b/tools/statistical_ensemble_test/pyCECT/docs/source/installation.rst @@ -3,5 +3,3 @@ Installation ============ *COMING SOON* - - diff --git a/tools/statistical_ensemble_test/pyCECT/docs/source/pyCECT.rst b/tools/statistical_ensemble_test/pyCECT/docs/source/pyCECT.rst index 1903afb02e9..31b36a3a666 100644 --- a/tools/statistical_ensemble_test/pyCECT/docs/source/pyCECT.rst +++ b/tools/statistical_ensemble_test/pyCECT/docs/source/pyCECT.rst @@ -9,10 +9,10 @@ Current functionality in the CESM-ECT suite includes: *Atmosphere component (CAM):* - * CAM-ECT: examines yearly-average files from CAM - * UF-CAM-ECT: examine history files from CAM + * CAM-ECT: examines yearly-average files from CAM + * UF-CAM-ECT: examines history files from CAM - Both CAM-ECT and UF-CAM-ECT require a summary file generated by + Both CAM-ECT and UF-CAM-ECT require a summary file generated by pyEnsSum.py. UF-CAM-ECT uses simulations of nine time-steps in length, while CAM-ECT uses yearly averages. The faster UF-CAM-ECT is always suggested to start with. (The CAM-ECT is typically only used in the case of an unexpected @@ -30,7 +30,7 @@ Current functionality in the CESM-ECT suite includes: *Ocean Component (POP):* - * POP-ECT: examines monthly-average files from POP + * POP-ECT: examines monthly-average files from POP POP-ECT requires a summary file generated by pyEnsSumPop.py and uses @@ -111,14 +111,14 @@ Notes and examples: * Note that CAM-ECT is the default test. - * The parameters setting the pass/fail criteria are all set by - default (ie. sigMul, minPCFail, minRunFail, numRunFile, and nPC). + * The parameters setting the pass/fail criteria are all set by + default (i.e., sigMul, minPCFail, minRunFail, numRunFile, and nPC). - * If the specified indir contains more files than the number specified by + * If the specified indir contains more files than the number specified by ``--numRunFile `` - (default= 3), then files will be chosen at random + (default = 3), then files will be chosen at random from that directory. * The Ensemble Exhaustive Test (EET) is specified by @@ -127,17 +127,17 @@ Notes and examples: This tool computes the failure rate of tests taken at a time. Therefore, when specifying ``--eet ``, must be greater than or equal to - . + . * To enable printing of extra variable information: ``--printVars`` - * By default, CAM-ECT looks at annual averages which is indictated by + * By default, CAM-ECT looks at annual averages which is indicated by - ``--tslice 1`` + ``--tslice 1`` - (For monthly files, use ``--tslice 0``. Note that this + (For monthly files, use ``--tslice 0``. Note that this should correspond to what has been collected in the summary file.)
* To enable printing out sum of standardized mean of all variables and associated box plots @@ -151,7 +151,7 @@ Notes and examples: ``--saveResults`` * *Example:* - + ``python pyCECT.py --sumfile /glade/p/cisl/asap/pycect_sample_data/cam_c1.2.2.1/summary_files/uf.ens.c1.2.2.1_fc5.ne30.nc --indir /glade/p/cisl/asap/pycect_sample_data/cam_c1.2.2.1/uf_cam_test_files --tslice 1`` * *Example using EET* (note that EET takes longer to run - especially for a large number of tests): @@ -160,28 +160,28 @@ 3. POP-ECT specific options (and summary file generated by pyEnsSumPop.py) - - * To use POP-ECT, you MUST add the following to enable this test + + * To use POP-ECT, you MUST add the following to enable this test (which disables UF-CAM-ECT and CAM-ECT): - - ``--popens`` + + ``--popens`` * Be sure to use a POP-ECT summary file: - - ``--sumfile /glade/p/cisl/asap//pycect_sample_data/pop_c2.0.b10/summary_files/pop.cesm2.0.b10.nc`` - + + ``--sumfile /glade/p/cisl/asap//pycect_sample_data/pop_c2.0.b10/summary_files/pop.cesm2.0.b10.nc`` + * Directory path that contains the run(s) to be evaluated. - - ``--indir /glade/p/cisl/asap//pycect_sample_data/pop_c2.0.b10/pop_test_files/C96`` - * The above directory may contain many POP history files that following the standard - CESM-POP naming convention. To specific which file or files you wish to test, you - simply specifying the test case file prefix (like a wildcard expansion). + ``--indir /glade/p/cisl/asap//pycect_sample_data/pop_c2.0.b10/pop_test_files/C96`` + + * The above directory may contain many POP history files that follow the standard + CESM-POP naming convention. To specify which file or files you wish to test, you + simply specify the test case file prefix (like a wildcard expansion). * To compare against all months in year 2 from the input directory above: ``--input_glob C96.pop.000.pop.h.0002`` - + * To compare only against month 12 in year 1: ``--input_glob C96.pop.000.pop.h.0001-12`` @@ -195,7 +195,7 @@ Notes and examples: ``--jsonfile pop_ensemble.json`` - * The parameters setting the pass/fail criteria are all set by + * The parameters setting the pass/fail criteria are all set by default (i.e., pop_tol, pop_threshold) but may be modified: * Specifying test tolerance (the minimum Z-score ``--pop_tol 3.0`` @@ -207,7 +207,7 @@ ``--pop_threshold 0.9`` - + * *Example:* - + ``python pyCECT.py --popens --sumfile /glade/p/cisl/asap//pycect_sample_data/pop_c2.0.b10/summary_files/pop.cesm2.0.b10.nc --indir /glade/p/cisl/asap//pycect_sample_data/pop_c2.0.b10/pop_test_files/C96 --jsonfile pop_ensemble.json --input_glob C96.pop.000.pop.h.0001-12`` diff --git a/tools/statistical_ensemble_test/pyCECT/docs/source/pyEnsSum.rst b/tools/statistical_ensemble_test/pyCECT/docs/source/pyEnsSum.rst index b49fb67c0c9..2c598e4e835 100644 --- a/tools/statistical_ensemble_test/pyCECT/docs/source/pyEnsSum.rst +++ b/tools/statistical_ensemble_test/pyCECT/docs/source/pyEnsSum.rst @@ -3,10 +3,10 @@ pyEnsSum ============== The verification tools in the CESM-ECT suite all require an *ensemble -summary file*, which contains statistics describing the ensemble distribution. -pyEnsSum can be used to create a CAM (atmospheric component) ensemble summary file. +summary file*, which contains statistics describing the ensemble distribution. +pyEnsSum can be used to create a CAM (atmospheric component) ensemble summary file.
-Note that an ensemble summary files for existing CESM tags for CAM-ECT and UF-CAM-ECT +Note that ensemble summary files for existing CESM tags for CAM-ECT and UF-CAM-ECT that were created by CSEG (CESM Software Engineering Group) are located (respectively) in the CESM input data directories: @@ -14,13 +14,13 @@ $CESMDATAROOT/inputdata/validation/ensembles $CESMDATAROOT/inputdata/validation/uf_ensembles Alternatively, pyEnsSum.py can be used to create a summary file for CAM-ECT or -UF-CAM-ECT, given the location of appropriate ensemble history files (which should +UF-CAM-ECT, given the location of appropriate ensemble history files (which should be generated via CIME, https://github.com/ESMCI/cime) (Note: to generate a summary file for POP-ECT, you must use pyEnsSumPop.py, which has its own corresponding instructions) -To use pyEnsSum: +To use pyEnsSum: -------------------- *Note: compatible with Python 3* @@ -52,14 +52,14 @@ To use pyEnsSum: * glob * itertools * datetime - + 3. To see all options (and defaults): ``python pyEnsSum.py -h``:: - Creates the summary file for an ensemble of CAM data. + Creates the summary file for an ensemble of CAM data. - Args for pyEnsSum : + Args for pyEnsSum : pyEnsSum.py -h : prints out this usage message @@ -75,16 +75,16 @@ To use pyEnsSum: --jsonfile : Jsonfile to provide that a list of variables that will be excluded or included (default = exclude_empty.json) --mpi_disable : Disable mpi mode to run in serial (off by default) - --fIndex : Use this to start at ensemble member instead of 000 (so - ensembles with numbers less than are excluded from summary file) - + --fIndex : Use this to start at ensemble member instead of 000 (so + ensembles with numbers less than are excluded from summary file) + Notes: ------------------ 1. CAM-ECT uses yearly average files, which by default (in the ensemble.py - generation script in CIME) also contains the initial conditions. Therefore, - one typically needs to set ``--tslice 1`` to use the yearly average (because + generation script in CIME) also contains the initial conditions. Therefore, + one typically needs to set ``--tslice 1`` to use the yearly average (because slice 0 is the initial conditions.) 2. UF-CAM-ECT uses timestep nine. By default (in the ensemble.py @@ -92,13 +92,13 @@ Notes: Therefore, one typically needs to set ``--tslice 1`` to use time step nine (because slice 0 is the initial conditions.) -3. There is no need to indicate UF-CAM-ECT vs. CAM-ECT to this routine. It +3. There is no need to indicate UF-CAM-ECT vs. CAM-ECT to this routine. It simply creates statistics for the supplied history files at the specified - time slice. For example, if you want to look at monthly files, simply - supply their location. Monthly files typically do not contain an initial + time slice. For example, if you want to look at monthly files, simply + supply their location. Monthly files typically do not contain an initial condition and would require ``--tslice 0``. -4. The ``--esize`` (the ensemble size) can be less than or equal to the number of files +4. The ``--esize`` (the ensemble size) can be less than or equal to the number of files in ``--indir``. Ensembles numbered 000-(esize-1) will be included unless ``--fIndex`` is specified. UF-CAM-ECT typically uses at least 350 members (the default), whereas CAM-ECT does not require as many. @@ -106,24 +106,24 @@ Notes: 5. Note that ``--res``, ``--tag``, ``--compset``, and ``--mach`` parameters only affect the metadata in the summary file. -6.
When running in parallel, the recommended number of cores to use is one +6. When running in parallel, the recommended number of cores to use is one for each 3D variable. The default is to run in parallel (recommended). 7. You must specify a json file (via ``--jsonfile``) that indicates - the variables in the ensemble + the variables in the ensemble output files that you want to include or exclude from the summary file statistics (see the example json files). We recommend excluding variables, as this is typically less work and pyEnsSum will let you know if you have not listed variables that need to be excluded (see next note). Keep in mind that you must have *fewer* variables included than ensemble members. -8. *IMPORTANT:* If there are variables that need to be excluded (that are not in +8. *IMPORTANT:* If there are variables that need to be excluded (that are not in the .json file already), pyEnsSum will exit early and provide a list of the variables to exclude in the output. These should be added to your exclude variable list (or removed from an include list), and then pyEnsSum can - be re-run. Note that additional problematic variables may be found by - pyEnsSum as variables are detected in three stages. (First any variables that - are constant across the ensemble are identified. Once these are removed, + be re-run. Note that additional problematic variables may be found by + pyEnsSum as variables are detected in three stages. (First any variables that + are constant across the ensemble are identified. Once these are removed, linearly dependent variables are identified for removal. Finally, variables that are not constant but have very few unique values are identified.) @@ -132,8 +132,8 @@ Example: -------------------------------------- (Note: This example is in test_pyEnsSum.sh) -*To generate a summary file for 350 UF-CAM-ECT simulations runs (time step nine):* - +*To generate a summary file for 350 UF-CAM-ECT simulation runs (time step nine):* + * We specify the size (this is optional since 350 is the default) and data location: ``--esize 350`` @@ -142,20 +142,20 @@ Example: * We also specify the name of the file to create for the summary: - ``--sumfile uf.ens.c1.2.2.1_fc5.ne30.nc`` + ``--sumfile uf.ens.c1.2.2.1_fc5.ne30.nc`` * Since the ensemble files contain the initial conditions as well as the values at time step 9 (this is optional as 1 is the default), we set: - ``--tslice 1`` - + ``--tslice 1`` + * We also specify the CESM tag, compset, resolution, and machine of our ensemble data so that it can be written to the metadata of the summary file: - ``--tag cesm1.2.2.1 --compset FC5 --res ne30_ne30 --mach cheyenne`` + ``--tag cesm1.2.2.1 --compset FC5 --res ne30_ne30 --mach cheyenne`` * We can exclude or include some variables from the analysis by specifying them in a json file: ``--jsonfile excluded_varlist.json`` * This yields the following command for your job submission script: - + ``python pyEnsSum.py --esize 350 --indir /glade/p/cisl/asap/pycect_sample_data/cam_c1.2.2.1/uf_cam_ens_files --sumfile uf.ens.c1.2.2.1_fc5.ne30.nc --tslice 1 --tag cesm1.2.2.1 --compset FC5 --res ne30_ne30 --jsonfile excluded_varlist.json`` diff --git a/tools/statistical_ensemble_test/pyCECT/docs/source/pyEnsSumPop.rst b/tools/statistical_ensemble_test/pyCECT/docs/source/pyEnsSumPop.rst index d9ed78486c6..f4f46dea24c 100644 --- a/tools/statistical_ensemble_test/pyCECT/docs/source/pyEnsSumPop.rst +++ b/tools/statistical_ensemble_test/pyCECT/docs/source/pyEnsSumPop.rst @@ -3,27 +3,27 @@ pyEnsSumPop 
================== The verification tools in the CESM-ECT suite all require an *ensemble -summary file*, which contains statistics describing the ensemble distribution. -pyEnsSumPop can be used to create a POP (ocean component) ensemble summary file. +summary file*, which contains statistics describing the ensemble distribution. +pyEnsSumPop can be used to create a POP (ocean component) ensemble summary file. -Note that an ensemble summary files for existing CESM tags for POP-ECT +Note that ensemble summary files for existing CESM tags for POP-ECT that were created by CSEG (CESM Software Engineering Group) are located in the CESM input data directory: $CESMDATAROOT/inputdata/validation/pop_ensembles Alternatively, pyEnsSumPop can be used to create a summary file for POP-ECT -given the location of appropriate ensemble history files (which should +given the location of appropriate ensemble history files (which should be generated in CIME via $CIME/tools/statistical_ensemble_test/ensemble.py). -(Note: to generate a summary file for UF-CAM-ECT or CAM-ECT, you must use +(Note: to generate a summary file for UF-CAM-ECT or CAM-ECT, you must use pyEnsSum.py, which has its own corresponding instructions.) -To use pyEnsSumPop: +To use pyEnsSumPop: -------------------------- - + *Note: compatible with Python 3* 1. On NCAR's Cheyenne machine: @@ -54,15 +54,15 @@ To use pyEnsSumPop: * glob * itertools * datetime - + 3. To see all options (and defaults): ``python pyEnsSumPop.py -h``:: - Creates the summary file for an ensemble of POP data. + Creates the summary file for an ensemble of POP data. - Args for pyEnsSumPop : + Args for pyEnsSumPop : pyEnsSumPop.py -h : prints out this usage message @@ -80,13 +80,13 @@ To use pyEnsSumPop: --jsonfile : Json file that provides a list of variables that will be included (RECOMMENDED: default = pop_ensemble.json) --mpi_disable : Disable mpi mode to run in serial (off by default) - + Notes: ---------------- -1. POP-ECT uses monthly average files. Therefore, one typically needs +1. POP-ECT uses monthly average files. Therefore, one typically needs to set ``--tslice 0`` (which is the default). 2. Note that ``--res``, ``--tag``, ``--compset``, and ``--mach`` only affect the @@ -94,10 +94,10 @@ Notes: 3. The sample script test_pyEnsSumPop.sh gives a recommended parallel configuration for Cheyenne. We recommend one core per month (and make - sure each core has sufficient memory). + sure each core has sufficient memory). -4. The json file indicates variables from the output files that you want - to include in the summary files statistics. We recommend using the +4. The json file indicates variables from the output files that you want + to include in the summary file statistics. We recommend using the default pop_ensemble.json, which contains only 5 variables. 
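For orientation, the 'ESP' branch of pyEnsLib.read_jsonlist (seen later in this diff) reads the include list from "Var3d" and "Var2d" keys, so a custom include file can be sketched as below; the variable names here are placeholders only, and the shipped pop_ensemble.json should normally be preferred:

    import json

    # Hypothetical include list for POP-ECT; the keys match what
    # pyEnsLib.read_jsonlist(..., "ESP") expects. Variable names are
    # placeholders -- use the pop_ensemble.json shipped with pyCECT
    # for the recommended five variables.
    include = {"Var3d": ["TEMP", "SALT", "UVEL"], "Var2d": ["SSH"]}
    with open("my_pop_vars.json", "w") as f:
        json.dump(include, f, indent=2)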
@@ -106,12 +106,12 @@ Example: ---------------------------------------- (Note: this example is in test_pyEnsSumPop.sh) -*To generate a summary file for 40 POP-ECT simulations runs (1 year of monthly output):* - +*To generate a summary file for 40 POP-ECT simulation runs (1 year of monthly output):* + * We specify the size (this is optional since 40 is the default) and data location: ``--esize 40`` - + ``--indir /glade/p/cisl/iowa/pop_verification/cesm2_0_beta10/ensembles`` * We also specify the name of the file to create for the summary: @@ -127,7 +127,7 @@ Example: ``--nyear 1`` ``--nmonth 12`` - + * We can also specify the tag, resolution, machine and compset information (that will be written to the metadata of the summary file): @@ -142,7 +142,7 @@ Example: * We include a recommended subset of variables (5) for the analysis by specifying them in a json file (optional, as this is the default): - + ``--jsonfile pop_ensemble.json`` * This yields the following command for your job submission script: diff --git a/tools/statistical_ensemble_test/pyCECT/docs/source/readme.rst b/tools/statistical_ensemble_test/pyCECT/docs/source/readme.rst index b524d5ce685..cf2936769cd 100644 --- a/tools/statistical_ensemble_test/pyCECT/docs/source/readme.rst +++ b/tools/statistical_ensemble_test/pyCECT/docs/source/readme.rst @@ -24,14 +24,14 @@ CESM/CIME notes: https://github.com/ESMCI/cime 2. Creating the ensemble summaries (via pyEnsSum and pyEnsSumPop) is - typically done by the CESM software developers. See: + typically done by the CESM software developers. See: http://www.cesm.ucar.edu/models/cesm2/python-tools/ 3. A web-based interface to this tool is available here: http://www.cesm.ucar.edu/models/cesm2/verification/ - + Relevant publications: ---------------------- diff --git a/tools/statistical_ensemble_test/pyCECT/pyCECT.py b/tools/statistical_ensemble_test/pyCECT/pyCECT.py index 9d4a2cbaa20..ce6e72164b3 100755 --- a/tools/statistical_ensemble_test/pyCECT/pyCECT.py +++ b/tools/statistical_ensemble_test/pyCECT/pyCECT.py @@ -1,6 +1,6 @@ #! /usr/bin/env python from __future__ import print_function -import sys,getopt,os +import sys, getopt, os import numpy as np import netCDF4 as nc import time @@ -11,331 +11,422 @@ import re from datetime import datetime from asaptools.partition import EqualStride, Duplicate -import asaptools.simplecomm as simplecomm +import asaptools.simplecomm as simplecomm -#This routine compares the results of several (default=3) new CAM tests -#or a POP test against the accepted ensemble (generated by pyEnsSum or -#pyEnsSumPop). +# This routine compares the results of several (default=3) new CAM tests +# or a POP test against the accepted ensemble (generated by pyEnsSum or +# pyEnsSumPop). 
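main() below keeps its long-option names in a whitespace-separated string, where a trailing '=' marks options that take a value, and hands them to getopt. A minimal, self-contained sketch of that pattern (using a made-up subset of the real option keys):

    import getopt
    import sys

    # Sketch of the parsing pattern used in main(): split a key string into
    # long-option names and pass them to getopt along with the short options.
    optkeys = "verbose sumfile= indir= tslice=".split()
    try:
        opts, args = getopt.getopt(["--sumfile", "sum.nc", "--verbose"], "h", optkeys)
    except getopt.GetoptError as err:
        sys.exit("usage error: {0}".format(err))
    print(opts)  # [('--sumfile', 'sum.nc'), ('--verbose', '')]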
def main(argv): - # Get command line stuff and store in a dictionary - s="""verbose sumfile= indir= input_globs= tslice= nPC= sigMul= - minPCFail= minRunFail= numRunFile= printVars popens - jsonfile= mpi_enable nbin= minrange= maxrange= outfile= + s = """verbose sumfile= indir= input_globs= tslice= nPC= sigMul= + minPCFail= minRunFail= numRunFile= printVars popens + jsonfile= mpi_enable nbin= minrange= maxrange= outfile= casejson= npick= pepsi_gm pop_tol= web_enabled pop_threshold= printStdMean fIndex= lev= eet= saveResults json_case= """ optkeys = s.split() try: - opts, args = getopt.getopt(argv,"h",optkeys) + opts, args = getopt.getopt(argv, "h", optkeys) except getopt.GetoptError: pyEnsLib.CECT_usage() sys.exit(2) - - + # Set the default value for options opts_dict = {} - opts_dict['input_globs'] = '' - opts_dict['indir'] = '' - opts_dict['tslice'] = 1 - opts_dict['nPC'] = 50 - opts_dict['sigMul'] = 2 - opts_dict['verbose'] = False - opts_dict['minPCFail'] = 3 - opts_dict['minRunFail'] = 2 - opts_dict['numRunFile'] = 3 - opts_dict['printVars'] = False - opts_dict['popens'] = False - opts_dict['jsonfile'] = '' - opts_dict['mpi_enable'] = False - opts_dict['nbin'] = 40 - opts_dict['minrange'] = 0.0 - opts_dict['maxrange'] = 4.0 - opts_dict['outfile'] = 'testcase.result' - opts_dict['casejson'] = '' - opts_dict['npick'] = 10 - opts_dict['pepsi_gm'] = False - opts_dict['test_failure'] = True - opts_dict['pop_tol'] = 3.0 - opts_dict['pop_threshold'] = 0.90 - opts_dict['printStdMean'] = False - opts_dict['lev'] = 0 - opts_dict['eet'] = 0 - opts_dict['json_case'] = '' - opts_dict['sumfile'] = '' - opts_dict['web_enabled'] = False - opts_dict['saveResults']= False + opts_dict["input_globs"] = "" + opts_dict["indir"] = "" + opts_dict["tslice"] = 1 + opts_dict["nPC"] = 50 + opts_dict["sigMul"] = 2 + opts_dict["verbose"] = False + opts_dict["minPCFail"] = 3 + opts_dict["minRunFail"] = 2 + opts_dict["numRunFile"] = 3 + opts_dict["printVars"] = False + opts_dict["popens"] = False + opts_dict["jsonfile"] = "" + opts_dict["mpi_enable"] = False + opts_dict["nbin"] = 40 + opts_dict["minrange"] = 0.0 + opts_dict["maxrange"] = 4.0 + opts_dict["outfile"] = "testcase.result" + opts_dict["casejson"] = "" + opts_dict["npick"] = 10 + opts_dict["pepsi_gm"] = False + opts_dict["test_failure"] = True + opts_dict["pop_tol"] = 3.0 + opts_dict["pop_threshold"] = 0.90 + opts_dict["printStdMean"] = False + opts_dict["lev"] = 0 + opts_dict["eet"] = 0 + opts_dict["json_case"] = "" + opts_dict["sumfile"] = "" + opts_dict["web_enabled"] = False + opts_dict["saveResults"] = False # Call utility library getopt_parseconfig to parse the option keys # and save to the dictionary - caller = 'CECT' - opts_dict = pyEnsLib.getopt_parseconfig(opts,optkeys,caller,opts_dict) - popens = opts_dict['popens'] + caller = "CECT" + opts_dict = pyEnsLib.getopt_parseconfig(opts, optkeys, caller, opts_dict) + popens = opts_dict["popens"] - #some mods for POP-ECT + # some mods for POP-ECT if popens == True: - opts_dict['tslice'] = 0 - opts_dict['numRunFile']= 1 - opts_dict['eet'] = 0 - opts_dict['mpi_enable'] = False - + opts_dict["tslice"] = 0 + opts_dict["numRunFile"] = 1 + opts_dict["eet"] = 0 + opts_dict["mpi_enable"] = False # Create a mpi simplecomm object - if opts_dict['mpi_enable']: - me=simplecomm.create_comm() + if opts_dict["mpi_enable"]: + me = simplecomm.create_comm() else: - me=simplecomm.create_comm(not opts_dict['mpi_enable']) + me = simplecomm.create_comm(not opts_dict["mpi_enable"]) # Print out timestamp, input ensemble file 
and new run directory - dt=datetime.now() - verbose = opts_dict['verbose'] - if me.get_rank()==0: - print(' ') - print('--------pyCECT--------') - print(' ') + dt = datetime.now() + verbose = opts_dict["verbose"] + if me.get_rank() == 0: + print(" ") + print("--------pyCECT--------") + print(" ") print(dt.strftime("%A, %d. %B %Y %I:%M%p")) - print(' ') - if not opts_dict['web_enabled']: - print('Ensemble summary file = '+opts_dict['sumfile']) - print(' ') - print('Testcase file directory = '+opts_dict['indir']) - print(' ') - print(' ') - - #make sure these are valid - if opts_dict['web_enabled'] == False and os.path.isfile(opts_dict['sumfile']) == False: + print(" ") + if not opts_dict["web_enabled"]: + print("Ensemble summary file = " + opts_dict["sumfile"]) + print(" ") + print("Testcase file directory = " + opts_dict["indir"]) + print(" ") + print(" ") + + # make sure these are valid + if ( + opts_dict["web_enabled"] == False + and os.path.isfile(opts_dict["sumfile"]) == False + ): print("ERROR: Summary file name is not valid.") sys.exit() - if os.path.exists(opts_dict['indir']) == False: + if os.path.exists(opts_dict["indir"]) == False: print("ERROR: --indir path is not valid.") sys.exit() # Ensure sensible EET value - if opts_dict['eet'] and opts_dict['numRunFile'] > opts_dict['eet']: + if opts_dict["eet"] and opts_dict["numRunFile"] > opts_dict["eet"]: pyEnsLib.CECT_usage() sys.exit(2) - - ifiles=[] - in_files=[] + ifiles = [] + in_files = [] # Random pick pop files from not_pick_files list - if opts_dict['casejson']: - with open(opts_dict['casejson']) as fin: - result=json.load(fin) - in_files_first=result['not_pick_files'] - in_files=random.sample(in_files_first,opts_dict['npick']) - print('Testcase files:') - print('\n'.join(in_files)) - - elif opts_dict['json_case']: - json_file=opts_dict['json_case'] - if (os.path.exists(json_file)): - fd=open(json_file) - metainfo=json.load(fd) - if 'CaseName' in metainfo: - casename=metainfo['CaseName'] - if (os.path.exists(opts_dict['indir'])): - for name in casename: - wildname='*.'+name+'.*' - full_glob_str=os.path.join(opts_dict['indir'],wildname) - glob_file=glob.glob(full_glob_str) - in_files.extend(glob_file) - else: - print("ERROR: "+opts_dict['json_case']+" does not exist.") - sys.exit() - print("in_files=",in_files) - else: - wildname='*'+str(opts_dict['input_globs'])+'*' - # Open all input files - if (os.path.exists(opts_dict['indir'])): - full_glob_str=os.path.join(opts_dict['indir'],wildname) - glob_files=glob.glob(full_glob_str) - in_files.extend(glob_files) - num_file=len(in_files) - if num_file == 0: - print("ERROR: no matching files for wildcard=" + wildname+ " found in specified --indir") - sys.exit() - else: - print("Found " + str(num_file) + " matching files in specified --indir") - if opts_dict['numRunFile'] > num_file: - print("ERROR: more files needed (" + str(opts_dict['numRunFile']) +") than available in the indir (" + str(num_file) +").") - sys.exit() + if opts_dict["casejson"]: + with open(opts_dict["casejson"]) as fin: + result = json.load(fin) + in_files_first = result["not_pick_files"] + in_files = random.sample(in_files_first, opts_dict["npick"]) + print("Testcase files:") + print("\n".join(in_files)) + + elif opts_dict["json_case"]: + json_file = opts_dict["json_case"] + if os.path.exists(json_file): + fd = open(json_file) + metainfo = json.load(fd) + if "CaseName" in metainfo: + casename = metainfo["CaseName"] + if os.path.exists(opts_dict["indir"]): + for name in casename: + wildname = "*." 
+ name + ".*" + full_glob_str = os.path.join(opts_dict["indir"], wildname) + glob_file = glob.glob(full_glob_str) + in_files.extend(glob_file) + else: + print("ERROR: " + opts_dict["json_case"] + " does not exist.") + sys.exit() + print("in_files=", in_files) + else: + wildname = "*" + str(opts_dict["input_globs"]) + "*" + # Open all input files + if os.path.exists(opts_dict["indir"]): + full_glob_str = os.path.join(opts_dict["indir"], wildname) + glob_files = glob.glob(full_glob_str) + in_files.extend(glob_files) + num_file = len(in_files) + if num_file == 0: + print( + "ERROR: no matching files for wildcard=" + + wildname + + " found in specified --indir" + ) + sys.exit() + else: + print("Found " + str(num_file) + " matching files in specified --indir") + if opts_dict["numRunFile"] > num_file: + print( + "ERROR: more files needed (" + + str(opts_dict["numRunFile"]) + + ") than available in the indir (" + + str(num_file) + + ")." + ) + sys.exit() in_files.sort() - #print in_files + # print in_files if popens: - #Partition the input file list - in_files_list=me.partition(in_files,func=EqualStride(),involved=True) + # Partition the input file list + in_files_list = me.partition(in_files, func=EqualStride(), involved=True) else: # Random pick cam files - in_files_list=pyEnsLib.Random_pickup(in_files,opts_dict) + in_files_list = pyEnsLib.Random_pickup(in_files, opts_dict) for frun_file in in_files_list: - if frun_file.find(opts_dict['indir']) != -1: - frun_temp=frun_file - else: - frun_temp=opts_dict['indir']+'/'+frun_file - if (os.path.isfile(frun_temp)): - ifiles.append(frun_temp) - else: - print("ERROR: COULD NOT LOCATE FILE " +frun_temp) - sys.exit() - - if opts_dict['web_enabled']: - if len(opts_dict['sumfile'])==0: - opts_dict['sumfile']='/glade/p/cesmdata/cseg/inputdata/validation/' - #need to open ifiles - - - opts_dict['sumfile'],machineid,compiler=pyEnsLib.search_sumfile(opts_dict,ifiles) - if len(machineid)!=0 and len(compiler)!=0: - print(' ') - print('Validation file : machineid = '+machineid+', compiler = '+compiler) - print('Found summary file : '+opts_dict['sumfile']) - print(' ') - else: - print('Warning: machine and compiler are unknown') - + if frun_file.find(opts_dict["indir"]) != -1: + frun_temp = frun_file + else: + frun_temp = opts_dict["indir"] + "/" + frun_file + if os.path.isfile(frun_temp): + ifiles.append(frun_temp) + else: + print("ERROR: COULD NOT LOCATE FILE " + frun_temp) + sys.exit() + + if opts_dict["web_enabled"]: + if len(opts_dict["sumfile"]) == 0: + opts_dict["sumfile"] = "/glade/p/cesmdata/cseg/inputdata/validation/" + # need to open ifiles + + opts_dict["sumfile"], machineid, compiler = pyEnsLib.search_sumfile( + opts_dict, ifiles + ) + if len(machineid) != 0 and len(compiler) != 0: + print(" ") + print( + "Validation file : machineid = " + + machineid + + ", compiler = " + + compiler + ) + print("Found summary file : " + opts_dict["sumfile"]) + print(" ") + else: + print("Warning: machine and compiler are unknown") + if popens: - + # Read in the included var list - if not os.path.exists(opts_dict['jsonfile']): - print("ERROR: POP-ECT requires the specification of a valid json file via --jsonfile.") + if not os.path.exists(opts_dict["jsonfile"]): + print( + "ERROR: POP-ECT requires the specification of a valid json file via --jsonfile." 
+ ) sys.exit() - Var2d,Var3d=pyEnsLib.read_jsonlist(opts_dict['jsonfile'],'ESP') - print(' ') - print('Z-score tolerance = '+'{:3.2f}'.format(opts_dict['pop_tol'])) - print('ZPR = '+'{:.2%}'.format(opts_dict['pop_threshold'])) - zmall,n_timeslice=pyEnsLib.pop_compare_raw_score(opts_dict,ifiles,me.get_rank(),Var3d,Var2d ) + Var2d, Var3d = pyEnsLib.read_jsonlist(opts_dict["jsonfile"], "ESP") + print(" ") + print("Z-score tolerance = " + "{:3.2f}".format(opts_dict["pop_tol"])) + print("ZPR = " + "{:.2%}".format(opts_dict["pop_threshold"])) + zmall, n_timeslice = pyEnsLib.pop_compare_raw_score( + opts_dict, ifiles, me.get_rank(), Var3d, Var2d + ) np.set_printoptions(threshold=sys.maxsize) - if opts_dict['mpi_enable']: - zmall = pyEnsLib.gather_npArray_pop(zmall,me,(me.get_size(),len(Var3d)+len(Var2d),len(ifiles),opts_dict['nbin'])) - if me.get_rank()==0: - fout = open(opts_dict['outfile'],"w") + if opts_dict["mpi_enable"]: + zmall = pyEnsLib.gather_npArray_pop( + zmall, + me, + ( + me.get_size(), + len(Var3d) + len(Var2d), + len(ifiles), + opts_dict["nbin"], + ), + ) + if me.get_rank() == 0: + fout = open(opts_dict["outfile"], "w") for i in range(me.get_size()): for j in zmall[i]: - np.savetxt(fout,j,fmt='%-7.2e') - #cam + np.savetxt(fout, j, fmt="%-7.2e") + # cam else: # Read all variables from the ensemble summary file - ens_var_name,ens_avg,ens_stddev,ens_rmsz,ens_gm,num_3d,mu_gm,sigma_gm,loadings_gm,sigma_scores_gm,is_SE_sum,std_gm, std_gm_array, str_size=pyEnsLib.read_ensemble_summary(opts_dict['sumfile']) - - #Only doing gm + ( + ens_var_name, + ens_avg, + ens_stddev, + ens_rmsz, + ens_gm, + num_3d, + mu_gm, + sigma_gm, + loadings_gm, + sigma_scores_gm, + is_SE_sum, + std_gm, + std_gm_array, + str_size, + ) = pyEnsLib.read_ensemble_summary(opts_dict["sumfile"]) + + # Only doing gm # Add ensemble rmsz and global mean to the dictionary "variables" - variables={} + variables = {} - for k,v in ens_gm.items(): - pyEnsLib.addvariables(variables,k,'gmRange',v) + for k, v in ens_gm.items(): + pyEnsLib.addvariables(variables, k, "gmRange", v) # Get 3d variable name list and 2d variable name list separately - var_name3d=[] - var_name2d=[] - for vcount,v in enumerate(ens_var_name): - if vcount < num_3d: - var_name3d.append(v) - else: - var_name2d.append(v) + var_name3d = [] + var_name2d = [] + for vcount, v in enumerate(ens_var_name): + if vcount < num_3d: + var_name3d.append(v) + else: + var_name2d.append(v) # Get ncol and nlev value - npts3d,npts2d,is_SE=pyEnsLib.get_ncol_nlev(ifiles[0]) - - if (is_SE ^ is_SE_sum): - print('Warning: please note the ensemble summary file is different from the testing files: they use different grids') - - # Compare the new run and the ensemble summary file - results={} - countgm=np.zeros(len(ifiles),dtype=np.int32) + npts3d, npts2d, is_SE = pyEnsLib.get_ncol_nlev(ifiles[0]) + + if is_SE ^ is_SE_sum: + print( + "Warning: please note the ensemble summary file is different from the testing files: they use different grids" + ) + + # Compare the new run and the ensemble summary file + results = {} + countgm = np.zeros(len(ifiles), dtype=np.int32) # Calculate the new run global mean - mean3d,mean2d,varlist=pyEnsLib.generate_global_mean_for_summary(ifiles,var_name3d,var_name2d,is_SE,opts_dict['pepsi_gm'],opts_dict) - means=np.concatenate((mean3d,mean2d),axis=0) + mean3d, mean2d, varlist = pyEnsLib.generate_global_mean_for_summary( + ifiles, var_name3d, var_name2d, is_SE, opts_dict["pepsi_gm"], opts_dict + ) + means = np.concatenate((mean3d, mean2d), axis=0) # Add the new 
run global mean to the dictionary "results" for i in range(means.shape[1]): for j in range(means.shape[0]): - pyEnsLib.addresults(results,'means',means[j][i],ens_var_name[j],'f'+str(i)) + pyEnsLib.addresults( + results, "means", means[j][i], ens_var_name[j], "f" + str(i) + ) # Evaluate the new run global mean if it is in the range of the ensemble summary global mean range - for fcount,fid in enumerate(ifiles): - countgm[fcount]=pyEnsLib.evaluatestatus('means','gmRange',variables,'gm',results,'f'+str(fcount)) - + for fcount, fid in enumerate(ifiles): + countgm[fcount] = pyEnsLib.evaluatestatus( + "means", "gmRange", variables, "gm", results, "f" + str(fcount) + ) + # Calculate the PCA scores of the new run - new_scores,var_list,comp_std_gm=pyEnsLib.standardized(means,mu_gm,sigma_gm,loadings_gm,ens_var_name,opts_dict,ens_avg,me) - run_index,decision=pyEnsLib.comparePCAscores(ifiles,new_scores,sigma_scores_gm,opts_dict,me) + new_scores, var_list, comp_std_gm = pyEnsLib.standardized( + means, mu_gm, sigma_gm, loadings_gm, ens_var_name, opts_dict, ens_avg, me + ) + run_index, decision = pyEnsLib.comparePCAscores( + ifiles, new_scores, sigma_scores_gm, opts_dict, me + ) # If there is failure, plot out standardized mean and compared standardized mean in box plots -# if opts_dict['printStdMean'] and decision == 'FAILED': - if opts_dict['printStdMean']: + # if opts_dict['printStdMean'] and decision == 'FAILED': + if opts_dict["printStdMean"]: import seaborn as sns import matplotlib - matplotlib.use('Agg') #don't display figures + + matplotlib.use("Agg") # don't display figures import matplotlib.pyplot as plt print(" ") - print('***************************************************************************** ') - print('Test run variable standardized means (for reference only - not used to determine pass/fail)') - print('***************************************************************************** ') + print( + "***************************************************************************** " + ) + print( + "Test run variable standardized means (for reference only - not used to determine pass/fail)" + ) + print( + "***************************************************************************** " + ) print(" ") - category={"all_outside99":[],"two_outside99":[],"one_outside99":[],"all_oneside_outside1QR":[]} - b=list(pyEnsLib.chunk(ens_var_name,10)) - for f,alist in enumerate(b): - for fc,avar in enumerate(alist): - dist_995=np.percentile(std_gm[avar],99.5) - dist_75=np.percentile(std_gm[avar],75) - dist_25=np.percentile(std_gm[avar],25) - dist_05=np.percentile(std_gm[avar],0.5) - c=0 - d=0 - p=0 - q=0 - for i in range(comp_std_gm[f+fc].size): - if comp_std_gm[f+fc][i]>dist_995: - c=c+1 - elif comp_std_gm[f+fc][i]<dist_05: - d=d+1 - elif (comp_std_gm[f+fc][i]<dist_995 and comp_std_gm[f+fc][i]>dist_75): - p=p+1 - elif (comp_std_gm[f+fc][i]>dist_05 and comp_std_gm[f+fc][i]<dist_25): - q=q+1 + category = { + "all_outside99": [], + "two_outside99": [], + "one_outside99": [], + "all_oneside_outside1QR": [], + } + b = list(pyEnsLib.chunk(ens_var_name, 10)) + for f, alist in enumerate(b): + for fc, avar in enumerate(alist): + dist_995 = np.percentile(std_gm[avar], 99.5) + dist_75 = np.percentile(std_gm[avar], 75) + dist_25 = np.percentile(std_gm[avar], 25) + dist_05 = np.percentile(std_gm[avar], 0.5) + c = 0 + d = 0 + p = 0 + q = 0 + for i in range(comp_std_gm[f + fc].size): + if comp_std_gm[f + fc][i] > dist_995: + c = c + 1 + elif comp_std_gm[f + fc][i] < dist_05: + d = d + 1 + elif ( + comp_std_gm[f + fc][i] < dist_995 + and comp_std_gm[f + fc][i] > dist_75 + ): + p = p + 1 + elif ( + comp_std_gm[f + fc][i] > dist_05 + and comp_std_gm[f + fc][i] < dist_25 + ): + q = q + 1 if c == 3 or d == 3: - category["all_outside99"].append((avar,f+fc)) - elif c == 2 or d == 2: - category["two_outside99"].append((avar,f+fc)) + category["all_outside99"].append((avar, f + fc)) + elif c == 2 or d == 2: + category["two_outside99"].append((avar, f + fc)) elif c == 1 or d == 1: - category["one_outside99"].append((avar,f+fc)) + category["one_outside99"].append((avar, f + fc)) if p == 3 or q == 3: - 
category["all_oneside_outside1QR"].append((avar,f+fc)) - part_name=opts_dict['indir'].split('/')[-1] + category["all_oneside_outside1QR"].append((avar, f + fc)) + part_name = opts_dict["indir"].split("/")[-1] if not part_name: - part_name=opts_dict['indir'].split('/')[-2] + part_name = opts_dict["indir"].split("/")[-2] for key in sorted(category): - list_array=[] - list_array2=[] - list_var=[] - value=category[key] - - if key=="all_outside99": - print("*** ", len(value), " variables have 3 test run global means outside of the 99th percentile.") + list_array = [] + list_array2 = [] + list_var = [] + value = category[key] + + if key == "all_outside99": + print( + "*** ", + len(value), + " variables have 3 test run global means outside of the 99th percentile.", + ) elif key == "two_outside99": - print("*** ", len(value), " variables have 2 test run global means outside of the 99th percentile.") + print( + "*** ", + len(value), + " variables have 2 test run global means outside of the 99th percentile.", + ) elif key == "one_outside99": - print("*** ", len(value), " variables have 1 test run global mean outside of the 99th percentile.") + print( + "*** ", + len(value), + " variables have 1 test run global mean outside of the 99th percentile.", + ) elif key == "all_oneside_outside1QR": - print("*** ", len(value), " variables have all test run global means outside of the first quartile (but not outside the 99th percentile).") + print( + "*** ", + len(value), + " variables have all test run global means outside of the first quartile (but not outside the 99th percentile).", + ) if len(value) > 0: print(" => generating plot ...") if len(value) > 20: - print(" NOTE: truncating to only plot the first 20 variables.") + print( + " NOTE: truncating to only plot the first 20 variables." 
+ ) value = value[0:20] for each_var in value: @@ -347,82 +438,107 @@ def main(argv): list_var.append(name) - if len(value) !=0 : - ax=sns.boxplot(data=list_array,whis=[0.5,99.5],fliersize=0.0) - sns.stripplot(data=list_array2,jitter=True,color="r") - plt.xticks(list(range(len(list_array))),list_var,fontsize=8,rotation=-45) - - if decision == 'FAILED': - plt.savefig(part_name+"_"+key+"_fail.png") + if len(value) != 0: + ax = sns.boxplot(data=list_array, whis=[0.5, 99.5], fliersize=0.0) + sns.stripplot(data=list_array2, jitter=True, color="r") + plt.xticks( + list(range(len(list_array))), list_var, fontsize=8, rotation=-45 + ) + + if decision == "FAILED": + plt.savefig(part_name + "_" + key + "_fail.png") else: - plt.savefig(part_name+"_"+key+"_pass.png") + plt.savefig(part_name + "_" + key + "_pass.png") plt.close() - -## -# Print file with info about new test runs....to a netcdf file -## - if opts_dict['saveResults']: + + ## + # Print file with info about new test runs....to a netcdf file + ## + if opts_dict["saveResults"]: num_vars = comp_std_gm.shape[0] tsize = comp_std_gm.shape[1] esize = std_gm_array.shape[1] - this_savefile ='savefile.nc' - if (verbose == True): + this_savefile = "savefile.nc" + if verbose == True: print("VERBOSE: Creating ", this_savefile, " ...") if os.path.exists(this_savefile): os.unlink(this_savefile) nc_savefile = nc.Dataset(this_savefile, "w", format="NETCDF4_CLASSIC") - nc_savefile.createDimension('ens_size', esize) - nc_savefile.createDimension('test_size', tsize) - nc_savefile.createDimension('nvars', num_vars) - nc_savefile.createDimension('str_size', str_size) + nc_savefile.createDimension("ens_size", esize) + nc_savefile.createDimension("test_size", tsize) + nc_savefile.createDimension("nvars", num_vars) + nc_savefile.createDimension("str_size", str_size) # Set global attributes now = time.strftime("%c") nc_savefile.creation_date = now - nc_savefile.title = 'PyCECT compare results file' - nc_savefile.summaryfile = opts_dict['sumfile'] - #nc_savefile.testfiles = in_files - - #variables - v_vars = nc_savefile.createVariable("vars", 'S1', ('nvars', 'str_size')) - v_std_gm=nc_savefile.createVariable("std_gm",'f8',('nvars','test_size')) - v_scores=nc_savefile.createVariable("scores",'f8',('nvars','test_size')) - v_ens_sigma_scores = nc_savefile.createVariable('ens_sigma_scores','f8',('nvars',)) - v_ens_std_gm=nc_savefile.createVariable("ens_std_gm",'f8',('nvars','ens_size')) - - #hard-coded size - str_out = nc.stringtochar(np.array(ens_var_name, 'S10')) + nc_savefile.title = "PyCECT compare results file" + nc_savefile.summaryfile = opts_dict["sumfile"] + # nc_savefile.testfiles = in_files + + # variables + v_vars = nc_savefile.createVariable("vars", "S1", ("nvars", "str_size")) + v_std_gm = nc_savefile.createVariable( + "std_gm", "f8", ("nvars", "test_size") + ) + v_scores = nc_savefile.createVariable( + "scores", "f8", ("nvars", "test_size") + ) + v_ens_sigma_scores = nc_savefile.createVariable( + "ens_sigma_scores", "f8", ("nvars",) + ) + v_ens_std_gm = nc_savefile.createVariable( + "ens_std_gm", "f8", ("nvars", "ens_size") + ) + + # hard-coded size + str_out = nc.stringtochar(np.array(ens_var_name, "S10")) v_vars[:] = str_out - v_std_gm[:,:] = comp_std_gm[:,:] - v_scores[:,:] = new_scores[:,:] + v_std_gm[:, :] = comp_std_gm[:, :] + v_scores[:, :] = new_scores[:, :] v_ens_sigma_scores[:] = sigma_scores_gm[:] - v_ens_std_gm[:,:] = std_gm_array[:,:] - + v_ens_std_gm[:, :] = std_gm_array[:, :] + nc_savefile.close() # Print variables (optional) - if 
opts_dict['printVars']: + if opts_dict["printVars"]: print(" ") - print('***************************************************************************** ') - print('Variable global mean information (for reference only - not used to determine pass/fail)') - print('***************************************************************************** ') - for fcount,fid in enumerate(ifiles): - print(' ') - print('Run '+str(fcount+1)+":") - print(' ') - print('***'+str(countgm[fcount])," of "+str(len(ens_var_name))+' variables are outside of ensemble global mean distribution***') - pyEnsLib.printsummary(results,'gm','means','gmRange',fcount,variables,'global mean') - print(' ') - print('----------------------------------------------------------------------------') - + print( + "***************************************************************************** " + ) + print( + "Variable global mean information (for reference only - not used to determine pass/fail)" + ) + print( + "***************************************************************************** " + ) + for fcount, fid in enumerate(ifiles): + print(" ") + print("Run " + str(fcount + 1) + ":") + print(" ") + print( + "***" + str(countgm[fcount]), + " of " + + str(len(ens_var_name)) + + " variables are outside of ensemble global mean distribution***", + ) + pyEnsLib.printsummary( + results, "gm", "means", "gmRange", fcount, variables, "global mean" + ) + print(" ") + print( + "----------------------------------------------------------------------------" + ) if me.get_rank() == 0: - print(' ') + print(" ") print("Testing complete.") - print(' ') + print(" ") + if __name__ == "__main__": main(sys.argv[1:]) diff --git a/tools/statistical_ensemble_test/pyCECT/pyEnsLib.py b/tools/statistical_ensemble_test/pyCECT/pyEnsLib.py index a452d844ffd..688e42e6a6c 100644 --- a/tools/statistical_ensemble_test/pyCECT/pyEnsLib.py +++ b/tools/statistical_ensemble_test/pyCECT/pyEnsLib.py @@ -1,45 +1,47 @@ #!/usr/bin/env python from __future__ import print_function import configparser -import sys, getopt, os -import numpy as np +import sys, getopt, os +import numpy as np import netCDF4 as nc import time import re import json import random -import asaptools.simplecomm as simplecomm -from asaptools.partition import Duplicate +import asaptools.simplecomm as simplecomm +from asaptools.partition import Duplicate import fnmatch import glob import itertools from itertools import islice from EET import exhaustive_test from scipy import linalg as sla -#import pdb + +# import pdb # # Parse header file of a netcdf to get the variable 3d/2d/1d list # def parse_header_file(filename): - command ='ncdump -h ' + filename + command = "ncdump -h " + filename print(command) - - retvalue=(os.popen(command).readline()) - print(retvalue) + + retvalue = os.popen(command).readline() + print(retvalue) + # -# Create RMSZ zscores for ensemble file sets -# o_files are not open -# this is used for POP +# Create RMSZ zscores for ensemble file sets +# o_files are not open +# this is used for POP def calc_rmsz(o_files, var_name3d, var_name2d, opts_dict): - threshold=1e-12 - popens = opts_dict['popens'] - tslice = opts_dict['tslice'] - nbin = opts_dict['nbin'] - minrange = opts_dict['minrange'] - maxrange = opts_dict['maxrange'] + threshold = 1e-12 + popens = opts_dict["popens"] + tslice = opts_dict["tslice"] + nbin = opts_dict["nbin"] + minrange = opts_dict["minrange"] + maxrange = opts_dict["maxrange"] if not popens: print("ERROR: should not be calculating rmsz for CAM => EXITING") @@ -49,331 +51,433 
@@ def calc_rmsz(o_files, var_name3d, var_name2d, opts_dict): input_dims = first_file.dimensions # Create array variables - nlev=len(input_dims['z_t']) - if 'nlon' in input_dims: + nlev = len(input_dims["z_t"]) + if "nlon" in input_dims: nlon = len(input_dims["nlon"]) nlat = len(input_dims["nlat"]) - elif 'lon' in input_dims: + elif "lon" in input_dims: nlon = len(input_dims["lon"]) nlat = len(input_dims["lat"]) - -# npts2d=nlat*nlon -# npts3d=nlev*nlat*nlon -# print("calc_rmsz: nlev,nlat,nlon,o_files, var3dv var2d ..." , nlev, nlat, nlon, len(o_files), len(var_name3d), len(var_name2d)) + # npts2d=nlat*nlon + # npts3d=nlev*nlat*nlon + + # print("calc_rmsz: nlev,nlat,nlon,o_files, var3dv var2d ..." , nlev, nlat, nlon, len(o_files), len(var_name3d), len(var_name2d)) + + output3d = np.zeros((len(o_files), nlev, nlat, nlon), dtype=np.float32) + output2d = np.zeros((len(o_files), nlat, nlon), dtype=np.float32) - output3d = np.zeros((len(o_files),nlev,nlat,nlon),dtype=np.float32) - output2d = np.zeros((len(o_files),nlat,nlon),dtype=np.float32) + ens_avg3d = np.zeros((len(var_name3d), nlev, nlat, nlon), dtype=np.float32) + ens_stddev3d = np.zeros((len(var_name3d), nlev, nlat, nlon), dtype=np.float32) + ens_avg2d = np.zeros((len(var_name2d), nlat, nlon), dtype=np.float32) + ens_stddev2d = np.zeros((len(var_name2d), nlat, nlon), dtype=np.float32) - ens_avg3d=np.zeros((len(var_name3d),nlev,nlat,nlon),dtype=np.float32) - ens_stddev3d=np.zeros((len(var_name3d),nlev,nlat,nlon),dtype=np.float32) - ens_avg2d=np.zeros((len(var_name2d),nlat,nlon),dtype=np.float32) - ens_stddev2d=np.zeros((len(var_name2d),nlat,nlon),dtype=np.float32) + Zscore3d = np.zeros((len(var_name3d), len(o_files), (nbin)), dtype=np.float32) + Zscore2d = np.zeros((len(var_name2d), len(o_files), (nbin)), dtype=np.float32) - Zscore3d = np.zeros((len(var_name3d),len(o_files),(nbin)),dtype=np.float32) - Zscore2d = np.zeros((len(var_name2d),len(o_files),(nbin)),dtype=np.float32) - - first_file.close() - #open all of the files at once - #(not too many for pop - and no longer doing this for cam) + # open all of the files at once + # (not too many for pop - and no longer doing this for cam) handle_o_files = [] for fname in o_files: handle_o_files.append(nc.Dataset(fname, "r")) - #Now lOOP THROUGH 3D - for vcount,vname in enumerate(var_name3d): -# print(vcount, " ", vname) - #Read in vname's data from all ens. files + # Now lOOP THROUGH 3D + for vcount, vname in enumerate(var_name3d): + # print(vcount, " ", vname) + # Read in vname's data from all ens. 
files + for fcount, this_file in enumerate(handle_o_files): + data = this_file.variables[vname] + output3d[fcount, :, :, :] = data[tslice, :, :, :] + + # for this variable, Generate ens_avg and ens_stddev to store in the ensemble summary file + moutput3d = np.ma.masked_values(output3d, data._FillValue) + ens_avg3d[vcount] = np.ma.average(moutput3d, axis=0) + ens_stddev3d[vcount] = np.ma.std(moutput3d, axis=0, dtype=np.float32) + + # Generate avg, stddev and zscore for this 3d variable + for fcount, this_file in enumerate(handle_o_files): + data = this_file.variables[vname] + # rmask contains a number for each grid point indicating its region + rmask = this_file.variables["REGION_MASK"] + Zscore = pop_zpdf( + output3d[fcount], + nbin, + (minrange, maxrange), + ens_avg3d[vcount], + ens_stddev3d[vcount], + data._FillValue, + threshold, + rmask, + opts_dict, + ) + Zscore3d[vcount, fcount, :] = Zscore[:] + + # LOOP THROUGH 2D + for vcount, vname in enumerate(var_name2d): + # Read in vname's data of all files for fcount, this_file in enumerate(handle_o_files): - data=this_file.variables[vname] - output3d[fcount,:,:,:]=data[tslice,:,:,:] - - #for this variable, Generate ens_avg and ens_stddev to store in the ensemble summary file - moutput3d=np.ma.masked_values(output3d,data._FillValue) - ens_avg3d[vcount]=np.ma.average(moutput3d,axis=0) - ens_stddev3d[vcount]=np.ma.std(moutput3d,axis=0,dtype=np.float32) - - #Generate avg, stddev and zscore for this 3d variable - for fcount,this_file in enumerate(handle_o_files): - data=this_file.variables[vname] - #rmask contains a number for each grid point indicating it's region - rmask=this_file.variables['REGION_MASK'] - Zscore=pop_zpdf(output3d[fcount],nbin,(minrange,maxrange),ens_avg3d[vcount],ens_stddev3d[vcount],data._FillValue,threshold,rmask,opts_dict) - Zscore3d[vcount,fcount,:]=Zscore[:] - - #LOOP THROUGH 2D - for vcount,vname in enumerate(var_name2d): - #Read in vname's data of all files + data = this_file.variables[vname] + output2d[fcount, :, :] = data[tslice, :, :] + + # Generate ens_avg and ens_stddev to store in the ensemble summary file + moutput2d = np.ma.masked_values(output2d, data._FillValue) + ens_avg2d[vcount] = np.ma.average(moutput2d, axis=0) + ens_stddev2d[vcount] = np.ma.std(moutput2d, axis=0, dtype=np.float32) + + # Generate avg, stddev and zscore for this 2d variable for fcount, this_file in enumerate(handle_o_files): - data=this_file.variables[vname] - output2d[fcount,:,:]=data[tslice,:,:] - - #Generate ens_avg and esn_stddev to store in the ensemble summary file - moutput2d=np.ma.masked_values(output2d,data._FillValue) - ens_avg2d[vcount]=np.ma.average(moutput2d,axis=0) - ens_stddev2d[vcount]=np.ma.std(moutput2d,axis=0,dtype=np.float32) - - #Generate avg, stddev and zscore for 3d variable - for fcount,this_file in enumerate(handle_o_files): - data=this_file.variables[vname] - - rmask=this_file.variables['REGION_MASK'] - Zscore=pop_zpdf(output2d[fcount],nbin,(minrange,maxrange),ens_avg2d[vcount],ens_stddev2d[vcount],data._FillValue,threshold,rmask,opts_dict) - Zscore2d[vcount,fcount,:]=Zscore[:] - - #close files + data = this_file.variables[vname] + + rmask = this_file.variables["REGION_MASK"] + Zscore = pop_zpdf( + output2d[fcount], + nbin, + (minrange, maxrange), + ens_avg2d[vcount], + ens_stddev2d[vcount], + data._FillValue, + threshold, + rmask, + opts_dict, + ) + Zscore2d[vcount, fcount, :] = Zscore[:] + + # close files for this_file in handle_o_files: this_file.close() - - return 
Zscore3d,Zscore2d,ens_avg3d,ens_stddev3d,ens_avg2d,ens_stddev2d + + return Zscore3d, Zscore2d, ens_avg3d, ens_stddev3d, ens_avg2d, ens_stddev2d # # Calculate pop zscore pass rate (ZPR) or pop zpdf values # -def pop_zpdf(input_array,nbin,zrange,ens_avg,ens_stddev,FillValue,threshold,rmask,opts_dict): - - if 'test_failure' in opts_dict: - test_failure=opts_dict['test_failure'] - else: - test_failure=False - -# print("input_array.ndim = ", input_array.ndim) - - #Masked out the missing values (land) - moutput=np.ma.masked_values(input_array,FillValue) - if input_array.ndim==3: - rmask3d=np.zeros(input_array.shape,dtype=np.int32) - for i in rmask3d: - i[:,:]=rmask[:,:] - rmask_array=rmask3d - elif input_array.ndim==2: - rmask_array=np.zeros(input_array.shape,dtype=np.int32) - rmask_array[:,:]=rmask[:,:] - - #Now we just want the open oceans (not marginal seas) - # - so for g1xv7, those are 1,2,3,4,6 - # in the region mask - so we don't want rmask<1 or rmask>6 - moutput2=np.ma.masked_where((rmask_array<1)|(rmask_array>6),moutput) - - #Use the masked array moutput2 to calculate Zscore_temp=(data-avg)/stddev - Zscore_temp=np.fabs((moutput2.astype(np.float64)-ens_avg)/np.where(ens_stddev<=threshold,FillValue,ens_stddev)) - - #To retrieve only the valid entries of Zscore_temp - Zscore_nomask=Zscore_temp[~Zscore_temp.mask] - - #If just test failure, calculate ZPR only (DEFAULT - not chnagable via cmd line - if test_failure: - #Zpr=the count of Zscore_nomask is less than pop_tol (3.0)/ the total count of Zscore_nomask - Zpr=np.where(Zscore_nomask<=opts_dict['pop_tol'])[0].size/float(Zscore_temp.count()) - return Zpr - - #Else calculate zpdf and return as zscore - #Count the unmasked value - count=Zscore_temp.count() - - Zscore,bins = np.histogram(Zscore_temp.compressed(),bins=nbin,range=zrange) - - #Normalize the number by dividing the count - if count != 0: - Zscore=Zscore.astype(np.float32)/count - else: - print(('count=0,sum=',np.sum(Zscore))) - return Zscore - -# -# Calculate rmsz score by compare the run file with the ensemble summary file -# -def calculate_raw_score(k, v, npts3d, npts2d, ens_avg, ens_stddev, is_SE, opts_dict, FillValue, timeslice, rmask): - count=0 - Zscore=0 - threshold = 1.0e-12 - has_zscore=True - popens=opts_dict['popens'] - if popens: #POP - minrange=opts_dict['minrange'] - maxrange=opts_dict['maxrange'] - Zscore=pop_zpdf(v,opts_dict['nbin'],(minrange,maxrange),ens_avg,ens_stddev,FillValue,threshold,rmask,opts_dict) - else: #CAM - if k in ens_avg: - if is_SE: - if ens_avg[k].ndim == 1: - npts=npts2d +def pop_zpdf( + input_array, + nbin, + zrange, + ens_avg, + ens_stddev, + FillValue, + threshold, + rmask, + opts_dict, +): + + if "test_failure" in opts_dict: + test_failure = opts_dict["test_failure"] + else: + test_failure = False + + # print("input_array.ndim = ", input_array.ndim) + + # Masked out the missing values (land) + moutput = np.ma.masked_values(input_array, FillValue) + if input_array.ndim == 3: + rmask3d = np.zeros(input_array.shape, dtype=np.int32) + for i in rmask3d: + i[:, :] = rmask[:, :] + rmask_array = rmask3d + elif input_array.ndim == 2: + rmask_array = np.zeros(input_array.shape, dtype=np.int32) + rmask_array[:, :] = rmask[:, :] + + # Now we just want the open oceans (not marginal seas) + # - so for g1xv7, those are 1,2,3,4,6 + # in the region mask - so we don't want rmask<1 or rmask>6 + moutput2 = np.ma.masked_where((rmask_array < 1) | (rmask_array > 6), moutput) + + # Use the masked array moutput2 to calculate Zscore_temp=(data-avg)/stddev + 
Zscore_temp = np.fabs( + (moutput2.astype(np.float64) - ens_avg) + / np.where(ens_stddev <= threshold, FillValue, ens_stddev) + ) + + # To retrieve only the valid entries of Zscore_temp + Zscore_nomask = Zscore_temp[~Zscore_temp.mask] + + # If just test failure, calculate ZPR only (DEFAULT - not changeable via cmd line) + if test_failure: + # Zpr=the count of Zscore_nomask is less than pop_tol (3.0)/ the total count of Zscore_nomask + Zpr = np.where(Zscore_nomask <= opts_dict["pop_tol"])[0].size / float( + Zscore_temp.count() + ) + return Zpr + + # Else calculate zpdf and return as zscore + # Count the unmasked value + count = Zscore_temp.count() + + Zscore, bins = np.histogram(Zscore_temp.compressed(), bins=nbin, range=zrange) + + # Normalize the number by dividing the count + if count != 0: + Zscore = Zscore.astype(np.float32) / count + else: + print(("count=0,sum=", np.sum(Zscore))) + return Zscore + + +# +# Calculate rmsz score by comparing the run file with the ensemble summary file +# +def calculate_raw_score( + k, + v, + npts3d, + npts2d, + ens_avg, + ens_stddev, + is_SE, + opts_dict, + FillValue, + timeslice, + rmask, +): + count = 0 + Zscore = 0 + threshold = 1.0e-12 + has_zscore = True + popens = opts_dict["popens"] + if popens:  # POP + minrange = opts_dict["minrange"] + maxrange = opts_dict["maxrange"] + Zscore = pop_zpdf( + v, + opts_dict["nbin"], + (minrange, maxrange), + ens_avg, + ens_stddev, + FillValue, + threshold, + rmask, + opts_dict, + ) + else:  # CAM + if k in ens_avg: + if is_SE: + if ens_avg[k].ndim == 1: + npts = npts2d + else: + npts = npts3d else: - npts=npts3d - else: - if ens_avg[k].ndim == 2: - npts=npts2d + if ens_avg[k].ndim == 2: + npts = npts2d + else: + npts = npts3d + + count, return_val = calc_Z( + v, + ens_avg[k].astype(np.float64), + ens_stddev[k].astype(np.float64), + count, + False, + ) + Zscore = np.sum(np.square(return_val.astype(np.float64))) + if npts == count: + Zscore = 0 else: - Zscore=np.sqrt(Zscore/(npts-count)) - else: - has_zscore=False - - return Zscore,has_zscore - -# -# Find the corresponding ensemble summary file from directory -# /glade/p/cesmdata/cseg/inputdata/validation/ when three + has_zscore = False + + return Zscore, has_zscore + + +# +# Find the corresponding ensemble summary file from directory +# /glade/p/cesmdata/cseg/inputdata/validation/ when three # validation files are input from the web server -# +# # ifiles are not open -def search_sumfile(opts_dict,ifiles): +def search_sumfile(opts_dict, ifiles): - sumfile_dir=opts_dict['sumfile'] - first_file = nc.Dataset(ifiles[0],"r") - machineid='' - compiler='' + sumfile_dir = opts_dict["sumfile"] + first_file = nc.Dataset(ifiles[0], "r") + machineid = "" + compiler = "" - global_att=first_file.ncattrs() + global_att = first_file.ncattrs() for attr_name in global_att: - val = getattr(first_file, attr_name) - if attr_name == 'model_version': + val = getattr(first_file, attr_name) + if attr_name == "model_version": if val.find("-") != -1: - model_version=val[0:val.find('-')] + model_version = val[0 : val.find("-")] else: - model_version=val - elif attr_name == 'compset': - compset=val - elif attr_name == 'testtype': - testtype=val - if val == 'UF-ECT': - testtype = 'uf_ensembles' - opts_dict['eet']=len(ifiles) - elif val=='ECT': 
- testtype='ensembles' - elif v=='POP': - testtype=val+'_ensembles' - elif attr_name == 'machineid': - machineid=val - elif attr_name == 'compiler': - compiler=val - elif attr_name == 'grid': - grid=val - - if 'testtype' in global_att: - sumfile_dir=sumfile_dir+'/'+testtype+'/' + model_version = val + elif attr_name == "compset": + compset = val + elif attr_name == "testtype": + testtype = val + if val == "UF-ECT": + testtype = "uf_ensembles" + opts_dict["eet"] = len(ifiles) + elif val == "ECT": + testtype = "ensembles" + elif val == "POP": + testtype = val + "_ensembles" + elif attr_name == "machineid": + machineid = val + elif attr_name == "compiler": + compiler = val + elif attr_name == "grid": + grid = val + + if "testtype" in global_att: + sumfile_dir = sumfile_dir + "/" + testtype + "/" else: - print("ERROR: No global attribute testtype in your validation file => EXITING....") + print( + "ERROR: No global attribute testtype in your validation file => EXITING...." + ) sys.exit(2) - if 'model_version' in global_att: - sumfile_dir=sumfile_dir+'/'+model_version+'/' + if "model_version" in global_att: + sumfile_dir = sumfile_dir + "/" + model_version + "/" else: - print("ERROR: No global attribute model_version in your validation file => EXITING....") + print( + "ERROR: No global attribute model_version in your validation file => EXITING...." + ) sys.exit(2) first_file.close() - if (os.path.exists(sumfile_dir)): - thefile_id=0 + if os.path.exists(sumfile_dir): + thefile_id = 0 for i in os.listdir(sumfile_dir): - if (os.path.isfile(sumfile_dir+i)): - sumfile_id=nc.Dataset(sumfile_dir+i,'r') - sumfile_gatt=sumfile_id.ncattrs() - if 'grid' not in sumfile_gatt and 'resolution' not in sumfile_gatt: - print("ERROR: No global attribute grid or resolution in the summary file => EXITING....") + if os.path.isfile(sumfile_dir + i): + sumfile_id = nc.Dataset(sumfile_dir + i, "r") + sumfile_gatt = sumfile_id.ncattrs() + if "grid" not in sumfile_gatt and "resolution" not in sumfile_gatt: + print( + "ERROR: No global attribute grid or resolution in the summary file => EXITING...." + ) sys.exit(2) - if 'compset' not in sumfile_gatt: + if "compset" not in sumfile_gatt: print("ERROR: No global attribute compset in the summary file") sys.exit(2) - if getattr(sumfile_id, 'resolution') == grid and getsttr(sumfile_id, 'compset') == compset: - thefile_id=sumfile_id - sumfile_id.close() - if thefile_id==0: - print("ERROR: The verification files don't have a matching ensemble summary file to compare => EXITING....") - sys.exit(2) + if ( + getattr(sumfile_id, "resolution") == grid + and getattr(sumfile_id, "compset") == compset + ): + thefile_id = sumfile_id + sumfile_id.close() + if thefile_id == 0: + print( + "ERROR: The verification files don't have a matching ensemble summary file to compare => EXITING...." + ) + sys.exit(2) else: - print(("ERROR: Could not locate directory "+sumfile_dir + " => EXITING....")) + print(("ERROR: Could not locate directory " + sumfile_dir + " => EXITING....")) sys.exit(2) + return sumfile_dir + i, machineid, compiler - return sumfile_dir+i,machineid,compiler # # Create some variables and call a function to calculate PCA # now gm comes in at 64 bits... 
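Conceptually, pre_PCA and princomp below standardize each variable's global means across the ensemble, take the eigenvectors of the resulting covariance matrix as the loadings, and project to get PCA scores. A small, illustrative numpy sketch of that scheme on synthetic data (eigh is used here because the covariance matrix is symmetric, whereas princomp uses eig):

    import numpy as np

    rng = np.random.default_rng(0)
    gm = rng.normal(size=(5, 30))  # 5 "variables" x 30 ensemble "members"

    # Standardize each variable across the ensemble (ddof=1, as in pre_PCA).
    std_gm = (gm - gm.mean(axis=1, keepdims=True)) / gm.std(axis=1, ddof=1, keepdims=True)

    # Loadings: eigenvectors of the covariance matrix, largest eigenvalue first.
    evals, evecs = np.linalg.eigh(np.cov(std_gm))
    loadings = evecs[:, np.argsort(evals)[::-1]]

    # Scores: coordinates of the standardized means in the PC basis; their
    # per-component standard deviations drive the downstream pass/fail test.
    scores = loadings.T @ std_gm
    print(np.std(scores, axis=1, ddof=1))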
+ def pre_PCA(gm_orig, all_var_names, whole_list, me): - #initialize - b_exit=False - gm_len=gm_orig.shape - nvar=gm_len[0] - nfile=gm_len[1] - if gm_orig.dtype == np.float32: - gm=gm_orig.astype(np.float64) + # initialize + b_exit = False + gm_len = gm_orig.shape + nvar = gm_len[0] + nfile = gm_len[1] + if gm_orig.dtype == np.float32: + gm = gm_orig.astype(np.float64) else: - gm=gm_orig[:] - - mu_gm=np.average(gm,axis=1) - sigma_gm=np.std(gm,axis=1,ddof=1) + gm = gm_orig[:] - standardized_global_mean=np.zeros(gm.shape,dtype=np.float64) - scores_gm=np.zeros(gm.shape,dtype=np.float64) + mu_gm = np.average(gm, axis=1) + sigma_gm = np.std(gm, axis=1, ddof=1) - #AB: 4/19: whole list contains variables to be removed due to very small global means (calc elsewhere), but this is not currently needed - #and whole_list will be len = 0 + standardized_global_mean = np.zeros(gm.shape, dtype=np.float64) + scores_gm = np.zeros(gm.shape, dtype=np.float64) + + # AB: 4/19: whole list contains variables to be removed due to very small global means (calc elsewhere), but this is not currently needed + # and whole_list will be len = 0 orig_len = len(whole_list) if orig_len > 0: if me.get_rank() == 0: print("\n") - print("***************************************************************************************") - print(("Warning: these ", orig_len, " variables have ~0 means (< O(e-15)) for each ensemble member, please exclude them via the json file (--jsonfile) :")) + print( + "***************************************************************************************" + ) + print( + ( + "Warning: these ", + orig_len, + " variables have ~0 means (< O(e-15)) for each ensemble member, please exclude them via the json file (--jsonfile) :", + ) + ) print((",".join(['"{0}"'.format(item) for item in whole_list]))) - print("***************************************************************************************") + print( + "***************************************************************************************" + ) print("\n") - #check for constants across ensemble + # check for constants across ensemble for var in range(nvar): - for file in range(nfile): - if np.any(sigma_gm[var]== 0.0) and all_var_names[var] not in set(whole_list): - #keep track of zeros standard deviations - whole_list.append(all_var_names[var]) - - #print list + for file in range(nfile): + if np.any(sigma_gm[var] == 0.0) and all_var_names[var] not in set( + whole_list + ): + # keep track of zero standard deviations + whole_list.append(all_var_names[var]) + + # print list new_len = len(whole_list) - if (new_len > orig_len): + if new_len > orig_len: sub_list = whole_list[orig_len:] if me.get_rank() == 0: - print("\n") - print("*************************************************************************************") - print(("Warning: these ", new_len-orig_len, " variables are constant across ensemble members, please exclude them via the json file (--jsonfile): ")) - print("\n") - print((",".join(['"{0}"'.format(item) for item in sub_list]))) - print("*************************************************************************************") - print("\n") - - #exit if non-zero length whole_list + print("\n") + print( + "*************************************************************************************" + ) + print( + ( + "Warning: these ", + new_len - orig_len, + " variables are constant across ensemble members, please exclude them via the json file (--jsonfile): ", + ) + ) + print("\n") + print((",".join(['"{0}"'.format(item) for item in sub_list]))) + 
print( + "*************************************************************************************" + ) + print("\n") + + # exit if non-zero length whole_list if new_len > 0: print("=> Exiting ...") b_exit = True - #check for linear dependent vars + # check for linear dependent vars if not b_exit: for var in range(nvar): for file in range(nfile): - standardized_global_mean[var,file]=(gm[var,file]-mu_gm[var])/sigma_gm[var] + standardized_global_mean[var, file] = ( + gm[var, file] - mu_gm[var] + ) / sigma_gm[var] - eps =np.finfo(np.float32).eps + eps = np.finfo(np.float32).eps norm = np.linalg.norm(standardized_global_mean, ord=2) sh = max(standardized_global_mean.shape) - mytol = sh*norm*eps + mytol = sh * norm * eps - standardized_rank=np.linalg.matrix_rank(standardized_global_mean, mytol) + standardized_rank = np.linalg.matrix_rank(standardized_global_mean, mytol) print("STATUS: checking for dependent vars using QR...") - print(("STATUS: standardized_global_mean rank = ",standardized_rank)) + print(("STATUS: standardized_global_mean rank = ", standardized_rank)) - dep_var_list = get_dependent_vars_index(standardized_global_mean, standardized_rank) + dep_var_list = get_dependent_vars_index( + standardized_global_mean, standardized_rank + ) num_dep = len(dep_var_list) orig_len = len(whole_list) @@ -384,164 +488,203 @@ def pre_PCA(gm_orig, all_var_names, whole_list, me): sub_list = whole_list[orig_len:] print("\n") - print("********************************************************************************************") - print(("Warning: these ", num_dep, " variables are linearly dependent, please exclude them via the json file (--jsonfile): ")) + print( + "********************************************************************************************" + ) + print( + ( + "Warning: these ", + num_dep, + " variables are linearly dependent, please exclude them via the json file (--jsonfile): ", + ) + ) print("\n") print((",".join(['"{0}"'.format(item) for item in sub_list]))) - print("********************************************************************************************") + print( + "********************************************************************************************" + ) print("\n") print("=> EXITING....") - #need to exit - b_exit=True + # need to exit + b_exit = True - #now check for any variables that have less than 3% (of the ensemble size) unique values + # now check for any variables that have less than 3% (of the ensemble size) unique values if not b_exit: print("STATUS: checking for unique values across ensemble") - cts = np.count_nonzero(np.diff(np.sort(standardized_global_mean)), axis=1)+1 -# thresh = .02* standardized_global_mean.shape[1] - thresh = .03* standardized_global_mean.shape[1] + cts = np.count_nonzero(np.diff(np.sort(standardized_global_mean)), axis=1) + 1 + # thresh = .02* standardized_global_mean.shape[1] + thresh = 0.03 * standardized_global_mean.shape[1] result = np.where(cts < thresh) indices = result[0] if len(indices) > 0: - nu_list = []; + nu_list = [] for i in indices: nu_list.append(all_var_names[i]) print("\n") - print("********************************************************************************************") - print(("Warning: these ", len(indices), " variables contain fewer than 3% unique values across the ensemble, please exclude them via the json file (--jsonfile): ")) + print( + "********************************************************************************************" + ) + print( + ( + "Warning: these ", + len(indices), + " variables contain 
fewer than 3% unique values across the ensemble, please exclude them via the json file (--jsonfile): ", + ) + ) print("\n") print((",".join(['"{0}"'.format(item) for item in nu_list]))) - print("********************************************************************************************") + print( + "********************************************************************************************" + ) print("\n") print("=> EXITING....") - #need to exit + # need to exit b_exit = True if not b_exit: - #find principal components - loadings_gm=princomp(standardized_global_mean) - #now do coord transformation on the standardized means to get the scores - scores_gm=np.dot(loadings_gm.T,standardized_global_mean) - sigma_scores_gm =np.std(scores_gm,axis=1,ddof=1) + # find principal components + loadings_gm = princomp(standardized_global_mean) + # now do coord transformation on the standardized means to get the scores + scores_gm = np.dot(loadings_gm.T, standardized_global_mean) + sigma_scores_gm = np.std(scores_gm, axis=1, ddof=1) else: - loadings_gm = np.zeros(gm.shape,dtype=np.float64) - sigma_scores_gm =np.zeros(gm.shape,dtype=np.float64) + loadings_gm = np.zeros(gm.shape, dtype=np.float64) + sigma_scores_gm = np.zeros(gm.shape, dtype=np.float64) -# return mu_gm.astype(np.float32),sigma_gm.astype(np.float32),standardized_global_mean.astype(np.float32),loadings_gm.astype(np.float32),sigma_scores_gm.astype(np.float32),b_exit + # return mu_gm.astype(np.float32),sigma_gm.astype(np.float32),standardized_global_mean.astype(np.float32),loadings_gm.astype(np.float32),sigma_scores_gm.astype(np.float32),b_exit + + return ( + mu_gm, + sigma_gm, + standardized_global_mean, + loadings_gm, + sigma_scores_gm, + b_exit, + ) - return mu_gm, sigma_gm, standardized_global_mean, loadings_gm, sigma_scores_gm, b_exit # # Performs principal components analysis (PCA) on the p-by-n data matrix A -# rows of A correspond to (p) variables AND cols of A correspond to the (n) tests +# rows of A correspond to (p) variables AND cols of A correspond to the (n) tests # assume already standardized # -# Returns the loadings: p-by-p matrix, each column containing coefficients -# for one principal component. +# Returns the loadings: p-by-p matrix, each column containing coefficients +# for one principal component. 
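For reference, a minimal standalone sketch of the PCA step this comment block describes (the same eigendecomposition-of-covariance approach as the princomp routine below; the p-by-n input is assumed to be already standardized, and the helper name princomp_sketch is ours, not part of the module):

import numpy as np

def princomp_sketch(std_gm):
    # std_gm: p x n array, rows = variables, cols = ensemble members
    co_mat = np.cov(std_gm)               # p x p covariance matrix
    evals, evecs = np.linalg.eig(co_mat)  # eigenpairs (not sorted)
    order = np.argsort(evals)[::-1]       # largest eigenvalue first
    return evecs[:, order]                # loadings: one PC per column

# PC scores are then the coordinates of the standardized means in PC space,
# matching the pre_PCA code above:
# scores = np.dot(princomp_sketch(std_gm).T, std_gm)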
# def princomp(standardized_global_mean): - # find covariance matrix (will be pxp) - co_mat= np.cov(standardized_global_mean) - # Calculate evals and evecs of covariance matrix (evecs are also pxp) - [evals, evecs] = np.linalg.eig(co_mat) - # Above may not be sorted - sort largest first - new_index = np.argsort(evals)[::-1] - evecs = evecs[:,new_index] - evals = evals[new_index] - - return evecs - -# + # find covariance matrix (will be pxp) + co_mat = np.cov(standardized_global_mean) + # Calculate evals and evecs of covariance matrix (evecs are also pxp) + [evals, evecs] = np.linalg.eig(co_mat) + # Above may not be sorted - sort largest first + new_index = np.argsort(evals)[::-1] + evecs = evecs[:, new_index] + evals = evals[new_index] + + return evecs + + +# # Calculate (val-avg)/stddev and exclude zero value -# -def calc_Z(val,avg, stddev, count, flag): - return_val=np.empty(val.shape,dtype=np.float32,order='C') - tol =1e-12 - if stddev[(stddev > tol)].size ==0: - if flag: - print("WARNING: ALL standard dev are < 1e-12") - flag = False - count =count + stddev[(stddev <= tol)].size - return_val = np.zeros(val.shape,dtype=np.float32,order='C') - else: - if stddev[(stddev <= tol)].size > 0: - if flag: - print("WARNING: some standard dev are < 1e-12") - flag =False - count =count + stddev[(stddev <= tol)].size - return_val[np.where(stddev <= tol)]=0. - return_val[np.where(stddev > tol)]= (val[np.where(stddev> tol)]-avg[np.where(stddev> tol)])/stddev[np.where(stddev>tol)] +# +def calc_Z(val, avg, stddev, count, flag): + return_val = np.empty(val.shape, dtype=np.float32, order="C") + tol = 1e-12 + if stddev[(stddev > tol)].size == 0: + if flag: + print("WARNING: ALL standard dev are < 1e-12") + flag = False + count = count + stddev[(stddev <= tol)].size + return_val = np.zeros(val.shape, dtype=np.float32, order="C") else: - return_val=(val-avg)/stddev - return count,return_val + if stddev[(stddev <= tol)].size > 0: + if flag: + print("WARNING: some standard dev are < 1e-12") + flag = False + count = count + stddev[(stddev <= tol)].size + return_val[np.where(stddev <= tol)] = 0.0 + return_val[np.where(stddev > tol)] = ( + val[np.where(stddev > tol)] - avg[np.where(stddev > tol)] + ) / stddev[np.where(stddev > tol)] + else: + return_val = (val - avg) / stddev + return count, return_val + # # Read a json file for the excluded/included list of variables # def read_jsonlist(metajson, method_name): - + if not os.path.exists(metajson): print("\n") - print("*************************************************************************************") - print("Warning: Specified json file does not exist: ",metajson) - print("*************************************************************************************") + print( + "*************************************************************************************" + ) + print("Warning: Specified json file does not exist: ", metajson) + print( + "*************************************************************************************" + ) print("\n") varList = [] exclude = True - return varList,exclude + return varList, exclude else: - fd=open(metajson) + fd = open(metajson) metainfo = json.load(fd) - if method_name == 'ES': - exclude=False - #varList = metainfo['ExcludedVar'] - if 'ExcludedVar' in metainfo: - exclude=True - varList = metainfo['ExcludedVar'] - elif 'IncludedVar' in metainfo: - varList = metainfo['IncludedVar'] - return varList,exclude - elif method_name == 'ESP': - var2d = metainfo['Var2d'] - var3d = metainfo['Var3d'] + if method_name == "ES": + 
exclude = False + # varList = metainfo['ExcludedVar'] + if "ExcludedVar" in metainfo: + exclude = True + varList = metainfo["ExcludedVar"] + elif "IncludedVar" in metainfo: + varList = metainfo["IncludedVar"] + return varList, exclude + elif method_name == "ESP": + var2d = metainfo["Var2d"] + var3d = metainfo["Var3d"] return var2d, var3d -# +# # Calculate Normalized RMSE metric # def calc_nrmse(orig_array, comp_array): - - orig_size=orig_array.size - sumsqr=np.sum(np.square(orig_array.astype(np.float64)-comp_array.astype(np.float64))) - rng=np.max(orig_array)-np.min(orig_array) - if abs(rng) < 1e-18: - rmse=0.0 - else: - rmse=np.sqrt(sumsqr/orig_size)/rng - return rmse + orig_size = orig_array.size + sumsqr = np.sum( + np.square(orig_array.astype(np.float64) - comp_array.astype(np.float64)) + ) + rng = np.max(orig_array) - np.min(orig_array) + if abs(rng) < 1e-18: + rmse = 0.0 + else: + rmse = np.sqrt(sumsqr / orig_size) / rng + + return rmse + # # Calculate weighted global mean for one level of CAM output # works in dp def area_avg(data_orig, weight, is_SE): - #TO DO: take into account missing values + # TO DO: take into account missing values if data_orig.dtype == np.float32: - data=data_orig.astype(np.float64) + data = data_orig.astype(np.float64) else: - data=data_orig[:] + data = data_orig[:] - if (is_SE == True): + if is_SE == True: a = np.average(data, weights=weight) - else: #FV - #weights are for lat - a_lat = np.average(data,axis=0, weights=weight) + else: # FV + # weights are for lat + a_lat = np.average(data, axis=0, weights=weight) a = np.average(a_lat) return a @@ -551,19 +694,20 @@ def area_avg(data_orig, weight, is_SE): # def pop_area_avg(data_orig, weight): - #Take into account missing values - #weights are for lat + # Take into account missing values + # weights are for lat if data_orig.dtype == np.float32: - data=data_orig.astype(np.float64) + data = data_orig.astype(np.float64) else: - data=data_orig[:] + data = data_orig[:] a = np.ma.average(data, weights=weight) return a -#def get_lev(file_dim_dict,lev_name): + +# def get_lev(file_dim_dict,lev_name): # return len(file_dim_dict[lev_name]) - + # # Get dimension 'lev' or 'z_t' # @@ -573,24 +717,25 @@ def get_nlev(o_files, popens): input_dims = first_file.dimensions if not popens: - nlev = len(input_dims['lev']) + nlev = len(input_dims["lev"]) else: - nlev = len(input_dims['z_t']) + nlev = len(input_dims["z_t"]) first_file.close() return nlev - + + # # Calculate area_wgt when processes cam se/cam fv/pop files # -def get_area_wgt(o_files ,is_SE, nlev, popens): +def get_area_wgt(o_files, is_SE, nlev, popens): - z_wgt={} + z_wgt = {} first_file = nc.Dataset(o_files[0], "r") input_dims = first_file.dimensions - if (is_SE == True): + if is_SE == True: ncol = len(input_dims["ncol"]) output3d = np.zeros((nlev, ncol), dtype=np.float64) output2d = np.zeros(ncol, dtype=np.float64) @@ -601,151 +746,212 @@ def get_area_wgt(o_files ,is_SE, nlev, popens): area_wgt[:] /= total else: if not popens: - nlon = len(input_dims['lon']) - nlat = len(input_dims['lat']) - gw = first_file.variables["gw"] + nlon = len(input_dims["lon"]) + nlat = len(input_dims["lat"]) + gw = first_file.variables["gw"] else: - if 'nlon' in input_dims: - nlon = len(input_dims['nlon']) - nlat = len(input_dims['nlat']) - elif 'lon' in input_dims: - nlon = len(input_dims['lon']) - nlat = len(input_dims['lat']) - gw = first_file.variables["TAREA"] - z_wgt = first_file.variables["dz"] - output3d = np.zeros((nlev, nlat, nlon),dtype=np.float64) - output2d = np.zeros((nlat, 
nlon),dtype=np.float64) - area_wgt = np.zeros(nlat,dtype=np.float64) #note gauss weights are length nlat + if "nlon" in input_dims: + nlon = len(input_dims["nlon"]) + nlat = len(input_dims["nlat"]) + elif "lon" in input_dims: + nlon = len(input_dims["lon"]) + nlat = len(input_dims["lat"]) + gw = first_file.variables["TAREA"] + z_wgt = first_file.variables["dz"] + output3d = np.zeros((nlev, nlat, nlon), dtype=np.float64) + output2d = np.zeros((nlat, nlon), dtype=np.float64) + area_wgt = np.zeros( + nlat, dtype=np.float64 + ) # note gauss weights are length nlat area_wgt[:] = gw[:] first_file.close() - return output3d,output2d,area_wgt,z_wgt + return output3d, output2d, area_wgt, z_wgt + + # # compute area_wgts, and then loop through all files to call calc_global_means_for_onefile # o_files are not open for CAM # 12/19 - summary file will now be double precision -def generate_global_mean_for_summary(o_files, var_name3d, var_name2d, is_SE, pepsi_gm, opts_dict): +def generate_global_mean_for_summary( + o_files, var_name3d, var_name2d, is_SE, pepsi_gm, opts_dict +): - tslice=opts_dict['tslice'] - popens=opts_dict['popens'] + tslice = opts_dict["tslice"] + popens = opts_dict["popens"] n3d = len(var_name3d) n2d = len(var_name2d) tot = n3d + n2d -# gm3d = np.zeros((n3d,len(o_files)), dtype=np.float32) -# gm2d = np.zeros((n2d,len(o_files)), dtype=np.float32) - gm3d = np.zeros((n3d,len(o_files)), dtype=np.float64) - gm2d = np.zeros((n2d,len(o_files)), dtype=np.float64) + # gm3d = np.zeros((n3d,len(o_files)), dtype=np.float32) + # gm2d = np.zeros((n2d,len(o_files)), dtype=np.float32) + gm3d = np.zeros((n3d, len(o_files)), dtype=np.float64) + gm2d = np.zeros((n2d, len(o_files)), dtype=np.float64) nlev = get_nlev(o_files, popens) - output3d,output2d,area_wgt,z_wgt = get_area_wgt(o_files, is_SE, nlev, popens) - - #loop through the input file list to calculate global means - #var_name3d=[] - for fcount,in_file in enumerate(o_files): + output3d, output2d, area_wgt, z_wgt = get_area_wgt(o_files, is_SE, nlev, popens) + + # loop through the input file list to calculate global means + # var_name3d=[] + for fcount, in_file in enumerate(o_files): - fname = nc.Dataset(in_file,"r") + fname = nc.Dataset(in_file, "r") if pepsi_gm: - # Generate global mean for pepsi challenge data timeseries daily files, they all are 2d variables - var_name2d=[] - for k,v in fname.variables.items(): - if v.typecode() == 'f': - var_name2d.append(k) - fout = open(k+"_33.txt","w") - if k == 'time': - ntslice=v[:] - for i in np.nditer(ntslice): - temp1,temp2=calc_global_mean_for_onefile(fname,area_wgt,var_name3d,var_name2d,output3d,output2d,int(i),is_SE,nlev,opts_dict) - fout.write(str(temp2[0])+'\n') + # Generate global mean for pepsi challenge data timeseries daily files, they all are 2d variables + var_name2d = [] + for k, v in fname.variables.items(): + if v.typecode() == "f": + var_name2d.append(k) + fout = open(k + "_33.txt", "w") + if k == "time": + ntslice = v[:] + for i in np.nditer(ntslice): + temp1, temp2 = calc_global_mean_for_onefile( + fname, + area_wgt, + var_name3d, + var_name2d, + output3d, + output2d, + int(i), + is_SE, + nlev, + opts_dict, + ) + fout.write(str(temp2[0]) + "\n") elif popens: - gm3d[:,fcount],gm2d[:,fcount] = calc_global_mean_for_onefile_pop(fname,area_wgt,z_wgt,var_name3d,var_name2d,output3d,output2d,tslice,is_SE,nlev,opts_dict) + gm3d[:, fcount], gm2d[:, fcount] = calc_global_mean_for_onefile_pop( + fname, + area_wgt, + z_wgt, + var_name3d, + var_name2d, + output3d, + output2d, + tslice, + is_SE, + 
nlev,
+                opts_dict,
+            )
         else:
-            gm3d[:,fcount],gm2d[:,fcount] = calc_global_mean_for_onefile(fname,area_wgt,var_name3d,var_name2d,output3d,output2d,tslice,is_SE,nlev,opts_dict)
+            gm3d[:, fcount], gm2d[:, fcount] = calc_global_mean_for_onefile(
+                fname,
+                area_wgt,
+                var_name3d,
+                var_name2d,
+                output3d,
+                output2d,
+                tslice,
+                is_SE,
+                nlev,
+                opts_dict,
+            )
         fname.close()

-    var_list=[]
-    #some valid CAM vars are all small entries(e.g. DTWR_H2O2 and DTWR_H2O4), so we no longer excluse them via var_list
+    var_list = []
+    # some valid CAM vars are all small entries (e.g. DTWR_H2O2 and DTWR_H2O4), so we no longer exclude them via var_list
+
+    return gm3d, gm2d, var_list

-    return gm3d,gm2d,var_list

 #
 # Calculate global means for one OCN input file
 # (fname is open)  NOT USED ANY LONGER
-def calc_global_mean_for_onefile_pop(fname, area_wgt, z_wgt, var_name3d, var_name2d, output3d, output2d, tslice, is_SE, nlev, opts_dict):
-
+def calc_global_mean_for_onefile_pop(
+    fname,
+    area_wgt,
+    z_wgt,
+    var_name3d,
+    var_name2d,
+    output3d,
+    output2d,
+    tslice,
+    is_SE,
+    nlev,
+    opts_dict,
+):
+
     nan_flag = False
     n3d = len(var_name3d)
     n2d = len(var_name2d)
-#    gm3d = np.zeros((n3d),dtype=np.float32)
-#    gm2d = np.zeros((n2d),dtype=np.float32)
-    gm3d = np.zeros((n3d),dtype=np.float64)
-    gm2d = np.zeros((n2d),dtype=np.float64)
-
+    # gm3d = np.zeros((n3d),dtype=np.float32)
+    # gm2d = np.zeros((n2d),dtype=np.float32)
+    gm3d = np.zeros((n3d), dtype=np.float64)
+    gm2d = np.zeros((n2d), dtype=np.float64)

-    #calculate global mean for each 3D variable
+    # calculate global mean for each 3D variable
     for count, vname in enumerate(var_name3d):
         gm_lev = np.zeros(nlev, dtype=np.float64)
         data = fname.variables[vname]
         if np.any(np.isnan(data)):
-            print("ERROR: ", vname, " data contains NaNs - please check input.")
+            print("ERROR: ", vname, " data contains NaNs - please check input.")
             nan_flag = True
-        output3d[:,:,:] = data[tslice,:,:,:]
-        dbl_output3d = output3d.astype(dtype = np.float64)
+        output3d[:, :, :] = data[tslice, :, :, :]
+        dbl_output3d = output3d.astype(dtype=np.float64)
         for k in range(nlev):
-            moutput3d=np.ma.masked_values(dbl_output3d[k,:,:],data._FillValue)
+            moutput3d = np.ma.masked_values(dbl_output3d[k, :, :], data._FillValue)
             gm_lev[k] = pop_area_avg(moutput3d, area_wgt)
-        #note: averaging over levels - in future, consider pressure-weighted (?)
-        gm3d[count] = np.average(gm_lev,weights=z_wgt)
-
-    #calculate global mean for each 2D variable
+        # note: averaging over levels - in future, consider pressure-weighted (?)
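+        # (z_wgt was read from the POP "dz" layer-thickness variable in
+        # get_area_wgt, so this is a thickness-weighted vertical mean of the
+        # per-level area averages)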
+ gm3d[count] = np.average(gm_lev, weights=z_wgt) + + # calculate global mean for each 2D variable for count, vname in enumerate(var_name2d): data = fname.variables[vname] if np.any(np.isnan(data)): print("ERROR: ", vname, " data contains NaNs - please check input.") nan_flag = True - output2d[:,:] = data[tslice,:,:] - dbl_output2d= output2d.astype(dtype = np.float64) - moutput2d=np.ma.masked_values(dbl_output2d[:,:],data._FillValue) + output2d[:, :] = data[tslice, :, :] + dbl_output2d = output2d.astype(dtype=np.float64) + moutput2d = np.ma.masked_values(dbl_output2d[:, :], data._FillValue) gm2d_mean = pop_area_avg(moutput2d, area_wgt) - gm2d[count]=gm2d_mean - + gm2d[count] = gm2d_mean if nan_flag: print("ERROR: Nans in input data => EXITING....") sys.exit() - return gm3d,gm2d + return gm3d, gm2d + # # Calculate global means for one CAM input file # fname is open -def calc_global_mean_for_onefile(fname, area_wgt,var_name3d, var_name2d,output3d,output2d, tslice, is_SE, nlev,opts_dict): - +def calc_global_mean_for_onefile( + fname, + area_wgt, + var_name3d, + var_name2d, + output3d, + output2d, + tslice, + is_SE, + nlev, + opts_dict, +): + nan_flag = False - if 'cumul' in opts_dict: - cumul = opts_dict['cumul'] + if "cumul" in opts_dict: + cumul = opts_dict["cumul"] else: - cumul = False + cumul = False n3d = len(var_name3d) n2d = len(var_name2d) - #gm3d = np.zeros((n3d),dtype=np.float32) - #gm2d = np.zeros((n2d),dtype=np.float32) - gm3d = np.zeros((n3d),dtype=np.float64) - gm2d = np.zeros((n2d),dtype=np.float64) + # gm3d = np.zeros((n3d),dtype=np.float32) + # gm2d = np.zeros((n2d),dtype=np.float32) + gm3d = np.zeros((n3d), dtype=np.float64) + gm2d = np.zeros((n2d), dtype=np.float64) - - #calculate global mean for each 3D variable (note: area_avg casts into dp before computation) + # calculate global mean for each 3D variable (note: area_avg casts into dp before computation) for count, vname in enumerate(var_name3d): if isinstance(vname, str) == True: @@ -754,41 +960,48 @@ def calc_global_mean_for_onefile(fname, area_wgt,var_name3d, var_name2d,output3d vname_d = vname.decode("utf-8") if vname_d not in fname.variables: - print('WARNING 1: the test file does not have the variable ', vname_d, ' that is in the ensemble summary file ...') - continue + print( + "WARNING 1: the test file does not have the variable ", + vname_d, + " that is in the ensemble summary file ...", + ) + continue data = fname.variables[vname_d] if not data[tslice].size: - print("ERROR: " , vname_d, " data is empty => EXITING....") - sys.exit(2) + print("ERROR: ", vname_d, " data is empty => EXITING....") + sys.exit(2) if np.any(np.isnan(data)): - print("ERROR: ", vname_d, " data contains NaNs - please check input => EXITING") + print( + "ERROR: ", + vname_d, + " data contains NaNs - please check input => EXITING", + ) nan_flag = True continue - if (is_SE == True): - if not cumul: - temp=data[tslice].shape[0] - gm_lev = np.zeros(temp, dtype = np.float64) - for k in range(temp): - gm_lev[k] = area_avg(data[tslice,k,:], area_wgt, is_SE) + if is_SE == True: + if not cumul: + temp = data[tslice].shape[0] + gm_lev = np.zeros(temp, dtype=np.float64) + for k in range(temp): + gm_lev[k] = area_avg(data[tslice, k, :], area_wgt, is_SE) else: - gm_lev = np.zeros(nlev, dtype = np.float64) - for k in range(nlev): - gm_lev[k] = area_avg(output3d[k,:], area_wgt, is_SE) + gm_lev = np.zeros(nlev, dtype=np.float64) + for k in range(nlev): + gm_lev[k] = area_avg(output3d[k, :], area_wgt, is_SE) else: if not cumul: - 
temp=data[tslice].shape[0] - gm_lev = np.zeros(temp, dtype = np.float64) - for k in range(temp): - gm_lev[k] = area_avg(data[tslice,k,:,:], area_wgt, is_SE) + temp = data[tslice].shape[0] + gm_lev = np.zeros(temp, dtype=np.float64) + for k in range(temp): + gm_lev[k] = area_avg(data[tslice, k, :, :], area_wgt, is_SE) else: - gm_lev=np.zeros(nlev) - for k in range(nlev): - gm_lev[k] = area_avg(output3d[k,:,:], area_wgt, is_SE) - #note: averaging over levels could be pressure-weighted (?) - gm3d[count] = np.mean(gm_lev) - - - #calculate global mean for each 2D variable + gm_lev = np.zeros(nlev) + for k in range(nlev): + gm_lev[k] = area_avg(output3d[k, :, :], area_wgt, is_SE) + # note: averaging over levels could be pressure-weighted (?) + gm3d[count] = np.mean(gm_lev) + + # calculate global mean for each 2D variable for count, vname in enumerate(var_name2d): if isinstance(vname, str) == True: @@ -796,134 +1009,157 @@ def calc_global_mean_for_onefile(fname, area_wgt,var_name3d, var_name2d,output3d else: vname_d = vname.decode("utf-8") - - if vname_d not in fname.variables: - print('WARNING 2: the test file does not have the variable ', vname_d, ' that is in the ensemble summary file') - continue + print( + "WARNING 2: the test file does not have the variable ", + vname_d, + " that is in the ensemble summary file", + ) + continue data = fname.variables[vname_d] if np.any(np.isnan(data)): - print("ERROR: ", vname_d, " data contains NaNs - please check input => EXITING....") + print( + "ERROR: ", + vname_d, + " data contains NaNs - please check input => EXITING....", + ) nan_flag = True continue - if (is_SE == True): + if is_SE == True: if not cumul: - output2d[:] = data[tslice,:] + output2d[:] = data[tslice, :] gm2d_mean = area_avg(output2d[:], area_wgt, is_SE) else: if not cumul: - output2d[:,:] = data[tslice,:,:] - gm2d_mean = area_avg(output2d[:,:], area_wgt, is_SE) - gm2d[count]=gm2d_mean + output2d[:, :] = data[tslice, :, :] + gm2d_mean = area_avg(output2d[:, :], area_wgt, is_SE) + gm2d[count] = gm2d_mean if nan_flag: print("ERROR: Nans in input data => EXITING....") sys.exit() - return gm3d,gm2d + return gm3d, gm2d + # # Read variable values from ensemble summary file # def read_ensemble_summary(ens_file): - if(os.path.isfile(ens_file)): - fens = nc.Dataset(ens_file,"r") - else: - print('ERROR: file ens summary: ',ens_file,' not found => EXITING....') - sys.exit(2) - - is_SE = False - dims=fens.dimensions - if 'ncol' in dims: - is_SE = True - - esize = len(dims['ens_size']) - str_size = len(dims['str_size']) - - ens_avg={} - ens_stddev={} - ens_var_name=[] - ens_rmsz={} - ens_gm={} - std_gm={} - - # Retrieve the variable list from ensemble file - for k,v in fens.variables.items(): - if k== 'vars': - for i in v[0:len(v)]: - l=0 - for j in i: - if j: - l=l+1 - ens_var_name.append(i[0:l].tostring().strip()) - elif k== 'var3d': - num_var3d=len(v) - elif k== 'var2d': - num_var2d=len(v) - - for k,v in fens.variables.items(): - # Retrieve the ens_avg3d or ens_avg2d array - if k == 'ens_avg3d' or k=='ens_avg2d': - if k== 'ens_avg2d': - m=num_var3d - else: - m=0 - if v: - for i in v[0:len(v)]: - temp_name=ens_var_name[m] - ens_avg[temp_name] = i - m=m+1 - - # Retrieve the ens_stddev3d or ens_stddev2d array - elif k == 'ens_stddev3d' or k == 'ens_stddev2d': - if k== 'ens_stddev2d': - m=num_var3d - else: - m=0 - if v: - for i in v[0:len(v)]: - temp_name=ens_var_name[m] - ens_stddev[temp_name] = i - m=m+1 - # Retrieve the RMSZ score array - elif k == 'RMSZ': - m=0 - for i in v[0:len(v)]: - 
temp_name=ens_var_name[m] - ens_rmsz[temp_name]=i - m=m+1 - elif k == 'global_mean': - m=0 - for i in v[0:len(v)]: - temp_name=ens_var_name[m] - ens_gm[temp_name]=i - m=m+1 - elif k == 'standardized_gm': - m=0 - for i in v[0:len(v)]: - temp_name=ens_var_name[m] - std_gm[temp_name]=i - m=m+1 - #also get as array (not just dictionary) - std_gm_array = np.zeros((num_var3d+num_var2d,esize),dtype=np.float64) - std_gm_array[:] = v[:,:] - elif k == 'mu_gm': - mu_gm=np.zeros((num_var3d+num_var2d),dtype=np.float64) - mu_gm[:]=v[:] - elif k == 'sigma_gm': - sigma_gm=np.zeros((num_var3d+num_var2d),dtype=np.float64) - sigma_gm[:]=v[:] - elif k == 'loadings_gm': - loadings_gm=np.zeros((num_var3d+num_var2d,num_var3d+num_var2d),dtype=np.float64) - loadings_gm[:,:]=v[:,:] - elif k == 'sigma_scores_gm': - sigma_scores_gm=np.zeros((num_var3d+num_var2d),dtype=np.float64) - sigma_scores_gm[:]=v[:] - - - fens.close() - - return ens_var_name,ens_avg,ens_stddev,ens_rmsz,ens_gm,num_var3d,mu_gm,sigma_gm,loadings_gm,sigma_scores_gm,is_SE,std_gm, std_gm_array, str_size + if os.path.isfile(ens_file): + fens = nc.Dataset(ens_file, "r") + else: + print("ERROR: file ens summary: ", ens_file, " not found => EXITING....") + sys.exit(2) + + is_SE = False + dims = fens.dimensions + if "ncol" in dims: + is_SE = True + + esize = len(dims["ens_size"]) + str_size = len(dims["str_size"]) + + ens_avg = {} + ens_stddev = {} + ens_var_name = [] + ens_rmsz = {} + ens_gm = {} + std_gm = {} + + # Retrieve the variable list from ensemble file + for k, v in fens.variables.items(): + if k == "vars": + for i in v[0 : len(v)]: + l = 0 + for j in i: + if j: + l = l + 1 + ens_var_name.append(i[0:l].tostring().strip()) + elif k == "var3d": + num_var3d = len(v) + elif k == "var2d": + num_var2d = len(v) + + for k, v in fens.variables.items(): + # Retrieve the ens_avg3d or ens_avg2d array + if k == "ens_avg3d" or k == "ens_avg2d": + if k == "ens_avg2d": + m = num_var3d + else: + m = 0 + if v: + for i in v[0 : len(v)]: + temp_name = ens_var_name[m] + ens_avg[temp_name] = i + m = m + 1 + + # Retrieve the ens_stddev3d or ens_stddev2d array + elif k == "ens_stddev3d" or k == "ens_stddev2d": + if k == "ens_stddev2d": + m = num_var3d + else: + m = 0 + if v: + for i in v[0 : len(v)]: + temp_name = ens_var_name[m] + ens_stddev[temp_name] = i + m = m + 1 + # Retrieve the RMSZ score array + elif k == "RMSZ": + m = 0 + for i in v[0 : len(v)]: + temp_name = ens_var_name[m] + ens_rmsz[temp_name] = i + m = m + 1 + elif k == "global_mean": + m = 0 + for i in v[0 : len(v)]: + temp_name = ens_var_name[m] + ens_gm[temp_name] = i + m = m + 1 + elif k == "standardized_gm": + m = 0 + for i in v[0 : len(v)]: + temp_name = ens_var_name[m] + std_gm[temp_name] = i + m = m + 1 + # also get as array (not just dictionary) + std_gm_array = np.zeros((num_var3d + num_var2d, esize), dtype=np.float64) + std_gm_array[:] = v[:, :] + elif k == "mu_gm": + mu_gm = np.zeros((num_var3d + num_var2d), dtype=np.float64) + mu_gm[:] = v[:] + elif k == "sigma_gm": + sigma_gm = np.zeros((num_var3d + num_var2d), dtype=np.float64) + sigma_gm[:] = v[:] + elif k == "loadings_gm": + loadings_gm = np.zeros( + (num_var3d + num_var2d, num_var3d + num_var2d), dtype=np.float64 + ) + loadings_gm[:, :] = v[:, :] + elif k == "sigma_scores_gm": + sigma_scores_gm = np.zeros((num_var3d + num_var2d), dtype=np.float64) + sigma_scores_gm[:] = v[:] + + fens.close() + + return ( + ens_var_name, + ens_avg, + ens_stddev, + ens_rmsz, + ens_gm, + num_var3d, + mu_gm, + sigma_gm, + loadings_gm, + sigma_scores_gm, + 
is_SE, + std_gm, + std_gm_array, + str_size, + ) # @@ -931,8 +1167,8 @@ def read_ensemble_summary(ens_file): # (frun is not open) def get_ncol_nlev(frun): - o_frun = nc.Dataset(frun, "r") - input_dims=o_frun.dimensions + o_frun = nc.Dataset(frun, "r") + input_dims = o_frun.dimensions ncol = -1 nlev = -1 ilev = -1 @@ -941,31 +1177,31 @@ def get_ncol_nlev(frun): icol = -1 ilat = -1 ilon = -1 - for k,v in input_dims.items(): - if k == 'lev': + for k, v in input_dims.items(): + if k == "lev": nlev = len(v) - if k == 'ncol': + if k == "ncol": ncol = len(v) - if (k == 'lat') or (k=='nlat'): + if (k == "lat") or (k == "nlat"): nlat = len(v) - if (k == 'lon') or (k=='nlon'): + if (k == "lon") or (k == "nlon"): nlon = len(v) - - if ncol == -1 : + + if ncol == -1: one_spatial_dim = False else: one_spatial_dim = True if one_spatial_dim: - npts3d=float(nlev*ncol) - npts2d=float(ncol) + npts3d = float(nlev * ncol) + npts2d = float(ncol) else: - npts3d=float(nlev*nlat*nlon) - npts2d=float(nlat*nlon) + npts3d = float(nlev * nlat * nlon) + npts2d = float(nlat * nlon) o_frun.close() - - return npts3d,npts2d,one_spatial_dim + + return npts3d, npts2d, one_spatial_dim # @@ -973,179 +1209,204 @@ def get_ncol_nlev(frun): # the inputdir should only have all ensemble run files # def calculate_maxnormens(opts_dict, var_list): - ifiles=[] - Maxnormens={} - threshold=1e-12 - # input file directory - inputdir=opts_dict['indir'] - - # the timeslice that we want to process - tstart=opts_dict['tslice'] - - # open all files - for frun_file in os.listdir(inputdir): - if (os.path.isfile(inputdir+frun_file)): - ifiles.append(nc.Dataset(inputdir+frun_file,"r")) - else: - print("ERROR: Could not locate file= "+inputdir+frun_file + " => EXITING....") - sys.exit() - comparision={} - # loop through each variable - for k in var_list: - output=[] - # read all data of variable k from all files - for f in ifiles: - v=f.variables - output.append(v[k][tstart]) - max_val=0 - # open an output file - outmaxnormens=k+"_ens_maxnorm.txt" - fout=open(outmaxnormens,"w") - Maxnormens[k]=[] - - # calculate E(i=0:n)(maxnormens[i][x])=max(comparision[i]-E(x=0:n)(output[x])) - for n in range(len(ifiles)): - Maxnormens[k].append(0) - comparision[k]=ifiles[n].variables[k][tstart] - for m in range(len(ifiles)): - max_val=np.max(np.abs(comparision[k]-output[m])) - if Maxnormens[k][n] < max_val: - Maxnormens[k][n]=max_val - range_max=np.max((comparision[k])) - range_min=np.min((comparision[k])) - if range_max-range_min < threshold: - Maxnormens[k][n]=0. - else: - Maxnormens[k][n]=Maxnormens[k][n]/(range_max-range_min) - fout.write(str(Maxnormens[k][n])+'\n') - strtmp = k + ' : ' + 'ensmax min max' + ' : ' + '{0:9.2e}'.format(min(Maxnormens[k]))+' '+'{0:9.2e}'.format(max(Maxnormens[k])) - print(strtmp) - fout.close() + ifiles = [] + Maxnormens = {} + threshold = 1e-12 + # input file directory + inputdir = opts_dict["indir"] + + # the timeslice that we want to process + tstart = opts_dict["tslice"] + + # open all files + for frun_file in os.listdir(inputdir): + if os.path.isfile(inputdir + frun_file): + ifiles.append(nc.Dataset(inputdir + frun_file, "r")) + else: + print( + "ERROR: Could not locate file= " + + inputdir + + frun_file + + " => EXITING...." 
+ ) + sys.exit() + comparision = {} + # loop through each variable + for k in var_list: + output = [] + # read all data of variable k from all files + for f in ifiles: + v = f.variables + output.append(v[k][tstart]) + max_val = 0 + # open an output file + outmaxnormens = k + "_ens_maxnorm.txt" + fout = open(outmaxnormens, "w") + Maxnormens[k] = [] + + # calculate E(i=0:n)(maxnormens[i][x])=max(comparision[i]-E(x=0:n)(output[x])) + for n in range(len(ifiles)): + Maxnormens[k].append(0) + comparision[k] = ifiles[n].variables[k][tstart] + for m in range(len(ifiles)): + max_val = np.max(np.abs(comparision[k] - output[m])) + if Maxnormens[k][n] < max_val: + Maxnormens[k][n] = max_val + range_max = np.max((comparision[k])) + range_min = np.min((comparision[k])) + if range_max - range_min < threshold: + Maxnormens[k][n] = 0.0 + else: + Maxnormens[k][n] = Maxnormens[k][n] / (range_max - range_min) + fout.write(str(Maxnormens[k][n]) + "\n") + strtmp = ( + k + + " : " + + "ensmax min max" + + " : " + + "{0:9.2e}".format(min(Maxnormens[k])) + + " " + + "{0:9.2e}".format(max(Maxnormens[k])) + ) + print(strtmp) + fout.close() + # # Parse options from command line or from config file # def getopt_parseconfig(opts, optkeys, caller, opts_dict): - # integer - integer = '-[0-9]+' - int_p=re.compile(integer) - # scientific notation - flt = '-*[0-9]+\.[0-9]+' - flt_p=re.compile(flt) - - - for opt,arg in opts: - if opt =='-h' and caller=='CECT': - CECT_usage() - sys.exit() - elif opt == '-h' and caller == 'ES': - EnsSum_usage() - sys.exit() - elif opt == '-h' and caller == 'ESP': - EnsSumPop_usage() - sys.exit() - elif opt == '-f': - opts_dict['orig']=arg - elif opt == '-m': - opts_dict['reqmeth']=arg - #parse config file - elif opt in ("--config"): - configfile=arg - config=configparser.ConfigParser() - config.read(configfile) - for sec in config.sections(): - for name,value in config.items(sec): - if sec== 'bool_arg' or sec == 'metrics': - opts_dict[name]=config.getboolean(sec,name) - elif sec == 'int_arg': - opts_dict[name]=config.getint(sec,name) - elif sec == 'float_arg': - opts_dict[name]=config.getfloat(sec,name) - else: - opts_dict[name]=value - - #parse command line options which might replace the settings in the config file - else: - for k in optkeys: - if k.find("=") != -1: - keyword=k[0:k.find('=')] - if opt == '--'+keyword: - if arg.isdigit(): - opts_dict[keyword]=int(arg) - else: - if flt_p.match(arg) : - opts_dict[keyword]=float(arg) - elif int_p.match(arg) : - opts_dict[keyword]=int(arg) - else: - opts_dict[keyword]=arg + # integer + integer = "-[0-9]+" + int_p = re.compile(integer) + # scientific notation + flt = "-*[0-9]+\.[0-9]+" + flt_p = re.compile(flt) + + for opt, arg in opts: + if opt == "-h" and caller == "CECT": + CECT_usage() + sys.exit() + elif opt == "-h" and caller == "ES": + EnsSum_usage() + sys.exit() + elif opt == "-h" and caller == "ESP": + EnsSumPop_usage() + sys.exit() + elif opt == "-f": + opts_dict["orig"] = arg + elif opt == "-m": + opts_dict["reqmeth"] = arg + # parse config file + elif opt in ("--config"): + configfile = arg + config = configparser.ConfigParser() + config.read(configfile) + for sec in config.sections(): + for name, value in config.items(sec): + if sec == "bool_arg" or sec == "metrics": + opts_dict[name] = config.getboolean(sec, name) + elif sec == "int_arg": + opts_dict[name] = config.getint(sec, name) + elif sec == "float_arg": + opts_dict[name] = config.getfloat(sec, name) + else: + opts_dict[name] = value + + # parse command line options which might 
replace the settings in the config file else: - if opt == '--'+k: - opts_dict[k]=True - return opts_dict - + for k in optkeys: + if k.find("=") != -1: + keyword = k[0 : k.find("=")] + if opt == "--" + keyword: + if arg.isdigit(): + opts_dict[keyword] = int(arg) + else: + if flt_p.match(arg): + opts_dict[keyword] = float(arg) + elif int_p.match(arg): + opts_dict[keyword] = int(arg) + else: + opts_dict[keyword] = arg + else: + if opt == "--" + k: + opts_dict[k] = True + return opts_dict + + # # Figure out the scores of the 3 new runs, standardized global means, then multiple by the loadings_gm # -def standardized(gm, mu_gm, sigma_gm, loadings_gm, all_var_names, opts_dict, ens_avg,me): - nvar=gm.shape[0] - nfile=gm.shape[1] - sum_std_mean=np.zeros((nvar,),dtype=np.float64) - standardized_mean=np.zeros(gm.shape,dtype=np.float64) +def standardized( + gm, mu_gm, sigma_gm, loadings_gm, all_var_names, opts_dict, ens_avg, me +): + nvar = gm.shape[0] + nfile = gm.shape[1] + sum_std_mean = np.zeros((nvar,), dtype=np.float64) + standardized_mean = np.zeros(gm.shape, dtype=np.float64) for var in range(nvar): - for file in range(nfile): - standardized_mean[var,file]=(gm[var,file].astype(np.float64)-mu_gm[var].astype(np.float64))/sigma_gm[var].astype(np.float64) - sum_std_mean[var]=sum_std_mean[var]+np.abs(standardized_mean[var,file]) - new_scores=np.dot(loadings_gm.T.astype(np.float64),standardized_mean) - - var_list=[] - sorted_sum_std_mean=np.argsort(sum_std_mean)[::-1] - if (opts_dict['printStdMean']) : - if me.get_rank() == 0: - print(' ') - print('************************************************************************') - print(' Sum of standardized mean of all variables in decreasing order') - print('************************************************************************') - for var in range(nvar): - var_list.append(all_var_names[sorted_sum_std_mean[var]]) - vname = all_var_names[sorted_sum_std_mean[var]] - if me.get_rank() == 0: - - if isinstance(vname, str) == True: - vname_d = vname - else: - vname_d = vname.decode("utf-8") - - print('{:>15}'.format(vname_d),'{0:9.2e}'.format(sum_std_mean[sorted_sum_std_mean[var]])) - print(' ') - return new_scores,var_list,standardized_mean + for file in range(nfile): + standardized_mean[var, file] = ( + gm[var, file].astype(np.float64) - mu_gm[var].astype(np.float64) + ) / sigma_gm[var].astype(np.float64) + sum_std_mean[var] = sum_std_mean[var] + np.abs(standardized_mean[var, file]) + new_scores = np.dot(loadings_gm.T.astype(np.float64), standardized_mean) + + var_list = [] + sorted_sum_std_mean = np.argsort(sum_std_mean)[::-1] + if opts_dict["printStdMean"]: + if me.get_rank() == 0: + print(" ") + print( + "************************************************************************" + ) + print(" Sum of standardized mean of all variables in decreasing order") + print( + "************************************************************************" + ) + for var in range(nvar): + var_list.append(all_var_names[sorted_sum_std_mean[var]]) + vname = all_var_names[sorted_sum_std_mean[var]] + if me.get_rank() == 0: + + if isinstance(vname, str) == True: + vname_d = vname + else: + vname_d = vname.decode("utf-8") + + print( + "{:>15}".format(vname_d), + "{0:9.2e}".format(sum_std_mean[sorted_sum_std_mean[var]]), + ) + print(" ") + return new_scores, var_list, standardized_mean + # # Insert rmsz scores, global mean of new run to the dictionary results # def addresults(results, key, value, var, thefile): if var in results: - temp = results[var] - if key in temp: - temp2 = 
temp[key]
-        if thefile in temp2:
-            temp3 = results[var][key][thefile]
-        else:
-            temp3={}
-    else:
-        temp[key]={}
-        temp2={}
-        temp3={}
-    temp3=value
-    temp2[thefile]=temp3
-    temp[key]=temp2
-    results[var]=temp
+        temp = results[var]
+        if key in temp:
+            temp2 = temp[key]
+            if thefile in temp2:
+                temp3 = results[var][key][thefile]
+            else:
+                temp3 = {}
+        else:
+            temp[key] = {}
+            temp2 = {}
+            temp3 = {}
+        temp3 = value
+        temp2[thefile] = temp3
+        temp[key] = temp2
+        results[var] = temp
     else:
-        results[var]={}
-        results[var][key]={}
-        results[var][key][thefile]=value
-
-
+        results[var] = {}
+        results[var][key] = {}
+        results[var][key][thefile] = value
+
     return results

@@ -1153,606 +1414,819 @@ def addresults(results, key, value, var, thefile):
 # Print out rmsz score failure, global mean failure summary
 #
 def printsummary(results, key, name, namerange, thefilecount, variables, label):
-    thefile='f'+str(thefilecount)
-    for k,v in results.items():
-        if 'status' in v:
-            temp0 = v['status']
-            if key in temp0:
-                if thefile in temp0[key]:
-                    temp = temp0[key][thefile]
-                    if temp < 1:
-                        print(' ')
-                        print(k+' ('+'{0:9.2e}'.format(v[name][thefile])+' outside of ['+'{0:9.2e}'.format(variables[k][namerange][0])+' '+'{0:9.2e}'.format(variables[k][namerange][1])+'])')
+    thefile = "f" + str(thefilecount)
+    for k, v in results.items():
+        if "status" in v:
+            temp0 = v["status"]
+            if key in temp0:
+                if thefile in temp0[key]:
+                    temp = temp0[key][thefile]
+                    if temp < 1:
+                        print(" ")
+                        print(
+                            k
+                            + " ("
+                            + "{0:9.2e}".format(v[name][thefile])
+                            + " outside of ["
+                            + "{0:9.2e}".format(variables[k][namerange][0])
+                            + " "
+                            + "{0:9.2e}".format(variables[k][namerange][1])
+                            + "])"
+                        )
+

 #
 # Insert the range of rmsz score and global mean of the ensemble summary file to the dictionary variables
 #
 def addvariables(variables, var, vrange, thearray):
-    if var in variables:
-        variables[var][vrange]=(np.min(thearray),np.max(thearray))
-    else:
-        variables[var]={}
-        variables[var][vrange]=(np.min(thearray),np.max(thearray))
+    if var in variables:
+        variables[var][vrange] = (np.min(thearray), np.max(thearray))
+    else:
+        variables[var] = {}
+        variables[var][vrange] = (np.min(thearray), np.max(thearray))
+
+    return variables

-    return variables

-#
+#
 # Evaluate if the new run rmsz score/global mean in the range of rmsz scores/global mean of the ensemble summary
 #
 def evaluatestatus(name, rangename, variables, key, results, thefile):
-    totalcount=0
-    for k,v in results.items():
-        if name in v and rangename in variables[k]:
-            temp0=results[k]
-            xrange = variables[k][rangename]
-            if v[name][thefile] > xrange[1] or v[name][thefile] < xrange[0]:
-                val=0
-            else:
-                val=1
-            if 'status' in temp0:
-                temp=temp0['status']
-                if key in temp:
-                    temp2 = temp[key]
-                else:
-                    temp[key] = temp2 = {}
-
-                if val == 0:
-                    totalcount = totalcount+1
-                temp2[thefile]=val
-                temp[key]=temp2
-                results[k]['status']==temp
-            else:
-                temp0['status']={}
-                temp0['status'][key]={}
-                temp0['status'][key][thefile]=val
-                if val == 0:
-                    totalcount = totalcount+1
-
-    return totalcount
-
+    totalcount = 0
+    for k, v in results.items():
+        if name in v and rangename in variables[k]:
+            temp0 = results[k]
+            xrange = variables[k][rangename]
+            if v[name][thefile] > xrange[1] or v[name][thefile] < xrange[0]:
+                val = 0
+            else:
+                val = 1
+            if "status" in temp0:
+                temp = temp0["status"]
+                if key in temp:
+                    temp2 = temp[key]
+                else:
+                    temp[key] = temp2 = {}
+
+                if val == 0:
+                    totalcount = totalcount + 1
+                temp2[thefile] = val
+                temp[key] = temp2
+                results[k]["status"] = temp
+            else:
+
temp0["status"] = {} + temp0["status"][key] = {} + temp0["status"][key][thefile] = val + if val == 0: + totalcount = totalcount + 1 + + return totalcount + + # # Evaluate if the new run PCA scores pass or fail by comparing with the PCA scores of the ensemble summary # ifiles are open def comparePCAscores(ifiles, new_scores, sigma_scores_gm, opts_dict, me): - comp_array=np.zeros(new_scores.shape,dtype=np.int32) - sum=np.zeros(new_scores.shape[0],dtype=np.int32) - eachruncount=np.zeros(new_scores.shape[1],dtype=np.int32) - totalcount=0 - sum_index=[] - if me.get_rank()==0: - print('*********************************************** ') - print('PCA Test Results') - print('*********************************************** ') - - #Test to check if new_scores out of range of sigMul*sigma_scores_gm - for i in range(opts_dict['nPC']): - for j in range(new_scores.shape[1]): - if abs(new_scores[i][j]) > opts_dict['sigMul'] * (sigma_scores_gm[i]): - comp_array[i][j] = 1 - eachruncount[j]=eachruncount[j]+1 - #Only check the first nPC number of scores, and sum comp_array together - sum[i]=sum[i]+comp_array[i][j] - - - if len(ifiles) >= opts_dict['minRunFail']: - num_run_less = False - else: - num_run_less = True - #Check to see if sum is larger than min_run_fail, if so save the index of the sum - for i in range(opts_dict['nPC']): - if sum[i] >= opts_dict['minRunFail']: - totalcount=totalcount+1 - sum_index.append(i+1) - - #false_positive=check_falsepositive(opts_dict,sum_index) - - #If the length of sum_index is larger than min_PC_fail, the three runs failed. - #This doesn't apply for UF-ECT. - if opts_dict['numRunFile'] > opts_dict['eet']: - if len(sum_index) >= opts_dict['minPCFail']: - decision='FAILED' - else: - decision='PASSED' - if (num_run_less == False) and (me.get_rank()==0): - print(' ') - print("Summary: "+str(totalcount)+" PC scores failed at least "+str(opts_dict['minRunFail'])+" runs: ",sum_index) - print(' ') - print('These runs '+decision+' according to our testing criterion.') - elif me.get_rank() == 0: - print(' ') - print('The number of run files is less than minRunFail (=2), so we cannot determin an overall pass or fail.') - print(' ') - - #Record the histogram of comp_array which value is one by the PCA scores - for i in range(opts_dict['nPC']): - index_list=[] - for j in range(comp_array.shape[1]): - if comp_array[i][j] == 1: - index_list.append(j+1) - if len(index_list) > 0 and me.get_rank() == 0: - print("PC "+str(i+1)+": failed "+str(len(index_list))+" runs ",index_list) - if me.get_rank() == 0: - print(' ') - - #Record the index of comp_array which value is one - run_index=[] - - if opts_dict['eet'] >= opts_dict['numRunFile']: - eet = exhaustive_test() - faildict={} - - for j in range(comp_array.shape[1]): - index_list=[] - for i in range(opts_dict['nPC']): - if comp_array[i][j] == 1: - index_list.append(i+1) - if me.get_rank() == 0: - print("Run "+str(j+1)+": "+str(eachruncount[j])+" PC scores failed ",index_list) - run_index.append((j+1)) - faildict[str(j+1)]=set(index_list) - - passes, failures = eet.test_combinations(faildict, runsPerTest=opts_dict['numRunFile'], nRunFails=opts_dict['minRunFail']) - if me.get_rank() == 0: - print(' ') - print("%d tests failed out of %d possible tests." % (failures, passes + failures)) - print("This represents a failure percent of %.2f." 
% (100.*failures/float(failures + passes)))
-            print(' ')
-            if float(failures)>0.1*float(passes+failures):
-                decision="FAILED"
-            else:
-                decision="PASSED"
-
-    else:
-        for j in range(comp_array.shape[1]):
-            index_list=[]
-            for i in range(opts_dict['nPC']):
-                if comp_array[i][j] == 1:
-                    index_list.append(i+1)
-            if me.get_rank() == 0:
-                print("Run "+str(j+1)+": "+str(eachruncount[j])+" PC scores failed ",index_list)
-            run_index.append((j+1))
-
-    return run_index,decision
+    comp_array = np.zeros(new_scores.shape, dtype=np.int32)
+    sum = np.zeros(new_scores.shape[0], dtype=np.int32)
+    eachruncount = np.zeros(new_scores.shape[1], dtype=np.int32)
+    totalcount = 0
+    sum_index = []
+    if me.get_rank() == 0:
+        print("*********************************************** ")
+        print("PCA Test Results")
+        print("*********************************************** ")
+
+    # Test to check if new_scores out of range of sigMul*sigma_scores_gm
+    for i in range(opts_dict["nPC"]):
+        for j in range(new_scores.shape[1]):
+            if abs(new_scores[i][j]) > opts_dict["sigMul"] * (sigma_scores_gm[i]):
+                comp_array[i][j] = 1
+                eachruncount[j] = eachruncount[j] + 1
+            # Only check the first nPC number of scores, and sum comp_array together
+            sum[i] = sum[i] + comp_array[i][j]
+
+    if len(ifiles) >= opts_dict["minRunFail"]:
+        num_run_less = False
+    else:
+        num_run_less = True
+    # Check to see if sum is larger than min_run_fail, if so save the index of the sum
+    for i in range(opts_dict["nPC"]):
+        if sum[i] >= opts_dict["minRunFail"]:
+            totalcount = totalcount + 1
+            sum_index.append(i + 1)
+
+    # false_positive=check_falsepositive(opts_dict,sum_index)
+
+    # If the length of sum_index is larger than min_PC_fail, the three runs failed.
+    # This doesn't apply for UF-ECT.
+    if opts_dict["numRunFile"] > opts_dict["eet"]:
+        if len(sum_index) >= opts_dict["minPCFail"]:
+            decision = "FAILED"
+        else:
+            decision = "PASSED"
+        if (num_run_less == False) and (me.get_rank() == 0):
+            print(" ")
+            print(
+                "Summary: "
+                + str(totalcount)
+                + " PC scores failed at least "
+                + str(opts_dict["minRunFail"])
+                + " runs: ",
+                sum_index,
+            )
+            print(" ")
+            print("These runs " + decision + " according to our testing criterion.")
+        elif me.get_rank() == 0:
+            print(" ")
+            print(
+                "The number of run files is less than minRunFail (=2), so we cannot determine an overall pass or fail."
+            )
+            print(" ")
+
+    # Record, for each PC, the runs whose comp_array value is one
+    for i in range(opts_dict["nPC"]):
+        index_list = []
+        for j in range(comp_array.shape[1]):
+            if comp_array[i][j] == 1:
+                index_list.append(j + 1)
+        if len(index_list) > 0 and me.get_rank() == 0:
+            print(
+                "PC " + str(i + 1) + ": failed " + str(len(index_list)) + " runs ",
+                index_list,
+            )
+    if me.get_rank() == 0:
+        print(" ")
+
+    # Record the run indices and, for each run, its failing PC scores
+    run_index = []
+
+    if opts_dict["eet"] >= opts_dict["numRunFile"]:
+        eet = exhaustive_test()
+        faildict = {}
+
+        for j in range(comp_array.shape[1]):
+            index_list = []
+            for i in range(opts_dict["nPC"]):
+                if comp_array[i][j] == 1:
+                    index_list.append(i + 1)
+            if me.get_rank() == 0:
+                print(
+                    "Run "
+                    + str(j + 1)
+                    + ": "
+                    + str(eachruncount[j])
+                    + " PC scores failed ",
+                    index_list,
+                )
+            run_index.append((j + 1))
+            faildict[str(j + 1)] = set(index_list)
+
+        passes, failures = eet.test_combinations(
+            faildict,
+            runsPerTest=opts_dict["numRunFile"],
+            nRunFails=opts_dict["minRunFail"],
+        )
+        if me.get_rank() == 0:
+            print(" ")
+            print(
+                "%d tests failed out of %d possible tests." 
+ % (failures, passes + failures) + ) + print( + "This represents a failure percent of %.2f." + % (100.0 * failures / float(failures + passes)) + ) + print(" ") + if float(failures) > 0.1 * float(passes + failures): + decision = "FAILED" + else: + decision = "PASSED" + + else: + for j in range(comp_array.shape[1]): + index_list = [] + for i in range(opts_dict["nPC"]): + if comp_array[i][j] == 1: + index_list.append(i + 1) + if me.get_rank() == 0: + print( + "Run " + + str(j + 1) + + ": " + + str(eachruncount[j]) + + " PC scores failed ", + index_list, + ) + run_index.append((j + 1)) + + return run_index, decision + + # # Command options for pyCECT.py # def CECT_usage(): - print('\n Compare test runs to an ensemble summary file. \n') - print(' ----------------------------') - print(' Args for pyCECT :') - print(' ----------------------------') - print(' pyCECT.py') - print(' -h : prints out this usage message') - print(' --verbose : prints out in verbose mode (off by default)') - print(' --sumfile : the ensemble summary file (generated by pyEnsSum.py)') - print(' --indir : directory containing the input run files (at least 3 files)') - print(' --tslice : which time slice to use from input run files (default = 1)') - print(' ----------------------------') - print(' Args for CAM-CECT and UF-CAM-ECT:') - print(' ----------------------------') - print(' --nPC : number of principal components (PCs) to check (default = 50, but can\'t be greater than the number of variables)') - print(' --sigMul : number of standard deviations away from the mean defining the "acceptance region" (default = 2)') - print(' --minPCFail : minimum number of PCs that must fail the specified number of runs for a FAILURE (default = 3)') - print(' --minRunFail : minimum number of runs that PCs must fail for a FAILURE (default = 2)') - print(' --numRunFile : total number of runs to include in test (default = 3)') - print(' --printVars : print out variables that fall outsie of the global mean ensemble distribution (off by default)') - print(' --printStdMean : print out sum of standardized mean of all variables in decreasing order. If test returns a FAIL, ') - print(' then output associated box plots (off by default) - requires Python seaborn package') - print(' --saveResults : save a netcdf file with scores and std global means from the test runs (savefile.nc). ') - print(' --eet : enable Ensemble Exhaustive Test (EET) to compute failure percent of runs (greater than or equal to numRunFile)') - print(' ----------------------------') - print(' Args for POP-CECT :') - print(' ----------------------------') - print(' --popens : indicate POP-ECT (required!) (tslice will bet set to 0)') - print(' --jsonfile : list the json file that specifies variables to test (required!), e.g. pop_ensemble.json') - print(' --pop_tol : set pop zscore tolerance (default is 3.0 - recommended)') - print(' --pop_threshold : set pop threshold (default is 0.9)') - print(' --input_globs : set the search pattern (wildcard) for the file(s) to compare from ') - print(' the input directory (indir), such as core48.pop.h.0003-12 or core48.pop.h.0003 (more info in README)') + print("\n Compare test runs to an ensemble summary file. 
\n") + print(" ----------------------------") + print(" Args for pyCECT :") + print(" ----------------------------") + print(" pyCECT.py") + print(" -h : prints out this usage message") + print(" --verbose : prints out in verbose mode (off by default)") + print( + " --sumfile : the ensemble summary file (generated by pyEnsSum.py)" + ) + print( + " --indir : directory containing the input run files (at least 3 files)" + ) + print( + " --tslice : which time slice to use from input run files (default = 1)" + ) + print(" ----------------------------") + print(" Args for CAM-CECT and UF-CAM-ECT:") + print(" ----------------------------") + print( + " --nPC : number of principal components (PCs) to check (default = 50, but can't be greater than the number of variables)" + ) + print( + ' --sigMul : number of standard deviations away from the mean defining the "acceptance region" (default = 2)' + ) + print( + " --minPCFail : minimum number of PCs that must fail the specified number of runs for a FAILURE (default = 3)" + ) + print( + " --minRunFail : minimum number of runs that PCs must fail for a FAILURE (default = 2)" + ) + print( + " --numRunFile : total number of runs to include in test (default = 3)" + ) + print( + " --printVars : print out variables that fall outsie of the global mean ensemble distribution (off by default)" + ) + print( + " --printStdMean : print out sum of standardized mean of all variables in decreasing order. If test returns a FAIL, " + ) + print( + " then output associated box plots (off by default) - requires Python seaborn package" + ) + print( + " --saveResults : save a netcdf file with scores and std global means from the test runs (savefile.nc). " + ) + print( + " --eet : enable Ensemble Exhaustive Test (EET) to compute failure percent of runs (greater than or equal to numRunFile)" + ) + print(" ----------------------------") + print(" Args for POP-CECT :") + print(" ----------------------------") + print( + " --popens : indicate POP-ECT (required!) (tslice will bet set to 0)" + ) + print( + " --jsonfile : list the json file that specifies variables to test (required!), e.g. pop_ensemble.json" + ) + print( + " --pop_tol : set pop zscore tolerance (default is 3.0 - recommended)" + ) + print(" --pop_threshold : set pop threshold (default is 0.9)") + print( + " --input_globs : set the search pattern (wildcard) for the file(s) to compare from " + ) + print( + " the input directory (indir), such as core48.pop.h.0003-12 or core48.pop.h.0003 (more info in README)" + ) + + # print 'Version 3.0.8' # # Command options for pyEnsSum.py # -def EnsSum_usage(): - print('\n Creates the summary file for an ensemble of CAM data. 
\n')
-    print(' ------------------------')
-    print(' Args for pyEnsSum : ')
-    print(' ------------------------')
-    print(' pyEnsSum.py')
-    print(' -h : prints out this usage message')
-    print(' --verbose : prints out in verbose mode (off by default)')
-    print(' --sumfile : the output summary data file (default = ens.summary.nc)')
-    print(' --indir : directory containing all of the ensemble runs (default = ./)')
-    print(' --esize : Number of ensemble members (default = 350)')
-    print(' --tag : Tag name used in metadata (default = cesm2_0)')
-    print(' --compset : Compset used in metadata (default = F2000climo)')
-    print(' --res : Resolution used in metadata (default = f19_f19)')
-    print(' --mach : Machine name used in the metadata (default = cheyenne)')
-    print(' --tslice : the index into the time dimension (default = 1)')
-    print(' --jsonfile : Jsonfile to provide that a list of variables that will ')
-    print(' be excluded or included (default = exclude_empty.json)')
-    print(' --mpi_disable : Disable mpi mode to run in serial (off by default)')
-#    print ' --cumul : '
-    print(' --fIndex : Use this to start at ensemble member instead of 000 (so ')
-    print(' ensembles with numbers less than are excluded from summary file) ')
-    print(' ')
+def EnsSum_usage():
+    print("\n Creates the summary file for an ensemble of CAM data. \n")
+    print(" ------------------------")
+    print(" Args for pyEnsSum : ")
+    print(" ------------------------")
+    print(" pyEnsSum.py")
+    print(" -h : prints out this usage message")
+    print(" --verbose : prints out in verbose mode (off by default)")
+    print(
+        " --sumfile : the output summary data file (default = ens.summary.nc)"
+    )
+    print(
+        " --indir : directory containing all of the ensemble runs (default = ./)"
+    )
+    print(" --esize : Number of ensemble members (default = 350)")
+    print(" --tag : Tag name used in metadata (default = cesm2_0)")
+    print(" --compset : Compset used in metadata (default = F2000climo)")
+    print(" --res : Resolution used in metadata (default = f19_f19)")
+    print(
+        " --mach : Machine name used in the metadata (default = cheyenne)"
+    )
+    print(" --tslice : the index into the time dimension (default = 1)")
+    print(
+        " --jsonfile : Json file that provides a list of variables that will "
+    )
+    print(
+        " be excluded or included (default = exclude_empty.json)"
+    )
+    print(
+        " --mpi_disable : Disable mpi mode to run in serial (off by default)"
+    )
+    # print ' --cumul : '
+    print(
+        " --fIndex : Use this to start at ensemble member instead of 000 (so "
+    )
+    print(
+        " ensembles with numbers less than are excluded from summary file) "
+    )
+    print(" ")
+
+
 # print 'Version 3.0.7'

 #
 # Command options for pyEnsSumPop.py
 #
-def EnsSumPop_usage():
-    print('\n Creates the summary file for an ensemble of POP data. 
\n')
-    print(' ------------------------')
-    print(' Args for pyEnsSumPop : ')
-    print(' ------------------------')
-    print(' pyEnsSumPop.py')
-    print(' -h : prints out this usage message')
-    print(' --verbose : prints out in verbose mode (off by default)')
-    print(' --sumfile : the output summary data file (default = pop.ens.summary.nc)')
-    print(' --indir : directory containing all of the ensemble runs (default = ./)')
-#    print ' --npert : Number of ensemble members (default = 40)'
-    print(' --esize : Number of ensemble members (default = 40)')
-    print(' (Note: backwards compatible with --npert)')
-    print(' --tag : Tag name used in metadata (default = cesm2_0_0)')
-    print(' --compset : Compset used in metadata (default = G)')
-    print(' --res : Resolution (used in metadata) (default = T62_g17)')
-    print(' --mach : Machine name used in the metadata (default = cheyenne)')
-    print(' --tslice : the time slice of the variable that we will use (default = 0)')
-    print(' --nyear : Number of years (default = 1)')
-    print(' --nmonth : Number of months (default = 12)')
-    print(' --jsonfile : Jsonfile to provide that a list of variables that will be')
-    print(' included (RECOMMENDED: default = pop_ensemble.json)')
-    print(' --mpi_disable : Disable mpi mode to run in serial (off by default)')
-    print(' ')
+def EnsSumPop_usage():
+    print("\n Creates the summary file for an ensemble of POP data. \n")
+    print(" ------------------------")
+    print(" Args for pyEnsSumPop : ")
+    print(" ------------------------")
+    print(" pyEnsSumPop.py")
+    print(" -h : prints out this usage message")
+    print(" --verbose : prints out in verbose mode (off by default)")
+    print(
+        " --sumfile : the output summary data file (default = pop.ens.summary.nc)"
+    )
+    print(
+        " --indir : directory containing all of the ensemble runs (default = ./)"
+    )
+    # print ' --npert : Number of ensemble members (default = 40)'
+    print(" --esize : Number of ensemble members (default = 40)")
+    print(" (Note: backwards compatible with --npert)")
+    print(" --tag : Tag name used in metadata (default = cesm2_0_0)")
+    print(" --compset : Compset used in metadata (default = G)")
+    print(" --res : Resolution (used in metadata) (default = T62_g17)")
+    print(
+        " --mach : Machine name used in the metadata (default = cheyenne)"
+    )
+    print(
+        " --tslice : the time slice of the variable that we will use (default = 0)"
+    )
+    print(" --nyear : Number of years (default = 1)")
+    print(" --nmonth : Number of months (default = 12)")
+    print(
+        " --jsonfile : Json file that provides a list of variables that will be"
+    )
+    print(
+        " included (RECOMMENDED: default = pop_ensemble.json)"
+    )
+    print(
+        " --mpi_disable : Disable mpi mode to run in serial (off by default)"
+    )
+    print(" ")


 #
 # Random pick up three files out of a lot files
 #
-def Random_pickup(ifiles,opts_dict):
-    if opts_dict['numRunFile'] > opts_dict['eet']:
-        nFiles = opts_dict['numRunFile']
+def Random_pickup(ifiles, opts_dict):
+    if opts_dict["numRunFile"] > opts_dict["eet"]:
+        nFiles = opts_dict["numRunFile"]
     else:
-        nFiles = opts_dict['eet']
+        nFiles = opts_dict["eet"]

     if len(ifiles) > nFiles:
-        random_index=random.sample(list(range(0,len(ifiles))),nFiles)
+        random_index = random.sample(list(range(0, len(ifiles))), nFiles)
     else:
-        random_index=list(range(len(ifiles)))
-    new_ifiles=[]
+        random_index = list(range(len(ifiles)))
+    new_ifiles = []
     print("Randomly pick input files:")
     for i in random_index:
-        new_ifiles.append(ifiles[i])
-        print(ifiles[i])
-
+        new_ifiles.append(ifiles[i])
+        
+ # # Random pick up opts_dict['npick'] files out of a lot of OCN files # -def Random_pickup_pop(indir,opts_dict,npick): - random_year_range=opts_dict['nyear'] - random_month_range=opts_dict['nmonth'] - random_case_range=opts_dict['esize'] - - pyear=1 - pmonth=12 - - pcase=random.sample(list(range(0,random_case_range)),npick) - - - new_ifiles_temp=[] - not_pick_files=[] - for i in pcase: - wildname='*'+str(i).zfill(4)+'*'+str(pyear).zfill(4)+'-'+str(pmonth).zfill(2)+'*' +def Random_pickup_pop(indir, opts_dict, npick): + random_year_range = opts_dict["nyear"] + random_month_range = opts_dict["nmonth"] + random_case_range = opts_dict["esize"] + + pyear = 1 + pmonth = 12 + + pcase = random.sample(list(range(0, random_case_range)), npick) + + new_ifiles_temp = [] + not_pick_files = [] + for i in pcase: + wildname = ( + "*" + + str(i).zfill(4) + + "*" + + str(pyear).zfill(4) + + "-" + + str(pmonth).zfill(2) + + "*" + ) print(wildname) - for filename in os.listdir(opts_dict['indir']): - if fnmatch.fnmatch(filename,wildname): - new_ifiles_temp.append(filename) - for filename in os.listdir(opts_dict['indir']): + for filename in os.listdir(opts_dict["indir"]): + if fnmatch.fnmatch(filename, wildname): + new_ifiles_temp.append(filename) + for filename in os.listdir(opts_dict["indir"]): if filename not in new_ifiles_temp: - not_pick_files.append(filename) - with open(opts_dict['jsondir']+'random_testcase.'+str(npick)+'.'+str(opts_dict['seq'])+'.json','wb') as fout: - json.dump({'not_pick_files':not_pick_files},fout,sort_keys=True,indent=4,ensure_ascii=True) + not_pick_files.append(filename) + with open( + opts_dict["jsondir"] + + "random_testcase." + + str(npick) + + "." + + str(opts_dict["seq"]) + + ".json", + "w", + ) as fout: + json.dump( + {"not_pick_files": not_pick_files}, + fout, + sort_keys=True, + indent=4, + ensure_ascii=True, + ) print(sorted(new_ifiles_temp)) print(sorted(not_pick_files)) return sorted(new_ifiles_temp) - + + # -# Check the false positive rate +# Check the false positive rate # (needs updating: this is only for ensemble 151) def check_falsepositive(opts_dict, sum_index): - fp=np.zeros((opts_dict['nPC'],),dtype=np.float32) - fp[0]=0.30305 - fp[1]=0.05069 - fp[2]=0.005745 - fp[3]=0.000435 - fp[4]=5.0e-05 + fp = np.zeros((opts_dict["nPC"],), dtype=np.float32) + fp[0] = 0.30305 + fp[1] = 0.05069 + fp[2] = 0.005745 + fp[3] = 0.000435 + fp[4] = 5.0e-05 nPC = 50 sigMul = 2 minPCFail = 3 minRunFail = 2 numRunFile = 3 - if opts_dict['numRunFile'] > opts_dict['eet']: - nFiles = opts_dict['numRunFile'] + if opts_dict["numRunFile"] > opts_dict["eet"]: + nFiles = opts_dict["numRunFile"] else: - nFiles = opts_dict['eet'] - - if (nPC == opts_dict['nPC']) and (sigMul == opts_dict['sigMul']) and (minPCFail == opts_dict['minPCFail']) and (minRunFail == opts_dict['minRunFail']) and (numRunFile == nFiles): - false_positive=fp[len(sum_index)-1] + nFiles = opts_dict["eet"] + + if ( + (nPC == opts_dict["nPC"]) + and (sigMul == opts_dict["sigMul"]) + and (minPCFail == opts_dict["minPCFail"]) + and (minRunFail == opts_dict["minRunFail"]) + and (numRunFile == nFiles) + ): + false_positive = fp[len(sum_index) - 1] else: - false_positive=1.0 + false_positive = 1.0 return false_positive + # # Get the shape of the variable list as a tuple for each processor -# +# def get_shape(shape_tuple, shape1, rank): - lst=list(shape_tuple) - lst[0]=shape1 - shape_tuple=tuple(lst) + lst = list(shape_tuple) + lst[0] = shape1 + shape_tuple = tuple(lst) return shape_tuple - + + # # Get 
the mpi partition list for each processor # def get_stride_list(len_of_list, me): - slice_index=[] + slice_index = [] for i in range(me.get_size()): - index_arr=np.arange(len_of_list) - slice_index.append(index_arr[i::me.get_size()]) + index_arr = np.arange(len_of_list) + slice_index.append(index_arr[i :: me.get_size()]) return slice_index -# + +# # Gather arrays from each processor by the file_list to the master processor and make it an array # def gather_npArray_pop(npArray, me, array_shape): - the_array=np.zeros(array_shape,dtype=np.float32) - - if me.get_rank()==0: - j=me.get_rank() + the_array = np.zeros(array_shape, dtype=np.float32) + + if me.get_rank() == 0: + j = me.get_rank() if len(array_shape) == 1: - the_array[j]=npArray[0] + the_array[j] = npArray[0] elif len(array_shape) == 2: - the_array[j,:]=npArray[:] + the_array[j, :] = npArray[:] elif len(array_shape) == 3: - the_array[j,:,:]=npArray[:,:] + the_array[j, :, :] = npArray[:, :] elif len(array_shape) == 4: - the_array[j,:,:,:]=npArray[:,:,:] + the_array[j, :, :, :] = npArray[:, :, :] elif len(array_shape) == 5: - the_array[j,:,:,:,:]=npArray[:,:,:,:] - for i in range(1,me.get_size()): + the_array[j, :, :, :, :] = npArray[:, :, :, :] + for i in range(1, me.get_size()): if me.get_rank() == 0: - rank,npArray=me.collect() + rank, npArray = me.collect() if len(array_shape) == 1: - the_array[rank]=npArray[0] + the_array[rank] = npArray[0] elif len(array_shape) == 2: - the_array[rank,:]=npArray[:] + the_array[rank, :] = npArray[:] elif len(array_shape) == 3: - the_array[rank,:,:]=npArray[:,:] + the_array[rank, :, :] = npArray[:, :] elif len(array_shape) == 4: - the_array[rank,:,:,:]=npArray[:,:,:] + the_array[rank, :, :, :] = npArray[:, :, :] elif len(array_shape) == 5: - the_array[rank,:,:,:,:]=npArray[:,:,:,:] - if me.get_rank() != 0: - message={"from_rank":me.get_rank(),"shape":npArray.shape} + the_array[rank, :, :, :, :] = npArray[:, :, :, :] + if me.get_rank() != 0: + message = {"from_rank": me.get_rank(), "shape": npArray.shape} me.collect(npArray) me.sync() return the_array + # # Use input files from opts_dict['input_globs'] to get timeslices for pop ensemble # def get_files_from_glob(opts_dict): - in_files=[] - wildname='*'+str(opts_dict['input_globs'])+'*' - if (os.path.exists(opts_dict['indir'])): - full_glob_str=os.path.join(opts_dict['indir'],wildname) - glob_files=glob.glob(full_glob_str) - in_files.extend(glob_files) - in_files.sort() - else: - print('ERROR: Input directory does not exist => EXITING....') - sys.exit() - n_timeslice=[] - for fname in in_files: - istr=fname.find('.nc') - temp=(int(fname[istr-7:istr-3])-1)*12+int(fname[istr-2:istr])-1 - n_timeslice.append(temp) - return n_timeslice, in_files - -# -#POP-ECT Compare the testcase with the ensemble summary file + in_files = [] + wildname = "*" + str(opts_dict["input_globs"]) + "*" + if os.path.exists(opts_dict["indir"]): + full_glob_str = os.path.join(opts_dict["indir"], wildname) + glob_files = glob.glob(full_glob_str) + in_files.extend(glob_files) + in_files.sort() + else: + print("ERROR: Input directory does not exist => EXITING....") + sys.exit() + n_timeslice = [] + for fname in in_files: + istr = fname.find(".nc") + temp = ( + (int(fname[istr - 7 : istr - 3]) - 1) * 12 + int(fname[istr - 2 : istr]) - 1 + ) + n_timeslice.append(temp) + return n_timeslice, in_files + + +# +# POP-ECT Compare the testcase with the ensemble summary file # ifiles are not open def pop_compare_raw_score(opts_dict, ifiles, timeslice, Var3d, Var2d): - rmask_var = 'REGION_MASK' 
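Note: get_stride_list above builds round-robin index lists (rank i of P owns indices i, i+P, i+2P, ...); the matching gather_npArray in pyEnsSum.py later in this diff writes each rank's rows back by those same lists, while gather_npArray_pop stacks whole arrays by rank. A serial, MPI-free sketch of that layout on toy data (stride_slices and the sizes are illustrative only):

import numpy as np

def stride_slices(n_items, n_ranks):
    # Same layout as get_stride_list: rank i owns indices i, i+P, i+2P, ...
    return [np.arange(n_items)[i::n_ranks] for i in range(n_ranks)]

work = np.arange(7, dtype=np.float64) * 10.0  # 7 work items on 3 "ranks"
slices = stride_slices(7, 3)
partials = [work[idx] + 1.0 for idx in slices]  # each rank's local result

gathered = np.zeros_like(work)  # master-side reassembly, as in gather_npArray
for rank, idx in enumerate(slices):
    gathered[idx] = partials[rank]
assert np.allclose(gathered, work + 1.0)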
- if not opts_dict['test_failure']: - nbin=opts_dict['nbin'] + rmask_var = "REGION_MASK" + if not opts_dict["test_failure"]: + nbin = opts_dict["nbin"] else: - nbin=1 - Zscore = np.zeros((len(Var3d)+len(Var2d),len(ifiles),(nbin)),dtype=np.float32) - Zscore_tran = np.zeros((len(ifiles),len(Var3d)+len(Var2d),(nbin)),dtype=np.float32) - failure_count = np.zeros((len(ifiles)),dtype=np.int) - sum_file = nc.Dataset(opts_dict['sumfile'],'r') - for k,v in sum_file.variables.items(): - if k == 'ens_stddev2d': - ens_stddev2d=v - elif k == 'ens_avg2d': - ens_avg2d = v - elif k == 'ens_stddev3d': - ens_stddev3d=v - elif k == 'ens_avg3d': - ens_avg3d = v - elif k == 'time': + nbin = 1 + Zscore = np.zeros((len(Var3d) + len(Var2d), len(ifiles), (nbin)), dtype=np.float32) + Zscore_tran = np.zeros( + (len(ifiles), len(Var3d) + len(Var2d), (nbin)), dtype=np.float32 + ) + failure_count = np.zeros((len(ifiles)), dtype=int) + sum_file = nc.Dataset(opts_dict["sumfile"], "r") + for k, v in sum_file.variables.items(): + if k == "ens_stddev2d": + ens_stddev2d = v + elif k == "ens_avg2d": + ens_avg2d = v + elif k == "ens_stddev3d": + ens_stddev3d = v + elif k == "ens_avg3d": + ens_avg3d = v + elif k == "time": ens_time = v - - #check time slice 0 for zeros....indicating an incomplete summary file + # check time slice 0 for zeros....indicating an incomplete summary file sum_problem = False - all_zeros = not np.any(ens_stddev2d[0,:,:]) + all_zeros = not np.any(ens_stddev2d[0, :, :]) if all_zeros: - print('ERROR: ens_stddev2d field in summary file was not computed.') + print("ERROR: ens_stddev2d field in summary file was not computed.") sum_problem = True - all_zeros = not np.any(ens_avg2d[0,:,:]) + all_zeros = not np.any(ens_avg2d[0, :, :]) if all_zeros: - print('ERROR: ens_avg2d field in summary file was not computed.') + print("ERROR: ens_avg2d field in summary file was not computed.") sum_problem = True - all_zeros = not np.any(ens_stddev3d[0,:,:,:]) + all_zeros = not np.any(ens_stddev3d[0, :, :, :]) if all_zeros: - print('ERROR: ens_stddev3d field in summary file was not computed.') + print("ERROR: ens_stddev3d field in summary file was not computed.") sum_problem = True - all_zeros = not np.any(ens_avg3d[0,:,:,:]) + all_zeros = not np.any(ens_avg3d[0, :, :, :]) if all_zeros: - print('ERROR: ens_avg3d field in summary file was not computed.') + print("ERROR: ens_avg3d field in summary file was not computed.") sum_problem = True if sum_problem: - print('=> EXITING....') + print("=> EXITING....") sys.exit() - npts3d=0 - npts2d=0 - is_SE=False + npts3d = 0 + npts2d = 0 + is_SE = False ens_timeslice = len(ens_time) - #Get the exact month from the file names - n_timeslice=[] + # Get the exact month from the file names + n_timeslice = [] in_file_names = [] - if not opts_dict['mpi_enable']: - n_timeslice, in_file_names=get_files_from_glob(opts_dict) - #print in_file_names - temp_list=[] - for i in n_timeslice: - temp_list.append(i+1) - print('STATUS: Checkpoint month(s) = ',temp_list) - - #Compare an individual file with ensemble summary file to get zscore - for fcount,fid in enumerate(ifiles): - print(' ') - #If not in mpi_enable mode, the timeslice will be decided by the month of the input files - if not opts_dict['mpi_enable']: - timeslice=n_timeslice[fcount] + if not opts_dict["mpi_enable"]: + n_timeslice, in_file_names = get_files_from_glob(opts_dict) + # print in_file_names + temp_list = [] + for i in n_timeslice: + temp_list.append(i + 1) + print("STATUS: Checkpoint month(s) = ", temp_list) + + # Compare an 
individual file with ensemble summary file to get zscore + for fcount, fid in enumerate(ifiles): + print(" ") + # If not in mpi_enable mode, the timeslice will be decided by the month of the input files + if not opts_dict["mpi_enable"]: + timeslice = n_timeslice[fcount] o_fid = nc.Dataset(fid, "r") - otimeSeries = o_fid.variables - rmask=otimeSeries[rmask_var] + otimeSeries = o_fid.variables + rmask = otimeSeries[rmask_var] + + print( + "**********" + + "Run " + + str(fcount + 1) + + " (file=" + + in_file_names[fcount] + + "):" + ) - print('**********'+'Run '+str(fcount+1)+" (file=" + in_file_names[fcount]+ "):") - if timeslice >= ens_timeslice: - print('WARNING: The summary file containing only ',ens_timeslice, ' timeslices. Skipping this run evaluation...') + print( + "WARNING: The summary file contains only ", + ens_timeslice, + " timeslices. Skipping this run evaluation...", + ) continue - for vcount,var_name in enumerate(Var3d): - orig=otimeSeries[var_name][0] - FillValue=otimeSeries[var_name]._FillValue - Zscore[vcount,fcount,:],has_zscore=calculate_raw_score(var_name,orig,npts3d,npts2d,ens_avg3d[timeslice][vcount],ens_stddev3d[timeslice][vcount],is_SE,opts_dict,FillValue,0,rmask) - if opts_dict['test_failure']: - temp=Zscore[vcount,fcount,0] - print(' '+ '{:>10}'.format(var_name)+": "+'{:.2%}'.format(temp)) - if Zscore[vcount,fcount,:]< opts_dict['pop_threshold']: - failure_count[fcount]=failure_count[fcount]+1 - - for vcount,var_name in enumerate(Var2d): - orig=otimeSeries[var_name][0] - FillValue=otimeSeries[var_name]._FillValue - #print var_name,timeslice - Zscore[vcount+len(Var3d),fcount,:],has_zscore=calculate_raw_score(var_name,orig,npts3d,npts2d,ens_avg2d[timeslice][vcount],ens_stddev2d[timeslice][vcount],is_SE,opts_dict,FillValue,0,rmask) - if opts_dict['test_failure']: - temp=Zscore[vcount+len(Var3d),fcount,0] - print(' '+ '{:>10}'.format(var_name)+": "+'{:.2%}'.format(temp)) - if Zscore[vcount+len(Var3d),fcount,:]< opts_dict['pop_threshold']: - failure_count[fcount]=failure_count[fcount]+1 - - - if failure_count[fcount]>0: - print('**********'+str(failure_count[fcount])+' of '+str(len(Var3d)+len(Var2d)) +' variables failed, resulting in an overall FAIL'+'**********') + for vcount, var_name in enumerate(Var3d): + orig = otimeSeries[var_name][0] + FillValue = otimeSeries[var_name]._FillValue + Zscore[vcount, fcount, :], has_zscore = calculate_raw_score( + var_name, + orig, + npts3d, + npts2d, + ens_avg3d[timeslice][vcount], + ens_stddev3d[timeslice][vcount], + is_SE, + opts_dict, + FillValue, + 0, + rmask, + ) + if opts_dict["test_failure"]: + temp = Zscore[vcount, fcount, 0] + print( + " " + + "{:>10}".format(var_name) + + ": " + + "{:.2%}".format(temp) + ) + if Zscore[vcount, fcount, :] < opts_dict["pop_threshold"]: + failure_count[fcount] = failure_count[fcount] + 1 + + for vcount, var_name in enumerate(Var2d): + orig = otimeSeries[var_name][0] + FillValue = otimeSeries[var_name]._FillValue + # print var_name,timeslice + Zscore[vcount + len(Var3d), fcount, :], has_zscore = calculate_raw_score( + var_name, + orig, + npts3d, + npts2d, + ens_avg2d[timeslice][vcount], + ens_stddev2d[timeslice][vcount], + is_SE, + opts_dict, + FillValue, + 0, + rmask, + ) + if opts_dict["test_failure"]: + temp = Zscore[vcount + len(Var3d), fcount, 0] + print( + " " + + "{:>10}".format(var_name) + + ": " + + "{:.2%}".format(temp) + ) + if Zscore[vcount + len(Var3d), fcount, :] < opts_dict["pop_threshold"]: + failure_count[fcount] = failure_count[fcount] + 1 + + if failure_count[fcount] > 0: + 
print( + "**********" + + str(failure_count[fcount]) + + " of " + + str(len(Var3d) + len(Var2d)) + + " variables failed, resulting in an overall FAIL" + + "**********" + ) else: - print('**********'+str(failure_count[fcount])+' of '+str(len(Var3d)+len(Var2d)) +' variables failed, resulting in an overall PASS'+'**********') + print( + "**********" + + str(failure_count[fcount]) + + " of " + + str(len(Var3d) + len(Var2d)) + + " variables failed, resulting in an overall PASS" + + "**********" + ) o_fid.close() - sum_file.close() if has_zscore: - return Zscore,n_timeslice + return Zscore, n_timeslice else: - Zscore=0 - return Zscore,n_timeslice + Zscore = 0 + return Zscore, n_timeslice + # Get the deficit row number of the standardized global mean matrix # (AB: no longer used...) def get_failure_index(the_array): - mat_rows=the_array.shape[0] - mat_cols=the_array.shape[1] - - mat_rank=np.linalg.matrix_rank(the_array) - deficit=mat_rows-mat_rank - deficit_row=[] - x=0 - while(deficit>0): - for i in range(mat_rows): - temp_mat=np.delete(the_array,i,axis=0) - new_rank=np.linalg.matrix_rank(temp_mat) - if (new_rank == mat_rank): - #print "removing row ", i - if len(deficit_row) != 0: - #print "deficit_row=",deficit_row - x=i - for num,j in enumerate(deficit_row): - if j-num<=i: - #print "j=",j,"i=",i - x=x+1 - deficit_row.append(x) - else: - deficit_row.append(i) - - the_array=temp_mat - mat_rows=the_array.shape[0] - mat_rank=new_rank - deficit=mat_rows-mat_rank - break + mat_rows = the_array.shape[0] + mat_cols = the_array.shape[1] + + mat_rank = np.linalg.matrix_rank(the_array) + deficit = mat_rows - mat_rank + deficit_row = [] + x = 0 + while deficit > 0: + for i in range(mat_rows): + temp_mat = np.delete(the_array, i, axis=0) + new_rank = np.linalg.matrix_rank(temp_mat) + if new_rank == mat_rank: + # print "removing row ", i + if len(deficit_row) != 0: + # print "deficit_row=",deficit_row + x = i + for num, j in enumerate(deficit_row): + if j - num <= i: + # print "j=",j,"i=",i + x = x + 1 + deficit_row.append(x) + else: + deficit_row.append(i) + + the_array = temp_mat + mat_rows = the_array.shape[0] + mat_rank = new_rank + deficit = mat_rows - mat_rank + break return deficit_row + # -#Alternative method to get the linearly dependent rows (using QR for faster perf) +# Alternative method to get the linearly dependent rows (using QR for faster perf) # def get_dependent_vars_index(a_mat, orig_rank): - #initialize - dv_index = [] - - #the_array is nvars x nens - nvars = a_mat.shape[0] - - if (orig_rank < nvars): - #transpose so vars are the columns - t_mat = a_mat.transpose() - #now do a rank-revealing qr (pivots for stability) - q_mat, r_mat, piv = sla.qr(t_mat, pivoting=True) - #rank = num of nonzero diag of r - r_mat_d = np.fabs(r_mat.diagonal()) - #print r_mat_d - #AB: 4/1/19: instead of an arbitrary tolerance here, we'll just remove vars according to the - # tolerance from the rank calculation already done - rank_est = orig_rank - ind_vars_index = piv[0:rank_est] - dv_index = piv[rank_est:] - - return dv_index - + # initialize + dv_index = [] + + # the_array is nvars x nens + nvars = a_mat.shape[0] + + if orig_rank < nvars: + # transpose so vars are the columns + t_mat = a_mat.transpose() + # now do a rank-revealing qr (pivots for stability) + q_mat, r_mat, piv = sla.qr(t_mat, pivoting=True) + # rank = num of nonzero diag of r + r_mat_d = np.fabs(r_mat.diagonal()) + # print r_mat_d + # AB: 4/1/19: instead of an arbitrary tolerance here, we'll just remove vars according to the + # tolerance 
from the rank calculation already done + rank_est = orig_rank + ind_vars_index = piv[0:rank_est] + dv_index = piv[rank_est:] + + return dv_index def chunk(it, size): it = iter(it) - return iter(lambda:tuple(islice(it,size)),()) + return iter(lambda: tuple(islice(it, size)), ()) diff --git a/tools/statistical_ensemble_test/pyCECT/pyEnsSum.py b/tools/statistical_ensemble_test/pyCECT/pyEnsSum.py index 93788d2b626..935e96fde08 100644 --- a/tools/statistical_ensemble_test/pyCECT/pyEnsSum.py +++ b/tools/statistical_ensemble_test/pyCECT/pyEnsSum.py @@ -1,175 +1,194 @@ #!/usr/bin/env python from __future__ import print_function import configparser -import sys, getopt, os -import numpy as np +import sys, getopt, os +import numpy as np import netCDF4 as nc import time import re -from asaptools.partition import EqualStride, Duplicate,EqualLength -import asaptools.simplecomm as simplecomm +from asaptools.partition import EqualStride, Duplicate, EqualLength +import asaptools.simplecomm as simplecomm import pyEnsLib -#This routine creates a summary file from an ensemble of CAM -#output files +# This routine creates a summary file from an ensemble of CAM +# output files -def main(argv): +def main(argv): # Get command line stuff and store in a dictionary - s = 'tag= compset= esize= tslice= res= sumfile= indir= sumfiledir= mach= verbose jsonfile= mpi_enable maxnorm gmonly popens cumul regx= startMon= endMon= fIndex= mpi_disable' + s = "tag= compset= esize= tslice= res= sumfile= indir= sumfiledir= mach= verbose jsonfile= mpi_enable maxnorm gmonly popens cumul regx= startMon= endMon= fIndex= mpi_disable" optkeys = s.split() - try: + try: opts, args = getopt.getopt(argv, "h", optkeys) except getopt.GetoptError: pyEnsLib.EnsSum_usage() sys.exit(2) # Put command line options in a dictionary - also set defaults - opts_dict={} - + opts_dict = {} + # Defaults - opts_dict['tag'] = 'cesm2_0' - opts_dict['compset'] = 'F2000climo' - opts_dict['mach'] = 'cheyenne' - opts_dict['esize'] = 350 - opts_dict['tslice'] = 1 - opts_dict['res'] = 'f19_f19' - opts_dict['sumfile'] = 'ens.summary.nc' - opts_dict['indir'] = './' - opts_dict['sumfiledir'] = './' - opts_dict['jsonfile'] = 'exclude_empty.json' - opts_dict['verbose'] = False - opts_dict['mpi_enable'] = True - opts_dict['mpi_disable'] = False - opts_dict['maxnorm'] = False - opts_dict['gmonly'] = True - opts_dict['popens'] = False - opts_dict['cumul'] = False - opts_dict['regx'] = 'test' - opts_dict['startMon'] = 1 - opts_dict['endMon'] = 1 - opts_dict['fIndex'] = 151 - - # This creates the dictionary of input arguments - opts_dict = pyEnsLib.getopt_parseconfig(opts,optkeys,'ES',opts_dict) - - verbose = opts_dict['verbose'] - - st = opts_dict['esize'] + opts_dict["tag"] = "cesm2_0" + opts_dict["compset"] = "F2000climo" + opts_dict["mach"] = "cheyenne" + opts_dict["esize"] = 350 + opts_dict["tslice"] = 1 + opts_dict["res"] = "f19_f19" + opts_dict["sumfile"] = "ens.summary.nc" + opts_dict["indir"] = "./" + opts_dict["sumfiledir"] = "./" + opts_dict["jsonfile"] = "exclude_empty.json" + opts_dict["verbose"] = False + opts_dict["mpi_enable"] = True + opts_dict["mpi_disable"] = False + opts_dict["maxnorm"] = False + opts_dict["gmonly"] = True + opts_dict["popens"] = False + opts_dict["cumul"] = False + opts_dict["regx"] = "test" + opts_dict["startMon"] = 1 + opts_dict["endMon"] = 1 + opts_dict["fIndex"] = 151 + + # This creates the dictionary of input arguments + opts_dict = pyEnsLib.getopt_parseconfig(opts, optkeys, "ES", opts_dict) + + verbose = opts_dict["verbose"] + + st = 
opts_dict["esize"] esize = int(st) + if opts_dict["popens"] == True: + print( + "ERROR: Please use pyEnsSumPop.py for a POP ensemble (not --popens) => EXITING...." + ) + sys.exit() - if opts_dict['popens'] == True: - print("ERROR: Please use pyEnsSumPop.py for a POP ensemble (not --popens) => EXITING....") + if not ( + opts_dict["tag"] + and opts_dict["compset"] + and opts_dict["mach"] + or opts_dict["res"] + ): + print( + "ERROR: Please specify --tag, --compset, --mach and --res options => EXITING...." + ) sys.exit() - if not (opts_dict['tag'] and opts_dict['compset'] and opts_dict['mach'] or opts_dict['res']): - print('ERROR: Please specify --tag, --compset, --mach and --res options => EXITING....') - sys.exit() - - if opts_dict['mpi_disable'] == True: - opts_dict['mpi_enable'] = False + if opts_dict["mpi_disable"] == True: + opts_dict["mpi_enable"] = False # Now find file names in indir - input_dir = opts_dict['indir'] + input_dir = opts_dict["indir"] # The var list that will be excluded - ex_varlist=[] - inc_varlist=[] + ex_varlist = [] + inc_varlist = [] # Create a mpi simplecomm object - if opts_dict['mpi_enable']: - me=simplecomm.create_comm() + if opts_dict["mpi_enable"]: + me = simplecomm.create_comm() else: - me=simplecomm.create_comm(not opts_dict['mpi_enable']) - + me = simplecomm.create_comm(not opts_dict["mpi_enable"]) + if me.get_rank() == 0: - print('STATUS: Running pyEnsSum.py') + print("STATUS: Running pyEnsSum.py") - if me.get_rank() ==0 and (verbose == True): + if me.get_rank() == 0 and (verbose == True): print(opts_dict) - print('STATUS: Ensemble size for summary = ', esize) + print("STATUS: Ensemble size for summary = ", esize) - exclude=False + exclude = False if me.get_rank() == 0: - if opts_dict['jsonfile']: - inc_varlist=[] + if opts_dict["jsonfile"]: + inc_varlist = [] # Read in the excluded or included var list - ex_varlist,exclude=pyEnsLib.read_jsonlist(opts_dict['jsonfile'],'ES') + ex_varlist, exclude = pyEnsLib.read_jsonlist(opts_dict["jsonfile"], "ES") if exclude == False: - inc_varlist=ex_varlist - ex_varlist=[] - + inc_varlist = ex_varlist + ex_varlist = [] # Broadcast the excluded var list to each processor - if opts_dict['mpi_enable']: - exclude=me.partition(exclude,func=Duplicate(),involved=True) + if opts_dict["mpi_enable"]: + exclude = me.partition(exclude, func=Duplicate(), involved=True) if exclude: - ex_varlist=me.partition(ex_varlist,func=Duplicate(),involved=True) + ex_varlist = me.partition(ex_varlist, func=Duplicate(), involved=True) else: - inc_varlist=me.partition(inc_varlist,func=Duplicate(),involved=True) - - in_files=[] - if(os.path.exists(input_dir)): + inc_varlist = me.partition(inc_varlist, func=Duplicate(), involved=True) + + in_files = [] + if os.path.exists(input_dir): # Get the list of files in_files_temp = os.listdir(input_dir) - in_files=sorted(in_files_temp) + in_files = sorted(in_files_temp) # Make sure we have enough num_files = len(in_files) - if me.get_rank()==0 and (verbose == True): - print('VERBOSE: Number of files in input directory = ', num_files) - if (num_files < esize): - if me.get_rank()==0 and (verbose == True): - print('VERBOSE: Number of files in input directory (',num_files,\ - ') is less than specified ensemble size of ', esize) + if me.get_rank() == 0 and (verbose == True): + print("VERBOSE: Number of files in input directory = ", num_files) + if num_files < esize: + if me.get_rank() == 0 and (verbose == True): + print( + "VERBOSE: Number of files in input directory (", + num_files, + ") is less than specified 
ensemble size of ", + esize, + ) sys.exit(2) - if (num_files > esize): - if me.get_rank()==0 and (verbose == True): - print('VERBOSE: Note that the number of files in ', input_dir, \ - 'is greater than specified ensemble size of ', esize ,\ - '\nwill just use the first ', esize, 'files') + if num_files > esize: + if me.get_rank() == 0 and (verbose == True): + print( + "VERBOSE: Note that the number of files in ", + input_dir, + "is greater than specified ensemble size of ", + esize, + "\nwill just use the first ", + esize, + "files", + ) else: - if me.get_rank()==0: - print('ERROR: Input directory: ',input_dir,' not found') + if me.get_rank() == 0: + print("ERROR: Input directory: ", input_dir, " not found") sys.exit(2) - if opts_dict['cumul']: - if opts_dict['regx']: - in_files_list=get_cumul_filelist(opts_dict,opts_dict['indir'],opts_dict['regx']) - in_files=me.partition(in_files_list,func=EqualLength(),involved=True) - if me.get_rank()==0 and (verbose == True): - print('VERBOSE: in_files = ',in_files) + if opts_dict["cumul"]: + if opts_dict["regx"]: + in_files_list = get_cumul_filelist( + opts_dict, opts_dict["indir"], opts_dict["regx"] + ) + in_files = me.partition(in_files_list, func=EqualLength(), involved=True) + if me.get_rank() == 0 and (verbose == True): + print("VERBOSE: in_files = ", in_files) # Check full file names in input directory (don't open yet) - full_in_files=[] - if me.get_rank() == 0 and opts_dict['verbose']: - print('VERBOSE: Input files are: ') + full_in_files = [] + if me.get_rank() == 0 and opts_dict["verbose"]: + print("VERBOSE: Input files are: ") for onefile in in_files[0:esize]: - fname = input_dir + '/' + onefile - if me.get_rank() == 0 and opts_dict['verbose']: + fname = input_dir + "/" + onefile + if me.get_rank() == 0 and opts_dict["verbose"]: print(fname) - if (os.path.isfile(fname)): + if os.path.isfile(fname): full_in_files.append(fname) else: - if me.get_rank()==0: - print("ERROR: Could not locate file ", fname , " => EXITING....") - sys.exit() + if me.get_rank() == 0: + print("ERROR: Could not locate file ", fname, " => EXITING....") + sys.exit() - #open just the first file - first_file = nc.Dataset(full_in_files[0],"r") + # open just the first file + first_file = nc.Dataset(full_in_files[0], "r") # Store dimensions of the input fields - if me.get_rank()==0 and (verbose == True): + if me.get_rank() == 0 and (verbose == True): print("VERBOSE: Getting spatial dimensions") nlev = -1 nilev = -1 ncol = -1 nlat = -1 nlon = -1 - lonkey='' - latkey='' + lonkey = "" + latkey = "" # Look at first file and get dims input_dims = first_file.dimensions ndims = len(input_dims) @@ -181,54 +200,54 @@ def main(argv): nilev = len(input_dims["ilev"]) elif key == "ncol": ncol = len(input_dims["ncol"]) - elif (key == "nlon") or (key =="lon"): + elif (key == "nlon") or (key == "lon"): nlon = len(input_dims[key]) - lonkey=key + lonkey = key elif (key == "nlat") or (key == "lat"): nlat = len(input_dims[key]) - latkey=key - - if (nlev == -1) : - if me.get_rank()==0: - print("ERROR: could not locate a valid dimension (lev) => EXITING....") - sys.exit() - - if (( ncol == -1) and ((nlat == -1) or (nlon == -1))): - if me.get_rank()==0: - print("ERROR: Need either lat/lon or ncol => EXITING....") - sys.exit() + latkey = key + + if nlev == -1: + if me.get_rank() == 0: + print("ERROR: could not locate a valid dimension (lev) => EXITING....") + sys.exit() + + if (ncol == -1) and ((nlat == -1) or (nlon == -1)): + if me.get_rank() == 0: + print("ERROR: Need either lat/lon or ncol => 
EXITING....") + sys.exit() # Check if this is SE or FV data - if (ncol != -1): - is_SE = True + if ncol != -1: + is_SE = True else: - is_SE = False + is_SE = False # output dimensions - if me.get_rank()==0 and (verbose == True): - print('lev = ', nlev) - if (is_SE == True): - print('ncol = ', ncol) + if me.get_rank() == 0 and (verbose == True): + print("lev = ", nlev) + if is_SE == True: + print("ncol = ", ncol) else: - print('nlat = ', nlat) - print('nlon = ', nlon) + print("nlat = ", nlat) + print("nlon = ", nlon) - # Get 2d vars, 3d vars and all vars (For now include all variables) + # Get 2d vars, 3d vars and all vars (For now include all variables) vars_dict_all = first_file.variables # Remove the excluded variables (specified in json file) from variable dictionary if exclude: - vars_dict=vars_dict_all + vars_dict = vars_dict_all for i in ex_varlist: - if i in vars_dict: - del vars_dict[i] - #Given an included var list, remove all the variables that are not on the list + if i in vars_dict: + del vars_dict[i] + # Given an included var list, remove all the variables that are not on the list else: - vars_dict=vars_dict_all.copy() - for k,v in vars_dict_all.items(): - if (k not in inc_varlist) and (vars_dict_all[k].typecode()=='f'): - del vars_dict[k] - + vars_dict = vars_dict_all.copy() + for k, v in vars_dict_all.items(): + if (k not in inc_varlist) and (vars_dict_all[k].typecode() == "f"): + del vars_dict[k] + num_vars = len(vars_dict) str_size = 0 @@ -237,54 +256,70 @@ def main(argv): num_2d = 0 num_3d = 0 - # Which are 2d, which are 3d and max str_size - for k,v in vars_dict.items(): + # Which are 2d, which are 3d and max str_size + for k, v in vars_dict.items(): var = k - vd = v.dimensions # all the variable's dimensions (names) - vr = len(v.dimensions) # num dimension - vs = v.shape # dim values + vd = v.dimensions # all the variable's dimensions (names) + vr = len(v.dimensions) # num dimension + vs = v.shape # dim values is_2d = False is_3d = False - if (is_SE == True): # (time, lev, ncol) or (time, ncol) - if ((vr == 2) and (vs[1] == ncol)): - is_2d = True + if is_SE == True: # (time, lev, ncol) or (time, ncol) + if (vr == 2) and (vs[1] == ncol): + is_2d = True num_2d += 1 - elif ((vr == 3) and (vs[2] == ncol and vs[1] == nlev )): - is_3d = True + elif (vr == 3) and (vs[2] == ncol and vs[1] == nlev): + is_3d = True num_3d += 1 - else: # (time, lev, nlon, nlon) or (time, nlat, nlon) - if ((vr == 3) and (vs[1] == nlat and vs[2] == nlon)): - is_2d = True + else: # (time, lev, nlon, nlon) or (time, nlat, nlon) + if (vr == 3) and (vs[1] == nlat and vs[2] == nlon): + is_2d = True num_2d += 1 - elif ((vr == 4) and (vs[2] == nlat and vs[3] == nlon and (vs[1] == nlev or vs[1]==nilev ))): - is_3d = True + elif (vr == 4) and ( + vs[2] == nlat and vs[3] == nlon and (vs[1] == nlev or vs[1] == nilev) + ): + is_3d = True num_3d += 1 - - if (is_3d == True) : + + if is_3d == True: str_size = max(str_size, len(k)) d3_var_names.append(k) - elif (is_2d == True): + elif is_2d == True: str_size = max(str_size, len(k)) d2_var_names.append(k) - if me.get_rank() == 0 and (verbose == True): - print('VERBOSE: Number of variables found: ', num_3d+num_2d) - print('VERBOSE: 3D variables: '+str(num_3d)+', 2D variables: '+str(num_2d)) + print("VERBOSE: Number of variables found: ", num_3d + num_2d) + print( + "VERBOSE: 3D variables: " + str(num_3d) + ", 2D variables: " + str(num_2d) + ) - # Now sort these and combine (this sorts caps first, then lower case - + # Now sort these and combine (this sorts caps 
first, then lower case - # which is what we want) - d2_var_names.sort() + d2_var_names.sort() d3_var_names.sort() - if esize&lt;num_2d+num_3d: - if me.get_rank() == 0: - print("************************************************************************************************************************************") - print(" ERROR: the total number of 3D and 2D variables "+str(num_2d+num_3d)+" is larger than the number of ensemble files "+str(esize)) - print(" Cannot generate ensemble summary file, please remove more variables from your included variable list,") - print(" or add more variables in your excluded variable list => EXITING....") - print("************************************************************************************************************************************") - sys.exit() + if esize < num_2d + num_3d: + if me.get_rank() == 0: + print( + "************************************************************************************************************************************" + ) + print( + " ERROR: the total number of 3D and 2D variables " + + str(num_2d + num_3d) + + " is larger than the number of ensemble files " + + str(esize) + ) + print( + " Cannot generate ensemble summary file, please remove more variables from your included variable list," + ) + print( + " or add more variables in your excluded variable list => EXITING...." + ) + print( + "************************************************************************************************************************************" + ) + sys.exit() # All vars is 3d vars first (sorted), then the 2d vars all_var_names = list(d3_var_names) all_var_names += d2_var_names @@ -293,19 +328,18 @@ def main(argv): # Rank 0 - Create new summary ensemble file this_sumfile = opts_dict["sumfile"] - #check if directory is valid + # check if directory is valid sum_dir = os.path.dirname(this_sumfile) if len(sum_dir) == 0: - sum_dir = '.' - if (os.path.exists(sum_dir) == False): + sum_dir = "." + if os.path.exists(sum_dir) == False: if me.get_rank() == 0: - print('ERROR: Summary file directory: ',sum_dir,' not found') + print("ERROR: Summary file directory: ", sum_dir, " not found") sys.exit(2) + if me.get_rank() == 0: - if(me.get_rank() ==0 ): - - if (verbose == True): + if verbose == True: print("VERBOSE: Creating ", this_sumfile, " ...") if os.path.exists(this_sumfile): @@ -313,52 +347,58 @@ def main(argv): nc_sumfile = nc.Dataset(this_sumfile, "w", format="NETCDF4_CLASSIC") # Set dimensions - if (verbose == True): + if verbose == True: print("VERBOSE: Setting dimensions .....") - if (is_SE == True): - nc_sumfile.createDimension('ncol', ncol) + if is_SE == True: + nc_sumfile.createDimension("ncol", ncol) else: - nc_sumfile.createDimension('nlat', nlat) - nc_sumfile.createDimension('nlon', nlon) + nc_sumfile.createDimension("nlat", nlat) + nc_sumfile.createDimension("nlon", nlon) - nc_sumfile.createDimension('nlev', nlev) - nc_sumfile.createDimension('ens_size', esize) - nc_sumfile.createDimension('nvars', num_3d + num_2d) - nc_sumfile.createDimension('nvars3d', num_3d) - nc_sumfile.createDimension('nvars2d', num_2d) - nc_sumfile.createDimension('str_size', str_size) + nc_sumfile.createDimension("nlev", nlev) + nc_sumfile.createDimension("ens_size", esize) + nc_sumfile.createDimension("nvars", num_3d + num_2d) + nc_sumfile.createDimension("nvars3d", num_3d) + nc_sumfile.createDimension("nvars2d", num_2d) + nc_sumfile.createDimension("str_size", str_size) # Set global attributes now = time.strftime("%c") - if (verbose == True): + if verbose == True: print("VERBOSE: Setting global attributes .....") nc_sumfile.creation_date = now - nc_sumfile.title = 'CAM verification ensemble summary file' + nc_sumfile.title = "CAM verification ensemble summary file" nc_sumfile.tag = opts_dict["tag"] - nc_sumfile.compset = opts_dict["compset"] + nc_sumfile.compset = opts_dict["compset"] nc_sumfile.resolution = opts_dict["res"] - nc_sumfile.machine = opts_dict["mach"] + nc_sumfile.machine = opts_dict["mach"] # Create variables - if (verbose == True): + if verbose == 
True: print("VERBOSE: Creating variables .....") - v_lev = nc_sumfile.createVariable("lev", 'f8', ('nlev',)) - v_vars = nc_sumfile.createVariable("vars", 'S1', ('nvars', 'str_size')) - v_var3d = nc_sumfile.createVariable("var3d", 'S1', ('nvars3d', 'str_size')) - v_var2d = nc_sumfile.createVariable("var2d", 'S1', ('nvars2d', 'str_size')) - - v_gm = nc_sumfile.createVariable("global_mean", 'f8', ('nvars', 'ens_size')) - v_standardized_gm=nc_sumfile.createVariable("standardized_gm",'f8',('nvars','ens_size')) - v_loadings_gm = nc_sumfile.createVariable('loadings_gm','f8',('nvars','nvars')) - v_mu_gm = nc_sumfile.createVariable('mu_gm','f8',('nvars',)) - v_sigma_gm = nc_sumfile.createVariable('sigma_gm','f8',('nvars',)) - v_sigma_scores_gm = nc_sumfile.createVariable('sigma_scores_gm','f8',('nvars',)) + v_lev = nc_sumfile.createVariable("lev", "f8", ("nlev",)) + v_vars = nc_sumfile.createVariable("vars", "S1", ("nvars", "str_size")) + v_var3d = nc_sumfile.createVariable("var3d", "S1", ("nvars3d", "str_size")) + v_var2d = nc_sumfile.createVariable("var2d", "S1", ("nvars2d", "str_size")) + + v_gm = nc_sumfile.createVariable("global_mean", "f8", ("nvars", "ens_size")) + v_standardized_gm = nc_sumfile.createVariable( + "standardized_gm", "f8", ("nvars", "ens_size") + ) + v_loadings_gm = nc_sumfile.createVariable( + "loadings_gm", "f8", ("nvars", "nvars") + ) + v_mu_gm = nc_sumfile.createVariable("mu_gm", "f8", ("nvars",)) + v_sigma_gm = nc_sumfile.createVariable("sigma_gm", "f8", ("nvars",)) + v_sigma_scores_gm = nc_sumfile.createVariable( + "sigma_scores_gm", "f8", ("nvars",) + ) # Assign vars, var3d and var2d - if (verbose == True): + if verbose == True: print("VERBOSE: Assigning vars, var3d, and var2d .....") - eq_all_var_names =[] + eq_all_var_names = [] eq_d3_var_names = [] eq_d2_var_names = [] @@ -366,8 +406,8 @@ def main(argv): for i in range(l_eq): tt = list(all_var_names[i]) l_tt = len(tt) - if (l_tt < str_size): - extra = list(' ')*(str_size - l_tt) + if l_tt < str_size: + extra = list(" ") * (str_size - l_tt) tt.extend(extra) eq_all_var_names.append(tt) @@ -375,8 +415,8 @@ def main(argv): for i in range(l_eq): tt = list(d3_var_names[i]) l_tt = len(tt) - if (l_tt < str_size): - extra = list(' ')*(str_size - l_tt) + if l_tt < str_size: + extra = list(" ") * (str_size - l_tt) tt.extend(extra) eq_d3_var_names.append(tt) @@ -384,8 +424,8 @@ def main(argv): for i in range(l_eq): tt = list(d2_var_names[i]) l_tt = len(tt) - if (l_tt < str_size): - extra = list(' ')*(str_size - l_tt) + if l_tt < str_size: + extra = list(" ") * (str_size - l_tt) tt.extend(extra) eq_d2_var_names.append(tt) @@ -394,166 +434,184 @@ def main(argv): v_var2d[:] = eq_d2_var_names[:] # Time-invariant metadata - if (verbose == True): + if verbose == True: print("VERBOSE: Assigning time invariant metadata .....") -# lev_data = np.zeros(num_lev,dtype=np.float64) + # lev_data = np.zeros(num_lev,dtype=np.float64) lev_data = first_file.variables["lev"] v_lev[:] = lev_data[:] - #end of rank=0 work + # end of rank=0 work - # All: - tslice = opts_dict['tslice'] - if not opts_dict['cumul']: + # All: + tslice = opts_dict["tslice"] + if not opts_dict["cumul"]: # Partition the var list - var3_list_loc=me.partition(d3_var_names,func=EqualStride(),involved=True) - var2_list_loc=me.partition(d2_var_names,func=EqualStride(),involved=True) + var3_list_loc = me.partition(d3_var_names, func=EqualStride(), involved=True) + var2_list_loc = me.partition(d2_var_names, func=EqualStride(), involved=True) else: - var3_list_loc=d3_var_names - 
var2_list_loc=d2_var_names + var3_list_loc = d3_var_names + var2_list_loc = d2_var_names - #close first_file + # close first_file first_file.close() # Calculate global means # if me.get_rank() == 0 and (verbose == True): print("VERBOSE: Calculating global means .....") - if not opts_dict['cumul']: - gm3d,gm2d,var_list = pyEnsLib.generate_global_mean_for_summary(full_in_files, var3_list_loc, var2_list_loc, is_SE, False, opts_dict) + if not opts_dict["cumul"]: + gm3d, gm2d, var_list = pyEnsLib.generate_global_mean_for_summary( + full_in_files, var3_list_loc, var2_list_loc, is_SE, False, opts_dict + ) if me.get_rank() == 0 and (verbose == True): print("VERBOSE: Finished calculating global means .....") - #gather to rank = 0 - if opts_dict['mpi_enable']: + # gather to rank = 0 + if opts_dict["mpi_enable"]: - if not opts_dict['cumul']: + if not opts_dict["cumul"]: # Gather the 3d variable results from all processors to the master processor - slice_index=get_stride_list(len(d3_var_names),me) - + slice_index = get_stride_list(len(d3_var_names), me) + # Gather global means 3d results - gm3d=gather_npArray(gm3d,me,slice_index,(len(d3_var_names),len(full_in_files))) + gm3d = gather_npArray( + gm3d, me, slice_index, (len(d3_var_names), len(full_in_files)) + ) # Gather 2d variable results from all processors to the master processor - slice_index=get_stride_list(len(d2_var_names),me) + slice_index = get_stride_list(len(d2_var_names), me) # Gather global means 2d results - gm2d=gather_npArray(gm2d,me,slice_index,(len(d2_var_names),len(full_in_files))) + gm2d = gather_npArray( + gm2d, me, slice_index, (len(d2_var_names), len(full_in_files)) + ) - #gather variables ro exclude (in pre_pca) - var_list=gather_list(var_list,me) + # gather variables to exclude (in pre_pca) + var_list = gather_list(var_list, me) else: - gmall=np.concatenate((temp1,temp2),axis=0) - gmall=pyEnsLib.gather_npArray_pop(gmall,me,(me.get_size(),len(d3_var_names)+len(d2_var_names))) + gmall = np.concatenate((temp1, temp2), axis=0) + gmall = pyEnsLib.gather_npArray_pop( + gmall, me, (me.get_size(), len(d3_var_names) + len(d2_var_names)) + ) # rank =0 : complete calculations for summary file - if me.get_rank() == 0 : - if not opts_dict['cumul']: - gmall=np.concatenate((gm3d,gm2d),axis=0) + if me.get_rank() == 0: + if not opts_dict["cumul"]: + gmall = np.concatenate((gm3d, gm2d), axis=0) else: - gmall_temp=np.transpose(gmall[:,:]) - gmall=gmall_temp - - #PCA prep and calculation - mu_gm,sigma_gm,standardized_global_mean,loadings_gm,scores_gm,b_exit=pyEnsLib.pre_PCA(gmall,all_var_names,var_list,me) - - #if PCA calc encounters an error, then remove the summary file and exit + gmall_temp = np.transpose(gmall[:, :]) + gmall = gmall_temp + + # PCA prep and calculation + ( + mu_gm, + sigma_gm, + standardized_global_mean, + loadings_gm, + scores_gm, + b_exit, + ) = pyEnsLib.pre_PCA(gmall, all_var_names, var_list, me) + + # if PCA calc encounters an error, then remove the summary file and exit if b_exit: nc_sumfile.close() os.unlink(this_sumfile) print("STATUS: Summary could not be created.") sys.exit(2) - - v_gm[:,:]=gmall[:,:] - v_standardized_gm[:,:]=standardized_global_mean[:,:] - v_mu_gm[:]=mu_gm[:] - v_sigma_gm[:]=sigma_gm[:] - v_loadings_gm[:,:]=loadings_gm[:,:] - v_sigma_scores_gm[:]=scores_gm[:] + v_gm[:, :] = gmall[:, :] + v_standardized_gm[:, :] = standardized_global_mean[:, :] + v_mu_gm[:] = mu_gm[:] + v_sigma_gm[:] = sigma_gm[:] + v_loadings_gm[:, :] = loadings_gm[:, :] + v_sigma_scores_gm[:] = scores_gm[:] print("STATUS: Summary file is complete.") nc_sumfile.close()
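Note: pre_PCA itself lives in pyEnsLib and is not shown in this diff; the summary fields written above (mu_gm, sigma_gm, standardized_global_mean, loadings_gm, scores_gm) correspond to a standardize-then-PCA computation on the nvars x ens_size matrix of global means. A hedged sketch of that core step on synthetic data follows; it omits pre_PCA's screening of constant or linearly dependent variables and its b_exit error path, and is one conventional way to compute these quantities rather than the library's exact code.

import numpy as np

rng = np.random.default_rng(1)
gmall = rng.standard_normal((6, 100))  # synthetic nvars x ens_size global means

mu_gm = gmall.mean(axis=1)  # per-variable ensemble mean
sigma_gm = gmall.std(axis=1, ddof=1)  # per-variable ensemble std dev
standardized_global_mean = (gmall - mu_gm[:, None]) / sigma_gm[:, None]

# PCA via eigendecomposition of the nvars x nvars covariance matrix
evals, loadings_gm = np.linalg.eigh(np.cov(standardized_global_mean))
order = np.argsort(evals)[::-1]  # sort components by descending variance
evals, loadings_gm = evals[order], loadings_gm[:, order]

scores = loadings_gm.T @ standardized_global_mean  # principal-component scores
sigma_scores_gm = scores.std(axis=1, ddof=1)  # should track sqrt(evals)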
file is complete.") nc_sumfile.close() -def get_cumul_filelist(opts_dict,indir,regx): - if not opts_dict['indir']: - print('input dir is not specified') - sys.exit(2) - #regx='(pgi(.)*-(01|02))' - regx_list=["mon","gnu","pgi"] - all_files=[] - for prefix in regx_list: - for i in range(opts_dict['fIndex'],opts_dict['fIndex']+opts_dict['esize']/3): - for j in range(opts_dict['startMon'],opts_dict['endMon']+1): - mon_str=str(j).zfill(2) - regx='(^'+prefix+'(.)*'+str(i)+'(.)*-('+mon_str+'))' - #print 'regx=',regx - res=[f for f in os.listdir(indir) if re.search(regx,f)] - in_files=sorted(res) - all_files.extend(in_files) - #print "all_files=",all_files - #in_files=res - return all_files - - - +def get_cumul_filelist(opts_dict, indir, regx): + if not opts_dict["indir"]: + print("input dir is not specified") + sys.exit(2) + # regx='(pgi(.)*-(01|02))' + regx_list = ["mon", "gnu", "pgi"] + all_files = [] + for prefix in regx_list: + for i in range( + opts_dict["fIndex"], opts_dict["fIndex"] + opts_dict["esize"] / 3 + ): + for j in range(opts_dict["startMon"], opts_dict["endMon"] + 1): + mon_str = str(j).zfill(2) + regx = "(^" + prefix + "(.)*" + str(i) + "(.)*-(" + mon_str + "))" + # print 'regx=',regx + res = [f for f in os.listdir(indir) if re.search(regx, f)] + in_files = sorted(res) + all_files.extend(in_files) + # print "all_files=",all_files + # in_files=res + return all_files + # # Get the shape of all variable list in tuple for all processor -# +# def get_shape(shape_tuple, shape1, rank): - lst=list(shape_tuple) - lst[0]=shape1 - shape_tuple=tuple(lst) + lst = list(shape_tuple) + lst[0] = shape1 + shape_tuple = tuple(lst) return shape_tuple - + + # # Get the mpi partition list for each processor # def get_stride_list(len_of_list, me): - slice_index=[] + slice_index = [] for i in range(me.get_size()): - index_arr=np.arange(len_of_list) - slice_index.append(index_arr[i::me.get_size()]) + index_arr = np.arange(len_of_list) + slice_index.append(index_arr[i :: me.get_size()]) return slice_index def gather_list(var_list, me): - whole_list=[] + whole_list = [] if me.get_rank() == 0: - whole_list.extend(var_list) - for i in range(1,me.get_size()): - if me.get_rank() == 0: - rank_id,var_list=me.collect() - whole_list.extend(var_list) + whole_list.extend(var_list) + for i in range(1, me.get_size()): + if me.get_rank() == 0: + rank_id, var_list = me.collect() + whole_list.extend(var_list) if me.get_rank() != 0: - me.collect(var_list) + me.collect(var_list) me.sync() return whole_list -# + + +# # Gather arrays from each processor by the var_list to the master processor and make it an array # def gather_npArray(npArray, me, slice_index, array_shape): -# the_array=np.zeros(array_shape,dtype=np.float32) - the_array=np.zeros(array_shape,dtype=np.float64) - if me.get_rank()==0: - k=0 + # the_array=np.zeros(array_shape,dtype=np.float32) + the_array = np.zeros(array_shape, dtype=np.float64) + if me.get_rank() == 0: + k = 0 for j in slice_index[me.get_rank()]: - the_array[j,:]=npArray[k,:] - k=k+1 - for i in range(1,me.get_size()): + the_array[j, :] = npArray[k, :] + k = k + 1 + for i in range(1, me.get_size()): if me.get_rank() == 0: - rank,npArray=me.collect() - k=0 + rank, npArray = me.collect() + k = 0 for j in slice_index[rank]: - the_array[j,:]=npArray[k,:] - k=k+1 - if me.get_rank() != 0: - message={"from_rank":me.get_rank(),"shape":npArray.shape} + the_array[j, :] = npArray[k, :] + k = k + 1 + if me.get_rank() != 0: + message = {"from_rank": me.get_rank(), "shape": npArray.shape} me.collect(npArray) 
me.sync() return the_array - + + if __name__ == "__main__": main(sys.argv[1:]) diff --git a/tools/statistical_ensemble_test/pyCECT/pyEnsSumPop.py b/tools/statistical_ensemble_test/pyCECT/pyEnsSumPop.py index 1b5501169f0..483c9e4476d 100644 --- a/tools/statistical_ensemble_test/pyCECT/pyEnsSumPop.py +++ b/tools/statistical_ensemble_test/pyCECT/pyEnsSumPop.py @@ -1,146 +1,153 @@ #!/usr/bin/env python from __future__ import print_function import configparser -import sys, getopt, os -import numpy as np +import sys, getopt, os +import numpy as np import netCDF4 as nc import time import re from asaptools.partition import EqualStride, Duplicate -import asaptools.simplecomm as simplecomm +import asaptools.simplecomm as simplecomm import pyEnsLib -def main(argv): +def main(argv): # Get command line stuff and store in a dictionary - s = 'nyear= nmonth= npert= tag= res= mach= compset= sumfile= indir= tslice= verbose jsonfile= mpi_enable mpi_disable nrand= rand seq= jsondir= esize=' + s = "nyear= nmonth= npert= tag= res= mach= compset= sumfile= indir= tslice= verbose jsonfile= mpi_enable mpi_disable nrand= rand seq= jsondir= esize=" optkeys = s.split() - try: + try: opts, args = getopt.getopt(argv, "h", optkeys) except getopt.GetoptError: pyEnsLib.EnsSumPop_usage() sys.exit(2) # Put command line options in a dictionary - also set defaults - opts_dict={} + opts_dict = {} # Defaults - opts_dict['tag'] = 'cesm2_1_0' - opts_dict['compset'] = 'G' - opts_dict['mach'] = 'cheyenne' - opts_dict['tslice'] = 0 - opts_dict['nyear'] = 1 - opts_dict['nmonth'] = 12 - opts_dict['esize'] = 40 - opts_dict['npert'] = 0 #for backwards compatible - opts_dict['nbin'] = 40 - opts_dict['minrange'] = 0.0 - opts_dict['maxrange'] = 4.0 - opts_dict['res'] = 'T62_g17' - opts_dict['sumfile'] = 'pop.ens.summary.nc' - opts_dict['indir'] = './' - opts_dict['jsonfile'] = 'pop_ensemble.json' - opts_dict['verbose'] = True - opts_dict['mpi_enable'] = True - opts_dict['mpi_disable'] = False - #opts_dict['zscoreonly'] = True - opts_dict['popens'] = True - opts_dict['nrand'] = 40 - opts_dict['rand'] = False - opts_dict['seq'] = 0 - opts_dict['jsondir'] = './' - - # This creates the dictionary of input arguments - #print "before parseconfig" - opts_dict = pyEnsLib.getopt_parseconfig(opts,optkeys,'ESP',opts_dict) - - verbose = opts_dict['verbose'] - nbin = opts_dict['nbin'] - - if opts_dict['mpi_disable']: - opts_dict['mpi_enable'] = False - - #still have npert for backwards compatibility - check if it was set - #and override esize - if opts_dict['npert'] > 0: - user_size = opts_dict['npert'] - print('WARNING: User specified value for --npert will override --esize. 
Please consider using --esize instead of --npert in the future.') - opts_dict['esize'] = user_size + opts_dict["tag"] = "cesm2_1_0" + opts_dict["compset"] = "G" + opts_dict["mach"] = "cheyenne" + opts_dict["tslice"] = 0 + opts_dict["nyear"] = 1 + opts_dict["nmonth"] = 12 + opts_dict["esize"] = 40 + opts_dict["npert"] = 0 # for backwards compatible + opts_dict["nbin"] = 40 + opts_dict["minrange"] = 0.0 + opts_dict["maxrange"] = 4.0 + opts_dict["res"] = "T62_g17" + opts_dict["sumfile"] = "pop.ens.summary.nc" + opts_dict["indir"] = "./" + opts_dict["jsonfile"] = "pop_ensemble.json" + opts_dict["verbose"] = True + opts_dict["mpi_enable"] = True + opts_dict["mpi_disable"] = False + # opts_dict['zscoreonly'] = True + opts_dict["popens"] = True + opts_dict["nrand"] = 40 + opts_dict["rand"] = False + opts_dict["seq"] = 0 + opts_dict["jsondir"] = "./" + + # This creates the dictionary of input arguments + # print "before parseconfig" + opts_dict = pyEnsLib.getopt_parseconfig(opts, optkeys, "ESP", opts_dict) + + verbose = opts_dict["verbose"] + nbin = opts_dict["nbin"] + + if opts_dict["mpi_disable"]: + opts_dict["mpi_enable"] = False + + # still have npert for backwards compatibility - check if it was set + # and override esize + if opts_dict["npert"] > 0: + user_size = opts_dict["npert"] + print( + "WARNING: User specified value for --npert will override --esize. Please consider using --esize instead of --npert in the future." + ) + opts_dict["esize"] = user_size # Now find file names in indir - input_dir = opts_dict['indir'] + input_dir = opts_dict["indir"] # Create a mpi simplecomm object - if opts_dict['mpi_enable']: - me=simplecomm.create_comm() + if opts_dict["mpi_enable"]: + me = simplecomm.create_comm() else: - me=simplecomm.create_comm(False) + me = simplecomm.create_comm(False) - if opts_dict['jsonfile']: + if opts_dict["jsonfile"]: # Read in the included var list - Var2d,Var3d=pyEnsLib.read_jsonlist(opts_dict['jsonfile'],'ESP') - str_size=0 + Var2d, Var3d = pyEnsLib.read_jsonlist(opts_dict["jsonfile"], "ESP") + str_size = 0 for str in Var3d: if str_size < len(str): - str_size=len(str) + str_size = len(str) for str in Var2d: if str_size < len(str): - str_size=len(str) + str_size = len(str) if me.get_rank() == 0: - print('STATUS: Running pyEnsSumPop!') - + print("STATUS: Running pyEnsSumPop!") + if verbose: print("VERBOSE: opts_dict = ") print(opts_dict) - in_files=[] - if(os.path.exists(input_dir)): + in_files = [] + if os.path.exists(input_dir): # Pick up the 'nrand' random number of input files to generate summary files - if opts_dict['rand']: - in_files=pyEnsLib.Random_pickup_pop(input_dir,opts_dict,opts_dict['nrand']) - else: - # Get the list of files - in_files_temp = os.listdir(input_dir) - in_files=sorted(in_files_temp) + if opts_dict["rand"]: + in_files = pyEnsLib.Random_pickup_pop( + input_dir, opts_dict, opts_dict["nrand"] + ) + else: + # Get the list of files + in_files_temp = os.listdir(input_dir) + in_files = sorted(in_files_temp) num_files = len(in_files) else: if me.get_rank() == 0: - print('ERROR: Input directory: ',input_dir,' not found => EXITING....') + print("ERROR: Input directory: ", input_dir, " not found => EXITING....") sys.exit(2) - #make sure we have enough files - files_needed = opts_dict['nmonth'] * opts_dict['esize'] * opts_dict['nyear'] - if (num_files < files_needed): + # make sure we have enough files + files_needed = opts_dict["nmonth"] * opts_dict["esize"] * opts_dict["nyear"] + if num_files < files_needed: if me.get_rank() == 0: - print('ERROR: Input 
directory does not contain enough files (must be esize*nyear*nmonth = ', files_needed, ' ) and it has only ', num_files, ' files).') + print( + "ERROR: Input directory does not contain enough files (must be esize*nyear*nmonth = ", + files_needed, + " ) and it has only ", + num_files, + " files).", + ) sys.exit(2) - + # Partition the input file list (ideally we have one processor per month) + in_file_list = me.partition(in_files, func=EqualStride(), involved=True) - #Partition the input file list (ideally we have one processor per month) - in_file_list=me.partition(in_files,func=EqualStride(),involved=True) - # Check the files in the input directory - full_in_files=[] - if me.get_rank() == 0 and opts_dict['verbose']: - print('VERBOSE: Input files are:') + full_in_files = [] + if me.get_rank() == 0 and opts_dict["verbose"]: + print("VERBOSE: Input files are:") for onefile in in_file_list: - fname = input_dir + '/' + onefile - if opts_dict['verbose']: - print( "my_rank = ", me.get_rank(), " ", fname) - if (os.path.isfile(fname)): + fname = input_dir + "/" + onefile + if opts_dict["verbose"]: + print("my_rank = ", me.get_rank(), " ", fname) + if os.path.isfile(fname): full_in_files.append(fname) else: - print("ERROR: Could not locate file: "+ fname + " => EXITING....") - sys.exit() + print("ERROR: Could not locate file: " + fname + " => EXITING....") + sys.exit() - - #open just the first file (all procs) - first_file = nc.Dataset(full_in_files[0],"r") + # open just the first file (all procs) + first_file = nc.Dataset(full_in_files[0], "r") # Store dimensions of the input fields if (verbose == True) and me.get_rank() == 0: @@ -164,14 +171,13 @@ def main(argv): elif key == "nlat": nlat = len(input_dims["nlat"]) - # Rank 0: prepare new summary ensemble file this_sumfile = opts_dict["sumfile"] - if (me.get_rank() == 0 ): + if me.get_rank() == 0: if os.path.exists(this_sumfile): os.unlink(this_sumfile) - if verbose: + if verbose: print("VERBOSE: Creating ", this_sumfile, " ...") nc_sumfile = nc.Dataset(this_sumfile, "w", format="NETCDF4_CLASSIC") @@ -179,48 +185,57 @@ def main(argv): # Set dimensions if verbose: print("VERBOSE: Setting dimensions .....") - nc_sumfile.createDimension('nlat', nlat) - nc_sumfile.createDimension('nlon', nlon) - nc_sumfile.createDimension('nlev', nlev) - nc_sumfile.createDimension('time',None) - nc_sumfile.createDimension('ens_size', opts_dict['esize']) - nc_sumfile.createDimension('nbin', opts_dict['nbin']) - nc_sumfile.createDimension('nvars', len(Var3d) + len(Var2d)) - nc_sumfile.createDimension('nvars3d', len(Var3d)) - nc_sumfile.createDimension('nvars2d', len(Var2d)) - nc_sumfile.createDimension('str_size', str_size) + nc_sumfile.createDimension("nlat", nlat) + nc_sumfile.createDimension("nlon", nlon) + nc_sumfile.createDimension("nlev", nlev) + nc_sumfile.createDimension("time", None) + nc_sumfile.createDimension("ens_size", opts_dict["esize"]) + nc_sumfile.createDimension("nbin", opts_dict["nbin"]) + nc_sumfile.createDimension("nvars", len(Var3d) + len(Var2d)) + nc_sumfile.createDimension("nvars3d", len(Var3d)) + nc_sumfile.createDimension("nvars2d", len(Var2d)) + nc_sumfile.createDimension("str_size", str_size) # Set global attributes now = time.strftime("%c") if verbose: print("VERBOSE: Setting global attributes .....") nc_sumfile.creation_date = now - nc_sumfile.title = 'POP verification ensemble summary file' - nc_sumfile.tag = opts_dict["tag"] + nc_sumfile.title = "POP verification ensemble summary file" + nc_sumfile.tag = opts_dict["tag"] nc_sumfile.compset 
= opts_dict["compset"] - nc_sumfile.resolution = opts_dict["res"] - nc_sumfile.machine = opts_dict["mach"] + nc_sumfile.resolution = opts_dict["res"] + nc_sumfile.machine = opts_dict["mach"] # Create variables if verbose: print("VERBOSE: Creating variables .....") - v_lev = nc_sumfile.createVariable("z_t", 'f', ('nlev',)) - v_vars = nc_sumfile.createVariable("vars", 'S1', ('nvars', 'str_size')) - v_var3d = nc_sumfile.createVariable("var3d", 'S1', ('nvars3d', 'str_size')) - v_var2d = nc_sumfile.createVariable("var2d", 'S1', ('nvars2d', 'str_size')) - v_time = nc_sumfile.createVariable("time",'d',('time',)) - v_ens_avg3d = nc_sumfile.createVariable("ens_avg3d", 'f', ('time','nvars3d', 'nlev', 'nlat', 'nlon')) - v_ens_stddev3d = nc_sumfile.createVariable("ens_stddev3d", 'f', ('time','nvars3d', 'nlev', 'nlat', 'nlon')) - v_ens_avg2d = nc_sumfile.createVariable("ens_avg2d", 'f', ('time','nvars2d', 'nlat', 'nlon')) - v_ens_stddev2d = nc_sumfile.createVariable("ens_stddev2d", 'f', ('time','nvars2d', 'nlat', 'nlon')) - v_RMSZ = nc_sumfile.createVariable("RMSZ", 'f', ('time','nvars', 'ens_size','nbin')) - + v_lev = nc_sumfile.createVariable("z_t", "f", ("nlev",)) + v_vars = nc_sumfile.createVariable("vars", "S1", ("nvars", "str_size")) + v_var3d = nc_sumfile.createVariable("var3d", "S1", ("nvars3d", "str_size")) + v_var2d = nc_sumfile.createVariable("var2d", "S1", ("nvars2d", "str_size")) + v_time = nc_sumfile.createVariable("time", "d", ("time",)) + v_ens_avg3d = nc_sumfile.createVariable( + "ens_avg3d", "f", ("time", "nvars3d", "nlev", "nlat", "nlon") + ) + v_ens_stddev3d = nc_sumfile.createVariable( + "ens_stddev3d", "f", ("time", "nvars3d", "nlev", "nlat", "nlon") + ) + v_ens_avg2d = nc_sumfile.createVariable( + "ens_avg2d", "f", ("time", "nvars2d", "nlat", "nlon") + ) + v_ens_stddev2d = nc_sumfile.createVariable( + "ens_stddev2d", "f", ("time", "nvars2d", "nlat", "nlon") + ) + v_RMSZ = nc_sumfile.createVariable( + "RMSZ", "f", ("time", "nvars", "ens_size", "nbin") + ) # Assign vars, var3d and var2d if verbose: print("VERBOSE: Assigning vars, var3d, and var2d .....") - eq_all_var_names =[] + eq_all_var_names = [] eq_d3_var_names = [] eq_d2_var_names = [] all_var_names = list(Var3d) @@ -229,8 +244,8 @@ def main(argv): for i in range(l_eq): tt = list(all_var_names[i]) l_tt = len(tt) - if (l_tt < str_size): - extra = list(' ')*(str_size - l_tt) + if l_tt < str_size: + extra = list(" ") * (str_size - l_tt) tt.extend(extra) eq_all_var_names.append(tt) @@ -238,8 +253,8 @@ def main(argv): for i in range(l_eq): tt = list(Var3d[i]) l_tt = len(tt) - if (l_tt < str_size): - extra = list(' ')*(str_size - l_tt) + if l_tt < str_size: + extra = list(" ") * (str_size - l_tt) tt.extend(extra) eq_d3_var_names.append(tt) @@ -247,8 +262,8 @@ def main(argv): for i in range(l_eq): tt = list(Var2d[i]) l_tt = len(tt) - if (l_tt < str_size): - extra = list(' ')*(str_size - l_tt) + if l_tt < str_size: + extra = list(" ") * (str_size - l_tt) tt.extend(extra) eq_d2_var_names.append(tt) @@ -262,71 +277,90 @@ def main(argv): vars_dict = first_file.variables lev_data = vars_dict["z_t"] v_lev[:] = lev_data[:] - - #end of rank 0 - #All: + # end of rank 0 + + # All: # Time-varient metadata if verbose: - if me.get_rank() == 0: + if me.get_rank() == 0: print("VERBOSE: Assigning time variant metadata .....") vars_dict = first_file.variables - time_value = vars_dict['time'] + time_value = vars_dict["time"] time_array = np.array([time_value]) - time_array = pyEnsLib.gather_npArray_pop(time_array,me,(me.get_size(),)) + time_array = 
pyEnsLib.gather_npArray_pop(time_array, me, (me.get_size(),)) + if me.get_rank() == 0: + v_time[:] = time_array[:] + + # Assign zero values to first time slice of RMSZ and avg and stddev for 2d & 3d + # in case of a calculation problem before finishing + e_size = opts_dict["esize"] + b_size = opts_dict["nbin"] + z_ens_avg3d = np.zeros((len(Var3d), nlev, nlat, nlon), dtype=np.float32) + z_ens_stddev3d = np.zeros((len(Var3d), nlev, nlat, nlon), dtype=np.float32) + z_ens_avg2d = np.zeros((len(Var2d), nlat, nlon), dtype=np.float32) + z_ens_stddev2d = np.zeros((len(Var2d), nlat, nlon), dtype=np.float32) + z_RMSZ = np.zeros(((len(Var3d) + len(Var2d)), e_size, b_size), dtype=np.float32) + + # rank 0 (put zero values in summary file) if me.get_rank() == 0: - v_time[:]=time_array[:] - - #Assign zero values to first time slice of RMSZ and avg and stddev for 2d & 3d - #in case of a calculation problem before finishing - e_size = opts_dict['esize'] - b_size = opts_dict['nbin'] - z_ens_avg3d=np.zeros((len(Var3d),nlev,nlat,nlon),dtype=np.float32) - z_ens_stddev3d=np.zeros((len(Var3d),nlev,nlat,nlon),dtype=np.float32) - z_ens_avg2d=np.zeros((len(Var2d),nlat,nlon),dtype=np.float32) - z_ens_stddev2d=np.zeros((len(Var2d),nlat,nlon),dtype=np.float32) - z_RMSZ = np.zeros(((len(Var3d)+len(Var2d)),e_size,b_size), dtype=np.float32) - - #rank 0 (put zero values in summary file) - if me.get_rank() == 0 : - v_RMSZ[0,:,:,:]=z_RMSZ[:,:,:] - v_ens_avg3d[0,:,:,:,:]=z_ens_avg3d[:,:,:,:] - v_ens_stddev3d[0,:,:,:,:]=z_ens_stddev3d[:,:,:,:] - v_ens_avg2d[0,:,:,:]=z_ens_avg2d[:,:,:] - v_ens_stddev2d[0,:,:,:]=z_ens_stddev2d[:,:,:] - - #close file[0] + v_RMSZ[0, :, :, :] = z_RMSZ[:, :, :] + v_ens_avg3d[0, :, :, :, :] = z_ens_avg3d[:, :, :, :] + v_ens_stddev3d[0, :, :, :, :] = z_ens_stddev3d[:, :, :, :] + v_ens_avg2d[0, :, :, :] = z_ens_avg2d[:, :, :] + v_ens_stddev2d[0, :, :, :] = z_ens_stddev2d[:, :, :] + + # close file[0] first_file.close() - # Calculate RMSZ scores - if (verbose == True and me.get_rank() == 0): + # Calculate RMSZ scores + if verbose == True and me.get_rank() == 0: print("VERBOSE: Calculating RMSZ scores .....") - zscore3d,zscore2d,ens_avg3d,ens_stddev3d,ens_avg2d,ens_stddev2d=pyEnsLib.calc_rmsz(full_in_files,Var3d,Var2d,opts_dict) + ( + zscore3d, + zscore2d, + ens_avg3d, + ens_stddev3d, + ens_avg2d, + ens_stddev2d, + ) = pyEnsLib.calc_rmsz(full_in_files, Var3d, Var2d, opts_dict) - if (verbose == True and me.get_rank() == 0): + if verbose == True and me.get_rank() == 0: print("VERBOSE: Finished with RMSZ scores .....") # Collect from all processors - if opts_dict['mpi_enable'] : + if opts_dict["mpi_enable"]: # Gather the 3d variable results from all processors to the master processor - zmall=np.concatenate((zscore3d,zscore2d),axis=0) - zmall=pyEnsLib.gather_npArray_pop(zmall,me,(me.get_size(),len(Var3d)+len(Var2d),len(full_in_files),nbin)) - - ens_avg3d=pyEnsLib.gather_npArray_pop(ens_avg3d,me,(me.get_size(),len(Var3d),nlev,(nlat),nlon)) - ens_avg2d=pyEnsLib.gather_npArray_pop(ens_avg2d,me,(me.get_size(),len(Var2d),(nlat),nlon)) - ens_stddev3d=pyEnsLib.gather_npArray_pop(ens_stddev3d,me,(me.get_size(),len(Var3d),nlev,(nlat),nlon)) - ens_stddev2d=pyEnsLib.gather_npArray_pop(ens_stddev2d,me,(me.get_size(),len(Var2d),(nlat),nlon)) + zmall = np.concatenate((zscore3d, zscore2d), axis=0) + zmall = pyEnsLib.gather_npArray_pop( + zmall, + me, + (me.get_size(), len(Var3d) + len(Var2d), len(full_in_files), nbin), + ) + + ens_avg3d = pyEnsLib.gather_npArray_pop( + ens_avg3d, me, (me.get_size(), len(Var3d), nlev, 
(nlat), nlon)
+        )
+        ens_avg2d = pyEnsLib.gather_npArray_pop(
+            ens_avg2d, me, (me.get_size(), len(Var2d), (nlat), nlon)
+        )
+        ens_stddev3d = pyEnsLib.gather_npArray_pop(
+            ens_stddev3d, me, (me.get_size(), len(Var3d), nlev, (nlat), nlon)
+        )
+        ens_stddev2d = pyEnsLib.gather_npArray_pop(
+            ens_stddev2d, me, (me.get_size(), len(Var2d), (nlat), nlon)
+        )
     # Assign to summary file:
-    if me.get_rank() == 0 :
+    if me.get_rank() == 0:
-        v_RMSZ[:,:,:,:]=zmall[:,:,:,:]
-        v_ens_avg3d[:,:,:,:,:]=ens_avg3d[:,:,:,:,:]
-        v_ens_stddev3d[:,:,:,:,:]=ens_stddev3d[:,:,:,:,:]
-        v_ens_avg2d[:,:,:,:]=ens_avg2d[:,:,:,:]
-        v_ens_stddev2d[:,:,:,:]=ens_stddev2d[:,:,:,:]
+        v_RMSZ[:, :, :, :] = zmall[:, :, :, :]
+        v_ens_avg3d[:, :, :, :, :] = ens_avg3d[:, :, :, :, :]
+        v_ens_stddev3d[:, :, :, :, :] = ens_stddev3d[:, :, :, :, :]
+        v_ens_avg2d[:, :, :, :] = ens_avg2d[:, :, :, :]
+        v_ens_stddev2d[:, :, :, :] = ens_stddev2d[:, :, :, :]
     print("STATUS: PyEnsSumPop has completed.")
diff --git a/tools/statistical_ensemble_test/pyCECT/pyPlots.py b/tools/statistical_ensemble_test/pyCECT/pyPlots.py
index 512618f6c9a..bd0a4e8d86d 100644
--- a/tools/statistical_ensemble_test/pyCECT/pyPlots.py
+++ b/tools/statistical_ensemble_test/pyCECT/pyPlots.py
@@ -5,55 +5,55 @@
 import numpy as np
 import seaborn as sns
-#change to input argument later
-filename = 'savefile.nc'
-test = 'hr_mpeO3_9ts'
-ptest = ' ('+test+')'
+# change to input argument later
+filename = "savefile.nc"
+test = "hr_mpeO3_9ts"
+ptest = " (" + test + ")"
 ds = xr.open_dataset(filename)
-test_size = ds.dims['test_size']
-ens_size = ds.dims['ens_size']
-nvars = ds.dims['nvars']
+test_size = ds.dims["test_size"]
+ens_size = ds.dims["ens_size"]
+nvars = ds.dims["nvars"]
-#get var list and names
-vars = ds['vars'].values
+# get var list and names
+vars = ds["vars"].values
-#get test scores and means
-t_scores = ds['scores'].values
-t_std_gm = ds['std_gm'].values
+# get test scores and means
+t_scores = ds["scores"].values
+t_std_gm = ds["std_gm"].values
-#get ens scores distribution and ens means
-ens_score_dist = ds['ens_sigma_scores'].values
-ens_std_gm = ds['ens_std_gm'].values
+# get ens scores distribution and ens means
+ens_score_dist = ds["ens_sigma_scores"].values
+ens_std_gm = ds["ens_std_gm"].values
 all_outside99 = []
 two_outside99 = []
 one_outside99 = []
 all_oneside_IQR = []
-#go through each variables
+# go through each variable
 for i, thisvar in enumerate(vars):
-    #print i
-    #print thisvar
-    #ensemble distribution information
-    p995 = np.percentile(ens_std_gm[i,:],99.5)
-    p75 = np.percentile(ens_std_gm[i,:],75)
-    p25 = np.percentile(ens_std_gm[i,:],25)
-    p05 = np.percentile(ens_std_gm[i,:],0.5)
-    #print p995
-
+    # print i
+    # print thisvar
+    # ensemble distribution information
+    p995 = np.percentile(ens_std_gm[i, :], 99.5)
+    p75 = np.percentile(ens_std_gm[i, :], 75)
+    p25 = np.percentile(ens_std_gm[i, :], 25)
+    p05 = np.percentile(ens_std_gm[i, :], 0.5)
+    # print p995
+
     isout_995 = 0
     isout_75 = 0
     isout_25 = 0
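Note: the counting loop just below tallies, per variable, how many of the test runs land outside the ensemble's 0.5-99.5 percentile band. A minimal standalone sketch of that check, with toy data and a hypothetical helper name (count_outside_band is not part of pyPlots.py):

    import numpy as np

    def count_outside_band(ens_vals, test_vals, lo_q=0.5, hi_q=99.5):
        # percentile band of the ensemble distribution for one variable
        lo = np.percentile(ens_vals, lo_q)
        hi = np.percentile(ens_vals, hi_q)
        # count the test values that fall outside [lo, hi]
        return int(np.sum((test_vals < lo) | (test_vals > hi)))

    rng = np.random.default_rng(0)
    ens = rng.normal(size=100)             # stand-in for ens_std_gm[i, :]
    tests = np.array([0.1, 3.5, -4.0])     # stand-in for t_std_gm[i, :]
    print(count_outside_band(ens, tests))  # likely 2 for these toy values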
-    #go through the test cases
+    # go through the test cases
-    #outside of 995 or all on one side?
+    # outside of 995 or all on one side?
     for j in range(test_size):
-        #print j
-        thisval = t_std_gm[i,j]
+        # print j
+        thisval = t_std_gm[i, j]
         if thisval > p995 or thisval < p05:
-            isout_995 = isout_995+1
+            isout_995 = isout_995 + 1
         if thisval > p75:
             isout_75 = isout_75 + 1
         if thisval < p25:
@@ -65,7 +65,7 @@
         two_outside99.append(i)
     elif isout_995 == 3:
         all_outside99.append(i)
-
+
     if isout_75 == 3 or isout_25 == 3:
         all_oneside_IQR.append(i)
@@ -77,95 +77,95 @@
 c = set(one_outside99) | set(two_outside99) | set(all_outside99) | set(all_oneside_IQR)
 uni = len(c)
-print "total variables = ", nvars
-print "one test outside 99th percentile = ", num_one99
-print "two tests outside 99th percentile = ", num_two99
-print "three (all) tests outside 99th percentile = ", num_all99
-print "all tests on one side of IQR = ", num_oneside
-print "unique number of variables that fall into the above categories = ", uni
+print("total variables = ", nvars)
+print("one test outside 99th percentile = ", num_one99)
+print("two tests outside 99th percentile = ", num_two99)
+print("three (all) tests outside 99th percentile = ", num_all99)
+print("all tests on one side of IQR = ", num_oneside)
+print("unique number of variables that fall into the above categories = ", uni)
-#now make plots
+# now make plots
 ens_list_array = []
 test_points = []
-flierprops = dict(marker='x', markerfacecolor='gray', markersize=1)
-#all outside
-if (num_all99 > 0):
-    sf_name = 'all_out99_' + test + '.png'
-    for i in all_outside99:
-        ens_list_array.append(ens_std_gm[i,:])
-        test_points.append(t_std_gm[i,:])
+flierprops = dict(marker="x", markerfacecolor="gray", markersize=1)
+# all outside
+if num_all99 > 0:
+    sf_name = "all_out99_" + test + ".png"
+    for i in all_outside99:
+        ens_list_array.append(ens_std_gm[i, :])
+        test_points.append(t_std_gm[i, :])
     labels = vars[all_outside99]
     f = plt.figure()
-    sns.boxplot(data=ens_list_array, flierprops=flierprops, whis=[0.5,99.5])
-# sns.boxplot(data=ens_list_array, fliersize= 2.0)
-    sns.stripplot(data = test_points, jitter = True, color="r", size=3, marker="D")
-    plt.title('Variables with all (three) tests outside the 99th percentile'+ ptest)
-    plt.ylabel('standardized global means')
-    plt.xticks(range(num_all99), labels, fontsize=8,rotation='vertical')
+    sns.boxplot(data=ens_list_array, flierprops=flierprops, whis=[0.5, 99.5])
+    # sns.boxplot(data=ens_list_array, fliersize= 2.0)
+    sns.stripplot(data=test_points, jitter=True, color="r", size=3, marker="D")
+    plt.title("Variables with all (three) tests outside the 99th percentile" + ptest)
+    plt.ylabel("standardized global means")
+    plt.xticks(range(num_all99), labels, fontsize=8, rotation="vertical")
     plt.subplots_adjust(bottom=0.2)
-    plt.savefig(sf_name, bbox_inches='tight')
+    plt.savefig(sf_name, bbox_inches="tight")
     f.clear()
     plt.close(f)
-#two outside
-if (num_two99 > 0):
-    sf_name = 'two_out99_' + test + '.png'
+# two outside
+if num_two99 > 0:
+    sf_name = "two_out99_" + test + ".png"
     ens_list_array = []
-    test_points = []
-    for i in two_outside99:
-        ens_list_array.append(ens_std_gm[i,:])
-        test_points.append(t_std_gm[i,:])
+    test_points = []
+    for i in two_outside99:
+        ens_list_array.append(ens_std_gm[i, :])
+        test_points.append(t_std_gm[i, :])
     labels = vars[two_outside99]
     f = plt.figure()
-    sns.boxplot(data=ens_list_array, flierprops=flierprops, whis=[0.5,99.5])
-# sns.boxplot(data=ens_list_array, fliersize= 2.0)
-    sns.stripplot(data = test_points, jitter = True, color="r", size=3, marker="D")
-    plt.title('Variables with two tests outside the 99th percentile' + ptest)
-
plt.ylabel('standardized global means') - plt.xticks(range(num_two99), labels, fontsize=8,rotation='vertical') + sns.boxplot(data=ens_list_array, flierprops=flierprops, whis=[0.5, 99.5]) + # sns.boxplot(data=ens_list_array, fliersize= 2.0) + sns.stripplot(data=test_points, jitter=True, color="r", size=3, marker="D") + plt.title("Variables with two tests outside the 99th percentile" + ptest) + plt.ylabel("standardized global means") + plt.xticks(range(num_two99), labels, fontsize=8, rotation="vertical") plt.subplots_adjust(bottom=0.2) - plt.savefig(sf_name, bbox_inches='tight') + plt.savefig(sf_name, bbox_inches="tight") f.clear() plt.close(f) -#one outside -if (num_one99 > 0): - sf_name = 'one_out99_' + test + '.png' +# one outside +if num_one99 > 0: + sf_name = "one_out99_" + test + ".png" ens_list_array = [] test_points = [] - for i in one_outside99: - ens_list_array.append(ens_std_gm[i,:]) - test_points.append(t_std_gm[i,:]) + for i in one_outside99: + ens_list_array.append(ens_std_gm[i, :]) + test_points.append(t_std_gm[i, :]) labels = vars[one_outside99] f = plt.figure() - sns.boxplot(data=ens_list_array, flierprops=flierprops, whis=[0.5,99.5]) -# sns.boxplot(data=ens_list_array, fliersize= 2.0) - sns.stripplot(data = test_points, jitter = True, color="r", size=3, marker="D") - plt.title('Variables with one test outside the 99th percentile' + ptest) - plt.ylabel('standardized global means') - plt.xticks(range(num_one99), labels, fontsize=8,rotation='vertical') + sns.boxplot(data=ens_list_array, flierprops=flierprops, whis=[0.5, 99.5]) + # sns.boxplot(data=ens_list_array, fliersize= 2.0) + sns.stripplot(data=test_points, jitter=True, color="r", size=3, marker="D") + plt.title("Variables with one test outside the 99th percentile" + ptest) + plt.ylabel("standardized global means") + plt.xticks(range(num_one99), labels, fontsize=8, rotation="vertical") plt.subplots_adjust(bottom=0.2) - plt.savefig(sf_name, bbox_inches='tight') + plt.savefig(sf_name, bbox_inches="tight") f.clear() plt.close(f) -#oneside -if (num_oneside > 0): - sf_name = 'oneside_IQR_' + test + '.png' +# oneside +if num_oneside > 0: + sf_name = "oneside_IQR_" + test + ".png" ens_list_array = [] test_points = [] - for i in all_oneside_IQR: - ens_list_array.append(ens_std_gm[i,:]) - test_points.append(t_std_gm[i,:]) + for i in all_oneside_IQR: + ens_list_array.append(ens_std_gm[i, :]) + test_points.append(t_std_gm[i, :]) labels = vars[all_oneside_IQR] f = plt.figure() - sns.boxplot(data=ens_list_array, flierprops=flierprops, whis=[0.5,99.5]) -# sns.boxplot(data=ens_list_array, fliersize= 2.0) - sns.stripplot(data = test_points, jitter = True, color="r", size=3, marker="D") - plt.title('Variables with all tests on one side of the IQR' + ptest) - plt.ylabel('standardized global means') - plt.xticks(range(num_oneside), labels, fontsize=8,rotation='vertical') + sns.boxplot(data=ens_list_array, flierprops=flierprops, whis=[0.5, 99.5]) + # sns.boxplot(data=ens_list_array, fliersize= 2.0) + sns.stripplot(data=test_points, jitter=True, color="r", size=3, marker="D") + plt.title("Variables with all tests on one side of the IQR" + ptest) + plt.ylabel("standardized global means") + plt.xticks(range(num_oneside), labels, fontsize=8, rotation="vertical") plt.subplots_adjust(bottom=0.2) - plt.savefig(sf_name, bbox_inches='tight') + plt.savefig(sf_name, bbox_inches="tight") f.clear() plt.close(f) diff --git a/tools/statistical_ensemble_test/pyCECT/test_pyEnsSum.sh b/tools/statistical_ensemble_test/pyCECT/test_pyEnsSum.sh index 
e1c2dbc66dd..d2cbfa66c6b 100644 --- a/tools/statistical_ensemble_test/pyCECT/test_pyEnsSum.sh +++ b/tools/statistical_ensemble_test/pyCECT/test_pyEnsSum.sh @@ -11,4 +11,3 @@ setenv TMPDIR /glade/scratch/$USER/temp mkdir -p $TMPDIR mpiexec_mpt python pyEnsSum.py --esize 350 --indir /glade/p/cisl/asap/pycect_sample_data/cam_c1.2.2.1/uf_cam_ens_files --sumfile uf.ens.c1.2.2.1_fc5.ne30.nc --tslice 1 --tag cesm1.2.2.1 --compset FC5 --res ne30_ne30 --mach cheyenne --verbose --jsonfile excluded_varlist.json - diff --git a/tools/statistical_ensemble_test/single_run.py b/tools/statistical_ensemble_test/single_run.py index 321692c281c..fe182fb5b86 100644 --- a/tools/statistical_ensemble_test/single_run.py +++ b/tools/statistical_ensemble_test/single_run.py @@ -6,316 +6,354 @@ # Command options # def disp_usage(callType): - if callType == 'ensemble.py': - print('\nSets up multiple CESM cases for either an ensemble of runs or a small (CAM-ECT = 3, POP-ECT = 1)') - print('test set (default). Then use pyCECT utilities to create an ensemble') - print('summary file or to evaluate the small test set of runs against the ensemble.') - print(' ') - print('----------------------------') - print('ensemble.py :') + if callType == "ensemble.py": + print( + "\nSets up multiple CESM cases for either an ensemble of runs or a small (CAM-ECT = 3, POP-ECT = 1)" + ) + print("test set (default). Then use pyCECT utilities to create an ensemble") + print( + "summary file or to evaluate the small test set of runs against the ensemble." + ) + print(" ") + print("----------------------------") + print("ensemble.py :") else: - print('\nSets up a single CESM case. ') - print(' ') - print('----------------------------') - print('single_run.py :') - print('----------------------------') - print(' ') - print('Required flags:') - if callType == 'single_run.py': - print(' --case Case name passed on to create_newcase (incl. full path AND same)') + print("\nSets up a single CESM case. ") + print(" ") + print("----------------------------") + print("single_run.py :") + print("----------------------------") + print(" ") + print("Required flags:") + if callType == "single_run.py": + print( + " --case Case name passed on to create_newcase (incl. full path AND same)" + ) else: - print(' --case Case name passed on to create_newcase (incl. full path AND must end in ".000")') - print(' --mach Machine name passed on to create_newcase') - print(' ') + print( + ' --case Case name passed on to create_newcase (incl. 
full path AND must end in ".000")'
+        )
+    print(" --mach Machine name passed on to create_newcase")
+    print(" ")
     print('Optional flags (+ all "--" options to create_newcase): ')
-    print(' --project Project number to charge in job scripts')
-    print(' --ect Specify whether ensemble is for CAM-ECT or POP-ECT (default = cam)')
-    if callType == 'single_run.py':
-        print(' --pertlim Run (CAM or POP) with specified non-zero pertlim')
-    print(' --walltime Amount of walltime requested (default = 4:30 (CAM-ECT) 2:00 (POP-ECT), or 0:10 with --uf enabled)')
-    print(' --compiler Compiler to use (default = same as Machine default) ')
-    print(' --compset Compset to use (default = F2000climo (CAM-ECT) or G (POP-ECT))')
-    print(' --res Resolution to run (default = f19_f19 (CAM-ECT) or T62_g17 (POP-ECT))')
-    print(' --uf Enable ninth time step runs (ultra-fast mode for CAM-ECT) - otherwise the default is 12-month runs')
-    if callType == 'ensemble.py':
-        print(' --nb Disables auto building the root case of the ensemble')
-        print(' --ns Disables auto submitting any members of the ensemble')
-        print(' --ensemble Build the ensemble (instead of building case(s) with random pertlim values for verification),')
-        print(' and specify the number of ensemble members to generate (e.g.: 151 for CAM-ECT annual averages ')
-        print(' or 350 for ultra-fast CAM-ECT mode or 40 for POP-ECT)')
+    print(" --project Project number to charge in job scripts")
+    print(
+        " --ect Specify whether ensemble is for CAM-ECT or POP-ECT (default = cam)"
+    )
+    if callType == "single_run.py":
+        print(" --pertlim Run (CAM or POP) with specified non-zero pertlim")
+    print(
+        " --walltime Amount of walltime requested (default = 4:30 (CAM-ECT) 2:00 (POP-ECT), or 0:10 with --uf enabled)"
+    )
+    print(" --compiler Compiler to use (default = same as Machine default) ")
+    print(
+        " --compset Compset to use (default = F2000climo (CAM-ECT) or G (POP-ECT))"
+    )
+    print(
+        " --res Resolution to run (default = f19_f19 (CAM-ECT) or T62_g17 (POP-ECT))"
+    )
+    print(
+        " --uf Enable ninth time step runs (ultra-fast mode for CAM-ECT) - otherwise the default is 12-month runs"
+    )
+    if callType == "ensemble.py":
+        print(
+            " --nb Disables auto building the root case of the ensemble"
+        )
+        print(
+            " --ns Disables auto submitting any members of the ensemble"
+        )
+        print(
+            " --ensemble Build the ensemble (instead of building case(s) with random pertlim values for verification),"
+        )
+        print(
+            " and specify the number of ensemble members to generate (e.g.: 151 for CAM-ECT annual averages "
+        )
+        print(
+            " or 350 for ultra-fast CAM-ECT mode or 40 for POP-ECT)"
+        )
     else:
-        print(' --nb Disables building (and submitting) the single case')
-        print(' --ns Disables submitting the single case')
-    print(' --help, -h Prints out this usage message')
+        print(" --nb Disables building (and submitting) the single case")
+        print(" --ns Disables submitting the single case")
+    print(" --help, -h Prints out this usage message")
+
 ########
 def process_args_dict(caller, caller_argv):
+    # Pull in and analyze the command line arguments
+    s = "case= mach= project= compiler= compset= res= uf nb ns ensemble= verbose silent test multi-driver pecount= nist= mpilib= pesfile= gridfile= srcroot= output-root= script-root= queue= user-modes-dir= input-dir= pertlim= walltime= h ect="
-    # Pull in and analyze the command line arguements
-    s='case= mach= project= compiler= compset= res= uf nb ns ensemble= verbose silent test multi-driver pecount= nist= mpilib= pesfile= gridfile= srcroot=
output-root= script-root= queue= user-modes-dir= input-dir= pertlim= walltime= h ect=' - - optkeys=s.split() + optkeys = s.split() try: - opts, args = getopt.getopt(caller_argv,"hf:",optkeys) + opts, args = getopt.getopt(caller_argv, "hf:", optkeys) except getopt.GetoptError: print("\nERROR: unrecognized command line argument") disp_usage(caller) sys.exit(2) - #check for help - for opt,arg in opts: - if opt == '-h': + # check for help + for opt, arg in opts: + if opt == "-h": disp_usage(caller) sys.exit() - #opts_dict and defaults - opts_dict={} - opts_dict['walltime']='00:00' - opts_dict['pertlim']= '0' - opts_dict['nb'] = False - opts_dict['ns'] = False - opts_dict['uf'] = False - opts_dict['ensemble'] = 0 - opts_dict['ect'] = 'cam' - #for create newcase - opts_dict['verbose'] = False - opts_dict['silent'] = False - opts_dict['test'] = False - opts_dict['multi-driver'] = False - opts_dict['case'] = 'NONE' - opts_dict['mach'] = 'NONE' - opts_dict['compset'] = 'NONE' - opts_dict['res'] = 'NONE' - - s_case_flags = '' - - #opts_dict = utility.getopt_parseconfig(opts, optkeys, caller, opts_dict) + # opts_dict and defaults + opts_dict = {} + opts_dict["walltime"] = "00:00" + opts_dict["pertlim"] = "0" + opts_dict["nb"] = False + opts_dict["ns"] = False + opts_dict["uf"] = False + opts_dict["ensemble"] = 0 + opts_dict["ect"] = "cam" + # for create newcase + opts_dict["verbose"] = False + opts_dict["silent"] = False + opts_dict["test"] = False + opts_dict["multi-driver"] = False + opts_dict["case"] = "NONE" + opts_dict["mach"] = "NONE" + opts_dict["compset"] = "NONE" + opts_dict["res"] = "NONE" + + s_case_flags = "" + + # opts_dict = utility.getopt_parseconfig(opts, optkeys, caller, opts_dict) for opt, arg in opts: - if opt == '--case': - opts_dict['case'] = arg - s_case_flags += ' ' + opt + ' ' + arg - elif opt == '--mach': - opts_dict['mach'] = arg - s_case_flags += ' ' + opt + ' ' + arg - elif opt == '--project': - opts_dict['project'] = arg - s_case_flags += ' ' + opt + ' ' + arg - elif opt == '--compset': - opts_dict['compset'] = arg - #required - add to flags later - elif opt == '--res': - opts_dict['res'] = arg - #required - add to flags later - elif opt == '--ect': - opts_dict['ect'] = arg - elif opt == '--ensemble': - opts_dict['ensemble'] = int(arg) - elif opt == '--compiler': - opts_dict['compiler'] = arg - s_case_flags += ' ' + opt + ' ' + arg - elif opt == '--pertlim': - if caller == 'ensemble.py': + if opt == "--case": + opts_dict["case"] = arg + s_case_flags += " " + opt + " " + arg + elif opt == "--mach": + opts_dict["mach"] = arg + s_case_flags += " " + opt + " " + arg + elif opt == "--project": + opts_dict["project"] = arg + s_case_flags += " " + opt + " " + arg + elif opt == "--compset": + opts_dict["compset"] = arg + # required - add to flags later + elif opt == "--res": + opts_dict["res"] = arg + # required - add to flags later + elif opt == "--ect": + opts_dict["ect"] = arg + elif opt == "--ensemble": + opts_dict["ensemble"] = int(arg) + elif opt == "--compiler": + opts_dict["compiler"] = arg + s_case_flags += " " + opt + " " + arg + elif opt == "--pertlim": + if caller == "ensemble.py": print("WARNING: pertlim ignored for ensemble.py.") - opts_dict['pertlim'] = "0" + opts_dict["pertlim"] = "0" else: - opts_dict['pertlim'] = arg - elif opt == '--project': - opts_dict['project'] = arg - s_case_flags += ' ' + opt + ' ' + arg - elif opt == '--uf': - opts_dict['uf'] = True - elif opt == '--nb': - opts_dict['nb'] = True - elif opt == '--ns': - opts_dict['ns'] = True - elif opt 
== '--verbose': - opts_dict['verbose'] = True - s_case_flags += ' ' + opt - elif opt == '--silent': - opts_dict['silent'] = True - s_case_flags += ' ' + opt - elif opt == '--test': - opts_dict['test'] = True - s_case_flags += ' ' + opt - elif opt == '--multi-driver': - opts_dict['multi-driver'] = True - s_case_flags += ' ' + opt - elif opt == '--nist': - opts_dict['nist'] = arg - s_case_flags += ' ' + opt + ' ' + arg - elif opt == '--pecount': - opts_dict['pecount'] = arg - s_case_flags += ' ' + opt + ' ' + arg - elif opt == '--mpilib': - opts_dict['mpilib'] = arg - s_case_flags += ' ' + opt + ' ' + arg - elif opt == '--pesfile': - opts_dict['pesfile'] = arg - s_case_flags += ' ' + opt + ' ' + arg - elif opt == '--srcroot': - opts_dict['srcroot'] = arg - s_case_flags += ' ' + opt + ' ' + arg - elif opt == '--output-root': - opts_dict['output-root'] = arg - s_case_flags += ' ' + opt + ' ' + arg - elif opt == '--script-root': - opts_dict['script-root'] = arg - s_case_flags += ' ' + opt + ' ' + arg - elif opt == '--queue': - opts_dict['queue'] = arg - s_case_flags += ' ' + opt + ' ' + arg - elif opt == '--input-dir': - opts_dict['input-dir'] = arg - s_case_flags += ' ' + opt + ' ' + arg - elif opt == '--user-modes-dir': - opts_dict['user-modes-dir'] = arg - s_case_flags += ' ' + opt + ' ' + arg - elif opt == '--walltime': - opts_dict['walltime'] = arg - #add below - - #check required things: case, machine - if opts_dict['mach'] == 'NONE': - print('Error: Must specify machine (--mach)') + opts_dict["pertlim"] = arg + elif opt == "--project": + opts_dict["project"] = arg + s_case_flags += " " + opt + " " + arg + elif opt == "--uf": + opts_dict["uf"] = True + elif opt == "--nb": + opts_dict["nb"] = True + elif opt == "--ns": + opts_dict["ns"] = True + elif opt == "--verbose": + opts_dict["verbose"] = True + s_case_flags += " " + opt + elif opt == "--silent": + opts_dict["silent"] = True + s_case_flags += " " + opt + elif opt == "--test": + opts_dict["test"] = True + s_case_flags += " " + opt + elif opt == "--multi-driver": + opts_dict["multi-driver"] = True + s_case_flags += " " + opt + elif opt == "--nist": + opts_dict["nist"] = arg + s_case_flags += " " + opt + " " + arg + elif opt == "--pecount": + opts_dict["pecount"] = arg + s_case_flags += " " + opt + " " + arg + elif opt == "--mpilib": + opts_dict["mpilib"] = arg + s_case_flags += " " + opt + " " + arg + elif opt == "--pesfile": + opts_dict["pesfile"] = arg + s_case_flags += " " + opt + " " + arg + elif opt == "--srcroot": + opts_dict["srcroot"] = arg + s_case_flags += " " + opt + " " + arg + elif opt == "--output-root": + opts_dict["output-root"] = arg + s_case_flags += " " + opt + " " + arg + elif opt == "--script-root": + opts_dict["script-root"] = arg + s_case_flags += " " + opt + " " + arg + elif opt == "--queue": + opts_dict["queue"] = arg + s_case_flags += " " + opt + " " + arg + elif opt == "--input-dir": + opts_dict["input-dir"] = arg + s_case_flags += " " + opt + " " + arg + elif opt == "--user-modes-dir": + opts_dict["user-modes-dir"] = arg + s_case_flags += " " + opt + " " + arg + elif opt == "--walltime": + opts_dict["walltime"] = arg + # add below + + # check required things: case, machine + if opts_dict["mach"] == "NONE": + print("Error: Must specify machine (--mach)") sys.exit() - if opts_dict['case'] == 'NONE': - print('Error: Must specify case (--case)') + if opts_dict["case"] == "NONE": + print("Error: Must specify case (--case)") sys.exit() else: - case = opts_dict['case'] - if caller == 'ensemble.py': - if case[-4:] 
!= '.000': - print('Error: when using ensemble.py, the case name (--case) must end in ".000".') + case = opts_dict["case"] + if caller == "ensemble.py": + if case[-4:] != ".000": + print( + 'Error: when using ensemble.py, the case name (--case) must end in ".000".' + ) sys.exit() case_dir = os.path.dirname(case) if os.path.isdir(case_dir) == False: - print('Error: Need a valid full path with the case name (--case).') + print("Error: Need a valid full path with the case name (--case).") sys.exit() - #defaults for resolution and case - if opts_dict['ect'] == 'pop': - if opts_dict['compset'] == 'NONE': - opts_dict['compset'] = 'G' - if opts_dict['res'] == 'NONE': - opts_dict['res'] = 'T62_g17' - else: #cam - if opts_dict['compset'] == 'NONE': - opts_dict['compset'] = 'F2000climo' - if opts_dict['res'] == 'NONE': - opts_dict['res'] = 'f19_f19' - - if opts_dict['walltime'] == '00:00': - if opts_dict['uf'] == True: - opts_dict['walltime'] = '00:10' + # defaults for resolution and case + if opts_dict["ect"] == "pop": + if opts_dict["compset"] == "NONE": + opts_dict["compset"] = "G" + if opts_dict["res"] == "NONE": + opts_dict["res"] = "T62_g17" + else: # cam + if opts_dict["compset"] == "NONE": + opts_dict["compset"] = "F2000climo" + if opts_dict["res"] == "NONE": + opts_dict["res"] = "f19_f19" + + if opts_dict["walltime"] == "00:00": + if opts_dict["uf"] == True: + opts_dict["walltime"] = "00:10" else: - if opts_dict['ect'] == 'pop': - opts_dict['walltime'] = '02:00' + if opts_dict["ect"] == "pop": + opts_dict["walltime"] = "02:00" else: - opts_dict['walltime'] = '04:30' - s_case_flags += ' --walltime ' + opts_dict['walltime'] + opts_dict["walltime"] = "04:30" + s_case_flags += " --walltime " + opts_dict["walltime"] return opts_dict, s_case_flags + ####### + def single_case(opts_dict, case_flags, stat_dir): - #scripts dir + # scripts dir ret = os.chdir(stat_dir) - ret = os.chdir('../../scripts') + ret = os.chdir("../../scripts") ##res and compset are required for create_newcase - case_flags += ' --compset ' + opts_dict['compset'] + ' --res ' + opts_dict['res'] + ' --run-unsupported' - - #create newcase - print('STATUS: create_newcase flags = ' + case_flags) - command = './create_newcase ' + case_flags + case_flags += ( + " --compset " + + opts_dict["compset"] + + " --res " + + opts_dict["res"] + + " --run-unsupported" + ) + + # create newcase + print("STATUS: create_newcase flags = " + case_flags) + command = "./create_newcase " + case_flags ret = os.system(command) - if (ret != 0): - print('ERROR: create_newcase returned a non-zero exit code.') + if ret != 0: + print("ERROR: create_newcase returned a non-zero exit code.") sys.exit() - #modify namelist settings - this_case = opts_dict['case'] - print('STATUS: case = ' + this_case) + # modify namelist settings + this_case = opts_dict["case"] + print("STATUS: case = " + this_case) ret = os.chdir(this_case) - command = 'chmod u+w *' + command = "chmod u+w *" ret = os.system(command) - command = 'cp env_run.xml env_run.xml.orig' + command = "cp env_run.xml env_run.xml.orig" ret = os.system(command) print("STATUS: Adjusting env_run.xml....") - command = './xmlchange --file env_run.xml --id BFBFLAG --val TRUE' + command = "./xmlchange --file env_run.xml --id BFBFLAG --val TRUE" ret = os.system(command) - command = './xmlchange --file env_run.xml --id DOUT_S --val FALSE' + command = "./xmlchange --file env_run.xml --id DOUT_S --val FALSE" ret = os.system(command) - command = './xmlchange --file env_run.xml --id REST_OPTION --val never' + command = 
"./xmlchange --file env_run.xml --id REST_OPTION --val never" ret = os.system(command) - #time steps - if opts_dict['ect'] == 'pop': - command = './xmlchange --file env_run.xml --id STOP_OPTION --val nyears' - ret = os.system(command) - command = ' ./xmlchange --file env_run.xml --id STOP_N --val 1' - ret = os.system(command) + # time steps + if opts_dict["ect"] == "pop": + command = "./xmlchange --file env_run.xml --id STOP_OPTION --val nyears" + ret = os.system(command) + command = " ./xmlchange --file env_run.xml --id STOP_N --val 1" + ret = os.system(command) else: - if opts_dict['uf'] == True: - command = './xmlchange --file env_run.xml --id STOP_OPTION --val nsteps' + if opts_dict["uf"] == True: + command = "./xmlchange --file env_run.xml --id STOP_OPTION --val nsteps" ret = os.system(command) - command = ' ./xmlchange --file env_run.xml --id STOP_N --val 9' + command = " ./xmlchange --file env_run.xml --id STOP_N --val 9" ret = os.system(command) else: - command = './xmlchange --file env_run.xml --id STOP_OPTION --val nmonths' + command = "./xmlchange --file env_run.xml --id STOP_OPTION --val nmonths" ret = os.system(command) - command = './xmlchange --file env_run.xml --id STOP_N --val 12' + command = "./xmlchange --file env_run.xml --id STOP_N --val 12" ret = os.system(command) - print('STATUS: running setup for single case...') - command = './case.setup' + print("STATUS: running setup for single case...") + command = "./case.setup" ret = os.system(command) print("STATUS: Adjusting user_nl_* files....") - #POP-ECT - if opts_dict['ect'] == 'pop': - if os.path.isfile('user_nl_pop') == True: + # POP-ECT + if opts_dict["ect"] == "pop": + if os.path.isfile("user_nl_pop") == True: with open("user_nl_pop", "a") as f: - if opts_dict['pertlim'] != "0": - text = "\ninit_ts_perturb = {}".format(opts_dict['pertlim']) + if opts_dict["pertlim"] != "0": + text = "\ninit_ts_perturb = {}".format(opts_dict["pertlim"]) f.write(text) else: print("Warning: no user_nl_pop found") else: - #CAM-ECT - #cam - if os.path.isfile('user_nl_cam') == True: - if opts_dict['uf'] == True: + # CAM-ECT + # cam + if os.path.isfile("user_nl_cam") == True: + if opts_dict["uf"] == True: text1 = "\navgflag_pertape = 'I'" text2 = "\nnhtfrq = 9" else: text1 = "\navgflag_pertape = 'A'" text2 = "\nnhtfrq = -8760" - text3 = "\ninithist = 'NONE'" + text3 = "\ninithist = 'NONE'" with open("user_nl_cam", "a") as f: f.write(text1) f.write(text2) f.write(text3) - if opts_dict['pertlim'] != "0": - text = "\npertlim = " + opts_dict['pertlim'] + if opts_dict["pertlim"] != "0": + text = "\npertlim = " + opts_dict["pertlim"] f.write(text) else: print("Warning: no user_nl_cam found") - #clm - if os.path.isfile('user_nl_clm') == True: - if opts_dict['uf'] == True: + # clm + if os.path.isfile("user_nl_clm") == True: + if opts_dict["uf"] == True: text1 = "\nhist_avgflag_pertape = 'I'" text2 = "\nhist_nhtfrq = 9" else: @@ -326,71 +364,71 @@ def single_case(opts_dict, case_flags, stat_dir): f.write(text1) f.write(text2) - #disable ice output - if os.path.isfile('user_nl_cice') == True: + # disable ice output + if os.path.isfile("user_nl_cice") == True: text = "\nhistfreq = 'x','x','x','x','x'" with open("user_nl_cice", "a") as f: f.write(text) - #pop - if os.path.isfile('user_nl_pop') == True: + # pop + if os.path.isfile("user_nl_pop") == True: text = ["'\nn_tavg_streams = 1"] text.append("\nldiag_bsf = .false.") text.append("\nldiag_global_tracer_budgets = .false.") text.append("\nldiag_velocity = .false.") - text.append("\ndiag_gm_bolus = 
-    print('STATUS: running setup for single case...')
-    command = './case.setup'
+    print("STATUS: running setup for single case...")
+    command = "./case.setup"
     ret = os.system(command)
     print("STATUS: Adjusting user_nl_* files....")
-    #POP-ECT
-    if opts_dict['ect'] == 'pop':
-        if os.path.isfile('user_nl_pop') == True:
+    # POP-ECT
+    if opts_dict["ect"] == "pop":
+        if os.path.isfile("user_nl_pop") == True:
             with open("user_nl_pop", "a") as f:
-                if opts_dict['pertlim'] != "0":
-                    text = "\ninit_ts_perturb = {}".format(opts_dict['pertlim'])
+                if opts_dict["pertlim"] != "0":
+                    text = "\ninit_ts_perturb = {}".format(opts_dict["pertlim"])
                     f.write(text)
         else:
             print("Warning: no user_nl_pop found")
     else:
-        #CAM-ECT
-        #cam
-        if os.path.isfile('user_nl_cam') == True:
-            if opts_dict['uf'] == True:
+        # CAM-ECT
+        # cam
+        if os.path.isfile("user_nl_cam") == True:
+            if opts_dict["uf"] == True:
                 text1 = "\navgflag_pertape = 'I'"
                 text2 = "\nnhtfrq = 9"
             else:
                 text1 = "\navgflag_pertape = 'A'"
                 text2 = "\nnhtfrq = -8760"
-            text3 = "\ninithist = 'NONE'"
+            text3 = "\ninithist = 'NONE'"
             with open("user_nl_cam", "a") as f:
                 f.write(text1)
                 f.write(text2)
                 f.write(text3)
-                if opts_dict['pertlim'] != "0":
-                    text = "\npertlim = " + opts_dict['pertlim']
+                if opts_dict["pertlim"] != "0":
+                    text = "\npertlim = " + opts_dict["pertlim"]
                     f.write(text)
         else:
             print("Warning: no user_nl_cam found")
-        #clm
-        if os.path.isfile('user_nl_clm') == True:
-            if opts_dict['uf'] == True:
+        # clm
+        if os.path.isfile("user_nl_clm") == True:
+            if opts_dict["uf"] == True:
                 text1 = "\nhist_avgflag_pertape = 'I'"
                 text2 = "\nhist_nhtfrq = 9"
             else:
@@ -326,71 +364,71 @@ def single_case(opts_dict, case_flags, stat_dir):
                 f.write(text1)
                 f.write(text2)
-        #disable ice output
-        if os.path.isfile('user_nl_cice') == True:
+        # disable ice output
+        if os.path.isfile("user_nl_cice") == True:
             text = "\nhistfreq = 'x','x','x','x','x'"
             with open("user_nl_cice", "a") as f:
                 f.write(text)
-        #pop
-        if os.path.isfile('user_nl_pop') == True:
+        # pop
+        if os.path.isfile("user_nl_pop") == True:
             text = ["\nn_tavg_streams = 1"]
             text.append("\nldiag_bsf = .false.")
             text.append("\nldiag_global_tracer_budgets = .false.")
             text.append("\nldiag_velocity = .false.")
-            text.append("\ndiag_gm_bolus = .false." )
+            text.append("\ndiag_gm_bolus = .false.")
             text.append("\nltavg_nino_diags_requested = .false.")
-            text.append("\nmoc_requested = .false." )
+            text.append("\nmoc_requested = .false.")
             text.append("\nn_heat_trans_requested = .false.")
-            text.append("\nn_salt_trans_requested = .false." )
+            text.append("\nn_salt_trans_requested = .false.")
             text.append("\ntavg_freq_opt = 'once', 'never', 'never'")
             text.append("\ntavg_file_freq_opt = 'once', 'never', 'never'")
-            text.append("\ndiag_cfl_freq_opt = 'never'" )
+            text.append("\ndiag_cfl_freq_opt = 'never'")
             text.append("\ndiag_global_freq_opt = 'never'")
-            text.append("\ndiag_transp_freq_opt = 'never'" )
+            text.append("\ndiag_transp_freq_opt = 'never'")
             with open("user_nl_pop", "a") as f:
                 for i in range(len(text)):
                     f.write(text[i])
-    #preview namelists
+    # preview namelists
     print("STATUS: Updating namelists....")
-    command = './preview_namelists'
+    command = "./preview_namelists"
     ret = os.system(command)
     # Build executable
     nb = opts_dict["nb"]
     ns = opts_dict["ns"]
-    print ('STATUS: no-build = ' + str(nb))
-    print ('STATUS: no-submit = ' + str(ns))
-    if nb == False :
+    print("STATUS: no-build = " + str(nb))
+    print("STATUS: no-submit = " + str(ns))
+    if nb == False:
         print("STATUS: building case ...")
-        command = './case.build'
+        command = "./case.build"
         ret = os.system(command)
         if ret != 0:
             print("Error building...")
             sys.exit()
     if ns == False:
-        command = './case.submit'
+        command = "./case.submit"
         ret = os.system(command)
 ########
 def main(argv):
-    caller = 'single_run.py'
+    caller = "single_run.py"
-    #directory with single_run.py and ensemble.py
+    # directory with single_run.py and ensemble.py
     stat_dir = os.path.dirname(os.path.realpath(__file__))
-    print( "STATUS: stat_dir = " + stat_dir)
+    print("STATUS: stat_dir = " + stat_dir)
     opts_dict, case_flags = process_args_dict(caller, argv)
-
     single_case(opts_dict, case_flags, stat_dir)
     print("STATUS: completed single run setup.")
+
 ########
 if __name__ == "__main__":
     main(sys.argv[1:])
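Note: the user_nl_* updates in single_case() above all append hand-built lists of Fortran-namelist strings to the case's user_nl files. A self-contained sketch of the same idea as one small helper (append_namelist_settings is a hypothetical name, not part of these scripts):

    import os

    def append_namelist_settings(nl_file, settings):
        # append namelist-style 'name = value' lines to a user_nl_* file
        if not os.path.isfile(nl_file):
            print("Warning: no " + nl_file + " found")
            return
        with open(nl_file, "a") as f:
            for name, value in settings.items():
                f.write("\n" + name + " = " + value)

    # e.g. append_namelist_settings("user_nl_pop", {"ldiag_bsf": ".false.", "moc_requested": ".false."})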