diff --git a/cime_config/config_tests.xml b/cime_config/config_tests.xml
index 5c86ade09e51..43d7519c428f 100644
--- a/cime_config/config_tests.xml
+++ b/cime_config/config_tests.xml
@@ -169,6 +169,7 @@ LII CLM initial condition interpolation test
22
0
FALSE
+ TRUE
@@ -179,6 +180,7 @@ LII CLM initial condition interpolation test
ndays
11
FALSE
+ TRUE
@@ -393,6 +395,10 @@ LII CLM initial condition interpolation test
1
-1
FALSE
+ FALSE
+ none
+ $STOP_OPTION
+ $STOP_N
diff --git a/driver_cpl/cime_config/config_component.xml b/driver_cpl/cime_config/config_component.xml
index 7abe633670c0..8fbdb8959bb8 100644
--- a/driver_cpl/cime_config/config_component.xml
+++ b/driver_cpl/cime_config/config_component.xml
@@ -2956,12 +2956,22 @@
char
- /UNSET
+ UNSET
test
env_test.xml
standard full pathname of the cprnc executable
+
+ char
+ UNSET
+ user_mods
+ env_case.xml
+ path to user mods under TESTS_MODS_DIR or USER_MODS_DIR
+
+
+
+
diff --git a/scripts/Tools/component_generate_baseline b/scripts/Tools/component_generate_baseline
index 8fbc4bd2a255..0dcca7681c8b 100755
--- a/scripts/Tools/component_generate_baseline
+++ b/scripts/Tools/component_generate_baseline
@@ -35,18 +35,25 @@ OR
parser.add_argument("-b", "--baseline-dir",
help="Use custom baseline dir")
+ parser.add_argument("-o", "--allow-baseline-overwrite", action="store_true",
+ help="By default an attempt to overwrite an existing baseline directory "
+ "will raise an error. Specifying this option allows "
+ "existing baseline directories to be silently overwritten.")
+
args = parser.parse_args(args[1:])
CIME.utils.handle_standard_logging_options(args)
- return args.caseroot, args.baseline_dir
+ return args.caseroot, args.baseline_dir, args.allow_baseline_overwrite
###############################################################################
def _main_func(description):
###############################################################################
- caseroot, baseline_dir = parse_command_line(sys.argv, description)
+ caseroot, baseline_dir, allow_baseline_overwrite = \
+ parse_command_line(sys.argv, description)
with Case(caseroot) as case:
- success, comments = generate_baseline(case, baseline_dir)
+ success, comments = generate_baseline(case, baseline_dir,
+ allow_baseline_overwrite)
print comments
sys.exit(0 if success else 1)
diff --git a/scripts/create_test b/scripts/create_test
index 39a71856e8af..fa5044d24f4f 100755
--- a/scripts/create_test
+++ b/scripts/create_test
@@ -257,7 +257,7 @@ OR
args.no_batch = True
if args.test_id is None:
- args.test_id = CIME.utils.get_utc_timestamp()
+ args.test_id = CIME.utils.get_timestamp()
if args.testfile is not None:
with open(args.testfile, "r") as fd:
diff --git a/utils/python/CIME/SystemTests/nck.py b/utils/python/CIME/SystemTests/nck.py
index 09902cde3f7c..d00edea7b8d8 100644
--- a/utils/python/CIME/SystemTests/nck.py
+++ b/utils/python/CIME/SystemTests/nck.py
@@ -1,126 +1,53 @@
"""
-Implementation of the CIME NCK test. This class inherits from SystemTestsCommon
+Implementation of the CIME NCK test: Tests multi-instance
+
+This does two runs: In the first, we use one instance per component; in the
second, we use two instances per component. NTASKS are changed in each run so
+that the number of tasks per instance is the same for both runs.
-Build two exectuables for this test, the first is a default build the
-second halves the number of tasks and runs two instances for each component
Lay all of the components out sequentially
"""
-import shutil
+
from CIME.XML.standard_module_setup import *
+from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo
from CIME.case_setup import case_setup
-import CIME.utils
-from CIME.SystemTests.system_tests_common import SystemTestsCommon
logger = logging.getLogger(__name__)
-class NCK(SystemTestsCommon):
+class NCK(SystemTestsCompareTwo):
def __init__(self, case):
- """
- initialize a test object
- """
- SystemTestsCommon.__init__(self, case)
-
- def build_phase(self, sharedlib_only=False, model_only=False):
- '''
- build can be called once (sharedlib_only and model_only both False)
- or twice (once with each true)
- This test requires a sharedlib build for both phases
- we must handle both cases correctly
- '''
- exeroot = self._case.get_value("EXEROOT")
- cime_model = CIME.utils.get_model()
- if not model_only:
- machpes1 = os.path.join("LockedFiles","env_mach_pes.orig.xml")
- if os.path.isfile(machpes1):
- shutil.copy(machpes1,"env_mach_pes.xml")
- else:
- shutil.copy("env_mach_pes.xml", machpes1)
-
- # Build two exectuables for this test, the first is a default build, the
- # second halves the number of tasks and runs two instances for each component
- # Lay all of the components out sequentially
- for bld in range(1,3):
- logging.warn("Starting bld %s"%bld)
- machpes = os.path.join("LockedFiles","env_mach_pes.NCK%s.xml"%bld)
- if model_only:
- # This file should have been created in the sharedlib_only phase
- shutil.copy(machpes,"env_mach_pes.xml")
- self._case.read_xml()
- else:
- for comp in ['ATM','OCN','WAV','GLC','ICE','ROF','LND']:
- self._case.set_value("NINST_%s"%comp, bld)
- ntasks = self._case.get_value("NTASKS_%s"%comp)
- if(bld == 1):
- if ( ntasks > 1 ):
- self._case.set_value("NTASKS_%s"%comp, int(ntasks/2))
- else:
- self._case.set_value("NTASKS_%s"%comp, ntasks*2)
- self._case.flush()
-
- case_setup(self._case, test_mode=True, reset=True)
- if not sharedlib_only:
- self.clean_build()
-
- self.build_indv(sharedlib_only=sharedlib_only, model_only=model_only)
- if not model_only:
- shutil.copy("env_mach_pes.xml", machpes)
- if not sharedlib_only:
- shutil.move("%s/%s.exe"%(exeroot,cime_model),"%s/%s.exe.NCK%s"%(exeroot,cime_model,bld))
- shutil.copy("env_build.xml",os.path.join("LockedFiles","env_build.NCK%s.xml"%bld))
-
- # Because mira/cetus interprets its run script differently than
- # other systems we need to copy the original env_mach_pes.xml back
-# shutil.copy(machpes1,"env_mach_pes.xml")
-# shutil.copy("env_mach_pes.xml",
-# os.path.join("LockedFiles","env_mach_pes.xml"))
-
- def run_phase(self):
- os.chdir(self._caseroot)
-
- exeroot = self._case.get_value("EXEROOT")
- cime_model = CIME.utils.get_model()
-
- # Reset beginning test settings
- expect(os.path.exists("LockedFiles/env_mach_pes.NCK1.xml"),
- "ERROR: LockedFiles/env_mach_pes.NCK1.xml does not exist\n"
- " this would been produced in the build - must run case.test_build")
-
- shutil.copy("LockedFiles/env_mach_pes.NCK1.xml", "env_mach_pes.xml")
- shutil.copy("env_mach_pes.xml", "LockedFiles/env_mach_pes.xml")
- shutil.copy("%s/%s.exe.NCK1" % (exeroot, cime_model),
- "%s/%s.exe" % (exeroot, cime_model))
- shutil.copy("LockedFiles/env_build.NCK1.xml", "env_build.xml")
- shutil.copy("env_build.xml", "LockedFiles/env_build.xml")
-
- stop_n = self._case.get_value("STOP_N")
- stop_option = self._case.get_value("STOP_OPTION")
-
- self._case.set_value("HIST_N", stop_n)
- self._case.set_value("HIST_OPTION", stop_option)
- self._case.set_value("CONTINUE_RUN", False)
- self._case.set_value("REST_OPTION", "none")
- self._case.flush()
-
- #======================================================================
- # do an initial run test with NINST 1
- #======================================================================
- logger.info("default: doing a %s %s with NINST1" % (stop_n, stop_option))
- self.run_indv()
-
- #======================================================================
- # do an initial run test with NINST 2
- # want to run on same pe counts per instance and same cpl pe count
- #======================================================================
-
- os.remove("%s/%s.exe" % (exeroot, cime_model))
- shutil.copy("%s/%s.exe.NCK2" % (exeroot, cime_model),
- "%s/%s.exe" % (exeroot, cime_model))
- shutil.copy("LockedFiles/env_build.NCK2.xml", "env_build.xml")
- shutil.copy("env_build.xml", "LockedFiles/env_build.xml")
- shutil.copy("LockedFiles/env_mach_pes.NCK2.xml", "env_mach_pes.xml")
- shutil.copy("env_mach_pes.xml", "LockedFiles/env_mach_pes.xml")
-
- logger.info("default: doing a %s %s with NINST2" % (stop_n, stop_option))
- self.run_indv(suffix="multiinst")
- self._component_compare_test("base", "multiinst")
+ self._comp_classes = []
+ SystemTestsCompareTwo.__init__(self, case,
+ separate_builds = True,
+ run_two_suffix = 'multiinst',
+ run_one_description = 'one instance',
+ run_two_description = 'two instances')
+
+ def _common_setup(self):
+ # We start by halving the number of tasks for both cases. This ensures
+ # that we use the same number of tasks per instance in both cases: For
+ # the two-instance case, we'll double this halved number, so you may
+ # think that the halving was unnecessary; but it's needed in case the
+ # original NTASKS was odd. (e.g., for NTASKS originally 15, we want to
+ # use NTASKS = int(15/2) * 2 = 14 tasks for case two.)
+ self._comp_classes = self._case.get_value("COMP_CLASSES").split(',')
+ self._comp_classes.remove("DRV")
+ for comp in self._comp_classes:
+ ntasks = self._case.get_value("NTASKS_%s"%comp)
+ if ( ntasks > 1 ):
+ self._case.set_value("NTASKS_%s"%comp, int(ntasks/2))
+
+ def _case_one_setup(self):
+ for comp in self._comp_classes:
+ self._case.set_value("NINST_%s"%comp, 1)
+
+ case_setup(self._case, test_mode=True, reset=True)
+
+ def _case_two_setup(self):
+ for comp in self._comp_classes:
+ self._case.set_value("NINST_%s"%comp, 2)
+ ntasks = self._case.get_value("NTASKS_%s"%comp)
+ self._case.set_value("NTASKS_%s"%comp, ntasks*2)
+
+ case_setup(self._case, test_mode=True, reset=True)
diff --git a/utils/python/CIME/SystemTests/system_tests_common.py b/utils/python/CIME/SystemTests/system_tests_common.py
index d101daeb220b..136c47b0c2e6 100644
--- a/utils/python/CIME/SystemTests/system_tests_common.py
+++ b/utils/python/CIME/SystemTests/system_tests_common.py
@@ -152,9 +152,6 @@ def run(self):
logger.warning(excmsg)
append_status(excmsg, sfile="TestStatus.log")
- # Always try to report, should NOT throw an exception
- self.report()
-
# Writing the run status should be the very last thing due to wait_for_tests
time_taken = time.time() - start_time
status = TEST_PASS_STATUS if success else TEST_FAIL_STATUS
@@ -225,12 +222,6 @@ def _coupler_log_indicates_run_complete(self):
logger.info("%s is not compressed, assuming run failed"%newestcpllogfile)
return False
- def report(self):
- """
- Please explain what kind of things happen in report
- """
- pass
-
def _component_compare_move(self, suffix):
comments = move(self._case, suffix)
append_status(comments, sfile="TestStatus.log")
diff --git a/utils/python/CIME/XML/pes.py b/utils/python/CIME/XML/pes.py
index 0efb853cfffe..01ea3e940cf8 100644
--- a/utils/python/CIME/XML/pes.py
+++ b/utils/python/CIME/XML/pes.py
@@ -67,7 +67,7 @@ def find_pes_layout(self, grid, compset, machine, pesize_opts='M'):
pes_ntasks, pes_nthrds, pes_rootpe, other_settings = {}, {}, {}, {}
for node in pe_select:
vid = node.tag
- logger.warn("vid is %s"%vid)
+ logger.debug("vid is %s"%vid)
if "ntasks" in vid:
for child in node:
pes_ntasks[child.tag.upper()] = child.text
diff --git a/utils/python/CIME/case.py b/utils/python/CIME/case.py
index cedf850bdde7..e56d642c3fe7 100644
--- a/utils/python/CIME/case.py
+++ b/utils/python/CIME/case.py
@@ -321,29 +321,29 @@ def _set_compset_and_pesfile(self, compset_name, user_compset=False, pesfile=Non
# Determine the compsets file for this component
compsets_filename = files.get_value("COMPSETS_SPEC_FILE", {"component":component})
- pes_filename = files.get_value("PES_SPEC_FILE" , {"component":component})
- tests_filename = files.get_value("TESTS_SPEC_FILE" , {"component":component}, resolved=False)
- tests_mods_dir = files.get_value("TESTS_MODS_DIR" , {"component":component}, resolved=False)
- user_mods_dir = files.get_value("USER_MODS_DIR" , {"component":component}, resolved=False)
# If the file exists, read it and see if there is a match for the compset alias or longname
if (os.path.isfile(compsets_filename)):
compsets = Compsets(compsets_filename)
match = compsets.get_compset_match(name=compset_name)
+ pesfile = files.get_value("PES_SPEC_FILE" , {"component":component})
if match is not None:
- self._pesfile = pes_filename
+ self._pesfile = pesfile
self._compsetsfile = compsets_filename
self._compsetname = match
- self.set_value("COMPSETS_SPEC_FILE" ,
+ tests_filename = files.get_value("TESTS_SPEC_FILE" , {"component":component}, resolved=False)
+ tests_mods_dir = files.get_value("TESTS_MODS_DIR" , {"component":component}, resolved=False)
+ user_mods_dir = files.get_value("USER_MODS_DIR" , {"component":component}, resolved=False)
+ self.set_lookup_value("COMPSETS_SPEC_FILE" ,
files.get_value("COMPSETS_SPEC_FILE", {"component":component}, resolved=False))
- self.set_value("TESTS_SPEC_FILE" , tests_filename)
- self.set_value("TESTS_MODS_DIR" , tests_mods_dir)
- self.set_value("USER_MODS_DIR" , user_mods_dir)
- self.set_value("PES_SPEC_FILE" ,
+ self.set_lookup_value("TESTS_SPEC_FILE" , tests_filename)
+ self.set_lookup_value("TESTS_MODS_DIR" , tests_mods_dir)
+ self.set_lookup_value("USER_MODS_DIR" , user_mods_dir)
+ self.set_lookup_value("PES_SPEC_FILE" ,
files.get_value("PES_SPEC_FILE" , {"component":component}, resolved=False))
logger.info("Compset longname is %s " %(match))
logger.info("Compset specification file is %s" %(compsets_filename))
- logger.info("Pes specification file is %s" %(pes_filename))
+ logger.info("Pes specification file is %s" %(pesfile))
return
if user_compset is True:
@@ -351,7 +351,7 @@ def _set_compset_and_pesfile(self, compset_name, user_compset=False, pesfile=Non
logger.warn("Could not find a compset match for either alias or longname in %s" %(compset_name))
self._compsetname = compset_name
self._pesfile = pesfile
- self.set_value("PES_SPEC_FILE", pesfile)
+ self.set_lookup_value("PES_SPEC_FILE", pesfile)
else:
expect(False,
"Could not find a compset match for either alias or longname in %s" %(compset_name))
@@ -821,6 +821,7 @@ def apply_user_mods(self, user_mods_dir=None):
else:
user_mods_path = self.get_value('USER_MODS_DIR')
user_mods_path = os.path.join(user_mods_path, user_mods_dir)
+ self.set_value("USER_MODS_FULLPATH",user_mods_path)
ninst_vals = {}
for i in xrange(1,len(self._component_classes)):
comp_class = self._component_classes[i]
diff --git a/utils/python/CIME/case_run.py b/utils/python/CIME/case_run.py
index eb3c0df001a9..89d4bfd3ca20 100644
--- a/utils/python/CIME/case_run.py
+++ b/utils/python/CIME/case_run.py
@@ -1,8 +1,6 @@
from CIME.XML.standard_module_setup import *
from CIME.case_submit import submit
-from CIME.XML.files import Files
-from CIME.XML.component import Component
from CIME.XML.machines import Machines
from CIME.utils import append_status, touch, gzip_existing_file
from CIME.check_lockedfiles import check_lockedfiles
@@ -329,19 +327,8 @@ def save_logs(case, lid):
caseroot = case.get_value("CASEROOT")
rundir = case.get_value("RUNDIR")
-
- # get components
- files = Files()
- config_file = files.get_value("CONFIG_DRV_FILE")
- component = Component(config_file)
- comps = [x.lower() for x in component.get_valid_model_components()]
- comps = [x.replace('drv', 'cpl') for x in comps]
- model = [case.get_value("MODEL")]
- comps = comps + model
-
- # for each component, compress log files and copy to logdir
- for comp in comps:
- logfile = os.path.join(rundir, comp + '.log.' + lid)
+ logfiles = glob.glob(os.path.join(rundir,"*.log.%s"%(lid)))
+ for logfile in logfiles:
if os.path.isfile(logfile):
logfile_gz = gzip_existing_file(logfile)
shutil.copy(logfile_gz,
diff --git a/utils/python/CIME/case_setup.py b/utils/python/CIME/case_setup.py
index 4713e70c6061..fed306c2a2d7 100644
--- a/utils/python/CIME/case_setup.py
+++ b/utils/python/CIME/case_setup.py
@@ -258,12 +258,16 @@ def _case_setup_impl(case, caseroot, casebaseid, clean=False, test_mode=False, r
_build_usernl_files(case, "drv", "cpl")
- if case.get_value("TEST"):
+ user_mods_path = case.get_value("USER_MODS_USE_CASE")
+ if user_mods_path is not None:
+ apply_user_mods(caseroot, user_mods_path=user_mods_path, ninst=ninst)
+ elif case.get_value("TEST"):
test_mods = parse_test_name(casebaseid)[6]
if test_mods is not None:
user_mods_path = os.path.join(case.get_value("TESTS_MODS_DIR"), test_mods)
apply_user_mods(caseroot, user_mods_path=user_mods_path, ninst=ninst)
+
# Run preview namelists for scripts
logger.info("preview_namelists")
preview_namelists(case)
diff --git a/utils/python/CIME/hist_utils.py b/utils/python/CIME/hist_utils.py
index 395af87fa952..31258c33eb1e 100644
--- a/utils/python/CIME/hist_utils.py
+++ b/utils/python/CIME/hist_utils.py
@@ -25,11 +25,12 @@ def _get_all_hist_files(testcase, model, from_dir, suffix=""):
test_hists.extend(glob.glob("%s/%s.%s*.h.*.nc%s" % (from_dir, testcase, model, suffix)))
# suffix == "" implies baseline comparison, baseline hist files have simpler names
+
if suffix == "":
- test_hists.extend(glob.glob("%s/%s.h.nc" % (from_dir, model)))
- test_hists.extend(glob.glob("%s/%s.h?.nc" % (from_dir, model)))
- test_hists.extend(glob.glob("%s/%s.h.*.nc" % (from_dir, model)))
- test_hists.extend(glob.glob("%s/%s.h?.*.nc" % (from_dir, model)))
+ test_hists.extend(glob.glob("%s/%s*.h.nc" % (from_dir, model)))
+ test_hists.extend(glob.glob("%s/%s*.h?.nc" % (from_dir, model)))
+ test_hists.extend(glob.glob("%s/%s*.h.*.nc" % (from_dir, model)))
+ test_hists.extend(glob.glob("%s/%s*.h?.*.nc" % (from_dir, model)))
test_hists.sort()
return test_hists
@@ -38,24 +39,9 @@ def _get_latest_hist_files(testcase, model, from_dir, suffix=""):
test_hists = _get_all_hist_files(testcase, model, from_dir, suffix)
latest_files = {}
histlist = []
- date_match = re.compile(r"(\d\d\d\d)-(\d\d)-(\d\d)-(\d\d\d\d\d).nc")
for hist in test_hists:
ext = get_extension(model, hist)
- if ext in latest_files:
- s1 = date_match.search(hist)
- if s1 is None:
- latest_files[ext] = hist
- continue
- (yr,month,day,time) = s1.group(1,2,3,4)
- s2 = date_match.search(latest_files[ext])
- (pyr,pmonth,pday,ptime) = s2.group(1,2,3,4)
- if yr > pyr or (yr == pyr and month > pmonth) or \
- (yr == pyr and month == pmonth and day > pday) or \
- (yr == pyr and month == pmonth and day == pday and time > ptime):
- latest_files[ext] = hist
- logger.debug("ext %s hist %s %s"%(ext,hist,latest_files))
- else:
- latest_files[ext] = hist
+ latest_files[ext] = hist
for key in latest_files.keys():
histlist.append(latest_files[key])
@@ -93,6 +79,7 @@ def move(case, suffix):
return comments
+
def _hists_match(model, hists1, hists2, suffix1="", suffix2=""):
"""
return (num in set 1 but not 2 , num in set 2 but not 1, matchups)
@@ -105,14 +92,30 @@ def _hists_match(model, hists1, hists2, suffix1="", suffix2=""):
>>> hists2 = ['cpl.h2.nc.SUF2', 'cpl.h3.nc.SUF2', 'cpl.h4.nc.SUF2']
>>> _hists_match('cpl', hists1, hists2, 'SUF1', 'SUF2')
(['FOO.G.cpl.h1.nc.SUF1'], ['cpl.h4.nc.SUF2'], [('FOO.G.cpl.h2.nc.SUF1', 'cpl.h2.nc.SUF2'), ('FOO.G.cpl.h3.nc.SUF1', 'cpl.h3.nc.SUF2')])
+ >>> hists1 = ['cam.h0.1850-01-08-00000.nc']
+ >>> hists2 = ['cam_0001.h0.1850-01-08-00000.nc','cam_0002.h0.1850-01-08-00000.nc']
+ >>> _hists_match('cam', hists1, hists2, '', '')
+ ([], [], [('cam.h0.1850-01-08-00000.nc', 'cam_0001.h0.1850-01-08-00000.nc'), ('cam.h0.1850-01-08-00000.nc', 'cam_0002.h0.1850-01-08-00000.nc')])
+ >>> hists1 = ['cam_0001.h0.1850-01-08-00000.nc.base','cam_0002.h0.1850-01-08-00000.nc.base']
+ >>> hists2 = ['cam_0001.h0.1850-01-08-00000.nc.rest','cam_0002.h0.1850-01-08-00000.nc.rest']
+ >>> _hists_match('cam', hists1, hists2, 'base', 'rest')
+ ([], [], [('cam_0001.h0.1850-01-08-00000.nc.base', 'cam_0001.h0.1850-01-08-00000.nc.rest'), ('cam_0002.h0.1850-01-08-00000.nc.base', 'cam_0002.h0.1850-01-08-00000.nc.rest')])
"""
normalized1, normalized2 = [], []
- for hists, suffix, normalized in [(hists1, suffix1, normalized1), (hists2, suffix2, normalized2)]:
+ multi_normalized1, multi_normalized2 = [], []
+ multiinst = False
+
+ for hists, suffix, normalized, multi_normalized in [(hists1, suffix1, normalized1, multi_normalized1), (hists2, suffix2, normalized2, multi_normalized2)]:
for hist in hists:
normalized_name = hist[hist.rfind(model):]
- if suffix1 != "":
- expect(normalized_name.endswith(suffix), "How did '%s' hot have suffix '%s'" % (hist, suffix))
- normalized_name = normalized_name[:len(normalized_name) - len(suffix)]
+ if suffix != "":
+ expect(normalized_name.endswith(suffix), "How did '%s' not have suffix '%s'" % (hist, suffix))
+ normalized_name = normalized_name[:len(normalized_name) - len(suffix) - 1]
+
+ m = re.search("(.+)_[0-9]{4}(.+.nc)",normalized_name)
+ if m is not None:
+ multiinst = True
+ multi_normalized.append(m.group(1)+m.group(2))
normalized.append(normalized_name)
@@ -123,9 +126,38 @@ def _hists_match(model, hists1, hists2, suffix1="", suffix2=""):
two_not_one = sorted([hists2[normalized2.index(item)] for item in set_of_2_not_1])
both = set(normalized1) & set(normalized2)
+
match_ups = sorted([ (hists1[normalized1.index(item)], hists2[normalized2.index(item)]) for item in both])
- expect(len(match_ups) + len(set_of_1_not_2) == len(hists1), "Programming error1")
- expect(len(match_ups) + len(set_of_2_not_1) == len(hists2), "Programming error2")
+
+ # Special case - comparing multiinstance to single instance files
+
+ if multi_normalized1 != multi_normalized2:
+ # in this case hists1 contains multiinstance hists2 does not
+ if set(multi_normalized1) == set(normalized2):
+ for idx, norm_hist1 in enumerate(multi_normalized1):
+ for idx1, hist2 in enumerate(hists2):
+ norm_hist2 = normalized2[idx1]
+ if norm_hist1 == norm_hist2:
+ match_ups.append((hists1[idx], hist2))
+ if hist2 in two_not_one:
+ two_not_one.remove(hist2)
+ if hists1[idx] in one_not_two:
+ one_not_two.remove(hists1[idx])
+ # in this case hists2 contains multiinstance hists1 does not
+ if set(multi_normalized2) == set(normalized1):
+ for idx, norm_hist2 in enumerate(multi_normalized2):
+ for idx1, hist1 in enumerate(hists1):
+ norm_hist1 = normalized1[idx1]
+ if norm_hist2 == norm_hist1:
+ match_ups.append((hist1, hists2[idx]))
+ if hist1 in one_not_two:
+ one_not_two.remove(hist1)
+ if hists2[idx] in two_not_one:
+ two_not_one.remove(hists2[idx])
+
+ if not multiinst:
+ expect(len(match_ups) + len(set_of_1_not_2) == len(hists1), "Programming error1")
+ expect(len(match_ups) + len(set_of_2_not_1) == len(hists2), "Programming error2")
return one_not_two, two_not_one, match_ups
@@ -138,12 +170,13 @@ def _compare_hists(case, from_dir1, from_dir2, suffix1="", suffix2=""):
num_compared = 0
comments = "Comparing hists for case '%s' dir1='%s', suffix1='%s', dir2='%s' suffix2='%s'\n" % \
(testcase, from_dir1, suffix1, from_dir2, suffix2)
-
+ multiinst_cpl_compare = False
for model in _iter_model_file_substrs(case):
+ if model == 'cpl' and suffix2 == 'multiinst':
+ multiinst_cpl_compare = True
comments += " comparing model '%s'\n" % model
hists1 = _get_latest_hist_files(testcase, model, from_dir1, suffix1)
hists2 = _get_latest_hist_files(testcase, model, from_dir2, suffix2)
-
if len(hists1) == 0 and len(hists2) == 0:
comments += " no hist files found for model %s\n" % model
continue
@@ -157,8 +190,9 @@ def _compare_hists(case, from_dir1, from_dir2, suffix1="", suffix2=""):
all_success = False
num_compared += len(match_ups)
+
for hist1, hist2 in match_ups:
- success, cprnc_comments = cprnc(hist1, hist2, case, from_dir1)
+ success, cprnc_comments = cprnc(model, hist1, hist2, case, from_dir1, multiinst_cpl_compare)
if success:
comments += " %s matched %s\n" % (hist1, hist2)
else:
@@ -184,7 +218,7 @@ def compare_test(case, suffix1, suffix2):
return _compare_hists(case, rundir, rundir, suffix1, suffix2)
-def cprnc(file1, file2, case, rundir):
+def cprnc(model, file1, file2, case, rundir, multiinst_cpl_compare=False):
"""
Run cprnc to compare two individual nc files
@@ -197,8 +231,29 @@ def cprnc(file1, file2, case, rundir):
"""
cprnc_exe = case.get_value("CCSM_CPRNC")
basename = os.path.basename(file1)
- stat, out, _ = run_cmd("%s -m %s %s 2>&1 | tee %s/%s.cprnc.out" % (cprnc_exe, file1, file2, rundir, basename))
- return (stat == 0 and "files seem to be IDENTICAL" in out, out)
+ multiinst_regex = re.compile(r'.*%s[^_]*(_[0-9]{4})[.]h.?[.][^.]+?[.]nc' % model)
+ mstr = ''
+ mstr1 = ''
+ mstr2 = ''
+ # If one is a multiinstance file but the other is not add an instance string
+ m1 = multiinst_regex.match(file1)
+ m2 = multiinst_regex.match(file2)
+ if m1 is not None:
+ mstr1 = m1.group(1)
+ if m2 is not None:
+ mstr2 = m2.group(1)
+ if mstr1 != mstr2:
+ mstr = mstr1+mstr2
+
+ stat, out, _ = run_cmd("%s -m %s %s 2>&1 | tee %s/%s%s.cprnc.out" % (cprnc_exe, file1, file2, rundir, basename, mstr))
+ if multiinst_cpl_compare:
+ # In a multiinstance test the cpl hist file will have a different number of
+ # dimensions and so cprnc will indicate that the files seem to be DIFFERENT
+ # in this case we only want to check that the fields we are able to compare
+ # have no differences.
+ return (stat == 0 and " 0 had non-zero differences" in out, out)
+ else:
+ return (stat == 0 and "files seem to be IDENTICAL" in out, out)
def compare_baseline(case, baseline_dir=None):
"""
@@ -242,19 +297,28 @@ def get_extension(model, filepath):
'hi'
>>> get_extension("cpl", "TESTRUNDIFF_Mmpi-serial.f19_g16_rx1.A.melvin_gnu.C.fake_testing_only_20160816_164150-20160816_164240.cpl.h.nc")
'h'
+ >>> get_extension("clm","clm2_0002.h0.1850-01-06-00000.nc")
+ '0002.h0'
"""
basename = os.path.basename(filepath)
- ext_regex = re.compile(r'.*%s.*[.](h.?)([.][^.]+)?[.]nc' % model)
+ ext_regex = re.compile(r'.*%s[^_]*_?([0-9]{4})?[.](h.?)([.][^.]+)?[.]nc' % model)
+
m = ext_regex.match(basename)
expect(m is not None, "Failed to get extension for file '%s'" % filepath)
- return m.groups()[0]
+ if m.group(1) is not None:
+ result = m.group(1)+'.'+m.group(2)
+ else:
+ result = m.group(2)
-def generate_baseline(case, baseline_dir=None):
+ return result
+
+def generate_baseline(case, baseline_dir=None, allow_baseline_overwrite=False):
"""
copy the current test output to baseline result
case - The case containing the hist files to be copied into baselines
baseline_dir - Optionally, specify a specific baseline dir, otherwise it will be computed from case config
+ allow_baseline_overwrite must be true to generate baselines to an existing directory.
returns (SUCCESS, comments)
"""
@@ -269,6 +333,10 @@ def generate_baseline(case, baseline_dir=None):
if not os.path.isdir(basegen_dir):
os.makedirs(basegen_dir)
+ if (os.path.isdir(os.path.join(basegen_dir,testcase)) and
+ not allow_baseline_overwrite):
+ expect(False, " Cowardly refusing to overwrite existing baseline directory")
+
comments = "Generating baselines into '%s'\n" % basegen_dir
num_gen = 0
for model in _iter_model_file_substrs(case):
@@ -282,6 +350,16 @@ def generate_baseline(case, baseline_dir=None):
if os.path.exists(baseline):
os.remove(baseline)
+ #special care for multi-instance cases,
+ #only keep first instance and
+ #remove instance string from filename
+ m = re.search("(.*%s.*)_([0-9]{4})(.h.*)"%model, baseline)
+ if m is not None:
+ if m.group(2) != '0001':
+ continue
+ baseline = m.group(1)+m.group(3)
+
+ logger.debug("Found multiinstance hist file %s"%hist)
shutil.copy(hist, baseline)
comments += " generating baseline '%s' from file %s\n" % (baseline, hist)
diff --git a/utils/python/CIME/test_scheduler.py b/utils/python/CIME/test_scheduler.py
index ca75cd852f14..9bb336eb999e 100644
--- a/utils/python/CIME/test_scheduler.py
+++ b/utils/python/CIME/test_scheduler.py
@@ -46,7 +46,7 @@ def __init__(self, test_names, test_data=None,
###########################################################################
self._cime_root = CIME.utils.get_cime_root()
self._cime_model = CIME.utils.get_model()
-
+ self._allow_baseline_overwrite = allow_baseline_overwrite
self._save_timing = save_timing
self._queue = queue
self._test_data = {} if test_data is None else test_data # Format: {test_name -> {data_name -> data}}
@@ -80,7 +80,7 @@ def __init__(self, test_names, test_data=None,
self._test_root = self._test_root.replace("$PROJECT", self._project)
self._test_root = os.path.abspath(self._test_root)
- self._test_id = test_id if test_id is not None else CIME.utils.get_utc_timestamp()
+ self._test_id = test_id if test_id is not None else CIME.utils.get_timestamp()
self._compiler = self._machobj.get_default_compiler() if compiler is None else compiler
@@ -114,11 +114,16 @@ def __init__(self, test_names, test_data=None,
"Missing baseline comparison directory %s" % full_baseline_dir)
# the following is to assure that the existing generate directory is not overwritten
- if self._baseline_gen_name and not allow_baseline_overwrite:
+ if self._baseline_gen_name:
full_baseline_dir = os.path.join(self._baseline_root, self._baseline_gen_name)
- expect(not os.path.isdir(full_baseline_dir),
- "Refusing to overwrite existing baseline directory, use -o to force overwrite %s" % full_baseline_dir)
-
+ existing_baselines = []
+ for test_name in test_names:
+ test_baseline = os.path.join(full_baseline_dir, test_name)
+ if os.path.isdir(test_baseline):
+ existing_baselines.append(test_baseline)
+ expect(allow_baseline_overwrite or len(existing_baselines) == 0,
"Baseline directories already exist %s\n"\
"Use --allow-baseline-overwrite to avoid this error"%existing_baselines)
else:
self._baseline_root = None
@@ -368,7 +373,7 @@ def _xml_phase(self, test):
if self._baseline_gen_name:
test_argv += " -generate %s" % self._baseline_gen_name
basegen_case_fullpath = os.path.join(self._baseline_root,self._baseline_gen_name, test)
- logger.warn("basegen_case is %s"%basegen_case_fullpath)
+ logger.debug("basegen_case is %s"%basegen_case_fullpath)
envtest.set_value("BASELINE_NAME_GEN", self._baseline_gen_name)
envtest.set_value("BASEGEN_CASE", os.path.join(self._baseline_gen_name, test))
if self._baseline_cmp_name:
diff --git a/utils/python/CIME/utils.py b/utils/python/CIME/utils.py
index fbf7a675eb1f..09d054b1d259 100644
--- a/utils/python/CIME/utils.py
+++ b/utils/python/CIME/utils.py
@@ -489,14 +489,17 @@ def find_proc_id(proc_name=None,
return list(rv)
-def get_utc_timestamp(timestamp_format="%Y%m%d_%H%M%S"):
+def get_timestamp(timestamp_format="%Y%m%d_%H%M%S", utc_time=False):
"""
- Get a string representing the current UTC time in format: YYMMDD_HHMMSS
+ Get a string representing the current time in format: YYMMDD_HHMMSS
+ (local time by default, or UTC if utc_time=True).
The format can be changed if needed.
"""
- utc_time_tuple = time.gmtime()
- return time.strftime(timestamp_format, utc_time_tuple)
+ if utc_time:
+ time_tuple = time.gmtime()
+ else:
+ time_tuple = time.localtime()
+ return time.strftime(timestamp_format, time_tuple)
def get_project(machobj=None):
"""
diff --git a/utils/python/jenkins_generic_job.py b/utils/python/jenkins_generic_job.py
index 8f595c1f823d..6fe3fcf6d4aa 100644
--- a/utils/python/jenkins_generic_job.py
+++ b/utils/python/jenkins_generic_job.py
@@ -107,7 +107,7 @@ def jenkins_generic_job(generate_baselines, submit_to_cdash, no_batch,
batch_args = "--no-batch" if no_batch else ""
pjob_arg = "" if parallel_jobs is None else "-j %d" % parallel_jobs
- test_id = "%s_%s" % (test_id_root, CIME.utils.get_utc_timestamp())
+ test_id = "%s_%s" % (test_id_root, CIME.utils.get_timestamp())
create_test_cmd = "./create_test %s --test-root %s -t %s %s %s %s" % \
(test_suite, test_root, test_id, baseline_args, batch_args, pjob_arg)
diff --git a/utils/python/tests/scripts_regression_tests.py b/utils/python/tests/scripts_regression_tests.py
index a10342641ee6..dd4f5e3d280f 100755
--- a/utils/python/tests/scripts_regression_tests.py
+++ b/utils/python/tests/scripts_regression_tests.py
@@ -169,7 +169,7 @@ def setUp(self):
self._do_teardown = []
def test_createnewcase(self):
- testdir = os.path.join(self._testroot, 'scripts_regression_tests.testcreatenewcase.%s'% CIME.utils.get_utc_timestamp())
+ testdir = os.path.join(self._testroot, 'scripts_regression_tests.testcreatenewcase.%s'% CIME.utils.get_timestamp())
if os.path.exists(testdir):
shutil.rmtree(testdir)
@@ -182,7 +182,7 @@ def test_createnewcase(self):
self._do_teardown.append(testdir)
def test_user_mods(self):
- testdir = os.path.join(self._testroot, 'scripts_regression_tests.testusermods.%s'% CIME.utils.get_utc_timestamp())
+ testdir = os.path.join(self._testroot, 'scripts_regression_tests.testusermods.%s'% CIME.utils.get_timestamp())
if os.path.exists(testdir):
shutil.rmtree(testdir)
@@ -218,10 +218,10 @@ class M_TestWaitForTests(unittest.TestCase):
def setUp(self):
###########################################################################
self._testroot = MACHINE.get_value("CESMSCRATCHROOT")
- self._testdir_all_pass = os.path.join(self._testroot, 'scripts_regression_tests.testdir_all_pass.%s'% CIME.utils.get_utc_timestamp())
- self._testdir_with_fail = os.path.join(self._testroot, 'scripts_regression_tests.testdir_with_fail.%s'% CIME.utils.get_utc_timestamp())
- self._testdir_unfinished = os.path.join(self._testroot, 'scripts_regression_tests.testdir_unfinished.%s'% CIME.utils.get_utc_timestamp())
- self._testdir_unfinished2 = os.path.join(self._testroot, 'scripts_regression_tests.testdir_unfinished2.%s'% CIME.utils.get_utc_timestamp())
+ self._testdir_all_pass = os.path.join(self._testroot, 'scripts_regression_tests.testdir_all_pass.%s'% CIME.utils.get_timestamp())
+ self._testdir_with_fail = os.path.join(self._testroot, 'scripts_regression_tests.testdir_with_fail.%s'% CIME.utils.get_timestamp())
+ self._testdir_unfinished = os.path.join(self._testroot, 'scripts_regression_tests.testdir_unfinished.%s'% CIME.utils.get_timestamp())
+ self._testdir_unfinished2 = os.path.join(self._testroot, 'scripts_regression_tests.testdir_unfinished2.%s'% CIME.utils.get_timestamp())
testdirs = [self._testdir_all_pass, self._testdir_with_fail, self._testdir_unfinished, self._testdir_unfinished2]
for testdir in testdirs:
if os.path.exists(testdir):
@@ -422,7 +422,7 @@ def setUp(self):
###########################################################################
self._thread_error = None
self._unset_proxy = setup_proxy()
- self._baseline_name = "fake_testing_only_%s" % CIME.utils.get_utc_timestamp()
+ self._baseline_name = "fake_testing_only_%s" % CIME.utils.get_timestamp()
self._machine = MACHINE.get_machine_name()
self._baseline_area = MACHINE.get_value("CCSM_BASELINE")
self._testroot = MACHINE.get_value("CESMSCRATCHROOT")
@@ -483,10 +483,10 @@ def test_create_test_rebless_namelist(self):
else:
genarg = "-g %s -o"%self._baseline_name
comparg = "-c %s"%self._baseline_name
- self.simple_test(True, "%s -n -t %s-%s" % (genarg,self._baseline_name, CIME.utils.get_utc_timestamp()))
+ self.simple_test(True, "%s -n -t %s-%s" % (genarg,self._baseline_name, CIME.utils.get_timestamp()))
# Basic namelist compare
- self.simple_test(True, "%s -n -t %s-%s" % (comparg, self._baseline_name, CIME.utils.get_utc_timestamp()))
+ self.simple_test(True, "%s -n -t %s-%s" % (comparg, self._baseline_name, CIME.utils.get_timestamp()))
# Modify namelist
fake_nl = """
@@ -508,13 +508,13 @@ def test_create_test_rebless_namelist(self):
nl_file.write(fake_nl)
# Basic namelist compare should now fail
- self.simple_test(False, "%s -n -t %s-%s" % (comparg, self._baseline_name, CIME.utils.get_utc_timestamp()))
+ self.simple_test(False, "%s -n -t %s-%s" % (comparg, self._baseline_name, CIME.utils.get_timestamp()))
# Regen
- self.simple_test(True, "%s -n -t %s-%s" % (genarg, self._baseline_name, CIME.utils.get_utc_timestamp()))
+ self.simple_test(True, "%s -n -t %s-%s" % (genarg, self._baseline_name, CIME.utils.get_timestamp()))
# Basic namelist compare should now pass again
- self.simple_test(True, "%s -n -t %s-%s" % (comparg, self._baseline_name, CIME.utils.get_utc_timestamp()))
+ self.simple_test(True, "%s -n -t %s-%s" % (comparg, self._baseline_name, CIME.utils.get_timestamp()))
###############################################################################
class O_TestTestScheduler(TestCreateTestCommon):
@@ -600,7 +600,7 @@ def test_a_phases(self):
def test_b_full(self):
###########################################################################
tests = update_acme_tests.get_full_test_names(["cime_test_only"], self._machine, self._compiler)
- test_id="%s-%s" % (self._baseline_name, CIME.utils.get_utc_timestamp())
+ test_id="%s-%s" % (self._baseline_name, CIME.utils.get_timestamp())
ct = TestScheduler(tests, test_id=test_id, no_batch=NO_BATCH)
build_fail_test = [item for item in tests if "TESTBUILDFAIL." in item][0]
@@ -664,6 +664,8 @@ class P_TestJenkinsGenericJob(TestCreateTestCommon):
###########################################################################
def setUp(self):
###########################################################################
+ if CIME.utils.get_model() != "acme":
+ self.skipTest("Skipping Jenkins tests. ACME feature")
TestCreateTestCommon.setUp(self)
# Need to run in a subdir in order to not have CTest clash. Name it
@@ -726,7 +728,7 @@ def test_jenkins_generic_job(self):
self.simple_test(True, "-t cime_test_only_pass -g -b %s" % self._baseline_name)
self.assert_num_leftovers()
- build_name = "jenkins_generic_job_pass_%s" % CIME.utils.get_utc_timestamp()
+ build_name = "jenkins_generic_job_pass_%s" % CIME.utils.get_timestamp()
self.simple_test(True, "-t cime_test_only_pass -b %s" % self._baseline_name, build_name=build_name)
self.assert_num_leftovers() # jenkins_generic_job should have automatically cleaned up leftovers from prior run
assert_dashboard_has_build(self, build_name)
@@ -734,7 +736,7 @@ def test_jenkins_generic_job(self):
###########################################################################
def test_jenkins_generic_job_kill(self):
###########################################################################
- build_name = "jenkins_generic_job_kill_%s" % CIME.utils.get_utc_timestamp()
+ build_name = "jenkins_generic_job_kill_%s" % CIME.utils.get_timestamp()
run_thread = threading.Thread(target=self.threaded_test, args=(False, " -t cime_test_only_slow_pass -b master --baseline-compare=no", build_name))
run_thread.daemon = True
run_thread.start()
@@ -788,16 +790,16 @@ def test_bless_test_results(self):
genarg = "-g %s -o"%self._baseline_name
comparg = "-c %s"%self._baseline_name
- self.simple_test(True, "%s -t %s-%s" % (genarg, self._baseline_name, CIME.utils.get_utc_timestamp()))
+ self.simple_test(True, "%s -t %s-%s" % (genarg, self._baseline_name, CIME.utils.get_timestamp()))
# Hist compare should pass
- self.simple_test(True, "%s -t %s-%s" % (comparg, self._baseline_name, CIME.utils.get_utc_timestamp()))
+ self.simple_test(True, "%s -t %s-%s" % (comparg, self._baseline_name, CIME.utils.get_timestamp()))
# Change behavior
os.environ["TESTRUNDIFF_ALTERNATE"] = "True"
# Hist compare should now fail
- test_id = "%s-%s" % (self._baseline_name, CIME.utils.get_utc_timestamp())
+ test_id = "%s-%s" % (self._baseline_name, CIME.utils.get_timestamp())
self.simple_test(False, "%s -t %s" % (comparg, test_id))
# compare_test_results should detect the fail
@@ -814,7 +816,7 @@ def test_bless_test_results(self):
run_cmd_no_fail("%s/bless_test_results --hist-only --force -b %s -t %s" % (TOOLS_DIR, self._baseline_name, test_id))
# Hist compare should now pass again
- self.simple_test(True, "%s -t %s-%s" % (comparg, self._baseline_name, CIME.utils.get_utc_timestamp()))
+ self.simple_test(True, "%s -t %s-%s" % (comparg, self._baseline_name, CIME.utils.get_timestamp()))
###############################################################################
@unittest.skip("Disabling this test until we figure out how to integrate ACME tests and CIME xml files.")
@@ -1004,7 +1006,8 @@ def setUp(self):
self._testdirs = []
self._do_teardown = []
- testdir = os.path.join(self._testroot, 'scripts_regression_tests.testscripts.%s'% CIME.utils.get_utc_timestamp())
+ testdir = os.path.join(self._testroot, 'scripts_regression_tests.testscripts.%s'% CIME.utils.get_timestamp())
+
if os.path.exists(testdir):
shutil.rmtree(testdir)