From 30c6e5640a92c688e2262687b858b3457ff951ac Mon Sep 17 00:00:00 2001
From: James Foucar
Date: Thu, 16 Mar 2017 18:26:59 -0400
Subject: [PATCH 01/12] Numerous titan fixes.

1) Remove CESM_REPO.
2) Add better support for netcdf and pnetcdf in config*.xml
3) Pio1 needs to use the FindNetCDF.cmake from pio2
---
 cime_config/acme/machines/config_compilers.xml | 2 ++
 cime_config/acme/machines/config_machines.xml  | 9 +++------
 externals/pio1/pio/CMakeLists.txt              | 2 ++
 3 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/cime_config/acme/machines/config_compilers.xml b/cime_config/acme/machines/config_compilers.xml
index 97d61978c5a7..4738389048c1 100644
--- a/cime_config/acme/machines/config_compilers.xml
+++ b/cime_config/acme/machines/config_compilers.xml
@@ -823,6 +823,8 @@ for mct, etc.
   -O2
   -O2
+  $(NETCDFROOT)
+  $(PNETCDFROOT)
   --host=Linux
   lustre
   $(shell nf-config --flibs)
diff --git a/cime_config/acme/machines/config_machines.xml b/cime_config/acme/machines/config_machines.xml
index dad0a2d52bc3..bca7680ca3a2 100644
--- a/cime_config/acme/machines/config_machines.xml
+++ b/cime_config/acme/machines/config_machines.xml
@@ -1460,7 +1460,7 @@
   subversion
   subversion/1.8.3
   cmake
-  cmake/2.8.10.2
+  cmake3/3.6.0
@@ -1533,18 +1533,15 @@
   cray-parallel-netcdf/1.7.0
-  $COMPILER $MPILIB
-  $CESM_REPO
   1
   1
   1
   128M
+  /opt/cray/netcdf-hdf5parallel/4.4.1.1/PGI/15.3/
+  /opt/cray/parallel-netcdf/1.7.0/PGI/15.3
   128M
diff --git a/externals/pio1/pio/CMakeLists.txt b/externals/pio1/pio/CMakeLists.txt
index ae07c36cdd29..3b10c1a2cbda 100644
--- a/externals/pio1/pio/CMakeLists.txt
+++ b/externals/pio1/pio/CMakeLists.txt
@@ -30,6 +30,8 @@ endif()

 # Netcdf is required
+SET (CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/../../pio2/cmake" ${CMAKE_MODULE_PATH})
+
 #SET (NETCDF_FIND_COMPONENTS F90)
 FIND_PACKAGE(NetCDF "4.3.3" COMPONENTS C Fortran)
 IF (${NetCDF_Fortran_FOUND})

From 3c538f3a2b8bdd09c4f681032b77f6606a60f71d Mon Sep 17 00:00:00 2001
From: James Foucar
Date: Tue, 21 Mar 2017 15:37:37 -0400
Subject: [PATCH 02/12] Fix aprun command construction

---
 utils/python/CIME/case.py | 34 +++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)

diff --git a/utils/python/CIME/case.py b/utils/python/CIME/case.py
index fc6b1542f0db..828560bc7cff 100644
--- a/utils/python/CIME/case.py
+++ b/utils/python/CIME/case.py
@@ -98,13 +98,14 @@ def __init__(self, case_root=None, read_only=True):
         self.thread_count = None
+        self.total_tasks = None
         self.tasks_per_node = None
         self.num_nodes = None
         self.tasks_per_numa = None
         self.cores_per_task = None
         # check if case has been configured and if so initialize derived
         if self.get_value("CASEROOT") is not None:
-            self.initialize_derived_attributes()
+            self._initialize_derived_attributes()

     def check_if_comp_var(self, vid):
@@ -117,20 +118,20 @@ def check_if_comp_var(self, vid):
             return vid, comp, iscompvar
         return vid, comp, iscompvar

-    def initialize_derived_attributes(self):
+    def _initialize_derived_attributes(self):
         """
         These are derived variables which can be used in the config_*
         files for variable substitution using the {{ var }} syntax
         """
         env_mach_pes = self.get_env("mach_pes")
         comp_classes = self.get_values("COMP_CLASSES")
-        total_tasks = env_mach_pes.get_total_tasks(comp_classes)
         pes_per_node = self.get_value("PES_PER_NODE")

+        self.total_tasks = env_mach_pes.get_total_tasks(comp_classes)
         self.thread_count = env_mach_pes.get_max_thread_count(comp_classes)
-        self.tasks_per_node = env_mach_pes.get_tasks_per_node(total_tasks, self.thread_count)
-        logger.debug("total_tasks %s thread_count %s"%(total_tasks, self.thread_count))
-        self.num_nodes = env_mach_pes.get_total_nodes(total_tasks, self.thread_count)
+        self.tasks_per_node = env_mach_pes.get_tasks_per_node(self.total_tasks, self.thread_count)
+        logger.debug("total_tasks %s thread_count %s"%(self.total_tasks, self.thread_count))
+        self.num_nodes = env_mach_pes.get_total_nodes(self.total_tasks, self.thread_count)

         self.tasks_per_numa = int(math.ceil(self.tasks_per_node / 2.0))
         smt_factor = max(1,int(self.get_value("MAX_TASKS_PER_NODE") / pes_per_node))
@@ -138,8 +139,6 @@
         threads_per_core = 1 if (threads_per_node <= pes_per_node) else smt_factor
         self.cores_per_task = self.thread_count / threads_per_core

-        return total_tasks
-
     # Define __enter__ and __exit__ so that we can use this as a context manager
     # and force a flush on exit.
     def __enter__(self):
@@ -802,10 +801,10 @@ def configure(self, compset_name, grid_name, machine_name=None,
         if test:
             self.set_value("TEST",True)

-        total_tasks = self.initialize_derived_attributes()
+        self._initialize_derived_attributes()

         # Make sure that parallel IO is not specified if total_tasks==1
-        if total_tasks == 1:
+        if self.total_tasks == 1:
             for compclass in self._component_classes:
                 key = "PIO_TYPENAME_%s"%compclass
                 pio_typename = self.get_value(key)
@@ -813,7 +812,7 @@
                 self.set_value(key, "netcdf")

         # Set TOTAL_CORES
-        self.set_value("TOTAL_CORES", total_tasks * self.cores_per_task )
+        self.set_value("TOTAL_CORES", self.total_tasks * self.cores_per_task )

     def get_compset_var_settings(self):
         compset_obj = Compsets(infile=self.get_value("COMPSETS_SPEC_FILE"))
@@ -1091,15 +1090,16 @@ def get_mpirun_cmd(self, job="case.run"):
             }

         executable, args = env_mach_specific.get_mpirun(self, mpi_attribs, job=job)
-        # special case for aprun if using < 1 full node
+
+        # special case for aprun
         if executable == "aprun":
-            totalpes = self.get_value("TOTALPES")
-            pes_per_node = self.get_value("PES_PER_NODE")
-            if totalpes < pes_per_node:
-                args["tasks_per_node"] = "-N "+str(totalpes)
+            args["aprun special args"] = ""
+            if self.get_value("COMPILER") == "intel" and self.tasks_per_node > 1:
+                args["aprun special args"] += " -S %d -cc numa_node" % self.tasks_per_numa

-        mpi_arg_string = " ".join(args.values())
+            args["aprun special args"] = " -n %d -N %d -d %d " % (self.total_tasks, self.tasks_per_node, self.thread_count)

+        mpi_arg_string = " ".join(args.values())
         if self.get_value("BATCH_SYSTEM") == "cobalt":
             mpi_arg_string += " : "
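
Note on patch 02: the derived case attributes map directly onto aprun flags: -n is the total MPI task count, -N the tasks per node, -d the depth (threads per task), and -S the tasks per NUMA domain, with -cc numa_node controlling CPU binding. The following standalone sketch of that mapping uses made-up values and is not code from the patch:

    import math

    # Illustrative derived attributes, standing in for what
    # _initialize_derived_attributes computes on a real case.
    total_tasks    = 32   # -n: total MPI tasks
    tasks_per_node = 8    # -N: MPI tasks per compute node
    thread_count   = 2    # -d: OpenMP threads per MPI task
    tasks_per_numa = int(math.ceil(tasks_per_node / 2.0))  # -S: tasks per NUMA domain

    aprun_args = " -n %d -N %d -d %d " % (total_tasks, tasks_per_node, thread_count)
    if tasks_per_node > 1:  # the patch adds -S/-cc only for the intel compiler
        aprun_args += " -S %d -cc numa_node" % tasks_per_numa
    print("aprun" + aprun_args)
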
From e5b31db49af990eb21587668479d27b4505b425b Mon Sep 17 00:00:00 2001
From: Jayesh Krishna
Date: Wed, 22 Mar 2017 16:08:12 -0400
Subject: [PATCH 03/12] Fixing walltime for titan debug queue

Fix the walltime format, and change the walltime to 1 hr for small
jobs that need to go to the debug queue.
---
 cime_config/acme/machines/config_batch.xml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/cime_config/acme/machines/config_batch.xml b/cime_config/acme/machines/config_batch.xml
index 556757d2c9cb..9b24540351ea 100644
--- a/cime_config/acme/machines/config_batch.xml
+++ b/cime_config/acme/machines/config_batch.xml
@@ -330,8 +330,8 @@
   -l nodes={{ num_nodes }}
-  batch
-  debug
+  batch
+  debug
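
Note on patch 03: PBS walltimes on titan are typically given as HH:MM:SS, which is the format this patch corrects. A minimal, hypothetical validity check (not code from the patch, and deliberately stricter than PBS itself, which also accepts MM:SS forms):

    import re

    # Accept H:MM:SS / HH:MM:SS; reject bare minutes or H:MM forms.
    WALLTIME_RE = re.compile(r"^\d{1,2}:\d{2}:\d{2}$")

    for walltime in ("1:00:00", "01:00:00", "2:00", "45"):
        print(walltime, "ok" if WALLTIME_RE.match(walltime) else "bad")
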
From f8b6c4f7c21dbc21af17b3b80f368bc1a706f208 Mon Sep 17 00:00:00 2001
From: James Foucar
Date: Wed, 22 Mar 2017 17:08:42 -0600
Subject: [PATCH 04/12] Re-introduce task_maker algorithm to compute aprun

---
 utils/python/CIME/aprun.py | 129 +++++++++++++++++++++++++++++++++++++
 utils/python/CIME/case.py  |   7 +-
 2 files changed, 131 insertions(+), 5 deletions(-)
 create mode 100755 utils/python/CIME/aprun.py

diff --git a/utils/python/CIME/aprun.py b/utils/python/CIME/aprun.py
new file mode 100755
index 000000000000..e409ae040cc8
--- /dev/null
+++ b/utils/python/CIME/aprun.py
@@ -0,0 +1,129 @@
+"""
+Aprun is far too complex to handle purely through XML. We need python
+code to compute and assemble aprun commands.
+"""
+
+from CIME.XML.standard_module_setup import *
+
+import math
+
+logger = logging.getLogger(__name__)
+
+###############################################################################
+def _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids,
+                                 max_tasks_per_node, pes_per_node,
+                                 pio_numtasks, pio_async_interface,
+                                 compiler, machine, run_exe):
+###############################################################################
+    """
+    No one really understands this code, but we can at least test it.
+
+    >>> ntasks = []
+    >>> nthreads = []
+    >>> rootpes = []
+    >>> pstrids = []
+    >>> max_tasks_per_node = 1
+    >>> pes_per_node = 1
+    >>> pio_num_tasks = 1
+    >>> pio_async_interface = True
+    >>> compiler = "pgi"
+    >>> machine = "titan"
+    >>> run_exe = "acme.exe"
+    >>> _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, max_tasks_per_node, pes_per_node, pio_numtasks, pio_async_interface, compiler, machine, run_exe)
+    ''
+    """
+    max_tasks_per_node = 1 if max_tasks_per_node < 1 else max_tasks_per_node
+
+    total_tasks = 0
+    for ntask, rootpe, pstrid in zip(ntasks, rootpes, pstrids):
+        tt = rootpe + (ntask - 1) * pstrid + 1
+        total_tasks = max(tt, total_tasks)
+
+    # Check if we need to add pio's tasks to the total task count
+    if pio_async_interface:
+        total_tasks += pio_numtasks if pio_numtasks > 0 else pes_per_node
+
+    # Compute max threads for each mpi task
+    maxt = [0] * total_tasks
+    for ntask, nthrd, rootpe, pstrid in zip(ntasks, nthreads, rootpes, pstrids):
+        c2 = 0
+        while c2 < ntask:
+            s = rootpe + c2 * pstrid
+            if nthrd > maxt[s]:
+                maxt[s] = nthrd
+
+            c2 += 1
+
+    logger.info("total tasks is: %s" % total_tasks)
+
+    # make sure all maxt values at least 1, don't know why we start at index 1
+    for c1 in xrange(1, total_tasks):
+        if maxt[c1] < 1:
+            maxt[c1] = 1
+
+    # Compute task and thread settings for batch commands
+    tasks_per_node, task_count, thread_count, max_thread_count, aprun = \
+        0, 1, maxt[0], maxt[0], ""
+    for c1 in xrange(1, total_tasks):
+        if maxt[c1] != thread_count:
+            tasks_per_node = min(pes_per_node, max_tasks_per_node / thread_count)
+
+            tasks_per_node = min(task_count, tasks_per_node)
+
+            # Compute for every subset
+            task_per_numa = int(math.ceil(tasks_per_node / 2.0))
+            # Option for Titan
+            if machine == "titan" and tasks_per_node > 1:
+                if compiler == "intel":
+                    aprun += " -S %d -cc numa_node " % task_per_numa
+                else:
+                    aprun += " -S %d " % task_per_numa
+
+            aprun += " -n %d -N %d -d %d %s :" % (task_count, tasks_per_node, thread_count, run_exe)
+
+            thread_count = maxt[c1]
+            max_thread_count = max(max_thread_count, maxt[c1])
+            task_count = 1
+
+        else:
+            task_count += 1
+
+    if pes_per_node > 0:
+        tasks_per_node = min(pes_per_node, max_tasks_per_node / thread_count)
+    else:
+        tasks_per_node = max_tasks_per_node / thread_count
+
+    tasks_per_node = min(task_count, tasks_per_node)
+
+    task_per_numa = int(math.ceil(tasks_per_node / 2.0))
+
+    # Special option for Titan or intel compiler
+    if compiler == "intel" or machine == "titan" and tasks_per_node > 1:
+        aprun += " -S %d -cc numa_node " % task_per_numa
+
+    aprun += " -n %d -N %d -d %d %s " % (task_count, tasks_per_node, thread_count, run_exe)
+
+    return aprun
+
+###############################################################################
+def get_aprun_cmd_for_case(case, run_exe):
+###############################################################################
+    """
+    Given a case, construct and return the aprun command
+    """
+    models = case.get_values("COMP_CLASSES")
+    ntasks, nthreads, rootpes, pstrids = [], [], [], []
+    for model in models:
+        model = "CPL" if model == "DRV" else model
+        for the_list, item_name in zip([ntasks, nthreads, rootpes, pstrids],
+                                       ["NTASKS", "NTHREADS", "ROOTPE", "PSTRID"]):
+            the_list.append(case.get_value("_".join([item_name, model])))
+
+    return _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids,
+                                        case.get_value("MAX_TASKS_PER_NODE"),
+                                        case.get_value("PES_PER_NODE"),
+                                        case.get_value("PIO_NUMTASKS"),
+                                        case.get_value("PIO_ASYNC_INTERFACE"),
+                                        case.get_value("COMPILER"),
+                                        case.get_value("MACH"),
+                                        run_exe)
diff --git a/utils/python/CIME/case.py b/utils/python/CIME/case.py
index 828560bc7cff..42f391091787 100644
--- a/utils/python/CIME/case.py
+++ b/utils/python/CIME/case.py
@@ -33,6 +33,7 @@
 from CIME.XML.env_batch import EnvBatch
 from CIME.user_mod_support import apply_user_mods
 from CIME.case_setup import case_setup
+from CIME.aprun import get_aprun_cmd_for_case

 logger = logging.getLogger(__name__)

@@ -1093,11 +1094,7 @@ def get_mpirun_cmd(self, job="case.run"):

         # special case for aprun
         if executable == "aprun":
-            args["aprun special args"] = ""
-            if self.get_value("COMPILER") == "intel" and self.tasks_per_node > 1:
-                args["aprun special args"] += " -S %d -cc numa_node" % self.tasks_per_numa
-
-            args["aprun special args"] = " -n %d -N %d -d %d " % (self.total_tasks, self.tasks_per_node, self.thread_count)
+            return get_aprun_cmd_for_case(self, run_exe)

         mpi_arg_string = " ".join(args.values())
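
Note on patch 04: the new entry point is get_aprun_cmd_for_case(case, run_exe). A hedged usage sketch follows; the case root path and executable name are hypothetical, and Case supports the context-manager protocol (the __enter__/__exit__ noted in patch 02), so the case is flushed on exit:

    from CIME.case import Case
    from CIME.aprun import get_aprun_cmd_for_case

    # Build the full aprun command for an already-configured case.
    with Case("/path/to/caseroot", read_only=True) as case:
        print(get_aprun_cmd_for_case(case, "acme.exe"))
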
From 4b6d0e5e4ab18e13003cc3af28c759fbcb2dea8f Mon Sep 17 00:00:00 2001
From: James Foucar
Date: Thu, 23 Mar 2017 12:22:31 -0600
Subject: [PATCH 05/12] Add unit test

---
 utils/python/CIME/aprun.py | 35 ++++++++++++++++++-----------------
 utils/python/CIME/case.py  | 12 ++++++------
 2 files changed, 24 insertions(+), 23 deletions(-)

diff --git a/utils/python/CIME/aprun.py b/utils/python/CIME/aprun.py
index e409ae040cc8..66359d856fea 100755
--- a/utils/python/CIME/aprun.py
+++ b/utils/python/CIME/aprun.py
@@ -18,19 +18,19 @@ def _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids,
     """
     No one really understands this code, but we can at least test it.

-    >>> ntasks = []
-    >>> nthreads = []
-    >>> rootpes = []
-    >>> pstrids = []
-    >>> max_tasks_per_node = 1
-    >>> pes_per_node = 1
-    >>> pio_num_tasks = 1
-    >>> pio_async_interface = True
+    >>> ntasks = [512, 675, 168, 512, 128, 168, 168, 512, 1]
+    >>> nthreads = [2, 2, 2, 2, 4, 2, 2, 2, 1]
+    >>> rootpes = [0, 0, 512, 0, 680, 512, 512, 0, 0]
+    >>> pstrids = [1, 1, 1, 1, 1, 1, 1, 1, 1]
+    >>> max_tasks_per_node = 16
+    >>> pes_per_node = 16
+    >>> pio_numtasks = -1
+    >>> pio_async_interface = False
     >>> compiler = "pgi"
     >>> machine = "titan"
     >>> run_exe = "acme.exe"
     >>> _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, max_tasks_per_node, pes_per_node, pio_numtasks, pio_async_interface, compiler, machine, run_exe)
-    ''
+    'aprun -S 4 -n 680 -N 8 -d 2 acme.exe : -S 2 -n 128 -N 4 -d 4 acme.exe '
     """
     max_tasks_per_node = 1 if max_tasks_per_node < 1 else max_tasks_per_node
@@ -57,13 +57,13 @@ def _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids,
     logger.info("total tasks is: %s" % total_tasks)

     # make sure all maxt values at least 1, don't know why we start at index 1
-    for c1 in xrange(1, total_tasks):
+    for c1 in xrange(0, total_tasks):
         if maxt[c1] < 1:
             maxt[c1] = 1

     # Compute task and thread settings for batch commands
     tasks_per_node, task_count, thread_count, max_thread_count, aprun = \
-        0, 1, maxt[0], maxt[0], ""
+        0, 1, maxt[0], maxt[0], "aprun"
     for c1 in xrange(1, total_tasks):
         if maxt[c1] != thread_count:
             tasks_per_node = min(pes_per_node, max_tasks_per_node / thread_count)
@@ -74,10 +74,9 @@ def _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids,
             task_per_numa = int(math.ceil(tasks_per_node / 2.0))
             # Option for Titan
             if machine == "titan" and tasks_per_node > 1:
+                aprun += " -S %d" % task_per_numa
                 if compiler == "intel":
-                    aprun += " -S %d -cc numa_node " % task_per_numa
-                else:
-                    aprun += " -S %d " % task_per_numa
+                    aprun += " -cc numa_node"

             aprun += " -n %d -N %d -d %d %s :" % (task_count, tasks_per_node, thread_count, run_exe)
@@ -97,9 +96,11 @@ def _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids,
     task_per_numa = int(math.ceil(tasks_per_node / 2.0))

-    # Special option for Titan or intel compiler
-    if compiler == "intel" or machine == "titan" and tasks_per_node > 1:
-        aprun += " -S %d -cc numa_node " % task_per_numa
+    # Special option for Titan with intel compiler
+    if machine == "titan" and tasks_per_node > 1:
+        aprun += " -S %d" % task_per_numa
+        if compiler == "intel":
+            aprun += " -cc numa_node"

     aprun += " -n %d -N %d -d %d %s " % (task_count, tasks_per_node, thread_count, run_exe)
diff --git a/utils/python/CIME/case.py b/utils/python/CIME/case.py
index 42f391091787..a4d49aaa5465 100644
--- a/utils/python/CIME/case.py
+++ b/utils/python/CIME/case.py
@@ -1094,14 +1094,14 @@ def get_mpirun_cmd(self, job="case.run"):

         # special case for aprun
         if executable == "aprun":
-            return get_aprun_cmd_for_case(self, run_exe)
-
-        mpi_arg_string = " ".join(args.values())
+            return get_aprun_cmd_for_case(self, run_exe) + " " + run_misc_suffix
+        else:
+            mpi_arg_string = " ".join(args.values())

-        if self.get_value("BATCH_SYSTEM") == "cobalt":
-            mpi_arg_string += " : "
+            if self.get_value("BATCH_SYSTEM") == "cobalt":
+                mpi_arg_string += " : "

-        return "%s %s %s" % (executable if executable is not None else "", mpi_arg_string, run_suffix)
+            return "%s %s %s" % (executable if executable is not None else "", mpi_arg_string, run_suffix)
From e395279346d4c7a9545bcc1a946de244e3e94671 Mon Sep 17 00:00:00 2001
From: James Foucar
Date: Thu, 23 Mar 2017 12:52:50 -0600
Subject: [PATCH 06/12] Add test for intel compiler

---
 utils/python/CIME/aprun.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/utils/python/CIME/aprun.py b/utils/python/CIME/aprun.py
index 66359d856fea..a79289fb9436 100755
--- a/utils/python/CIME/aprun.py
+++ b/utils/python/CIME/aprun.py
@@ -31,6 +31,9 @@ def _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids,
     >>> run_exe = "acme.exe"
     >>> _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, max_tasks_per_node, pes_per_node, pio_numtasks, pio_async_interface, compiler, machine, run_exe)
     'aprun -S 4 -n 680 -N 8 -d 2 acme.exe : -S 2 -n 128 -N 4 -d 4 acme.exe '
+    >>> compiler = "intel"
+    >>> _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids, max_tasks_per_node, pes_per_node, pio_numtasks, pio_async_interface, compiler, machine, run_exe)
+    'aprun -S 4 -cc numa_node -n 680 -N 8 -d 2 acme.exe : -S 2 -cc numa_node -n 128 -N 4 -d 4 acme.exe '
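
Note on patch 06: with both doctests in place, the module can be checked without a batch system. One hedged way to run them, assuming utils/python is on sys.path and using the Python 2 interpreter CIME used at the time (the module relies on xrange):

    import doctest
    import CIME.aprun

    # Quiet on success; failures print expected vs. got for each doctest.
    results = doctest.testmod(CIME.aprun, verbose=False)
    print("failed=%d attempted=%d" % (results.failed, results.attempted))
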
From c7e3abf6eb6cea7d838d312fc65ad4d762b1d036 Mon Sep 17 00:00:00 2001
From: James Foucar
Date: Thu, 23 Mar 2017 12:55:08 -0600
Subject: [PATCH 07/12] Remove outdated comment

---
 utils/python/CIME/aprun.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/utils/python/CIME/aprun.py b/utils/python/CIME/aprun.py
index a79289fb9436..994886336cd8 100755
--- a/utils/python/CIME/aprun.py
+++ b/utils/python/CIME/aprun.py
@@ -59,7 +59,7 @@ def _get_aprun_cmd_for_case_impl(ntasks, nthreads, rootpes, pstrids,
     logger.info("total tasks is: %s" % total_tasks)

-    # make sure all maxt values at least 1, don't know why we start at index 1
+    # make sure all maxt values at least 1
     for c1 in xrange(0, total_tasks):
         if maxt[c1] < 1:
             maxt[c1] = 1

From 3f38b20a52fd3c22a889d58eac53c2a37a753bc2 Mon Sep 17 00:00:00 2001
From: James Foucar
Date: Thu, 23 Mar 2017 16:47:20 -0600
Subject: [PATCH 08/12] Fix spelling of NTHRDS

---
 utils/python/CIME/aprun.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/utils/python/CIME/aprun.py b/utils/python/CIME/aprun.py
index 994886336cd8..25b4f3c13150 100755
--- a/utils/python/CIME/aprun.py
+++ b/utils/python/CIME/aprun.py
@@ -120,7 +120,7 @@ def get_aprun_cmd_for_case(case, run_exe):
     for model in models:
         model = "CPL" if model == "DRV" else model
         for the_list, item_name in zip([ntasks, nthreads, rootpes, pstrids],
-                                       ["NTASKS", "NTHREADS", "ROOTPE", "PSTRID"]):
+                                       ["NTASKS", "NTHRDS", "ROOTPE", "PSTRID"]):
             the_list.append(case.get_value("_".join([item_name, model])))
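
Note on patch 08: the spelling matters because the per-component lookup key is built by joining the item name to the component class, so "NTHREADS" would query XML variables that do not exist (get_value then returns nothing useful). The keys actually generated, with an illustrative component list:

    # XML variables are named e.g. NTASKS_ATM, NTHRDS_ATM, ROOTPE_ATM, PSTRID_ATM.
    for model in ("ATM", "LND", "ICE", "OCN", "CPL"):
        for item_name in ("NTASKS", "NTHRDS", "ROOTPE", "PSTRID"):
            print("_".join([item_name, model]))
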
From f378bedc79224b68b7a23c4a1e746749a09295f5 Mon Sep 17 00:00:00 2001
From: James Foucar
Date: Fri, 24 Mar 2017 16:42:40 -0400
Subject: [PATCH 09/12] Make netcdfroot less hardcoded for titan

My usual trick of using 'which ncdump' does not work on titan.
---
 cime_config/acme/machines/config_machines.xml | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/cime_config/acme/machines/config_machines.xml b/cime_config/acme/machines/config_machines.xml
index bca7680ca3a2..7e7311193b06 100644
--- a/cime_config/acme/machines/config_machines.xml
+++ b/cime_config/acme/machines/config_machines.xml
@@ -1540,8 +1540,14 @@
   1
   1
   128M
-  /opt/cray/netcdf-hdf5parallel/4.4.1.1/PGI/15.3/
-  /opt/cray/parallel-netcdf/1.7.0/PGI/15.3
+  /opt/cray/netcdf-hdf5parallel/4.4.1.1/PGI/15.3/
+  /opt/cray/parallel-netcdf/1.7.0/PGI/15.3
+  /opt/cray/netcdf-hdf5parallel/4.4.1.1/PGI/15.3/
+  /opt/cray/parallel-netcdf/1.7.0/PGI/15.3
+  /opt/cray/netcdf-hdf5parallel/4.4.1.1/INTEL/15.0/
+  /opt/cray/parallel-netcdf/1.7.0/INTEL/15.0
+  /opt/cray/netcdf-hdf5parallel/4.4.1.1/CRAY/8.3/
+  /opt/cray/parallel-netcdf/1.7.0/CRAY/8.3
   128M

From 9794385cfdf0ad7e9c4a97f47deff8bb9369566d Mon Sep 17 00:00:00 2001
From: James Foucar
Date: Fri, 24 Mar 2017 15:53:47 -0600
Subject: [PATCH 10/12] Fix macro maker

Needed to pass in compiler and mpilib.
Also add intel to melvin for testing.
---
 cime_config/acme/machines/config_compilers.xml | 11 +++++++++++
 cime_config/acme/machines/config_machines.xml  |  5 +++--
 utils/python/CIME/BuildTools/configure.py      |  2 +-
 3 files changed, 15 insertions(+), 3 deletions(-)

diff --git a/cime_config/acme/machines/config_compilers.xml b/cime_config/acme/machines/config_compilers.xml
index 4738389048c1..94e4a123e92e 100644
--- a/cime_config/acme/machines/config_compilers.xml
+++ b/cime_config/acme/machines/config_compilers.xml
@@ -644,6 +644,17 @@ for mct, etc.
   /projects/install/rhel6-x86_64/ACME/AlbanyTrilinos/Albany/build/install

+  -O2
+  -O2
+  --host=Linux
+  $(NETCDFROOT)
+  $(PNETCDFROOT)
+  $(shell $(NETCDF_PATH)/bin/nf-config --flibs) -lblas -llapack
+  -lstdc++ -lmpi_cxx
+  /projects/install/rhel6-x86_64/ACME/AlbanyTrilinos/Albany/build/install
+
   -O2
   -O2
diff --git a/cime_config/acme/machines/config_machines.xml b/cime_config/acme/machines/config_machines.xml
index 7e7311193b06..fcc8b436ecfa 100644
--- a/cime_config/acme/machines/config_machines.xml
+++ b/cime_config/acme/machines/config_machines.xml
@@ -507,7 +507,7 @@
   sonproxy.sandia.gov:80
   acme_developer
   LINUX
-  gnu
+  gnu,intel
   openmpi,mpi-serial
   $ENV{HOME}/acme/scratch
   $CIME_OUTPUT_ROOT/$CASE/run
@@ -545,7 +545,8 @@
   sems-env
   sems-git
   sems-python/2.7.9
-  sems-gcc/5.3.0
+  sems-gcc/5.3.0
+  sems-intel/16.0.3
   sems-openmpi/1.8.7
   sems-cmake/2.8.12
   sems-netcdf/4.3.2/parallel
diff --git a/utils/python/CIME/BuildTools/configure.py b/utils/python/CIME/BuildTools/configure.py
index 3d53f8938b91..b610b087aa3c 100644
--- a/utils/python/CIME/BuildTools/configure.py
+++ b/utils/python/CIME/BuildTools/configure.py
@@ -37,7 +37,7 @@ def configure(machobj, output_dir, macros_format, compiler, mpilib, debug, sysos
     """
     # Macros generation.
     suffixes = {'Makefile': 'make', 'CMake': 'cmake'}
-    macro_maker = Compilers(machobj)
+    macro_maker = Compilers(machobj, compiler=compiler, mpilib=mpilib)
     for form in macros_format:
         out_file_name = os.path.join(output_dir,"Macros."+suffixes[form])
         macro_maker.write_macros_file(macros_file=out_file_name, output_format=suffixes[form])
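
Note on patch 10: without explicit compiler/mpilib arguments, Compilers(machobj) presumably fell back to the machine's defaults, so the generated Macros could disagree with what the user requested. A hedged sketch of the corrected call path (module paths assumed from CIME's layout; the machine and compiler values are examples, not defaults):

    from CIME.XML.machines import Machines
    from CIME.XML.compilers import Compilers

    # Pin the Macros file to an explicit compiler/mpilib pair.
    machobj = Machines(machine="melvin")
    macro_maker = Compilers(machobj, compiler="intel", mpilib="openmpi")
    macro_maker.write_macros_file(macros_file="Macros.make", output_format="make")
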
From fe4224ab104195c1cc4f24bee9f1f915406c9978 Mon Sep 17 00:00:00 2001
From: Patrick Worley
Date: Sat, 25 Mar 2017 10:55:48 -0400
Subject: [PATCH 11/12] Update modules to support netcdf4 files on Titan

The current cray-netcdf-hdf5parallel module for Titan (4.4.0) does not
support netcdf4 files when used with PGI. It is also out of sync with
the explicit path for the environment variable NETCDFROOT. Updated the
module to cray-netcdf-hdf5parallel/4.4.1.1.

[BFB] - Bit-For-Bit
---
 cime_config/acme/machines/config_machines.xml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/cime_config/acme/machines/config_machines.xml b/cime_config/acme/machines/config_machines.xml
index fcc8b436ecfa..6431ddd94f95 100644
--- a/cime_config/acme/machines/config_machines.xml
+++ b/cime_config/acme/machines/config_machines.xml
@@ -1527,10 +1527,10 @@
-  cray-netcdf/4.4.0
+  cray-netcdf/4.4.1.1
-  cray-netcdf-hdf5parallel/4.4.0
+  cray-netcdf-hdf5parallel/4.4.1.1
   cray-parallel-netcdf/1.7.0

From f15461dc09f612e2d9be188d41c9bfca7a9f28d5 Mon Sep 17 00:00:00 2001
From: James Foucar
Date: Mon, 27 Mar 2017 12:19:00 -0600
Subject: [PATCH 12/12] Fix typo in melvin config change

---
 cime_config/acme/machines/config_machines.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cime_config/acme/machines/config_machines.xml b/cime_config/acme/machines/config_machines.xml
index 6431ddd94f95..86963b7d63d6 100644
--- a/cime_config/acme/machines/config_machines.xml
+++ b/cime_config/acme/machines/config_machines.xml
@@ -545,7 +545,7 @@
   sems-env
   sems-git
   sems-python/2.7.9
-  sems-gcc/5.3.0
+  sems-gcc/5.3.0
   sems-intel/16.0.3
   sems-openmpi/1.8.7
   sems-cmake/2.8.12