Merge branch 'azamat/theta/update-ne120-hires-atm-pes' (PR E3SM-Project#4662)

Update high-res F-compset PE-layout on Theta
Also, clean up and remove PE-layouts for decommissioned machines.

[BFB]
amametjanov committed Nov 15, 2021
2 parents ee4f5f0 + f953db7 commit d8ca1a9
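For context, a PE-layout entry in cime_config/allactive/config_pesall.xml pairs a grid/compset match with per-component MPI task, thread, and root-PE counts for a given machine. The sketch below shows only the general shape of such an entry; the grid and compset match strings and all counts are illustrative placeholders, not the values introduced by this PR.

<!-- Illustrative only: general structure of a PE-layout entry in config_pesall.xml.
     Grid/compset match strings and all counts are placeholders, not from this PR. -->
<grid name="a%ne120np4.*">
  <mach name="theta">
    <pes pesize="any" compset=".*CAM.*">
      <comment>hypothetical ne120 F-compset layout</comment>
      <ntasks>
        <ntasks_atm>8192</ntasks_atm>
        <ntasks_lnd>1024</ntasks_lnd>
        <ntasks_ice>1024</ntasks_ice>
        <ntasks_ocn>1024</ntasks_ocn>
        <ntasks_cpl>8192</ntasks_cpl>
      </ntasks>
      <nthrds>
        <nthrds_atm>2</nthrds_atm>
        <nthrds_lnd>2</nthrds_lnd>
        <nthrds_ice>2</nthrds_ice>
        <nthrds_ocn>2</nthrds_ocn>
        <nthrds_cpl>2</nthrds_cpl>
      </nthrds>
      <rootpe>
        <rootpe_atm>0</rootpe_atm>
        <rootpe_lnd>0</rootpe_lnd>
        <rootpe_ice>0</rootpe_ice>
        <rootpe_ocn>0</rootpe_ocn>
        <rootpe_cpl>0</rootpe_cpl>
      </rootpe>
    </pes>
  </mach>
</grid>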
Showing 11 changed files with 140 additions and 2,750 deletions.
1,635 changes: 114 additions & 1,521 deletions cime_config/allactive/config_pesall.xml

Large diffs are not rendered by default.

19 changes: 1 addition & 18 deletions cime_config/machines/config_batch.xml
@@ -220,7 +220,7 @@
</batch_system>
<!-- for lawrence livermore computing -->

<!-- for NERSC machines: edison,cori-haswell,cori-knl -->
<!-- for NERSC machines: cori-haswell,cori-knl -->
<batch_system type="nersc_slurm" >
<batch_query per_job_arg="-j">squeue</batch_query>
<batch_submit>sbatch</batch_submit>
@@ -314,23 +314,6 @@
</queues>
</batch_system>

<batch_system MACH="eos" type="pbs" >
<directives>
<directive>-A {{ project }}</directive>
<directive>-l nodes={{ num_nodes }}</directive>
</directives>
<queues>
<queue walltimemax="00:30:00" default="true">batch</queue>
</queues>
</batch_system>

<batch_system MACH="edison" type="nersc_slurm" >
<queues>
<queue walltimemax="00:30:00" nodemax="512" strict="true">debug</queue>
<queue walltimemax="01:30:00" default="true">regular</queue>
</queues>
</batch_system>

<batch_system MACH="cori-haswell" type="nersc_slurm">
<directives>
<directive> --constraint=haswell</directive>
91 changes: 0 additions & 91 deletions cime_config/machines/config_machines.xml
@@ -2435,97 +2435,6 @@

</machine>

<machine MACH="eos">
<DESC>ORNL XC30, os is CNL, 16 pes/node, batch system is PBS</DESC>
<NODENAME_REGEX>eos</NODENAME_REGEX>
<OS>CNL</OS>
<COMPILERS>intel</COMPILERS>
<MPILIBS>mpich</MPILIBS>
<SAVE_TIMING_DIR>$ENV{PROJWORK}/$PROJECT</SAVE_TIMING_DIR>
<SAVE_TIMING_DIR_PROJECTS>cli115,cli127,cli106,csc190</SAVE_TIMING_DIR_PROJECTS>
<CIME_OUTPUT_ROOT>$ENV{HOME}/acme_scratch/$PROJECT</CIME_OUTPUT_ROOT>
<DIN_LOC_ROOT>/lustre/atlas1/cli900/world-shared/cesm/inputdata</DIN_LOC_ROOT>
<DIN_LOC_ROOT_CLMFORC>/lustre/atlas1/cli900/world-shared/cesm/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
<DOUT_S_ROOT>$ENV{MEMBERWORK}/$PROJECT/archive/$CASE</DOUT_S_ROOT>
<BASELINE_ROOT>/lustre/atlas1/cli900/world-shared/cesm/baselines/$COMPILER</BASELINE_ROOT>
<CCSM_CPRNC>/lustre/atlas1/cli900/world-shared/cesm/tools/cprnc/cprnc.eos</CCSM_CPRNC>
<GMAKE_J>8</GMAKE_J>
<TESTS>e3sm_developer</TESTS>
<BATCH_SYSTEM>pbs</BATCH_SYSTEM>
<SUPPORTED_BY>E3SM</SUPPORTED_BY>
<MAX_TASKS_PER_NODE>32</MAX_TASKS_PER_NODE>
<MAX_MPITASKS_PER_NODE>16</MAX_MPITASKS_PER_NODE>
<PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>
<mpirun mpilib="mpich">
<executable>aprun</executable>
<arguments>
<arg name="hyperthreading" default="2"> -j {{ hyperthreading }}</arg>
<arg name="tasks_per_numa"> -S {{ tasks_per_numa }}</arg>
<arg name="num_tasks"> -n {{ total_tasks }}</arg>
<arg name="tasks_per_node"> -N $MAX_MPITASKS_PER_NODE</arg>
<arg name="thread_count"> -d $ENV{OMP_NUM_THREADS}</arg>
<arg name="numa_node"> -cc numa_node</arg>
</arguments>
</mpirun>
<mpirun mpilib="mpi-serial">
<executable/>
</mpirun>
<module_system type="module">
<init_path lang="sh">$MODULESHOME/init/sh</init_path>
<init_path lang="csh">$MODULESHOME/init/csh</init_path>
<init_path lang="perl">$MODULESHOME/init/perl.pm</init_path>
<init_path lang="python">$MODULESHOME/init/python.py</init_path>
<cmd_path lang="sh">module</cmd_path>
<cmd_path lang="csh">module</cmd_path>
<cmd_path lang="perl">$MODULESHOME/bin/modulecmd perl</cmd_path>
<cmd_path lang="python">$MODULESHOME/bin/modulecmd python</cmd_path>
<modules>
<command name="rm">intel</command>
<command name="rm">cray</command>
<command name="rm">cray-parallel-netcdf</command>
<command name="rm">cray-libsci</command>
<command name="rm">cray-netcdf</command>
<command name="rm">cray-netcdf-hdf5parallel</command>
<command name="rm">netcdf</command>
</modules>
<modules compiler="intel">
<command name="load">intel/18.0.1.163</command>
<command name="load">papi</command>
</modules>
<modules compiler="cray">
<command name="load">PrgEnv-cray</command>
<command name="switch">cce cce/8.1.9</command>
<command name="load">cray-libsci/12.1.00</command>
</modules>
<modules compiler="gnu">
<command name="load">PrgEnv-gnu</command>
<command name="switch">gcc gcc/4.8.0</command>
<command name="load">cray-libsci/12.1.00</command>
</modules>
<modules mpilib="mpi-serial">
<command name="load">cray-netcdf/4.3.2</command>
</modules>
<modules mpilib="!mpi-serial">
<command name="load">cray-netcdf-hdf5parallel/4.3.3.1</command>
<command name="load">cray-parallel-netcdf/1.6.1</command>
</modules>
<modules>
<command name="load">cmake3/3.2.3</command>
<command name="load">python/2.7.9</command>
</modules>
</module_system>
<RUNDIR>$ENV{MEMBERWORK}/$PROJECT/$CASE/run</RUNDIR>
<EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
<environment_variables>
<env name="MPICH_ENV_DISPLAY">1</env>
<env name="MPICH_VERSION_DISPLAY">1</env>
<!-- This increases the stack size, which is necessary
for CICE to run threaded on this machine -->
<env name="OMP_STACKSIZE">64M</env>

</environment_variables>
</machine>

<machine MACH="grizzly">
<DESC>LANL Linux Cluster, 36 pes/node, batch system slurm</DESC>
<NODENAME_REGEX>gr-fe.*.lanl.gov</NODENAME_REGEX>
2 changes: 0 additions & 2 deletions cime_config/testmods_dirs/allactive/v1cmip6/shell_commands
@@ -1,5 +1,3 @@
#!/bin/bash
./xmlchange --append CAM_CONFIG_OPTS='-cosp'
./xmlchange --id BUDGETS --val TRUE
if [ `./xmlquery --value MACH` == cetus ]||[ `./xmlquery --value MACH` == mira ]; then sed s/64M/128M/ env_mach_specific.xml >tmp && mv tmp env_mach_specific.xml; fi
