From f8acf41271f6e4809ce37fcbf7f464487b1a8c09 Mon Sep 17 00:00:00 2001
From: Ryan Forsyth
Date: Wed, 9 Oct 2024 17:23:21 -0500
Subject: [PATCH] Improve input validation and testing

---
 ...st_min_case_add_dependencies_chrysalis.cfg |   6 +-
 ...e_e3sm_diags_depend_on_climo_chrysalis.cfg |   6 +-
 ..._diags_depend_on_climo_mvm_1_chrysalis.cfg |   4 +-
 ..._diags_depend_on_climo_mvm_2_chrysalis.cfg |   8 +-
 ...case_e3sm_diags_depend_on_ts_chrysalis.cfg |   6 +-
 ...3sm_diags_depend_on_ts_mvm_1_chrysalis.cfg |   4 +-
 ...3sm_diags_depend_on_ts_mvm_2_chrysalis.cfg |   8 +-
 ...ase_e3sm_diags_diurnal_cycle_chrysalis.cfg |   6 +-
 ...sm_diags_diurnal_cycle_mvm_1_chrysalis.cfg |   4 +-
 ...sm_diags_diurnal_cycle_mvm_2_chrysalis.cfg |   8 +-
 ...3sm_diags_lat_lon_land_mvm_1_chrysalis.cfg |   4 +-
 ...3sm_diags_lat_lon_land_mvm_2_chrysalis.cfg |   8 +-
 ...n_case_e3sm_diags_streamflow_chrysalis.cfg |   6 +-
 ..._e3sm_diags_streamflow_mvm_1_chrysalis.cfg |   4 +-
 ..._e3sm_diags_streamflow_mvm_2_chrysalis.cfg |   8 +-
 ..._case_e3sm_diags_tc_analysis_chrysalis.cfg |   8 +-
 ...e3sm_diags_tc_analysis_mvm_1_chrysalis.cfg |   6 +-
 ...e3sm_diags_tc_analysis_mvm_2_chrysalis.cfg |  10 +-
 ...m_diags_tropical_subseasonal_chrysalis.cfg |   4 +-
 ...s_tropical_subseasonal_mvm_1_chrysalis.cfg |   4 +-
 ...s_tropical_subseasonal_mvm_2_chrysalis.cfg |   8 +-
 ...se_global_time_series_custom_chrysalis.cfg |   4 +-
 ...lobal_time_series_original_8_chrysalis.cfg |   4 +-
 ...ime_series_original_8_no_ocn_chrysalis.cfg |   4 +-
 .../test_min_case_ilamb_chrysalis.cfg         |   4 +-
 ...est_min_case_ilamb_land_only_chrysalis.cfg |   4 +-
 .../test_min_case_mpas_analysis_chrysalis.cfg |   4 +-
 ...e_tc_analysis_simultaneous_1_chrysalis.cfg |   6 +-
 ...e_tc_analysis_simultaneous_2_chrysalis.cfg |   6 +-
 .../test_weekly_bundles_chrysalis.cfg         |  22 +-
 ...test_weekly_comprehensive_v2_chrysalis.cfg |  68 ++-
 ...test_weekly_comprehensive_v3_chrysalis.cfg |  76 ++-
 .../update_weekly_expected_files_chrysalis.sh |   6 +-
 tests/integration/template_weekly_bundles.cfg |  14 +-
 .../template_weekly_comprehensive_v2.cfg      |  56 +-
 .../template_weekly_comprehensive_v3.cfg      |  70 ++-
 tests/integration/test_weekly.py              |   4 +-
 tests/integration/utils.py                    |  14 +-
 tests/test_sections.py                        |  38 +-
 tests/test_zppy_e3sm_diags.py                 | 573 ++++++++++++++++++
 tests/test_zppy_global_time_series.py         | 203 +++++++
 tests/test_zppy_ilamb.py                      |  47 ++
 tests/test_zppy_utils.py                      | 497 +++++++++++++++
 zppy/__main__.py                              | 145 +++--
 zppy/bundle.py                                |  53 +-
 zppy/climo.py                                 | 111 ++--
 zppy/e3sm_diags.py                            | 458 +++++++-------
 zppy/global_time_series.py                    | 286 ++++-----
 zppy/ilamb.py                                 | 143 +++--
 zppy/mpas_analysis.py                         | 115 ++--
 zppy/tc_analysis.py                           |  71 +--
 zppy/templates/default.ini                    |  49 +-
 zppy/templates/e3sm_diags.bash                |   8 +-
 zppy/templates/tc_analysis.bash               |   7 +
 zppy/ts.py                                    | 117 ++--
 zppy/utils.py                                 | 471 +++++++-----
 56 files changed, 2712 insertions(+), 1176 deletions(-)
 mode change 100644 => 100755 tests/integration/generated/update_weekly_expected_files_chrysalis.sh
 create mode 100644 tests/test_zppy_e3sm_diags.py
 create mode 100644 tests/test_zppy_global_time_series.py
 create mode 100644 tests/test_zppy_ilamb.py
 create mode 100644 tests/test_zppy_utils.py

diff --git a/tests/integration/generated/test_min_case_add_dependencies_chrysalis.cfg b/tests/integration/generated/test_min_case_add_dependencies_chrysalis.cfg
index 6c0559c4..7f6a2b84 100644
--- a/tests/integration/generated/test_min_case_add_dependencies_chrysalis.cfg
+++ b/tests/integration/generated/test_min_case_add_dependencies_chrysalis.cfg
@@ -14,12 +14,12 @@ environment_commands = ""
 input =
/lcrc/group/e3sm2/ac.wlin/E3SMv3/v3.LR.historical_0051 input_subdir = archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" -output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_add_dependencies_output/unique_id/v3.LR.historical_0051" +output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_add_dependencies_output/test-pr628-20241011v12/v3.LR.historical_0051" partition = "debug" qos = "regular" # ts is in 5 year increments ts_num_years = 5 -www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_add_dependencies_www/unique_id" +www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_add_dependencies_www/test-pr628-20241011v12" # We want to produce diagnostics for 10 years. years = "1985:1995:10", @@ -81,7 +81,7 @@ years = "1985:1995:5" [e3sm_diags] active = True -environment_commands = "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_20240731" +environment_commands = "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_1003" grid = '180x360_aave' multiprocessing = True num_workers = 8 diff --git a/tests/integration/generated/test_min_case_e3sm_diags_depend_on_climo_chrysalis.cfg b/tests/integration/generated/test_min_case_e3sm_diags_depend_on_climo_chrysalis.cfg index 416771ae..ea3a163a 100644 --- a/tests/integration/generated/test_min_case_e3sm_diags_depend_on_climo_chrysalis.cfg +++ b/tests/integration/generated/test_min_case_e3sm_diags_depend_on_climo_chrysalis.cfg @@ -6,10 +6,10 @@ environment_commands = "" input = /lcrc/group/e3sm2/ac.wlin/E3SMv3/v3.LR.historical_0051 input_subdir = archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" -output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_depend_on_climo_output/unique_id/v3.LR.historical_0051" +output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_depend_on_climo_output/test-pr628-20241011v12/v3.LR.historical_0051" partition = "debug" qos = "regular" -www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_depend_on_climo_www/unique_id" +www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_depend_on_climo_www/test-pr628-20241011v12" years = "1985:1989:4", [climo] @@ -24,7 +24,7 @@ walltime = "00:30:00" [e3sm_diags] active = True -environment_commands = "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_20240731" +environment_commands = "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_1003" grid = '180x360_aave' multiprocessing = True num_workers = 8 diff --git a/tests/integration/generated/test_min_case_e3sm_diags_depend_on_climo_mvm_1_chrysalis.cfg b/tests/integration/generated/test_min_case_e3sm_diags_depend_on_climo_mvm_1_chrysalis.cfg index dab95aba..e96de2d8 100644 --- a/tests/integration/generated/test_min_case_e3sm_diags_depend_on_climo_mvm_1_chrysalis.cfg +++ b/tests/integration/generated/test_min_case_e3sm_diags_depend_on_climo_mvm_1_chrysalis.cfg @@ -6,10 +6,10 @@ environment_commands = "" input = /lcrc/group/e3sm2/ac.wlin/E3SMv3/v3.LR.historical_0051 input_subdir = archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" -output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_depend_on_climo_mvm_1_output/unique_id/v3.LR.historical_0051" +output = 
"/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_depend_on_climo_mvm_1_output/test-pr628-20241011v12/v3.LR.historical_0051" partition = "debug" qos = "regular" -www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_depend_on_climo_mvm_1_www/unique_id" +www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_depend_on_climo_mvm_1_www/test-pr628-20241011v12" years = "1985:1989:4", [climo] diff --git a/tests/integration/generated/test_min_case_e3sm_diags_depend_on_climo_mvm_2_chrysalis.cfg b/tests/integration/generated/test_min_case_e3sm_diags_depend_on_climo_mvm_2_chrysalis.cfg index 036e17a7..a80f0723 100644 --- a/tests/integration/generated/test_min_case_e3sm_diags_depend_on_climo_mvm_2_chrysalis.cfg +++ b/tests/integration/generated/test_min_case_e3sm_diags_depend_on_climo_mvm_2_chrysalis.cfg @@ -6,10 +6,10 @@ environment_commands = "" input = /lcrc/group/e3sm2/ac.wlin/E3SMv3/v3.LR.historical_0051 input_subdir = archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" -output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_depend_on_climo_mvm_2_output/unique_id/v3.LR.historical_0051" +output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_depend_on_climo_mvm_2_output/test-pr628-20241011v12/v3.LR.historical_0051" partition = "debug" qos = "regular" -www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_depend_on_climo_mvm_2_www/unique_id" +www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_depend_on_climo_mvm_2_www/test-pr628-20241011v12" years = "1995:1999:4", [climo] @@ -24,7 +24,7 @@ walltime = "00:30:00" [e3sm_diags] active = True -environment_commands = "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_20240731" +environment_commands = "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_1003" grid = '180x360_aave' multiprocessing = True num_workers = 8 @@ -42,7 +42,7 @@ walltime = "5:00:00" ref_start_yr = 1985 ref_years = "1985-1988", # Use _1 as reference - reference_data_path = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_depend_on_climo_mvm_1_output/unique_id/v3.LR.historical_0051/post/atm/180x360_aave/clim" + reference_data_path = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_depend_on_climo_mvm_1_output/test-pr628-20241011v12/v3.LR.historical_0051/post/atm/180x360_aave/clim" run_type = "model_vs_model" sets = "lat_lon","zonal_mean_xy","zonal_mean_2d","polar","cosp_histogram","meridional_mean_2d","annual_cycle_zonal_mean","zonal_mean_2d_stratosphere", short_ref_name = "v3.LR.historical_0051" diff --git a/tests/integration/generated/test_min_case_e3sm_diags_depend_on_ts_chrysalis.cfg b/tests/integration/generated/test_min_case_e3sm_diags_depend_on_ts_chrysalis.cfg index 5a686029..7c7dc8ba 100644 --- a/tests/integration/generated/test_min_case_e3sm_diags_depend_on_ts_chrysalis.cfg +++ b/tests/integration/generated/test_min_case_e3sm_diags_depend_on_ts_chrysalis.cfg @@ -6,10 +6,10 @@ environment_commands = "" input = /lcrc/group/e3sm2/ac.wlin/E3SMv3/v3.LR.historical_0051 input_subdir = archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" -output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_depend_on_ts_output/unique_id/v3.LR.historical_0051" +output = 
"/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_depend_on_ts_output/test-pr628-20241011v12/v3.LR.historical_0051" partition = "debug" qos = "regular" -www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_depend_on_ts_www/unique_id" +www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_depend_on_ts_www/test-pr628-20241011v12" years = "1985:1989:4", [ts] @@ -25,7 +25,7 @@ walltime = "00:30:00" [e3sm_diags] active = True -environment_commands = "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_20240731" +environment_commands = "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_1003" grid = '180x360_aave' multiprocessing = True num_workers = 8 diff --git a/tests/integration/generated/test_min_case_e3sm_diags_depend_on_ts_mvm_1_chrysalis.cfg b/tests/integration/generated/test_min_case_e3sm_diags_depend_on_ts_mvm_1_chrysalis.cfg index fdb3025a..1212fd24 100644 --- a/tests/integration/generated/test_min_case_e3sm_diags_depend_on_ts_mvm_1_chrysalis.cfg +++ b/tests/integration/generated/test_min_case_e3sm_diags_depend_on_ts_mvm_1_chrysalis.cfg @@ -6,10 +6,10 @@ environment_commands = "" input = /lcrc/group/e3sm2/ac.wlin/E3SMv3/v3.LR.historical_0051 input_subdir = archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" -output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_depend_on_ts_mvm_1_output/unique_id/v3.LR.historical_0051" +output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_depend_on_ts_mvm_1_output/test-pr628-20241011v12/v3.LR.historical_0051" partition = "debug" qos = "regular" -www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_depend_on_ts_mvm_1_www/unique_id" +www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_depend_on_ts_mvm_1_www/test-pr628-20241011v12" years = "1985:1989:4", [ts] diff --git a/tests/integration/generated/test_min_case_e3sm_diags_depend_on_ts_mvm_2_chrysalis.cfg b/tests/integration/generated/test_min_case_e3sm_diags_depend_on_ts_mvm_2_chrysalis.cfg index 72219931..5c339953 100644 --- a/tests/integration/generated/test_min_case_e3sm_diags_depend_on_ts_mvm_2_chrysalis.cfg +++ b/tests/integration/generated/test_min_case_e3sm_diags_depend_on_ts_mvm_2_chrysalis.cfg @@ -6,10 +6,10 @@ environment_commands = "" input = /lcrc/group/e3sm2/ac.wlin/E3SMv3/v3.LR.historical_0051 input_subdir = archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" -output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_depend_on_ts_mvm_2_output/unique_id/v3.LR.historical_0051" +output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_depend_on_ts_mvm_2_output/test-pr628-20241011v12/v3.LR.historical_0051" partition = "debug" qos = "regular" -www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_depend_on_ts_mvm_2_www/unique_id" +www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_depend_on_ts_mvm_2_www/test-pr628-20241011v12" years = "1995:1999:4", [ts] @@ -25,7 +25,7 @@ walltime = "00:30:00" [e3sm_diags] active = True -environment_commands = "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_20240731" +environment_commands = "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_1003" grid = '180x360_aave' multiprocessing = True 
num_workers = 8 @@ -44,7 +44,7 @@ walltime = "5:00:00" ref_start_yr = 1985 ref_years = "1985-1988", # Use _1 as reference - reference_data_path = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_depend_on_ts_mvm_1_output/unique_id/v3.LR.historical_0051/post/atm/180x360_aave/clim" + reference_data_path = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_depend_on_ts_mvm_1_output/test-pr628-20241011v12/v3.LR.historical_0051/post/atm/180x360_aave/clim" run_type = "model_vs_model" sets = "enso_diags","qbo", short_ref_name = "v3.LR.historical_0051" diff --git a/tests/integration/generated/test_min_case_e3sm_diags_diurnal_cycle_chrysalis.cfg b/tests/integration/generated/test_min_case_e3sm_diags_diurnal_cycle_chrysalis.cfg index 481b7fc1..55340dd1 100644 --- a/tests/integration/generated/test_min_case_e3sm_diags_diurnal_cycle_chrysalis.cfg +++ b/tests/integration/generated/test_min_case_e3sm_diags_diurnal_cycle_chrysalis.cfg @@ -6,10 +6,10 @@ environment_commands = "" input = /lcrc/group/e3sm2/ac.wlin/E3SMv3/v3.LR.historical_0051 input_subdir = archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" -output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_diurnal_cycle_output/unique_id/v3.LR.historical_0051" +output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_diurnal_cycle_output/test-pr628-20241011v12/v3.LR.historical_0051" partition = "debug" qos = "regular" -www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_diurnal_cycle_www/unique_id" +www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_diurnal_cycle_www/test-pr628-20241011v12" years = "1985:1989:4", [climo] @@ -24,7 +24,7 @@ walltime = "00:30:00" [e3sm_diags] active = True -environment_commands = "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_20240731" +environment_commands = "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_1003" grid = '180x360_aave' multiprocessing = True num_workers = 8 diff --git a/tests/integration/generated/test_min_case_e3sm_diags_diurnal_cycle_mvm_1_chrysalis.cfg b/tests/integration/generated/test_min_case_e3sm_diags_diurnal_cycle_mvm_1_chrysalis.cfg index 0160066c..5be32f12 100644 --- a/tests/integration/generated/test_min_case_e3sm_diags_diurnal_cycle_mvm_1_chrysalis.cfg +++ b/tests/integration/generated/test_min_case_e3sm_diags_diurnal_cycle_mvm_1_chrysalis.cfg @@ -6,10 +6,10 @@ environment_commands = "" input = /lcrc/group/e3sm2/ac.wlin/E3SMv3/v3.LR.historical_0051 input_subdir = archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" -output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_diurnal_cycle_mvm_1_output/unique_id/v3.LR.historical_0051" +output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_diurnal_cycle_mvm_1_output/test-pr628-20241011v12/v3.LR.historical_0051" partition = "debug" qos = "regular" -www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_diurnal_cycle_mvm_1_www/unique_id" +www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_diurnal_cycle_mvm_1_www/test-pr628-20241011v12" years = "1985:1989:4", [climo] diff --git a/tests/integration/generated/test_min_case_e3sm_diags_diurnal_cycle_mvm_2_chrysalis.cfg b/tests/integration/generated/test_min_case_e3sm_diags_diurnal_cycle_mvm_2_chrysalis.cfg index b0109858..612cbeb5 100644 --- 
a/tests/integration/generated/test_min_case_e3sm_diags_diurnal_cycle_mvm_2_chrysalis.cfg +++ b/tests/integration/generated/test_min_case_e3sm_diags_diurnal_cycle_mvm_2_chrysalis.cfg @@ -6,10 +6,10 @@ environment_commands = "" input = /lcrc/group/e3sm2/ac.wlin/E3SMv3/v3.LR.historical_0051 input_subdir = archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" -output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_diurnal_cycle_mvm_2_output/unique_id/v3.LR.historical_0051" +output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_diurnal_cycle_mvm_2_output/test-pr628-20241011v12/v3.LR.historical_0051" partition = "debug" qos = "regular" -www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_diurnal_cycle_mvm_2_www/unique_id" +www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_diurnal_cycle_mvm_2_www/test-pr628-20241011v12" years = "1995:1999:4", [climo] @@ -24,7 +24,7 @@ walltime = "00:30:00" [e3sm_diags] active = True -environment_commands = "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_20240731" +environment_commands = "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_1003" grid = '180x360_aave' multiprocessing = True num_workers = 8 @@ -40,7 +40,7 @@ walltime = "5:00:00" ref_name = "v3.LR.historical_0051" ref_years = "1985-1988", # Use _1 as reference - reference_data_path_climo_diurnal = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_diurnal_cycle_mvm_1_output/unique_id/v3.LR.historical_0051/post/atm/180x360_aave/clim_diurnal_8xdaily" + reference_data_path_climo_diurnal = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_diurnal_cycle_mvm_1_output/test-pr628-20241011v12/v3.LR.historical_0051/post/atm/180x360_aave/clim_diurnal_8xdaily" run_type = "model_vs_model" sets = "diurnal_cycle", short_ref_name = "v3.LR.historical_0051" diff --git a/tests/integration/generated/test_min_case_e3sm_diags_lat_lon_land_mvm_1_chrysalis.cfg b/tests/integration/generated/test_min_case_e3sm_diags_lat_lon_land_mvm_1_chrysalis.cfg index f8074879..bfba7957 100644 --- a/tests/integration/generated/test_min_case_e3sm_diags_lat_lon_land_mvm_1_chrysalis.cfg +++ b/tests/integration/generated/test_min_case_e3sm_diags_lat_lon_land_mvm_1_chrysalis.cfg @@ -6,10 +6,10 @@ environment_commands = "" input = /lcrc/group/e3sm2/ac.wlin/E3SMv3/v3.LR.historical_0051 input_subdir = archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" -output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_lat_lon_land_mvm_1_output/unique_id/v3.LR.historical_0051" +output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_lat_lon_land_mvm_1_output/test-pr628-20241011v12/v3.LR.historical_0051" partition = "debug" qos = "regular" -www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_lat_lon_land_mvm_1_www/unique_id" +www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_lat_lon_land_mvm_1_www/test-pr628-20241011v12" years = "1985:1989:4", [climo] diff --git a/tests/integration/generated/test_min_case_e3sm_diags_lat_lon_land_mvm_2_chrysalis.cfg b/tests/integration/generated/test_min_case_e3sm_diags_lat_lon_land_mvm_2_chrysalis.cfg index bb585138..f62e9d14 100644 --- a/tests/integration/generated/test_min_case_e3sm_diags_lat_lon_land_mvm_2_chrysalis.cfg +++ 
b/tests/integration/generated/test_min_case_e3sm_diags_lat_lon_land_mvm_2_chrysalis.cfg @@ -6,10 +6,10 @@ environment_commands = "" input = /lcrc/group/e3sm2/ac.wlin/E3SMv3/v3.LR.historical_0051 input_subdir = archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" -output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_lat_lon_land_mvm_2_output/unique_id/v3.LR.historical_0051" +output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_lat_lon_land_mvm_2_output/test-pr628-20241011v12/v3.LR.historical_0051" partition = "debug" qos = "regular" -www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_lat_lon_land_mvm_2_www/unique_id" +www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_lat_lon_land_mvm_2_www/test-pr628-20241011v12" years = "1995:1999:4", [climo] @@ -24,7 +24,7 @@ walltime = "00:30:00" [e3sm_diags] active = True -environment_commands = "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_20240731" +environment_commands = "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_1003" grid = '180x360_aave' multiprocessing = True num_workers = 8 @@ -41,7 +41,7 @@ walltime = "5:00:00" ref_start_yr = 1985 ref_years = "1985-1988", # Use _1 as reference - reference_data_path = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_lat_lon_land_mvm_1_output/unique_id/v3.LR.historical_0051/post/lnd/180x360_aave/clim" + reference_data_path = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_lat_lon_land_mvm_1_output/test-pr628-20241011v12/v3.LR.historical_0051/post/lnd/180x360_aave/clim" run_type = "model_vs_model" sets = "lat_lon_land", short_ref_name = "v3.LR.historical_0051" diff --git a/tests/integration/generated/test_min_case_e3sm_diags_streamflow_chrysalis.cfg b/tests/integration/generated/test_min_case_e3sm_diags_streamflow_chrysalis.cfg index 797adc44..34ba3148 100644 --- a/tests/integration/generated/test_min_case_e3sm_diags_streamflow_chrysalis.cfg +++ b/tests/integration/generated/test_min_case_e3sm_diags_streamflow_chrysalis.cfg @@ -6,10 +6,10 @@ environment_commands = "" input = /lcrc/group/e3sm2/ac.wlin/E3SMv3/v3.LR.historical_0051 input_subdir = archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" -output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_streamflow_output/unique_id/v3.LR.historical_0051" +output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_streamflow_output/test-pr628-20241011v12/v3.LR.historical_0051" partition = "debug" qos = "regular" -www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_streamflow_www/unique_id" +www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_streamflow_www/test-pr628-20241011v12" years = "1985:1989:4", [ts] @@ -27,7 +27,7 @@ walltime = "00:30:00" [e3sm_diags] active = True -environment_commands = "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_20240731" +environment_commands = "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_1003" grid = '180x360_aave' multiprocessing = True num_workers = 8 diff --git a/tests/integration/generated/test_min_case_e3sm_diags_streamflow_mvm_1_chrysalis.cfg b/tests/integration/generated/test_min_case_e3sm_diags_streamflow_mvm_1_chrysalis.cfg index bb8de9cf..4e193c8e 100644 --- 
a/tests/integration/generated/test_min_case_e3sm_diags_streamflow_mvm_1_chrysalis.cfg +++ b/tests/integration/generated/test_min_case_e3sm_diags_streamflow_mvm_1_chrysalis.cfg @@ -6,10 +6,10 @@ environment_commands = "" input = /lcrc/group/e3sm2/ac.wlin/E3SMv3/v3.LR.historical_0051 input_subdir = archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" -output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_streamflow_mvm_1_output/unique_id/v3.LR.historical_0051" +output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_streamflow_mvm_1_output/test-pr628-20241011v12/v3.LR.historical_0051" partition = "debug" qos = "regular" -www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_streamflow_mvm_1_www/unique_id" +www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_streamflow_mvm_1_www/test-pr628-20241011v12" years = "1985:1989:4", [ts] diff --git a/tests/integration/generated/test_min_case_e3sm_diags_streamflow_mvm_2_chrysalis.cfg b/tests/integration/generated/test_min_case_e3sm_diags_streamflow_mvm_2_chrysalis.cfg index f7d43fc6..060331ce 100644 --- a/tests/integration/generated/test_min_case_e3sm_diags_streamflow_mvm_2_chrysalis.cfg +++ b/tests/integration/generated/test_min_case_e3sm_diags_streamflow_mvm_2_chrysalis.cfg @@ -6,10 +6,10 @@ environment_commands = "" input = /lcrc/group/e3sm2/ac.wlin/E3SMv3/v3.LR.historical_0051 input_subdir = archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" -output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_streamflow_mvm_2_output/unique_id/v3.LR.historical_0051" +output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_streamflow_mvm_2_output/test-pr628-20241011v12/v3.LR.historical_0051" partition = "debug" qos = "regular" -www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_streamflow_mvm_2_www/unique_id" +www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_streamflow_mvm_2_www/test-pr628-20241011v12" years = "1995:1999:4", [ts] @@ -27,7 +27,7 @@ walltime = "00:30:00" [e3sm_diags] active = True -environment_commands = "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_20240731" +environment_commands = "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_1003" grid = '180x360_aave' multiprocessing = True num_workers = 8 @@ -45,7 +45,7 @@ walltime = "5:00:00" ref_start_yr = 1985 ref_years = "1985-1988", # Use _1 as reference - reference_data_path = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_streamflow_mvm_1_output/unique_id/v3.LR.historical_0051/post/atm/180x360_aave/clim" + reference_data_path = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_streamflow_mvm_1_output/test-pr628-20241011v12/v3.LR.historical_0051/post/atm/180x360_aave/clim" # reference_data_path_ts_rof determined automatically run_type = "model_vs_model" sets="streamflow" diff --git a/tests/integration/generated/test_min_case_e3sm_diags_tc_analysis_chrysalis.cfg b/tests/integration/generated/test_min_case_e3sm_diags_tc_analysis_chrysalis.cfg index ee24e496..bafdc8e8 100644 --- a/tests/integration/generated/test_min_case_e3sm_diags_tc_analysis_chrysalis.cfg +++ b/tests/integration/generated/test_min_case_e3sm_diags_tc_analysis_chrysalis.cfg @@ -6,20 +6,20 @@ environment_commands = "" input = /lcrc/group/e3sm2/ac.wlin/E3SMv3/v3.LR.historical_0051 input_subdir = 
archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" -output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_tc_analysis_output/unique_id/v3.LR.historical_0051" +output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_tc_analysis_output/test-pr628-20241011v12/v3.LR.historical_0051" partition = "debug" qos = "regular" -www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_tc_analysis_www/unique_id" +www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_tc_analysis_www/test-pr628-20241011v12" years = "1985:1987:2", [tc_analysis] active = True -scratch = "/lcrc/globalscratch/ac.forsyth2/zppy_min_case_e3sm_diags_tc_analysis_scratch/unique_id/v3.LR.historical_0051" +scratch = "/lcrc/globalscratch/ac.forsyth2/zppy_min_case_e3sm_diags_tc_analysis_scratch/test-pr628-20241011v12/v3.LR.historical_0051" walltime = "00:30:00" [e3sm_diags] active = True -environment_commands = "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_20240731" +environment_commands = "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_1003" grid = '180x360_aave' multiprocessing = True num_workers = 8 diff --git a/tests/integration/generated/test_min_case_e3sm_diags_tc_analysis_mvm_1_chrysalis.cfg b/tests/integration/generated/test_min_case_e3sm_diags_tc_analysis_mvm_1_chrysalis.cfg index f9611ca1..e2cc8349 100644 --- a/tests/integration/generated/test_min_case_e3sm_diags_tc_analysis_mvm_1_chrysalis.cfg +++ b/tests/integration/generated/test_min_case_e3sm_diags_tc_analysis_mvm_1_chrysalis.cfg @@ -6,13 +6,13 @@ environment_commands = "" input = /lcrc/group/e3sm2/ac.wlin/E3SMv3/v3.LR.historical_0051 input_subdir = archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" -output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_tc_analysis_mvm_1_output/unique_id/v3.LR.historical_0051" +output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_tc_analysis_mvm_1_output/test-pr628-20241011v12/v3.LR.historical_0051" partition = "debug" qos = "regular" -www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_tc_analysis_mvm_2_www/unique_id" +www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_tc_analysis_mvm_2_www/test-pr628-20241011v12" years = "1985:1987:2", [tc_analysis] active = True -scratch = "/lcrc/globalscratch/ac.forsyth2/zppy_min_case_e3sm_diags_tc_analysis_mvm_1_scratch/unique_id/v3.LR.historical_0051" +scratch = "/lcrc/globalscratch/ac.forsyth2/zppy_min_case_e3sm_diags_tc_analysis_mvm_1_scratch/test-pr628-20241011v12/v3.LR.historical_0051" walltime = "00:30:00" diff --git a/tests/integration/generated/test_min_case_e3sm_diags_tc_analysis_mvm_2_chrysalis.cfg b/tests/integration/generated/test_min_case_e3sm_diags_tc_analysis_mvm_2_chrysalis.cfg index 3e42bc99..cb13cab4 100644 --- a/tests/integration/generated/test_min_case_e3sm_diags_tc_analysis_mvm_2_chrysalis.cfg +++ b/tests/integration/generated/test_min_case_e3sm_diags_tc_analysis_mvm_2_chrysalis.cfg @@ -6,20 +6,20 @@ environment_commands = "" input = /lcrc/group/e3sm2/ac.wlin/E3SMv3/v3.LR.historical_0051 input_subdir = archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" -output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_tc_analysis_mvm_2_output/unique_id/v3.LR.historical_0051" +output = 
"/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_tc_analysis_mvm_2_output/test-pr628-20241011v12/v3.LR.historical_0051" partition = "debug" qos = "regular" -www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_tc_analysis_mvm_2_www/unique_id" +www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_tc_analysis_mvm_2_www/test-pr628-20241011v12" years = "1995:1997:2", [tc_analysis] active = True -scratch = "/lcrc/globalscratch/ac.forsyth2/zppy_min_case_e3sm_diags_tc_analysis_mvm_2_scratch/unique_id/v3.LR.historical_0051" +scratch = "/lcrc/globalscratch/ac.forsyth2/zppy_min_case_e3sm_diags_tc_analysis_mvm_2_scratch/test-pr628-20241011v12/v3.LR.historical_0051" walltime = "00:30:00" [e3sm_diags] active = True -environment_commands = "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_20240731" +environment_commands = "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_1003" grid = '180x360_aave' multiprocessing = True num_workers = 8 @@ -37,7 +37,7 @@ walltime = "5:00:00" ref_start_yr = 1985 ref_years = "1985-1986", # Use _1 as reference - reference_data_path = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_tc_analysis_mvm_1_output/unique_id/v3.LR.historical_0051/post/atm/180x360_aave/clim" + reference_data_path = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_tc_analysis_mvm_1_output/test-pr628-20241011v12/v3.LR.historical_0051/post/atm/180x360_aave/clim" # reference_data_path_tc determined automatically run_type = "model_vs_model" sets = "tc_analysis", diff --git a/tests/integration/generated/test_min_case_e3sm_diags_tropical_subseasonal_chrysalis.cfg b/tests/integration/generated/test_min_case_e3sm_diags_tropical_subseasonal_chrysalis.cfg index feaa8fe1..bb010d01 100644 --- a/tests/integration/generated/test_min_case_e3sm_diags_tropical_subseasonal_chrysalis.cfg +++ b/tests/integration/generated/test_min_case_e3sm_diags_tropical_subseasonal_chrysalis.cfg @@ -6,10 +6,10 @@ environment_commands = "" input = /lcrc/group/e3sm2/ac.wlin/E3SMv3/v3.LR.historical_0051 input_subdir = archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" -output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_tropical_subseasonal_output/unique_id/v3.LR.historical_0051" +output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_tropical_subseasonal_output/test-pr628-20241011v12/v3.LR.historical_0051" partition = "debug" qos = "regular" -www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_tropical_subseasonal_www/unique_id" +www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_tropical_subseasonal_www/test-pr628-20241011v12" years = "1985:1989:4", [ts] diff --git a/tests/integration/generated/test_min_case_e3sm_diags_tropical_subseasonal_mvm_1_chrysalis.cfg b/tests/integration/generated/test_min_case_e3sm_diags_tropical_subseasonal_mvm_1_chrysalis.cfg index 7e9a2d31..36e774c6 100644 --- a/tests/integration/generated/test_min_case_e3sm_diags_tropical_subseasonal_mvm_1_chrysalis.cfg +++ b/tests/integration/generated/test_min_case_e3sm_diags_tropical_subseasonal_mvm_1_chrysalis.cfg @@ -6,10 +6,10 @@ environment_commands = "" input = /lcrc/group/e3sm2/ac.wlin/E3SMv3/v3.LR.historical_0051 input_subdir = archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" -output = 
"/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_tropical_subseasonal_mvm_1_output/unique_id/v3.LR.historical_0051" +output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_tropical_subseasonal_mvm_1_output/test-pr628-20241011v12/v3.LR.historical_0051" partition = "debug" qos = "regular" -www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_tropical_subseasonal_mvm_1_www/unique_id" +www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_tropical_subseasonal_mvm_1_www/test-pr628-20241011v12" years = "1985:1989:4", [ts] diff --git a/tests/integration/generated/test_min_case_e3sm_diags_tropical_subseasonal_mvm_2_chrysalis.cfg b/tests/integration/generated/test_min_case_e3sm_diags_tropical_subseasonal_mvm_2_chrysalis.cfg index 7a997083..153b18bc 100644 --- a/tests/integration/generated/test_min_case_e3sm_diags_tropical_subseasonal_mvm_2_chrysalis.cfg +++ b/tests/integration/generated/test_min_case_e3sm_diags_tropical_subseasonal_mvm_2_chrysalis.cfg @@ -6,10 +6,10 @@ environment_commands = "" input = /lcrc/group/e3sm2/ac.wlin/E3SMv3/v3.LR.historical_0051 input_subdir = archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" -output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_tropical_subseasonal_mvm_2_output/unique_id/v3.LR.historical_0051" +output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_tropical_subseasonal_mvm_2_output/test-pr628-20241011v12/v3.LR.historical_0051" partition = "debug" qos = "regular" -www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_tropical_subseasonal_mvm_2_www/unique_id" +www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_e3sm_diags_tropical_subseasonal_mvm_2_www/test-pr628-20241011v12" years = "1995:1999:4", [ts] @@ -25,7 +25,7 @@ walltime = "00:30:00" [e3sm_diags] active = True -environment_commands = "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_20240731" +environment_commands = "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_1003" grid = '180x360_aave' multiprocessing = True num_workers = 8 @@ -44,7 +44,7 @@ walltime = "5:00:00" ref_start_yr = 1985 ref_years = "1985-1988", # Use _1 as reference - reference_data_path = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_tropical_subseasonal_mvm_1_output/unique_id/v3.LR.historical_0051/post/atm/180x360_aave/clim" + reference_data_path = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_e3sm_diags_tropical_subseasonal_mvm_1_output/test-pr628-20241011v12/v3.LR.historical_0051/post/atm/180x360_aave/clim" # reference_data_path_daily is determined automatically run_type = "model_vs_model" sets = "tropical_subseasonal", diff --git a/tests/integration/generated/test_min_case_global_time_series_custom_chrysalis.cfg b/tests/integration/generated/test_min_case_global_time_series_custom_chrysalis.cfg index 3a648737..851cd937 100644 --- a/tests/integration/generated/test_min_case_global_time_series_custom_chrysalis.cfg +++ b/tests/integration/generated/test_min_case_global_time_series_custom_chrysalis.cfg @@ -6,10 +6,10 @@ environment_commands = "" input = /lcrc/group/e3sm2/ac.wlin/E3SMv3/v3.LR.historical_0051 input_subdir = archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" -output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_global_time_series_custom_output/unique_id/v3.LR.historical_0051" +output = 
"/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_global_time_series_custom_output/test-pr628-20241011v12/v3.LR.historical_0051" partition = "debug" qos = "regular" -www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_global_time_series_custom_www/unique_id" +www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_global_time_series_custom_www/test-pr628-20241011v12" [ts] active = True diff --git a/tests/integration/generated/test_min_case_global_time_series_original_8_chrysalis.cfg b/tests/integration/generated/test_min_case_global_time_series_original_8_chrysalis.cfg index d1395bb3..9f050990 100644 --- a/tests/integration/generated/test_min_case_global_time_series_original_8_chrysalis.cfg +++ b/tests/integration/generated/test_min_case_global_time_series_original_8_chrysalis.cfg @@ -6,10 +6,10 @@ environment_commands = "" input = /lcrc/group/e3sm2/ac.wlin/E3SMv3/v3.LR.historical_0051 input_subdir = archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" -output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_global_time_series_original_8_output/unique_id/v3.LR.historical_0051" +output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_global_time_series_original_8_output/test-pr628-20241011v12/v3.LR.historical_0051" partition = "debug" qos = "regular" -www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_global_time_series_original_8_www/unique_id" +www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_global_time_series_original_8_www/test-pr628-20241011v12" [ts] active = True diff --git a/tests/integration/generated/test_min_case_global_time_series_original_8_no_ocn_chrysalis.cfg b/tests/integration/generated/test_min_case_global_time_series_original_8_no_ocn_chrysalis.cfg index 188b041b..08704926 100644 --- a/tests/integration/generated/test_min_case_global_time_series_original_8_no_ocn_chrysalis.cfg +++ b/tests/integration/generated/test_min_case_global_time_series_original_8_no_ocn_chrysalis.cfg @@ -6,10 +6,10 @@ environment_commands = "" input = /lcrc/group/e3sm2/ac.wlin/E3SMv3/v3.LR.historical_0051 input_subdir = archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" -output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_global_time_series_original_8_no_ocn_output/unique_id/v3.LR.historical_0051" +output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_global_time_series_original_8_no_ocn_output/test-pr628-20241011v12/v3.LR.historical_0051" partition = "debug" qos = "regular" -www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_global_time_series_original_8_no_ocn_www/unique_id" +www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_global_time_series_original_8_no_ocn_www/test-pr628-20241011v12" [ts] active = True diff --git a/tests/integration/generated/test_min_case_ilamb_chrysalis.cfg b/tests/integration/generated/test_min_case_ilamb_chrysalis.cfg index 2c361140..0c223858 100644 --- a/tests/integration/generated/test_min_case_ilamb_chrysalis.cfg +++ b/tests/integration/generated/test_min_case_ilamb_chrysalis.cfg @@ -6,10 +6,10 @@ environment_commands = "" input = /lcrc/group/e3sm2/ac.wlin/E3SMv3/v3.LR.historical_0051 input_subdir = archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" -output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_ilamb_output/unique_id/v3.LR.historical_0051" +output = 
"/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_ilamb_output/test-pr628-20241011v12/v3.LR.historical_0051" partition = "debug" qos = "regular" -www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_ilamb_www/unique_id" +www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_ilamb_www/test-pr628-20241011v12" [ts] active = True diff --git a/tests/integration/generated/test_min_case_ilamb_land_only_chrysalis.cfg b/tests/integration/generated/test_min_case_ilamb_land_only_chrysalis.cfg index 89b08f57..12a1119c 100644 --- a/tests/integration/generated/test_min_case_ilamb_land_only_chrysalis.cfg +++ b/tests/integration/generated/test_min_case_ilamb_land_only_chrysalis.cfg @@ -6,10 +6,10 @@ environment_commands = "" input = /lcrc/group/e3sm2/ac.wlin/E3SMv3/v3.LR.historical_0051 input_subdir = archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" -output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_ilamb_land_only_output/unique_id/v3.LR.historical_0051" +output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_ilamb_land_only_output/test-pr628-20241011v12/v3.LR.historical_0051" partition = "debug" qos = "regular" -www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_ilamb_land_only_www/unique_id" +www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_ilamb_land_only_www/test-pr628-20241011v12" [ts] active = True diff --git a/tests/integration/generated/test_min_case_mpas_analysis_chrysalis.cfg b/tests/integration/generated/test_min_case_mpas_analysis_chrysalis.cfg index 0cf4232b..4ee6e2e4 100644 --- a/tests/integration/generated/test_min_case_mpas_analysis_chrysalis.cfg +++ b/tests/integration/generated/test_min_case_mpas_analysis_chrysalis.cfg @@ -6,10 +6,10 @@ environment_commands = "" input = /lcrc/group/e3sm2/ac.wlin/E3SMv3/v3.LR.historical_0051 input_subdir = archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" -output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_mpas_analysis_output/unique_id/v3.LR.historical_0051" +output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_mpas_analysis_output/test-pr628-20241011v12/v3.LR.historical_0051" partition = "debug" qos = "regular" -www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_mpas_analysis_www/unique_id" +www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_mpas_analysis_www/test-pr628-20241011v12" [mpas_analysis] active = True diff --git a/tests/integration/generated/test_min_case_tc_analysis_simultaneous_1_chrysalis.cfg b/tests/integration/generated/test_min_case_tc_analysis_simultaneous_1_chrysalis.cfg index 212baec1..8f1f1e02 100644 --- a/tests/integration/generated/test_min_case_tc_analysis_simultaneous_1_chrysalis.cfg +++ b/tests/integration/generated/test_min_case_tc_analysis_simultaneous_1_chrysalis.cfg @@ -6,13 +6,13 @@ environment_commands = "" input = /lcrc/group/e3sm2/ac.wlin/E3SMv3/v3.LR.historical_0051 input_subdir = archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" -output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_tc_analysis_simultaneous_1_output/unique_id/v3.LR.historical_0051" +output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_tc_analysis_simultaneous_1_output/test-pr628-20241011v12/v3.LR.historical_0051" partition = "debug" qos = "regular" -www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_tc_analysis_simultaneous_1_www/unique_id" +www = 
"/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_tc_analysis_simultaneous_1_www/test-pr628-20241011v12" years = "1985:1987:2", [tc_analysis] active = True -scratch = "/lcrc/globalscratch/ac.forsyth2/zppy_min_case_tc_analysis_simultaneous_1_scratch/unique_id/v3.LR.historical_0051" +scratch = "/lcrc/globalscratch/ac.forsyth2/zppy_min_case_tc_analysis_simultaneous_1_scratch/test-pr628-20241011v12/v3.LR.historical_0051" walltime = "00:30:00" diff --git a/tests/integration/generated/test_min_case_tc_analysis_simultaneous_2_chrysalis.cfg b/tests/integration/generated/test_min_case_tc_analysis_simultaneous_2_chrysalis.cfg index 3a116479..77929ff5 100644 --- a/tests/integration/generated/test_min_case_tc_analysis_simultaneous_2_chrysalis.cfg +++ b/tests/integration/generated/test_min_case_tc_analysis_simultaneous_2_chrysalis.cfg @@ -8,13 +8,13 @@ environment_commands = "" input = /lcrc/group/e3sm2/ac.wlin/E3SMv3/v3.LR.historical_0051 input_subdir = archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" -output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_tc_analysis_simultaneous_2_output/unique_id/v3.LR.historical_0051" +output = "/lcrc/group/e3sm/ac.forsyth2/zppy_min_case_tc_analysis_simultaneous_2_output/test-pr628-20241011v12/v3.LR.historical_0051" partition = "debug" qos = "regular" -www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_tc_analysis_simultaneous_2_www/unique_id" +www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_min_case_tc_analysis_simultaneous_2_www/test-pr628-20241011v12" years = "1985:1987:2", [tc_analysis] active = True -scratch = "/lcrc/globalscratch/ac.forsyth2/zppy_min_case_tc_analysis_simultaneous_2_scratch/unique_id/v3.LR.historical_0051" +scratch = "/lcrc/globalscratch/ac.forsyth2/zppy_min_case_tc_analysis_simultaneous_2_scratch/test-pr628-20241011v12/v3.LR.historical_0051" walltime = "00:30:00" diff --git a/tests/integration/generated/test_weekly_bundles_chrysalis.cfg b/tests/integration/generated/test_weekly_bundles_chrysalis.cfg index c4c1b5db..c561cfb2 100644 --- a/tests/integration/generated/test_weekly_bundles_chrysalis.cfg +++ b/tests/integration/generated/test_weekly_bundles_chrysalis.cfg @@ -30,11 +30,11 @@ input = /lcrc/group/e3sm2/ac.wlin/E3SMv3/v3.LR.historical_0051 input_subdir = archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" # To run this test, edit `output` and `www` in this file, along with `actual_images_dir` in test_bundles.py -output = "/lcrc/group/e3sm/ac.forsyth2/zppy_weekly_bundles_output/unique_id/v3.LR.historical_0051" +output = "/lcrc/group/e3sm/ac.forsyth2/zppy_weekly_bundles_output/test-pr628-20241011v12/v3.LR.historical_0051" partition = "compute" qos = "regular" walltime = "07:00:00" -www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_weekly_bundles_www/unique_id" +www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_weekly_bundles_www/test-pr628-20241011v12" [bundle] @@ -95,15 +95,16 @@ years = "1985:1989:2", mapping_file = "" vars = "RIVER_DISCHARGE_OVER_LAND_LIQ" -[tc_analysis] -active = True -bundle = "bundle3" # Let bundle1 finish first because "e3sm_diags: atm_monthly_180x360_aave_mvm" requires "ts: atm_monthly_180x360_aave" -scratch = "/lcrc/globalscratch/ac.forsyth2/zppy_weekly_bundles_scratch/unique_id/v3.LR.historical_0051" -years = "1985:1989:2", +# TODO: Add "tc_analysis" back in after empty dat is resolved. 
+# [tc_analysis] +# active = True +# bundle = "bundle3" # Let bundle1 finish first because "e3sm_diags: atm_monthly_180x360_aave_mvm" requires "ts: atm_monthly_180x360_aave" +# scratch = "/lcrc/globalscratch/ac.forsyth2/zppy_weekly_bundles_scratch/test-pr628-20241011v12/v3.LR.historical_0051" +# years = "1985:1989:2", [e3sm_diags] active = True -environment_commands = "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_20240731" +environment_commands = "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_1003" grid = '180x360_aave' ref_final_yr = 1989 ref_start_yr = 1985 @@ -126,9 +127,10 @@ years = "1985:1989:2", ref_name = "v3.LR.historical_0051" ref_start_yr = 1985 ref_years = "1985-1986", - reference_data_path = "/lcrc/group/e3sm/ac.forsyth2/zppy_weekly_bundles_output/unique_id/v3.LR.historical_0051/post/atm/180x360_aave/clim" + reference_data_path = "/lcrc/group/e3sm/ac.forsyth2/zppy_weekly_bundles_output/test-pr628-20241011v12/v3.LR.historical_0051/post/atm/180x360_aave/clim" run_type = "model_vs_model" - sets = "polar","enso_diags","streamflow","tc_analysis", + # TODO: Add "tc_analysis" back in after empty dat is resolved. + sets = "polar","enso_diags","streamflow", short_ref_name = "v3.LR.historical_0051" swap_test_ref = False tag = "model_vs_model" diff --git a/tests/integration/generated/test_weekly_comprehensive_v2_chrysalis.cfg b/tests/integration/generated/test_weekly_comprehensive_v2_chrysalis.cfg index 7b6987e1..4d08936c 100644 --- a/tests/integration/generated/test_weekly_comprehensive_v2_chrysalis.cfg +++ b/tests/integration/generated/test_weekly_comprehensive_v2_chrysalis.cfg @@ -3,14 +3,15 @@ case = "v2.LR.historical_0201" constraint = "" dry_run = "False" environment_commands = "" +fail_on_dependency_skip = True input = /lcrc/group/e3sm/ac.forsyth2//E3SMv2/v2.LR.historical_0201 input_subdir = archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" -output = "/lcrc/group/e3sm/ac.forsyth2/zppy_weekly_comprehensive_v2_output/unique_id/v2.LR.historical_0201" +output = "/lcrc/group/e3sm/ac.forsyth2/zppy_weekly_comprehensive_v2_output/test-pr628-20241011v12/v2.LR.historical_0201" partition = "debug" qos = "regular" -www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_weekly_comprehensive_v2_www/unique_id" -years = "1850:1854:2", +www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_weekly_comprehensive_v2_www/test-pr628-20241011v12" +years = "1980:1984:2", [climo] active = True @@ -45,6 +46,12 @@ walltime = "00:30:00" input_subdir = "archive/atm/hist" ts_fmt = "cmip" + [[ atm_daily_180x360_aave ]] + frequency = "daily" + input_files = "eam.h1" + input_subdir = "archive/atm/hist" + vars = "PRECT" + [[ rof_monthly ]] extra_vars = 'areatotal2' frequency = "monthly" @@ -59,7 +66,7 @@ walltime = "00:30:00" input_files = "eam.h0" input_subdir = "archive/atm/hist" mapping_file = "glb" - years = "1850:1860:5", + years = "1980:1990:5", [[ lnd_monthly_glb ]] frequency = "monthly" @@ -67,7 +74,7 @@ walltime = "00:30:00" input_subdir = "archive/lnd/hist" mapping_file = "glb" vars = "LAISHA,LAISUN" - years = "1850:1860:5", + years = "1980:1990:5", [[ land_monthly ]] extra_vars = "landfrac" @@ -79,31 +86,39 @@ walltime = "00:30:00" [tc_analysis] active = True -scratch = "/lcrc/globalscratch/ac.forsyth2/zppy_weekly_comprehensive_v2_scratch/unique_id/v2.LR.historical_0201" +scratch = 
"/lcrc/globalscratch/ac.forsyth2/zppy_weekly_comprehensive_v2_scratch/test-pr628-20241011v12/v2.LR.historical_0201" walltime = "00:30:00" [e3sm_diags] active = True -environment_commands = "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_20240731" +climo_diurnal_frequency = "diurnal_8xdaily" +climo_diurnal_subsection = "atm_monthly_diurnal_8xdaily_180x360_aave" +environment_commands = "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_1003" grid = '180x360_aave' multiprocessing = True num_workers = 8 partition = "compute" qos = "regular" -ref_final_yr = 1851 -ref_start_yr = 1850 -ref_years = "1850-1851", +ref_end_yr = 1981 +ref_final_yr = 1981 +ref_start_yr = 1980 +ref_years = "1980-1981", +# Include all sets +# min_case_e3sm_diags_depend_on_climo: "lat_lon","zonal_mean_xy","zonal_mean_2d","polar","cosp_histogram","meridional_mean_2d","annual_cycle_zonal_mean","zonal_mean_2d_stratosphere", +# min_case_e3sm_diags_depend_on_ts: "enso_diags","qbo", +# min_case_e3sm_diags_diurnal_cycle: "diurnal_cycle", +# min_case_e3sm_diags_streamflow: "streamflow", +# min_case_e3sm_diags_tc_analysis: "tc_analysis", +# min_case_e3sm_diags_tropical_subseasonal: "tropical_subseasonal", +sets = "lat_lon","zonal_mean_xy","zonal_mean_2d","polar","cosp_histogram","meridional_mean_2d","annual_cycle_zonal_mean","zonal_mean_2d_stratosphere","enso_diags","qbo","diurnal_cycle","streamflow","tc_analysis","tropical_subseasonal", short_name = "v2.LR.historical_0201" ts_num_years = 2 walltime = "5:00:00" -years = "1852:1854:2", +years = "1982:1984:2", [[ atm_monthly_180x360_aave ]] - climo_diurnal_frequency = "diurnal_8xdaily" - climo_diurnal_subsection = "atm_monthly_diurnal_8xdaily_180x360_aave" climo_subsection = "atm_monthly_180x360_aave" dc_obs_climo = '/lcrc/group/e3sm/public_html/e3sm_diags_test_data/unit_test_complete_run/obs/climatology' - sets = "lat_lon","enso_diags","diurnal_cycle","streamflow","tc_analysis","tc_analysis", [[ atm_monthly_180x360_aave_mvm ]] # Test model-vs-model using the same files as the reference @@ -112,14 +127,15 @@ years = "1852:1854:2", partition = "compute" qos = "regular" ref_name = "v2.LR.historical_0201" - reference_data_path = "/lcrc/group/e3sm/ac.forsyth2/zppy_weekly_comprehensive_v2_output/unique_id/v2.LR.historical_0201/post/atm/180x360_aave/clim" + reference_data_path = "/lcrc/group/e3sm/ac.forsyth2/zppy_weekly_comprehensive_v2_output/test-pr628-20241011v12/v2.LR.historical_0201/post/atm/180x360_aave/clim" run_type = "model_vs_model" - sets = "lat_lon", short_ref_name = "same simulation" swap_test_ref = False tag = "model_vs_model" + ts_daily_subsection = "atm_monthly_180x360_aave" ts_num_years_ref = 2 ts_subsection = "atm_monthly_180x360_aave" + years = "1980:1982:2", [[ lnd_monthly_mvm_lnd ]] # Test model-vs-model using the same files as the reference @@ -128,7 +144,7 @@ years = "1852:1854:2", partition = "compute" qos = "regular" ref_name = "v2.LR.historical_0201" - reference_data_path = "/lcrc/group/e3sm/ac.forsyth2/zppy_weekly_comprehensive_v2_output/unique_id/v2.LR.historical_0201/post/lnd/180x360_aave/clim" + reference_data_path = "/lcrc/group/e3sm/ac.forsyth2/zppy_weekly_comprehensive_v2_output/test-pr628-20241011v12/v2.LR.historical_0201/post/lnd/180x360_aave/clim" run_type = "model_vs_model" sets = "lat_lon_land", short_ref_name = "same simulation" @@ -138,28 +154,28 @@ years = "1852:1854:2", [mpas_analysis] active = True -anomalyRefYear = 1850 -climo_years ="1850-1854", "1855-1860", -enso_years 
= "1850-1854", "1855-1860", +anomalyRefYear = 1980 +climo_years ="1980-1984", "1985-1990", +enso_years = "1980-1984", "1985-1990", mesh = "EC30to60E2r2" parallelTaskCount = 6 partition = "compute" qos = "regular" shortTermArchive = True -ts_years = "1850-1854", "1850-1860", +ts_years = "1980-1984", "1980-1990", walltime = "00:30:00" [global_time_series] active = True -climo_years ="1850-1854", "1855-1860", +climo_years ="1980-1984", "1985-1990", experiment_name = "v2.LR.historical_0201" figstr = "v2.LR.historical_0201" -moc_file=mocTimeSeries_1850-1860.nc +moc_file=mocTimeSeries_1980-1990.nc plots_lnd = "LAISHA,LAISUN" ts_num_years = 5 -ts_years = "1850-1854", "1850-1860", +ts_years = "1980-1984", "1980-1990", walltime = "00:30:00" -years = "1850-1860", +years = "1980-1990", [ilamb] active = True @@ -168,4 +184,4 @@ partition = "compute" short_name = "v2.LR.historical_0201" ts_num_years = 2 walltime = "2:00:00" -years = "1850:1854:2", +years = "1980:1984:2", diff --git a/tests/integration/generated/test_weekly_comprehensive_v3_chrysalis.cfg b/tests/integration/generated/test_weekly_comprehensive_v3_chrysalis.cfg index 6d37de66..86efc271 100644 --- a/tests/integration/generated/test_weekly_comprehensive_v3_chrysalis.cfg +++ b/tests/integration/generated/test_weekly_comprehensive_v3_chrysalis.cfg @@ -3,13 +3,16 @@ case = "v3.LR.historical_0051" constraint = "" dry_run = "False" environment_commands = "" +fail_on_dependency_skip = True +guess_path_parameters = False +guess_section_parameters = False input = /lcrc/group/e3sm2/ac.wlin/E3SMv3/v3.LR.historical_0051 input_subdir = archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" -output = "/lcrc/group/e3sm/ac.forsyth2/zppy_weekly_comprehensive_v3_output/unique_id/v3.LR.historical_0051" +output = "/lcrc/group/e3sm/ac.forsyth2/zppy_weekly_comprehensive_v3_output/test-pr628-20241011v12/v3.LR.historical_0051" partition = "debug" qos = "regular" -www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_weekly_comprehensive_v3_www/unique_id" +www = "/lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_weekly_comprehensive_v3_www/test-pr628-20241011v12" years = "1985:1989:2", [climo] @@ -46,6 +49,12 @@ walltime = "00:30:00" input_subdir = "archive/atm/hist" ts_fmt = "cmip" + [[ atm_daily_180x360_aave ]] + frequency = "daily" + input_files = "eam.h1" + input_subdir = "archive/atm/hist" + vars = "PRECT" + [[ rof_monthly ]] extra_vars = 'areatotal2' frequency = "monthly" @@ -79,49 +88,79 @@ walltime = "00:30:00" ts_fmt = "cmip" vars = "FSH,RH2M,LAISHA,LAISUN,QINTR,QOVER,QRUNOFF,QSOIL,QVEGE,QVEGT,SOILICE,SOILLIQ,SOILWATER_10CM,TSA,TSOI,H2OSNO,TOTLITC,CWDC,SOIL1C,SOIL2C,SOIL3C,SOIL4C,WOOD_HARVESTC,TOTVEGC,NBP,GPP,AR,HR" -[tc_analysis] -active = True -scratch = "/lcrc/globalscratch/ac.forsyth2/zppy_weekly_comprehensive_v3_scratch/unique_id/v3.LR.historical_0051" -walltime = "00:30:00" +# TODO: Add "tc_analysis" back in after empty dat is resolved. 
+# [tc_analysis] +# active = True +# scratch = "/lcrc/globalscratch/ac.forsyth2/zppy_weekly_comprehensive_v3_scratch/test-pr628-20241011v12/v3.LR.historical_0051" +# walltime = "00:30:00" [e3sm_diags] active = True -environment_commands = "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_20240731" +climo_diurnal_frequency = "diurnal_8xdaily" +climo_diurnal_subsection = "atm_monthly_diurnal_8xdaily_180x360_aave" +climo_subsection = "atm_monthly_180x360_aave" +environment_commands = "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_1003" grid = '180x360_aave' multiprocessing = True num_workers = 8 partition = "compute" qos = "regular" +ref_end_yr = 1986 ref_final_yr = 1986 ref_start_yr = 1985 ref_years = "1985-1986", +# Include all sets +# min_case_e3sm_diags_depend_on_climo: "lat_lon","zonal_mean_xy","zonal_mean_2d","polar","cosp_histogram","meridional_mean_2d","annual_cycle_zonal_mean","zonal_mean_2d_stratosphere", +# min_case_e3sm_diags_depend_on_ts: "enso_diags","qbo", +# min_case_e3sm_diags_diurnal_cycle: "diurnal_cycle", +# min_case_e3sm_diags_streamflow: "streamflow", +# min_case_e3sm_diags_tc_analysis: "tc_analysis", +# min_case_e3sm_diags_tropical_subseasonal: "tropical_subseasonal", +# TODO: Add "tc_analysis" back in after empty dat is resolved. +sets = "lat_lon","zonal_mean_xy","zonal_mean_2d","polar","cosp_histogram","meridional_mean_2d","annual_cycle_zonal_mean","zonal_mean_2d_stratosphere","enso_diags","qbo","diurnal_cycle","streamflow","tropical_subseasonal", short_name = "v3.LR.historical_0051" +ts_daily_subsection = "atm_daily_180x360_aave" ts_num_years = 2 +ts_subsection = "atm_monthly_180x360_aave" walltime = "5:00:00" years = "1987:1989:2" +# Reference paths +# Used for mvo and mvm, if ts_num_years is set +obs_ts = "/lcrc/group/e3sm/diagnostics/observations/Atm/time-series/" +# mvo & mvm tc_analysis only +tc_obs = "/lcrc/group/e3sm/diagnostics/observations/Atm/tc-analysis/" [[ atm_monthly_180x360_aave ]] - climo_diurnal_frequency = "diurnal_8xdaily" - climo_diurnal_subsection = "atm_monthly_diurnal_8xdaily_180x360_aave" - climo_subsection = "atm_monthly_180x360_aave" + # Reference paths + reference_data_path = "/lcrc/group/e3sm/diagnostics/observations/Atm/climatology/" + # mvo diurnal_cycle only + # NOTE: This is NOT the guess zppy would have made! 
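+    # (The guess would have been the same
+    # <diagnostics_base_path>/observations/Atm/climatology/ path used for
+    # reference_data_path above; that default is asserted in
+    # tests/test_zppy_e3sm_diags.py.)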
dc_obs_climo = '/lcrc/group/e3sm/public_html/e3sm_diags_test_data/unit_test_complete_run/obs/climatology' - sets = "lat_lon","enso_diags","diurnal_cycle","streamflow","tc_analysis","tropical_subseasonal", + # mvo streamflow only + streamflow_obs_ts = "/lcrc/group/e3sm/diagnostics/observations/Atm/time-series/" [[ atm_monthly_180x360_aave_mvm ]] # Test model-vs-model using the same files as the reference - climo_subsection = "atm_monthly_180x360_aave" diff_title = "Difference" partition = "compute" qos = "regular" ref_name = "v3.LR.historical_0051" - reference_data_path = "/lcrc/group/e3sm/ac.forsyth2/zppy_weekly_comprehensive_v3_output/unique_id/v3.LR.historical_0051/post/atm/180x360_aave/clim" run_type = "model_vs_model" - sets = "lat_lon", short_ref_name = "same simulation" swap_test_ref = False tag = "model_vs_model" ts_num_years_ref = 2 - ts_subsection = "atm_monthly_180x360_aave" + # Reference paths + reference_data_path = "/lcrc/group/e3sm/ac.forsyth2/zppy_weekly_comprehensive_v3_output/test-pr628-20241011v12/v3.LR.historical_0051/post/atm/180x360_aave/clim" + # mvm streamflow only + gauges_path = "/lcrc/group/e3sm/diagnostics/observations/Atm/time-series/GSIM/GSIM_catchment_characteristics_all_1km2.csv" + reference_data_path_ts_rof = "/lcrc/group/e3sm/ac.forsyth2/zppy_weekly_comprehensive_v3_output/test-pr628-20241011v12/v3.LR.historical_0051/post/rof/native/ts/monthly" + # mvm diurnal_cycle only + reference_data_path_climo_diurnal = "/lcrc/group/e3sm/ac.forsyth2/zppy_weekly_comprehensive_v3_output/test-pr628-20241011v12/v3.LR.historical_0051/post/atm/180x360_aave/clim_diurnal_8xdaily" + # mvm "enso_diags", "qbo", "area_mean_time_series" + reference_data_path_ts = "/lcrc/group/e3sm/ac.forsyth2/zppy_weekly_comprehensive_v3_output/test-pr628-20241011v12/v3.LR.historical_0051/post/atm/180x360_aave/ts/monthly" + # mvm tropical_subseasonal only + reference_data_path_ts_daily = "/lcrc/group/e3sm/ac.forsyth2/zppy_weekly_comprehensive_v3_output/test-pr628-20241011v12/v3.LR.historical_0051/post/atm/180x360_aave/ts/daily" [[ lnd_monthly_mvm_lnd ]] # Test model-vs-model using the same files as the reference @@ -130,13 +169,15 @@ years = "1987:1989:2" partition = "compute" qos = "regular" ref_name = "v3.LR.historical_0051" - reference_data_path = "/lcrc/group/e3sm/ac.forsyth2/zppy_weekly_comprehensive_v3_output/unique_id/v3.LR.historical_0051/post/lnd/180x360_aave/clim" run_type = "model_vs_model" sets = "lat_lon_land", short_ref_name = "same simulation" swap_test_ref = False tag = "model_vs_model" ts_num_years_ref = 2 + # Reference paths + reference_data_path = "/lcrc/group/e3sm/ac.forsyth2/zppy_weekly_comprehensive_v3_output/test-pr628-20241011v12/v3.LR.historical_0051/post/lnd/180x360_aave/clim" + [mpas_analysis] active = True @@ -165,9 +206,12 @@ years = "1985-1995", [ilamb] active = True +ilamb_obs = "/lcrc/group/e3sm/diagnostics/ilamb_data" nodes = 8 partition = "compute" short_name = "v3.LR.historical_0051" +ts_atm_subsection = "atm_monthly_180x360_aave" +ts_land_subsection = "land_monthly" ts_num_years = 2 walltime = "2:00:00" years = "1985:1989:4" diff --git a/tests/integration/generated/update_weekly_expected_files_chrysalis.sh b/tests/integration/generated/update_weekly_expected_files_chrysalis.sh old mode 100644 new mode 100755 index a2967214..df94aa49 --- a/tests/integration/generated/update_weekly_expected_files_chrysalis.sh +++ b/tests/integration/generated/update_weekly_expected_files_chrysalis.sh @@ -9,13 +9,13 @@ do # Your output will now become the new expectation. 
# Copy output so you don't have to rerun zppy to generate the output. if [[ "${test_name,,}" == "comprehensive_v2" ]]; then - cp -r /lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_weekly_${test_name}_www/unique_id/v2.LR.historical_0201 /lcrc/group/e3sm/public_html/zppy_test_resources/expected_${test_name} + cp -r /lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_weekly_${test_name}_www/test-pr628-20241011v12/v2.LR.historical_0201 /lcrc/group/e3sm/public_html/zppy_test_resources/expected_${test_name} else - cp -r /lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_weekly_${test_name}_www/unique_id/v3.LR.historical_0051 /lcrc/group/e3sm/public_html/zppy_test_resources/expected_${test_name} + cp -r /lcrc/group/e3sm/public_html/diagnostic_output/ac.forsyth2/zppy_weekly_${test_name}_www/test-pr628-20241011v12/v3.LR.historical_0051 /lcrc/group/e3sm/public_html/zppy_test_resources/expected_${test_name} fi if [[ "${test_name,,}" == "bundles" ]]; then mkdir -p /lcrc/group/e3sm/public_html/zppy_test_resources/expected_bundles/bundle_files - cp -r /lcrc/group/e3sm/ac.forsyth2/zppy_weekly_bundles_output/unique_id/v3.LR.historical_0051/post/scripts/bundle*.bash /lcrc/group/e3sm/public_html/zppy_test_resources/expected_bundles/bundle_files + cp -r /lcrc/group/e3sm/ac.forsyth2/zppy_weekly_bundles_output/test-pr628-20241011v12/v3.LR.historical_0051/post/scripts/bundle*.bash /lcrc/group/e3sm/public_html/zppy_test_resources/expected_bundles/bundle_files fi zppy_top_level=$(pwd) cd /lcrc/group/e3sm/public_html/zppy_test_resources/expected_${test_name} diff --git a/tests/integration/template_weekly_bundles.cfg b/tests/integration/template_weekly_bundles.cfg index 3c8a0d64..2ade2706 100644 --- a/tests/integration/template_weekly_bundles.cfg +++ b/tests/integration/template_weekly_bundles.cfg @@ -95,11 +95,12 @@ years = "1985:1989:2", mapping_file = "" vars = "RIVER_DISCHARGE_OVER_LAND_LIQ" -[tc_analysis] -active = True -bundle = "bundle3" # Let bundle1 finish first because "e3sm_diags: atm_monthly_180x360_aave_mvm" requires "ts: atm_monthly_180x360_aave" -scratch = "#expand scratch#zppy_weekly_bundles_scratch/#expand unique_id#/#expand case_name#" -years = "1985:1989:2", +# TODO: Add "tc_analysis" back in after empty dat is resolved. +# [tc_analysis] +# active = True +# bundle = "bundle3" # Let bundle1 finish first because "e3sm_diags: atm_monthly_180x360_aave_mvm" requires "ts: atm_monthly_180x360_aave" +# scratch = "#expand scratch#zppy_weekly_bundles_scratch/#expand unique_id#/#expand case_name#" +# years = "1985:1989:2", [e3sm_diags] active = True @@ -128,7 +129,8 @@ years = "1985:1989:2", ref_years = "1985-1986", reference_data_path = "#expand user_output#zppy_weekly_bundles_output/#expand unique_id#/#expand case_name#/post/atm/180x360_aave/clim" run_type = "model_vs_model" - sets = "polar","enso_diags","streamflow","tc_analysis", + # TODO: Add "tc_analysis" back in after empty dat is resolved. 
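+    # (The e3sm_diags "tc_analysis" set depends on the [tc_analysis] task's
+    # output, so it is dropped from "sets" below while that task is disabled.)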
+ sets = "polar","enso_diags","streamflow", short_ref_name = "#expand case_name#" swap_test_ref = False tag = "model_vs_model" diff --git a/tests/integration/template_weekly_comprehensive_v2.cfg b/tests/integration/template_weekly_comprehensive_v2.cfg index 93cde966..3d4bd746 100644 --- a/tests/integration/template_weekly_comprehensive_v2.cfg +++ b/tests/integration/template_weekly_comprehensive_v2.cfg @@ -3,6 +3,7 @@ case = "#expand case_name_v2#" constraint = "#expand constraint#" dry_run = "#expand dry_run#" environment_commands = "#expand environment_commands#" +fail_on_dependency_skip = True input = #expand user_input#/E3SMv2/#expand case_name_v2# input_subdir = archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" @@ -10,7 +11,7 @@ output = "#expand user_output#zppy_weekly_comprehensive_v2_output/#expand unique partition = "#expand partition_short#" qos = "#expand qos_short#" www = "#expand user_www#zppy_weekly_comprehensive_v2_www/#expand unique_id#" -years = "1850:1854:2", +years = "1980:1984:2", [climo] active = True @@ -45,6 +46,12 @@ walltime = "00:30:00" input_subdir = "archive/atm/hist" ts_fmt = "cmip" + [[ atm_daily_180x360_aave ]] + frequency = "daily" + input_files = "eam.h1" + input_subdir = "archive/atm/hist" + vars = "PRECT" + [[ rof_monthly ]] extra_vars = 'areatotal2' frequency = "monthly" @@ -59,7 +66,7 @@ walltime = "00:30:00" input_files = "eam.h0" input_subdir = "archive/atm/hist" mapping_file = "glb" - years = "1850:1860:5", + years = "1980:1990:5", [[ lnd_monthly_glb ]] frequency = "monthly" @@ -67,7 +74,7 @@ walltime = "00:30:00" input_subdir = "archive/lnd/hist" mapping_file = "glb" vars = "LAISHA,LAISUN" - years = "1850:1860:5", + years = "1980:1990:5", [[ land_monthly ]] extra_vars = "landfrac" @@ -84,26 +91,34 @@ walltime = "00:30:00" [e3sm_diags] active = True +climo_diurnal_frequency = "diurnal_8xdaily" +climo_diurnal_subsection = "atm_monthly_diurnal_8xdaily_180x360_aave" environment_commands = "#expand diags_environment_commands#" grid = '180x360_aave' multiprocessing = True num_workers = 8 partition = "#expand partition_long#" qos = "#expand qos_long#" -ref_final_yr = 1851 -ref_start_yr = 1850 -ref_years = "1850-1851", +ref_end_yr = 1981 +ref_final_yr = 1981 +ref_start_yr = 1980 +ref_years = "1980-1981", +# Include all sets +# min_case_e3sm_diags_depend_on_climo: "lat_lon","zonal_mean_xy","zonal_mean_2d","polar","cosp_histogram","meridional_mean_2d","annual_cycle_zonal_mean","zonal_mean_2d_stratosphere", +# min_case_e3sm_diags_depend_on_ts: "enso_diags","qbo", +# min_case_e3sm_diags_diurnal_cycle: "diurnal_cycle", +# min_case_e3sm_diags_streamflow: "streamflow", +# min_case_e3sm_diags_tc_analysis: "tc_analysis", +# min_case_e3sm_diags_tropical_subseasonal: "tropical_subseasonal", +sets = "lat_lon","zonal_mean_xy","zonal_mean_2d","polar","cosp_histogram","meridional_mean_2d","annual_cycle_zonal_mean","zonal_mean_2d_stratosphere","enso_diags","qbo","diurnal_cycle","streamflow","tc_analysis","tropical_subseasonal", short_name = "#expand case_name_v2#" ts_num_years = 2 walltime = "#expand diags_walltime#" -years = "1852:1854:2", +years = "1982:1984:2", [[ atm_monthly_180x360_aave ]] - climo_diurnal_frequency = "diurnal_8xdaily" - climo_diurnal_subsection = "atm_monthly_diurnal_8xdaily_180x360_aave" climo_subsection = "atm_monthly_180x360_aave" dc_obs_climo = '/lcrc/group/e3sm/public_html/e3sm_diags_test_data/unit_test_complete_run/obs/climatology' - sets = 
"lat_lon","enso_diags","diurnal_cycle","streamflow","tc_analysis","tc_analysis", [[ atm_monthly_180x360_aave_mvm ]] # Test model-vs-model using the same files as the reference @@ -114,12 +129,13 @@ years = "1852:1854:2", ref_name = "#expand case_name_v2#" reference_data_path = "#expand user_output#zppy_weekly_comprehensive_v2_output/#expand unique_id#/#expand case_name_v2#/post/atm/180x360_aave/clim" run_type = "model_vs_model" - sets = "lat_lon", short_ref_name = "same simulation" swap_test_ref = False tag = "model_vs_model" + ts_daily_subsection = "atm_monthly_180x360_aave" ts_num_years_ref = 2 ts_subsection = "atm_monthly_180x360_aave" + years = "1980:1982:2", [[ lnd_monthly_mvm_lnd ]] # Test model-vs-model using the same files as the reference @@ -138,28 +154,28 @@ years = "1852:1854:2", [mpas_analysis] active = True -anomalyRefYear = 1850 -climo_years ="1850-1854", "1855-1860", -enso_years = "1850-1854", "1855-1860", +anomalyRefYear = 1980 +climo_years ="1980-1984", "1985-1990", +enso_years = "1980-1984", "1985-1990", mesh = "EC30to60E2r2" parallelTaskCount = 6 partition = "#expand partition_long#" qos = "#expand qos_long#" shortTermArchive = True -ts_years = "1850-1854", "1850-1860", +ts_years = "1980-1984", "1980-1990", walltime = "#expand mpas_analysis_walltime#" [global_time_series] active = True -climo_years ="1850-1854", "1855-1860", +climo_years ="1980-1984", "1985-1990", experiment_name = "#expand case_name_v2#" figstr = "#expand case_name_v2#" -moc_file=mocTimeSeries_1850-1860.nc +moc_file=mocTimeSeries_1980-1990.nc plots_lnd = "LAISHA,LAISUN" ts_num_years = 5 -ts_years = "1850-1854", "1850-1860", +ts_years = "1980-1984", "1980-1990", walltime = "00:30:00" -years = "1850-1860", +years = "1980-1990", [ilamb] active = True @@ -168,4 +184,4 @@ partition = "#expand partition_long#" short_name = "#expand case_name_v2#" ts_num_years = 2 walltime = "2:00:00" -years = "1850:1854:2", +years = "1980:1984:2", diff --git a/tests/integration/template_weekly_comprehensive_v3.cfg b/tests/integration/template_weekly_comprehensive_v3.cfg index ef2fbb23..4eb511f4 100644 --- a/tests/integration/template_weekly_comprehensive_v3.cfg +++ b/tests/integration/template_weekly_comprehensive_v3.cfg @@ -3,6 +3,9 @@ case = "#expand case_name#" constraint = "#expand constraint#" dry_run = "#expand dry_run#" environment_commands = "#expand environment_commands#" +fail_on_dependency_skip = True +guess_path_parameters = False +guess_section_parameters = False input = /lcrc/group/e3sm2/ac.wlin/E3SMv3/#expand case_name# input_subdir = archive/atm/hist mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc" @@ -46,6 +49,12 @@ walltime = "00:30:00" input_subdir = "archive/atm/hist" ts_fmt = "cmip" + [[ atm_daily_180x360_aave ]] + frequency = "daily" + input_files = "eam.h1" + input_subdir = "archive/atm/hist" + vars = "PRECT" + [[ rof_monthly ]] extra_vars = 'areatotal2' frequency = "monthly" @@ -79,49 +88,79 @@ walltime = "00:30:00" ts_fmt = "cmip" vars = "FSH,RH2M,LAISHA,LAISUN,QINTR,QOVER,QRUNOFF,QSOIL,QVEGE,QVEGT,SOILICE,SOILLIQ,SOILWATER_10CM,TSA,TSOI,H2OSNO,TOTLITC,CWDC,SOIL1C,SOIL2C,SOIL3C,SOIL4C,WOOD_HARVESTC,TOTVEGC,NBP,GPP,AR,HR" -[tc_analysis] -active = True -scratch = "#expand scratch#zppy_weekly_comprehensive_v3_scratch/#expand unique_id#/#expand case_name#" -walltime = "00:30:00" +# TODO: Add "tc_analysis" back in after empty dat is resolved. 
+# [tc_analysis] +# active = True +# scratch = "#expand scratch#zppy_weekly_comprehensive_v3_scratch/#expand unique_id#/#expand case_name#" +# walltime = "00:30:00" [e3sm_diags] active = True +climo_diurnal_frequency = "diurnal_8xdaily" +climo_diurnal_subsection = "atm_monthly_diurnal_8xdaily_180x360_aave" +climo_subsection = "atm_monthly_180x360_aave" environment_commands = "#expand diags_environment_commands#" grid = '180x360_aave' multiprocessing = True num_workers = 8 partition = "#expand partition_long#" qos = "#expand qos_long#" +ref_end_yr = 1986 ref_final_yr = 1986 ref_start_yr = 1985 ref_years = "1985-1986", +# Include all sets +# min_case_e3sm_diags_depend_on_climo: "lat_lon","zonal_mean_xy","zonal_mean_2d","polar","cosp_histogram","meridional_mean_2d","annual_cycle_zonal_mean","zonal_mean_2d_stratosphere", +# min_case_e3sm_diags_depend_on_ts: "enso_diags","qbo", +# min_case_e3sm_diags_diurnal_cycle: "diurnal_cycle", +# min_case_e3sm_diags_streamflow: "streamflow", +# min_case_e3sm_diags_tc_analysis: "tc_analysis", +# min_case_e3sm_diags_tropical_subseasonal: "tropical_subseasonal", +# TODO: Add "tc_analysis" back in after empty dat is resolved. +sets = "lat_lon","zonal_mean_xy","zonal_mean_2d","polar","cosp_histogram","meridional_mean_2d","annual_cycle_zonal_mean","zonal_mean_2d_stratosphere","enso_diags","qbo","diurnal_cycle","streamflow","tropical_subseasonal", short_name = "#expand case_name#" +ts_daily_subsection = "atm_daily_180x360_aave" ts_num_years = 2 +ts_subsection = "atm_monthly_180x360_aave" walltime = "#expand diags_walltime#" years = "1987:1989:2" +# Reference paths +# Used for mvo and mvm, if ts_num_years is set +obs_ts = "#expand diagnostics_base_path#/observations/Atm/time-series/" +# mvo & mvm tc_analysis only +tc_obs = "#expand diagnostics_base_path#/observations/Atm/tc-analysis/" [[ atm_monthly_180x360_aave ]] - climo_diurnal_frequency = "diurnal_8xdaily" - climo_diurnal_subsection = "atm_monthly_diurnal_8xdaily_180x360_aave" - climo_subsection = "atm_monthly_180x360_aave" + # Reference paths + reference_data_path = "#expand diagnostics_base_path#/observations/Atm/climatology/" + # mvo diurnal_cycle only + # NOTE: This is NOT the guess zppy would have made! 
dc_obs_climo = '/lcrc/group/e3sm/public_html/e3sm_diags_test_data/unit_test_complete_run/obs/climatology' - sets = "lat_lon","enso_diags","diurnal_cycle","streamflow","tc_analysis","tropical_subseasonal", + # mvo streamflow only + streamflow_obs_ts = "#expand diagnostics_base_path#/observations/Atm/time-series/" [[ atm_monthly_180x360_aave_mvm ]] # Test model-vs-model using the same files as the reference - climo_subsection = "atm_monthly_180x360_aave" diff_title = "Difference" partition = "#expand partition_long#" qos = "#expand qos_long#" ref_name = "#expand case_name#" - reference_data_path = "#expand user_output#zppy_weekly_comprehensive_v3_output/#expand unique_id#/#expand case_name#/post/atm/180x360_aave/clim" run_type = "model_vs_model" - sets = "lat_lon", short_ref_name = "same simulation" swap_test_ref = False tag = "model_vs_model" ts_num_years_ref = 2 - ts_subsection = "atm_monthly_180x360_aave" + # Reference paths + reference_data_path = "#expand user_output#zppy_weekly_comprehensive_v3_output/#expand unique_id#/#expand case_name#/post/atm/180x360_aave/clim" + # mvm streamflow only + gauges_path = "#expand diagnostics_base_path#/observations/Atm/time-series/GSIM/GSIM_catchment_characteristics_all_1km2.csv" + reference_data_path_ts_rof = "#expand user_output#zppy_weekly_comprehensive_v3_output/#expand unique_id#/#expand case_name#/post/rof/native/ts/monthly" + # mvm diurnal_cycle only + reference_data_path_climo_diurnal = "#expand user_output#zppy_weekly_comprehensive_v3_output/#expand unique_id#/#expand case_name#/post/atm/180x360_aave/clim_diurnal_8xdaily" + # mvm "enso_diags", "qbo", "area_mean_time_series" + reference_data_path_ts = "#expand user_output#zppy_weekly_comprehensive_v3_output/#expand unique_id#/#expand case_name#/post/atm/180x360_aave/ts/monthly" + # mvm tropical_subseasonal only + reference_data_path_ts_daily = "#expand user_output#zppy_weekly_comprehensive_v3_output/#expand unique_id#/#expand case_name#/post/atm/180x360_aave/ts/daily" [[ lnd_monthly_mvm_lnd ]] # Test model-vs-model using the same files as the reference @@ -130,13 +169,15 @@ years = "1987:1989:2" partition = "#expand partition_long#" qos = "#expand qos_long#" ref_name = "#expand case_name#" - reference_data_path = "#expand user_output#zppy_weekly_comprehensive_v3_output/#expand unique_id#/#expand case_name#/post/lnd/180x360_aave/clim" run_type = "model_vs_model" sets = "lat_lon_land", short_ref_name = "same simulation" swap_test_ref = False tag = "model_vs_model" ts_num_years_ref = 2 + # Reference paths + reference_data_path = "#expand user_output#zppy_weekly_comprehensive_v3_output/#expand unique_id#/#expand case_name#/post/lnd/180x360_aave/clim" + [mpas_analysis] active = True @@ -165,9 +206,12 @@ years = "1985-1995", [ilamb] active = True +ilamb_obs = "#expand diagnostics_base_path#/ilamb_data" nodes = 8 partition = "#expand partition_long#" short_name = "#expand case_name#" +ts_atm_subsection = "atm_monthly_180x360_aave" +ts_land_subsection = "land_monthly" ts_num_years = 2 walltime = "2:00:00" years = "1985:1989:4" diff --git a/tests/integration/test_weekly.py b/tests/integration/test_weekly.py index e3929bae..d919cafc 100644 --- a/tests/integration/test_weekly.py +++ b/tests/integration/test_weekly.py @@ -68,8 +68,8 @@ def test_bundles_bash_file_list(self): "e3sm_diags_atm_monthly_180x360_aave_mvm_model_vs_model_1987-1988_vs_1985-1986.bash", "global_time_series_1985-1995.bash", "ilamb_1985-1986.bash", - "tc_analysis_1985-1986.bash", - "tc_analysis_1987-1988.bash", + # 
"tc_analysis_1985-1986.bash", + # "tc_analysis_1987-1988.bash", "ts_atm_monthly_180x360_aave_1985-1986-0002.bash", "ts_atm_monthly_180x360_aave_1987-1988-0002.bash", "ts_atm_monthly_glb_1985-1989-0005.bash", diff --git a/tests/integration/utils.py b/tests/integration/utils.py index 2c6d6bdf..484e9fb0 100644 --- a/tests/integration/utils.py +++ b/tests/integration/utils.py @@ -7,7 +7,7 @@ from mache import MachineInfo from PIL import Image, ImageChops, ImageDraw -UNIQUE_ID = "unique_id" +UNIQUE_ID = "test-pr628-20241011v12" # Image checking ########################################################## @@ -62,21 +62,20 @@ def compare_images( mismatched_images.append(image_name) - simple_image_name = image_name.split("/")[-1].split(".")[0] shutil.copy( path_to_actual_png, - os.path.join(diff_dir, "{}_actual.png".format(simple_image_name)), + os.path.join(diff_dir, "{}_actual.png".format(image_name)), ) shutil.copy( path_to_expected_png, - os.path.join(diff_dir, "{}_expected.png".format(simple_image_name)), + os.path.join(diff_dir, "{}_expected.png".format(image_name)), ) # https://stackoverflow.com/questions/41405632/draw-a-rectangle-and-a-text-in-it-using-pil draw = ImageDraw.Draw(diff) (left, upper, right, lower) = diff.getbbox() draw.rectangle(((left, upper), (right, lower)), outline="red") diff.save( - os.path.join(diff_dir, "{}_diff.png".format(simple_image_name)), + os.path.join(diff_dir, "{}_diff.png".format(image_name)), "PNG", ) @@ -139,7 +138,7 @@ def get_chyrsalis_expansions(config): "constraint": "", # To run this test, replace conda environment with your e3sm_diags dev environment # To use default environment_commands, set to "" - "diags_environment_commands": "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_20240731", + "diags_environment_commands": "source /home/ac.forsyth2/miniconda3/etc/profile.d/conda.sh; conda activate e3sm_diags_1003", "diags_walltime": "5:00:00", "e3sm_to_cmip_environment_commands": "", "environment_commands_test": "", @@ -228,6 +227,7 @@ def get_expansions(): expansions = get_perlmutter_expansions(config) else: raise ValueError(f"Unsupported machine={machine}") + expansions["diagnostics_base_path"] = config.get("diagnostics", "base_path") expansions["machine"] = machine expansions["unique_id"] = UNIQUE_ID return expansions @@ -345,4 +345,4 @@ def generate_cfgs(unified_testing=False, dry_run=False): if __name__ == "__main__": - generate_cfgs(unified_testing=False) + generate_cfgs(unified_testing=False, dry_run=False) diff --git a/tests/test_sections.py b/tests/test_sections.py index 5bde86b6..76266b66 100644 --- a/tests/test_sections.py +++ b/tests/test_sections.py @@ -5,7 +5,7 @@ from configobj import ConfigObj, Section from validate import Validator -from zppy.utils import getTasks +from zppy.utils import get_tasks def compare(tester, actual, expected): @@ -73,8 +73,11 @@ def test_sections(self): "dry_run": False, "e3sm_to_cmip_environment_commands": "", "environment_commands": "", + "fail_on_dependency_skip": False, "frequency": "monthly", "grid": "", + "guess_section_parameters": True, + "guess_path_parameters": True, "input": "INPUT", "input_files": "eam.h0", "input_subdir": "INPUT_SUBDIR", @@ -112,7 +115,7 @@ def test_sections(self): "years": ["0001:0020:5"], } compare(self, actual_section, expected_section) - actual_tasks = getTasks(config, section_name) + actual_tasks = get_tasks(config, section_name) self.assertEqual(len(actual_tasks), 1) actual_task = actual_tasks[0] expected_task = { @@ -130,8 +133,11 @@ def 
test_sections(self): "e3sm_to_cmip_environment_commands": "", "environment_commands": "", "extra_vars": "", + "fail_on_dependency_skip": False, "frequency": "monthly", "grid": "", + "guess_section_parameters": True, + "guess_path_parameters": True, "input": "INPUT", "input_component": "", "input_files": "eam.h0", @@ -170,7 +176,7 @@ def test_sections(self): "years": ["0001:0050:50"], } compare(self, actual_section, expected_section) - actual_tasks = getTasks(config, section_name) + actual_tasks = get_tasks(config, section_name) compare(self, len(actual_tasks), 1) actual_task = actual_tasks[0] expected_task = { @@ -185,8 +191,11 @@ def test_sections(self): "e3sm_to_cmip_environment_commands": "", "environment_commands": "", "exclude": False, + "fail_on_dependency_skip": False, "frequency": "monthly", "grid": "", + "guess_section_parameters": True, + "guess_path_parameters": True, "input": "INPUT", "input_component": "", "input_files": "eam.h0", @@ -213,14 +222,14 @@ def test_sections(self): section_name = "tc_analysis" actual_section = config[section_name] self.assertTrue(actual_section["active"] == "False") - actual_tasks = getTasks(config, section_name) + actual_tasks = get_tasks(config, section_name) self.assertEqual(len(actual_tasks), 0) # e3sm_diags: test an excluded task section_name = "e3sm_diags" actual_section = config[section_name] self.assertTrue("active" not in actual_section.keys()) - actual_tasks = getTasks(config, section_name) + actual_tasks = get_tasks(config, section_name) self.assertEqual(len(actual_tasks), 0) def test_subsections(self): @@ -239,8 +248,11 @@ def test_subsections(self): "dry_run": False, "e3sm_to_cmip_environment_commands": "", "environment_commands": "", + "fail_on_dependency_skip": False, "frequency": "monthly", "grid": "", + "guess_section_parameters": True, + "guess_path_parameters": True, "input": "INPUT", "input_files": "eam.h0", "input_subdir": "INPUT_SUBDIR", @@ -298,7 +310,7 @@ def test_subsections(self): "vars": "FSNTOA,FLUT,FSNT,FLNT,FSNS,FLNS,SHFLX,QFLX,PRECC,PRECL,PRECSC,PRECSL,TS,TREFHT", } compare(self, actual_section, expected_section) - actual_tasks = getTasks(config, section_name) + actual_tasks = get_tasks(config, section_name) self.assertEqual(len(actual_tasks), 2) actual_task = actual_tasks[0] expected_task = { @@ -316,8 +328,11 @@ def test_subsections(self): "e3sm_to_cmip_environment_commands": "", "environment_commands": "", "extra_vars": "", + "fail_on_dependency_skip": False, "frequency": "monthly", "grid": "", + "guess_section_parameters": True, + "guess_path_parameters": True, "input": "INPUT", "input_component": "", "input_files": "eam.h0", @@ -357,8 +372,11 @@ def test_subsections(self): "e3sm_to_cmip_environment_commands": "", "environment_commands": "", "extra_vars": "", + "fail_on_dependency_skip": False, "frequency": "monthly", "grid": "", + "guess_section_parameters": True, + "guess_path_parameters": True, "input": "INPUT", "input_component": "", "input_files": "eam.h0", @@ -413,7 +431,7 @@ def test_subsections(self): "years": ["0001:0050:50"], } compare(self, actual_section, expected_section) - actual_tasks = getTasks(config, section_name) + actual_tasks = get_tasks(config, section_name) self.assertEqual(len(actual_tasks), 2) actual_task = actual_tasks[0] expected_task = { @@ -428,8 +446,11 @@ def test_subsections(self): "e3sm_to_cmip_environment_commands": "", "environment_commands": "", "exclude": False, + "fail_on_dependency_skip": False, "frequency": "monthly", "grid": "", + "guess_section_parameters": True, + 
"guess_path_parameters": True, "input": "INPUT", "input_component": "", "input_files": "eam.h0", @@ -464,8 +485,11 @@ def test_subsections(self): "e3sm_to_cmip_environment_commands": "", "environment_commands": "", "exclude": False, + "fail_on_dependency_skip": False, "frequency": "monthly", "grid": "", + "guess_section_parameters": True, + "guess_path_parameters": True, "input": "INPUT", "input_component": "", "input_files": "eam.h0", diff --git a/tests/test_zppy_e3sm_diags.py b/tests/test_zppy_e3sm_diags.py new file mode 100644 index 00000000..53020769 --- /dev/null +++ b/tests/test_zppy_e3sm_diags.py @@ -0,0 +1,573 @@ +import unittest +from typing import Any, Dict, List + +from zppy.e3sm_diags import ( + add_climo_dependencies, + add_ts_dependencies, + check_and_define_parameters, + check_mvm_only_parameters_for_bash, + check_parameters_for_bash, +) +from zppy.utils import ParameterNotProvidedError + + +class TestZppyE3SMDiags(unittest.TestCase): + def test_check_parameters_for_bash(self): + # diurnal_cycle + c = {"sets": ["diurnal_cycle"], "climo_diurnal_frequency": "diurnal_8xdaily"} + check_parameters_for_bash(c) + c = {"sets": ["diurnal_cycle"], "climo_diurnal_frequency": ""} + self.assertRaises(ParameterNotProvidedError, check_parameters_for_bash, c) + + # enso_diags + c = {"sets": ["enso_diags"], "ref_start_yr": "1990"} + check_parameters_for_bash(c) + c = {"sets": ["enso_diags"], "ref_start_yr": ""} + self.assertRaises(ParameterNotProvidedError, check_parameters_for_bash, c) + + # qbo + c = {"sets": ["qbo"], "ref_final_yr": "2000", "ref_start_yr": "1990"} + check_parameters_for_bash(c) + c = {"sets": ["qbo"], "ref_final_yr": "", "ref_start_yr": "1990"} + self.assertRaises(ParameterNotProvidedError, check_parameters_for_bash, c) + c = {"sets": ["qbo"], "ref_final_yr": "2000", "ref_start_yr": ""} + self.assertRaises(ParameterNotProvidedError, check_parameters_for_bash, c) + + # tropical_subseasonal + c = {"sets": ["tropical_subseasonal"], "ref_end_yr": "2000"} + check_parameters_for_bash(c) + c = {"sets": ["tropical_subseasonal"], "ref_end_yr": ""} + self.assertRaises(ParameterNotProvidedError, check_parameters_for_bash, c) + + def test_check_mvm_only_parameters_for_bash(self): + z0 = {"diff_title": "a", "ref_name": "b", "short_ref_name": "c"} + z1 = {"diff_title": "", "ref_name": "b", "short_ref_name": "c"} + z2 = {"diff_title": "a", "ref_name": "", "short_ref_name": "c"} + z3 = {"diff_title": "a", "ref_name": "b", "short_ref_name": ""} + c: Dict[str, Any] = {"sets": []} + c.update(z0) + check_mvm_only_parameters_for_bash(c) + c.update(z1) + self.assertRaises( + ParameterNotProvidedError, check_mvm_only_parameters_for_bash, c + ) + c.update(z2) + self.assertRaises( + ParameterNotProvidedError, check_mvm_only_parameters_for_bash, c + ) + c.update(z3) + self.assertRaises( + ParameterNotProvidedError, check_mvm_only_parameters_for_bash, c + ) + + d0 = { + "ref_final_yr": "2000", + "ref_start_yr": "1990", + "ts_num_years_ref": "2", + "ts_subsection": "sub", + } + d1 = { + "ref_final_yr": "", + "ref_start_yr": "1990", + "ts_num_years_ref": "2", + "ts_subsection": "sub", + } + d2 = { + "ref_final_yr": "2000", + "ref_start_yr": "", + "ts_num_years_ref": "2", + "ts_subsection": "sub", + } + d3 = { + "ref_final_yr": "2000", + "ref_start_yr": "1990", + "ts_num_years_ref": "", + "ts_subsection": "sub", + } + d4 = { + "ref_final_yr": "2000", + "ref_start_yr": "1990", + "ts_num_years_ref": "2", + "ts_subsection": "", + } + + # Load required parameters into all of the dicts above. 
+ d0.update(z0) + d1.update(z0) + d2.update(z0) + d3.update(z0) + d4.update(z0) + + # area_mean_time_series + c = {"sets": ["area_mean_time_series"]} + c.update(d0) + check_mvm_only_parameters_for_bash(c) + c.update(d1) + check_mvm_only_parameters_for_bash(c) # ref_final_yr not needed + c.update(d2) + check_mvm_only_parameters_for_bash(c) # ref_start_yr not needed + c.update(d3) + self.assertRaises( + ParameterNotProvidedError, check_mvm_only_parameters_for_bash, c + ) + c.update(d4) + self.assertRaises( + ParameterNotProvidedError, check_mvm_only_parameters_for_bash, c + ) + + # enso_diags + c = {"sets": ["enso_diags"]} + c.update(d0) + check_mvm_only_parameters_for_bash(c) + c.update(d1) + self.assertRaises( + ParameterNotProvidedError, check_mvm_only_parameters_for_bash, c + ) + c.update(d2) + check_mvm_only_parameters_for_bash(c) # ref_start_yr not needed + c.update(d3) + self.assertRaises( + ParameterNotProvidedError, check_mvm_only_parameters_for_bash, c + ) + c.update(d4) + self.assertRaises( + ParameterNotProvidedError, check_mvm_only_parameters_for_bash, c + ) + + # qbo + c = {"sets": ["qbo"]} + c.update(d0) + check_mvm_only_parameters_for_bash(c) + c.update(d1) + check_mvm_only_parameters_for_bash(c) # ref_final_yr not needed + c.update(d2) + check_mvm_only_parameters_for_bash(c) # ref_start_yr not needed + c.update(d3) + self.assertRaises( + ParameterNotProvidedError, check_mvm_only_parameters_for_bash, c + ) + c.update(d4) + self.assertRaises( + ParameterNotProvidedError, check_mvm_only_parameters_for_bash, c + ) + + # streamflow + c = {"sets": ["streamflow"]} + c.update(d0) + check_mvm_only_parameters_for_bash(c) + c.update(d1) + self.assertRaises( + ParameterNotProvidedError, check_mvm_only_parameters_for_bash, c + ) + c.update(d2) + self.assertRaises( + ParameterNotProvidedError, check_mvm_only_parameters_for_bash, c + ) + c.update(d3) + self.assertRaises( + ParameterNotProvidedError, check_mvm_only_parameters_for_bash, c + ) + c.update(d4) + self.assertRaises( + ParameterNotProvidedError, check_mvm_only_parameters_for_bash, c + ) + + # tc_analysis + c = {"sets": ["tc_analysis"]} + c.update(d0) + check_mvm_only_parameters_for_bash(c) + c.update(d1) + self.assertRaises( + ParameterNotProvidedError, check_mvm_only_parameters_for_bash, c + ) + c.update(d2) + self.assertRaises( + ParameterNotProvidedError, check_mvm_only_parameters_for_bash, c + ) + c.update(d3) + check_mvm_only_parameters_for_bash(c) # ts_num_years_ref not needed + c.update(d4) + check_mvm_only_parameters_for_bash(c) # ts_subsection not needed + + # tropical_subseasonal + c = {"sets": ["tropical_subseasonal"]} + c.update(d0) + check_mvm_only_parameters_for_bash(c) + c.update(d1) + self.assertRaises( + ParameterNotProvidedError, check_mvm_only_parameters_for_bash, c + ) + c.update(d2) + self.assertRaises( + ParameterNotProvidedError, check_mvm_only_parameters_for_bash, c + ) + c.update(d3) + self.assertRaises( + ParameterNotProvidedError, check_mvm_only_parameters_for_bash, c + ) + c.update(d4) + self.assertRaises( + ParameterNotProvidedError, check_mvm_only_parameters_for_bash, c + ) + + def test_check_and_define_parameters(self): + # test_zppy_utils.py tests the guessing functionality turned off. + # So, we'll only test it turned on here. 
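+        # ("Guessing" means zppy fills in unset parameters from conventional
+        # defaults, e.g. reference_data_path falling back to
+        # <diagnostics_base_path>/observations/Atm/climatology/, as asserted
+        # in the mvm case below.)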
+ guesses = {"guess_path_parameters": True, "guess_section_parameters": True} + prefix_requirements = { + "subsection": "sub", + "tag": "tag", + "year1": 1990, + "year2": 2000, + "ref_year1": 1980, + "ref_year2": 1990, + } + base: Dict[str, Any] = {"diagnostics_base_path": "diags/post"} + base.update(guesses) + base.update(prefix_requirements) + + mvm_base = dict() + mvm_base.update(base) + required_for_mvm = { + "diff_title": "diff_title", + "ref_name": "ref_name", + "short_ref_name": "short_ref_name", + } + mvm_base.update(required_for_mvm) + + # No sets, mvo + c: Dict[str, Any] = { + "sets": [], + "run_type": "model_vs_obs", + "reference_data_path": "a", + } + c.update(base) + check_and_define_parameters(c) + self.assertEqual(c["reference_data_path"], "a") + self.assertEqual(c["prefix"], "e3sm_diags_sub_tag_1990-2000") + + # No sets, mvm + c = {"sets": [], "run_type": "model_vs_model", "reference_data_path": ""} + c.update(mvm_base) + check_and_define_parameters(c) + self.assertEqual( + c["reference_data_path"], "diags/post/observations/Atm/climatology/" + ) + self.assertEqual(c["prefix"], "e3sm_diags_sub_tag_1990-2000_vs_1980-1990") + + # No sets, bad run_type + c = {"sets": [], "run_type": "invalid", "reference_data_path": ""} + c.update(base) + self.assertRaises(ValueError, check_and_define_parameters, c) + + # ts_num_years => obs_ts, mvo + c = { + "sets": [], + "run_type": "model_vs_obs", + "reference_data_path": "", + "ts_num_years": 3, + "obs_ts": "a", + } + c.update(base) + check_and_define_parameters(c) + self.assertEqual(c["obs_ts"], "a") + + c = { + "sets": [], + "run_type": "model_vs_obs", + "reference_data_path": "", + "ts_num_years": 3, + "obs_ts": "", + } + c.update(base) + check_and_define_parameters(c) + self.assertEqual(c["obs_ts"], "diags/post/observations/Atm/time-series/") + + # ts_num_years => obs_ts, mvm + c = { + "sets": [], + "run_type": "model_vs_model", + "reference_data_path": "", + "ts_num_years": 3, + "obs_ts": "a", + } + c.update(mvm_base) + check_and_define_parameters(c) + self.assertEqual(c["obs_ts"], "a") + + c = { + "sets": [], + "run_type": "model_vs_model", + "reference_data_path": "", + "ts_num_years": 3, + "obs_ts": "", + } + c.update(mvm_base) + check_and_define_parameters(c) + self.assertEqual(c["obs_ts"], "diags/post/observations/Atm/time-series/") + + # area_mean_time_series/enso_diags/qbo, mvm + for diags_set in ["area_mean_time_series", "enso_diags", "qbo"]: + c = { + "sets": [diags_set], + "run_type": "model_vs_model", + "reference_data_path": "", + "reference_data_path_ts": "a", + "grid": "grid", + } + c.update(mvm_base) + check_and_define_parameters(c) + self.assertEqual(c["reference_data_path_ts"], "a") + + c = { + "sets": [diags_set], + "run_type": "model_vs_model", + "reference_data_path": "", + "reference_data_path_ts": "", + "grid": "grid", + } + c.update(mvm_base) + check_and_define_parameters(c) + self.assertEqual( + c["reference_data_path_ts"], "diags/post/atm/grid/ts/monthly" + ) + + # diurnal_cycle, mvo + c = { + "sets": ["diurnal_cycle"], + "run_type": "model_vs_obs", + "reference_data_path": "", + "dc_obs_climo": "a", + } + c.update(base) + check_and_define_parameters(c) + self.assertEqual(c["dc_obs_climo"], "a") + + c = { + "sets": ["diurnal_cycle"], + "run_type": "model_vs_obs", + "reference_data_path": "", + "dc_obs_climo": "", + } + c.update(base) + check_and_define_parameters(c) + self.assertEqual(c["dc_obs_climo"], "diags/post/observations/Atm/climatology/") + + # diurnal_cycle, mvm + c = { + "sets": ["diurnal_cycle"], + 
"run_type": "model_vs_model", + "reference_data_path": "", + "reference_data_path_climo_diurnal": "a", + "grid": "grid", + } + c.update(mvm_base) + check_and_define_parameters(c) + self.assertEqual(c["reference_data_path_climo_diurnal"], "a") + + c = { + "sets": ["diurnal_cycle"], + "run_type": "model_vs_model", + "reference_data_path": "", + "reference_data_path_climo_diurnal": "", + "grid": "grid", + } + c.update(mvm_base) + check_and_define_parameters(c) + self.assertEqual( + c["reference_data_path_climo_diurnal"], + "diags/post/atm/grid/clim_diurnal_8xdaily", + ) + + # streamflow, mvo + c = { + "sets": ["streamflow"], + "run_type": "model_vs_obs", + "reference_data_path": "", + "streamflow_obs_ts": "a", + "ts_num_years": 3, + "obs_ts": "", + } + c.update(base) + check_and_define_parameters(c) + self.assertEqual(c["streamflow_obs_ts"], "a") + + c = { + "sets": ["streamflow"], + "run_type": "model_vs_obs", + "reference_data_path": "", + "streamflow_obs_ts": "", + "ts_num_years": 3, + "obs_ts": "", + } + c.update(base) + check_and_define_parameters(c) + self.assertEqual( + c["streamflow_obs_ts"], "diags/post/observations/Atm/time-series/" + ) + + # streamflow, mvm + c = { + "sets": ["streamflow"], + "run_type": "model_vs_model", + "reference_data_path": "", + "reference_data_path_ts_rof": "a", + "gauges_path": "b", + } + c.update(mvm_base) + check_and_define_parameters(c) + self.assertEqual(c["reference_data_path_ts_rof"], "a") + self.assertEqual(c["gauges_path"], "b") + + c = { + "sets": ["streamflow"], + "run_type": "model_vs_model", + "reference_data_path": "", + "reference_data_path_ts_rof": "", + "gauges_path": "", + } + c.update(mvm_base) + check_and_define_parameters(c) + self.assertEqual( + c["reference_data_path_ts_rof"], "diags/post/rof/native/ts/monthly" + ) + self.assertEqual( + c["gauges_path"], + "diags/post/observations/Atm/time-series/GSIM/GSIM_catchment_characteristics_all_1km2.csv", + ) + + # tc_analysis, mvo + c = { + "sets": ["tc_analysis"], + "run_type": "model_vs_obs", + "reference_data_path": "", + "tc_obs": "a", + } + c.update(base) + check_and_define_parameters(c) + self.assertEqual(c["tc_obs"], "a") + + c = { + "sets": ["tc_analysis"], + "run_type": "model_vs_obs", + "reference_data_path": "", + "tc_obs": "", + } + c.update(base) + check_and_define_parameters(c) + self.assertEqual(c["tc_obs"], "diags/post/observations/Atm/tc-analysis/") + + # tc_analysis, mvm + c = { + "sets": ["tc_analysis"], + "run_type": "model_vs_model", + "reference_data_path": "", + "tc_obs": "a", + "reference_data_path_tc": "b", + } + c.update(mvm_base) + check_and_define_parameters(c) + self.assertEqual(c["tc_obs"], "a") + self.assertEqual(c["reference_data_path_tc"], "b") + + c = { + "sets": ["tc_analysis"], + "run_type": "model_vs_model", + "reference_data_path": "", + "tc_obs": "", + "reference_data_path_tc": "", + } + c.update(mvm_base) + check_and_define_parameters(c) + self.assertEqual(c["tc_obs"], "diags/post/observations/Atm/tc-analysis/") + self.assertEqual( + c["reference_data_path_tc"], "diags/post/atm/tc-analysis_1980_1990" + ) + + # tropical_subseasonal, mvm + c = { + "sets": ["tropical_subseasonal"], + "run_type": "model_vs_model", + "reference_data_path": "", + "reference_data_path_ts_daily": "a", + "grid": "grid", + } + c.update(mvm_base) + check_and_define_parameters(c) + self.assertEqual(c["reference_data_path_ts_daily"], "a") + + c = { + "sets": ["tropical_subseasonal"], + "run_type": "model_vs_model", + "reference_data_path": "", + "reference_data_path_ts_daily": "", + 
"grid": "grid", + } + c.update(mvm_base) + check_and_define_parameters(c) + self.assertEqual( + c["reference_data_path_ts_daily"], "diags/post/atm/grid/ts/daily" + ) + + def test_add_climo_dependencies(self): + base: Dict[str, Any] = {"year1": 1980, "year2": 1990} + sets = [ + "lat_lon", + "zonal_mean_xy", + "zonal_mean_2d", + "polar", + "cosp_histogram", + "meridional_mean_2d", + "annual_cycle_zonal_mean", + "zonal_mean_2d_stratosphere", + ] + for diags_set in sets: + c: Dict[str, Any] = {"sets": [diags_set], "climo_subsection": "csub"} + c.update(base) + dependencies: List[str] = [] + add_climo_dependencies(c, dependencies, "script_dir") + self.assertEqual(dependencies, ["script_dir/climo_csub_1980-1990.status"]) + + c = {"sets": ["diurnal_cycle"], "climo_diurnal_subsection": "cdsub"} + c.update(base) + dependencies = [] + add_climo_dependencies(c, dependencies, "script_dir") + self.assertEqual(dependencies, ["script_dir/climo_cdsub_1980-1990.status"]) + + c = {"sets": ["tc_analysis"]} + c.update(base) + dependencies = [] + add_climo_dependencies(c, dependencies, "script_dir") + self.assertEqual(dependencies, ["script_dir/tc_analysis_1980-1990.status"]) + + def test_add_ts_dependencies(self): + base: Dict[str, Any] = { + "ts_num_years": 5, + "ts_subsection": "sub", + "ts_daily_subsection": "dsub", + } + sets = ["area_mean_time_series", "enso_diags", "qbo"] + for diags_set in sets: + c: Dict[str, Any] = {"sets": [diags_set]} + c.update(base) + dependencies: List[str] = [] + add_ts_dependencies(c, dependencies, "script_dir", 1980) + self.assertEqual(dependencies, ["script_dir/ts_sub_1980-1984-0005.status"]) + + c = {"sets": ["streamflow"]} + c.update(base) + dependencies = [] + add_ts_dependencies(c, dependencies, "script_dir", 1980) + self.assertEqual( + dependencies, ["script_dir/ts_rof_monthly_1980-1984-0005.status"] + ) + + c = {"sets": ["tropical_subseasonal"]} + c.update(base) + dependencies = [] + add_ts_dependencies(c, dependencies, "script_dir", 1980) + self.assertEqual(dependencies, ["script_dir/ts_dsub_1980-1984-0005.status"]) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_zppy_global_time_series.py b/tests/test_zppy_global_time_series.py new file mode 100644 index 00000000..572ea893 --- /dev/null +++ b/tests/test_zppy_global_time_series.py @@ -0,0 +1,203 @@ +import unittest +from typing import Any, Dict, List + +from zppy.global_time_series import determine_and_add_dependencies, determine_components + + +class TestZppyGlobalTimeSeries(unittest.TestCase): + def test_determine_components(self): + # Test non-legacy + c: Dict[str, Any] = { + "plot_names": "", + "plots_original": "", + "plots_atm": ["a"], + "plots_ice": "", + "plots_lnd": "", + "plots_ocn": "", + } + determine_components(c) + self.assertEqual(c["use_atm"], True) + self.assertEqual(c["use_ice"], False) + self.assertEqual(c["use_lnd"], False) + self.assertEqual(c["use_ocn"], False) + self.assertEqual(c["plots_atm"], ["a"]) + self.assertEqual(c["plots_ice"], "None") + self.assertEqual(c["plots_lnd"], "None") + self.assertEqual(c["plots_ocn"], "None") + + c = { + "plot_names": "", + "plots_original": "", + "plots_atm": "", + "plots_ice": ["a"], + "plots_lnd": "", + "plots_ocn": "", + } + determine_components(c) + self.assertEqual(c["use_atm"], False) + self.assertEqual(c["use_ice"], True) + self.assertEqual(c["use_lnd"], False) + self.assertEqual(c["use_ocn"], False) + self.assertEqual(c["plots_atm"], "None") + self.assertEqual(c["plots_ice"], ["a"]) + self.assertEqual(c["plots_lnd"], "None") 
+ self.assertEqual(c["plots_ocn"], "None") + + c = { + "plot_names": "", + "plots_original": "", + "plots_atm": "", + "plots_ice": "", + "plots_lnd": ["a"], + "plots_ocn": "", + } + determine_components(c) + self.assertEqual(c["use_atm"], False) + self.assertEqual(c["use_ice"], False) + self.assertEqual(c["use_lnd"], True) + self.assertEqual(c["use_ocn"], False) + self.assertEqual(c["plots_atm"], "None") + self.assertEqual(c["plots_ice"], "None") + self.assertEqual(c["plots_lnd"], ["a"]) + self.assertEqual(c["plots_ocn"], "None") + + c = { + "plot_names": "", + "plots_original": "", + "plots_atm": "", + "plots_ice": "", + "plots_lnd": "", + "plots_ocn": ["a"], + } + determine_components(c) + self.assertEqual(c["use_atm"], False) + self.assertEqual(c["use_ice"], False) + self.assertEqual(c["use_lnd"], False) + self.assertEqual(c["use_ocn"], True) + self.assertEqual(c["plots_atm"], "None") + self.assertEqual(c["plots_ice"], "None") + self.assertEqual(c["plots_lnd"], "None") + self.assertEqual(c["plots_ocn"], ["a"]) + + # Test legacy + base = {"plots_atm": "", "plots_ice": "", "plots_lnd": "", "plots_ocn": ""} + + c = { + "plot_names": ["a"], + "plots_original": "gets_overwritten", + "atmosphere_only": False, + } + c.update(base) + determine_components(c) + self.assertEqual(c["plots_original"], ["a"]) + self.assertEqual(c["use_atm"], True) + self.assertEqual(c["use_ice"], False) + self.assertEqual(c["use_lnd"], False) + self.assertEqual(c["use_ocn"], False) + self.assertEqual(c["plots_atm"], "None") + self.assertEqual(c["plots_ice"], "None") + self.assertEqual(c["plots_lnd"], "None") + self.assertEqual(c["plots_ocn"], "None") + + for ocn_set in ["change_ohc", "max_moc", "change_sea_level"]: + c = { + "plot_names": "", + "plots_original": [ocn_set], + "atmosphere_only": False, + } + c.update(base) + determine_components(c) + self.assertEqual(c["plots_original"], [ocn_set]) + self.assertEqual(c["use_atm"], True) + self.assertEqual(c["use_ice"], False) + self.assertEqual(c["use_lnd"], False) + self.assertEqual(c["use_ocn"], True) + self.assertEqual(c["plots_atm"], "None") + self.assertEqual(c["plots_ice"], "None") + self.assertEqual(c["plots_lnd"], "None") + self.assertEqual(c["plots_ocn"], "None") + + c = {"plot_names": "", "plots_original": ["a"], "atmosphere_only": True} + c.update(base) + determine_components(c) + self.assertEqual(c["plots_original"], ["a"]) + self.assertEqual(c["use_atm"], True) + self.assertEqual(c["use_ice"], False) + self.assertEqual(c["use_lnd"], False) + self.assertEqual(c["use_ocn"], False) + self.assertEqual(c["plots_atm"], "None") + self.assertEqual(c["plots_ice"], "None") + self.assertEqual(c["plots_lnd"], "None") + self.assertEqual(c["plots_ocn"], "None") + + def test_determine_and_add_dependencies(self): + c: Dict[str, Any] = { + "use_atm": True, + "use_lnd": False, + "use_ocn": False, + "year1": 1980, + "year2": 1990, + "ts_num_years": 5, + } + dependencies: List[str] = [] + determine_and_add_dependencies(c, dependencies, "script_dir") + expected = [ + "script_dir/ts_atm_monthly_glb_1980-1984-0005.status", + "script_dir/ts_atm_monthly_glb_1985-1989-0005.status", + ] + self.assertEqual(dependencies, expected) + + c = { + "use_atm": False, + "use_lnd": True, + "use_ocn": False, + "year1": 1980, + "year2": 1990, + "ts_num_years": 5, + } + dependencies = [] + determine_and_add_dependencies(c, dependencies, "script_dir") + expected = [ + "script_dir/ts_lnd_monthly_glb_1980-1984-0005.status", + "script_dir/ts_lnd_monthly_glb_1985-1989-0005.status", + ] + 
self.assertEqual(dependencies, expected) + + c = { + "use_atm": False, + "use_lnd": False, + "use_ocn": True, + "ts_years": "1980:1990:10", + "climo_years": "1980:1990:10", + } + dependencies = [] + determine_and_add_dependencies(c, dependencies, "script_dir") + expected = ["script_dir/mpas_analysis_ts_1980-1989_climo_1980-1989.status"] + self.assertEqual(dependencies, expected) + + c = { + "use_atm": False, + "use_lnd": False, + "use_ocn": True, + "ts_years": "", + "climo_years": "1980:1990:10", + } + dependencies = [] + self.assertRaises( + Exception, determine_and_add_dependencies, c, dependencies, "script_dir" + ) + c = { + "use_atm": False, + "use_lnd": False, + "use_ocn": True, + "ts_years": "1980:1990:10", + "climo_years": "", + } + dependencies = [] + self.assertRaises( + Exception, determine_and_add_dependencies, c, dependencies, "script_dir" + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_zppy_ilamb.py b/tests/test_zppy_ilamb.py new file mode 100644 index 00000000..bc0f8f46 --- /dev/null +++ b/tests/test_zppy_ilamb.py @@ -0,0 +1,47 @@ +import unittest +from typing import List + +from zppy.ilamb import determine_and_add_dependencies + + +class TestZppyILAMB(unittest.TestCase): + def test_determine_and_add_dependencies(self): + c = { + "land_only": True, + "ts_land_subsection": "land_monthly", + "year1": 1980, + "year2": 1990, + "ts_num_years": 5, + } + dependencies: List[str] = [] + determine_and_add_dependencies(c, dependencies, "script_dir") + expected = [ + "script_dir/ts_land_monthly_1980-1984-0005.status", + "script_dir/ts_land_monthly_1985-1989-0005.status", + ] + self.assertEqual(dependencies, expected) + + # Have zppy guess the subsection names + c = { + "land_only": False, + "ts_land_subsection": "", + "ts_atm_subsection": "", + "year1": 1980, + "year2": 1990, + "ts_num_years": 5, + "guess_path_parameters": True, + "guess_section_parameters": True, + } + dependencies = [] + determine_and_add_dependencies(c, dependencies, "script_dir") + expected = [ + "script_dir/ts_land_monthly_1980-1984-0005.status", + "script_dir/ts_land_monthly_1985-1989-0005.status", + "script_dir/ts_atm_monthly_180x360_aave_1980-1984-0005.status", + "script_dir/ts_atm_monthly_180x360_aave_1985-1989-0005.status", + ] + self.assertEqual(dependencies, expected) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_zppy_utils.py b/tests/test_zppy_utils.py new file mode 100644 index 00000000..4340d351 --- /dev/null +++ b/tests/test_zppy_utils.py @@ -0,0 +1,497 @@ +import unittest +from typing import List + +from zppy.utils import ( + ParameterGuessType, + ParameterNotProvidedError, + add_dependencies, + check_required_parameters, + define_or_guess, + define_or_guess2, + get_active_status, + get_file_names, + get_guess_type_parameter, + get_url_message, + get_years, + set_component_and_prc_typ, + set_grid, + set_mapping_file, +) + + +class TestZppyUtils(unittest.TestCase): + def test_get_active_status(self): + # Test bool input + task = {"active": True} + self.assertTrue(get_active_status(task)) + task = {"active": False} + self.assertFalse(get_active_status(task)) + + # Test str input + task = {"active": "True"} # type: ignore + self.assertTrue(get_active_status(task)) + task = {"active": "False"} # type: ignore + self.assertFalse(get_active_status(task)) + + # Test bad value + task = {"active": "bad input"} # type: ignore + self.assertRaises(ValueError, get_active_status, task) + + # Test bad type + task = {"active": 5} # type: ignore + 
self.assertRaises(TypeError, get_active_status, task) + + def test_get_guess_type_parameter(self): + actual = get_guess_type_parameter(ParameterGuessType.SECTION_GUESS) + self.assertEqual(actual, "guess_section_parameters") + + actual = get_guess_type_parameter(ParameterGuessType.PATH_GUESS) + self.assertEqual(actual, "guess_path_parameters") + + def test_get_url_message(self): + c = { + "web_portal_base_path": "a", + "web_portal_base_url": "b", + "www": "a/c", + "case": "d", + } + actual = get_url_message(c, "task_name") + self.assertEqual(actual, "URL: b/c/d/task_name") + + c = { + "web_portal_base_path": "a", + "web_portal_base_url": "b", + "www": "c", + "case": "d", + } + actual = get_url_message(c, "task_name") + self.assertEqual(actual, "Could not determine URL from www=c") + + # def test_initialize_template + + # def test_get_tasks + + def test_set_mapping_file(self): + # Test no-change cases + c = {"mapping_file": ""} + set_mapping_file(c) + self.assertEqual(c["mapping_file"], "") + + c = {"mapping_file": "glb"} + set_mapping_file(c) + self.assertEqual(c["mapping_file"], "glb") + + c = {"mapping_file": "dir/file"} + set_mapping_file(c) + self.assertEqual(c["mapping_file"], "dir/file") + + # Now, the function should do something + c = {"mapping_file": "file", "diagnostics_base_path": "base"} + set_mapping_file(c) + self.assertEqual(c["mapping_file"], "base/maps/file") + + def test_set_grid(self): + c = {"grid": "grid"} + set_grid(c) + self.assertEqual(c["grid"], "grid") + + c = {"grid": "", "mapping_file": ""} + set_grid(c) + self.assertEqual(c["grid"], "native") + + c = {"grid": "", "mapping_file": "glb"} + set_grid(c) + self.assertEqual(c["grid"], "glb") + + # TODO: test a realistic mapping file + + def test_set_component_and_prc_typ(self): + # Test without input_files + c = {"input_component": "cam"} + set_component_and_prc_typ(c) + self.assertEqual(c["component"], "atm") + self.assertEqual(c["prc_typ"], "cam") + + c = {"input_component": "eam"} + set_component_and_prc_typ(c) + self.assertEqual(c["component"], "atm") + self.assertEqual(c["prc_typ"], "eam") + + c = {"input_component": "eamxx"} + set_component_and_prc_typ(c) + self.assertEqual(c["component"], "atm") + self.assertEqual(c["prc_typ"], "eamxx") + + c = {"input_component": "cpl"} + set_component_and_prc_typ(c) + self.assertEqual(c["component"], "cpl") + self.assertEqual(c["prc_typ"], "sgs") + + c = {"input_component": "clm2"} + set_component_and_prc_typ(c) + self.assertEqual(c["component"], "lnd") + self.assertEqual(c["prc_typ"], "clm") + + c = {"input_component": "elm"} + set_component_and_prc_typ(c) + self.assertEqual(c["component"], "lnd") + self.assertEqual(c["prc_typ"], "elm") + + c = {"input_component": "mosart"} + set_component_and_prc_typ(c) + self.assertEqual(c["component"], "rof") + self.assertEqual(c["prc_typ"], "sgs") + + # Test with input_files + c = {"input_component": "", "input_files": "cam.extension"} + set_component_and_prc_typ(c) + self.assertEqual(c["component"], "atm") + self.assertEqual(c["prc_typ"], "cam") + + c = {"input_component": "", "input_files": "eam.extension"} + set_component_and_prc_typ(c) + self.assertEqual(c["component"], "atm") + self.assertEqual(c["prc_typ"], "eam") + + c = {"input_component": "", "input_files": "eamxx.extension"} + set_component_and_prc_typ(c) + self.assertEqual(c["component"], "atm") + self.assertEqual(c["prc_typ"], "eamxx") + + c = {"input_component": "", "input_files": "cpl.extension"} + set_component_and_prc_typ(c) + self.assertEqual(c["component"], "cpl") + 
self.assertEqual(c["prc_typ"], "sgs") + + c = {"input_component": "", "input_files": "clm2.extension"} + set_component_and_prc_typ(c) + self.assertEqual(c["component"], "lnd") + self.assertEqual(c["prc_typ"], "clm") + + c = {"input_component": "", "input_files": "elm.extension"} + set_component_and_prc_typ(c) + self.assertEqual(c["component"], "lnd") + self.assertEqual(c["prc_typ"], "elm") + + c = {"input_component": "", "input_files": "mosart.extension"} + set_component_and_prc_typ(c) + self.assertEqual(c["component"], "rof") + self.assertEqual(c["prc_typ"], "sgs") + + # Test error case + c = {"input_component": "", "input_files": ""} + self.assertRaises(ValueError, set_component_and_prc_typ, c) + + def test_check_required_parameters(self): + # Parameter is required + # a, b need parameter p, and we want sets a, b, c + c = {"sets": ["a", "b", "c"], "p": "exists"} + check_required_parameters(c, set(["a", "b"]), "p") + + # Parameter isn't required based on the sets we want + # z needs parameter p, but we only want sets a, b, c + c = {"sets": ["a", "b", "c"], "p": ""} + check_required_parameters(c, set(["z"]), "p") + + # Parameter is required + # a, b need parameter p, and we want sets a, b, c + c = {"sets": ["a", "b", "c"], "p": ""} + self.assertRaises( + ParameterNotProvidedError, + check_required_parameters, + c, + set(["a", "b"]), + "p", + ) + + def test_get_years(self): + self.assertEqual(get_years("1980:1990:05"), [(1980, 1984), (1985, 1989)]) + self.assertEqual(get_years("1980-1990"), [(1980, 1990)]) + + self.assertEqual(get_years(["1980:1990:05"]), [(1980, 1984), (1985, 1989)]) + self.assertEqual(get_years(["1980-1990"]), [(1980, 1990)]) + + self.assertEqual( + get_years(["1980:1990:05", "2000:2010:05"]), + [(1980, 1984), (1985, 1989), (2000, 2004), (2005, 2009)], + ) + self.assertEqual( + get_years(["1980-1990", "2000-2005"]), [(1980, 1990), (2000, 2005)] + ) + + self.assertRaises(ValueError, get_years, "1980") + self.assertRaises(ValueError, get_years, "1980:1990") + self.assertRaises(ValueError, get_years, "1980:1990:05:03") + self.assertRaises(ValueError, get_years, "1980-1990-05") + + self.assertRaises( + ValueError, get_years, ["1983-1993", "1980"] + ) # one year set works + self.assertRaises(ValueError, get_years, ["1980:1990"]) + self.assertRaises(ValueError, get_years, ["1980:1990:05:03"]) + self.assertRaises(ValueError, get_years, ["1980-1990-05"]) + + # This one is in fact a value error, but not one we raised directly + self.assertRaises(ValueError, get_years, "1980-1990:05:03") + + def test_define_or_guess(self): + # First choice is defined + c = { + "first_choice": "a", + "second_choice": "b", + "guess_path_parameters": True, + "guess_section_parameters": True, + } + actual = define_or_guess( + c, "first_choice", "second_choice", ParameterGuessType.PATH_GUESS + ) + self.assertEqual(actual, "a") + actual = define_or_guess( + c, "first_choice", "second_choice", ParameterGuessType.SECTION_GUESS + ) + self.assertEqual(actual, "a") + + c = { + "first_choice": "a", + "second_choice": "b", + "guess_path_parameters": True, + "guess_section_parameters": False, + } + actual = define_or_guess( + c, "first_choice", "second_choice", ParameterGuessType.PATH_GUESS + ) + self.assertEqual(actual, "a") + actual = define_or_guess( + c, "first_choice", "second_choice", ParameterGuessType.SECTION_GUESS + ) + self.assertEqual(actual, "a") + + c = { + "first_choice": "a", + "second_choice": "b", + "guess_path_parameters": False, + "guess_section_parameters": True, + } + actual = 
define_or_guess( + c, "first_choice", "second_choice", ParameterGuessType.PATH_GUESS + ) + self.assertEqual(actual, "a") + actual = define_or_guess( + c, "first_choice", "second_choice", ParameterGuessType.SECTION_GUESS + ) + self.assertEqual(actual, "a") + + # First choice is undefined + c = { + "first_choice": "", + "second_choice": "b", + "guess_path_parameters": True, + "guess_section_parameters": True, + } + actual = define_or_guess( + c, "first_choice", "second_choice", ParameterGuessType.PATH_GUESS + ) + self.assertEqual(actual, "b") + actual = define_or_guess( + c, "first_choice", "second_choice", ParameterGuessType.SECTION_GUESS + ) + self.assertEqual(actual, "b") + + c = { + "first_choice": "", + "second_choice": "b", + "guess_path_parameters": True, + "guess_section_parameters": False, + } + actual = define_or_guess( + c, "first_choice", "second_choice", ParameterGuessType.PATH_GUESS + ) + self.assertEqual(actual, "b") + self.assertRaises( + ParameterNotProvidedError, + define_or_guess, + c, + "first_choice", + "second_choice", + ParameterGuessType.SECTION_GUESS, + ) + + c = { + "first_choice": "", + "second_choice": "b", + "guess_path_parameters": False, + "guess_section_parameters": True, + } + self.assertRaises( + ParameterNotProvidedError, + define_or_guess, + c, + "first_choice", + "second_choice", + ParameterGuessType.PATH_GUESS, + ) + actual = define_or_guess( + c, "first_choice", "second_choice", ParameterGuessType.SECTION_GUESS + ) + self.assertEqual(actual, "b") + + def test_define_or_guess2(self): + # The required parameter has a value + c = { + "required_parameter": "a", + "guess_path_parameters": True, + "guess_section_parameters": True, + } + define_or_guess2( + c, "required_parameter", "backup_option", ParameterGuessType.PATH_GUESS + ) + self.assertEqual(c["required_parameter"], "a") + c = { + "required_parameter": "a", + "guess_path_parameters": True, + "guess_section_parameters": True, + } + define_or_guess2( + c, "required_parameter", "backup_option", ParameterGuessType.SECTION_GUESS + ) + self.assertEqual(c["required_parameter"], "a") + + c = { + "required_parameter": "a", + "guess_path_parameters": True, + "guess_section_parameters": False, + } + define_or_guess2( + c, "required_parameter", "backup_option", ParameterGuessType.PATH_GUESS + ) + self.assertEqual(c["required_parameter"], "a") + c = { + "required_parameter": "a", + "guess_path_parameters": True, + "guess_section_parameters": False, + } + define_or_guess2( + c, "required_parameter", "backup_option", ParameterGuessType.SECTION_GUESS + ) + self.assertEqual(c["required_parameter"], "a") + + c = { + "required_parameter": "a", + "guess_path_parameters": False, + "guess_section_parameters": True, + } + define_or_guess2( + c, "required_parameter", "backup_option", ParameterGuessType.PATH_GUESS + ) + self.assertEqual(c["required_parameter"], "a") + c = { + "required_parameter": "a", + "guess_path_parameters": False, + "guess_section_parameters": True, + } + define_or_guess2( + c, "required_parameter", "backup_option", ParameterGuessType.SECTION_GUESS + ) + self.assertEqual(c["required_parameter"], "a") + + # The required parameter is undefined + c = { + "required_parameter": "", + "guess_path_parameters": True, + "guess_section_parameters": True, + } + define_or_guess2( + c, "required_parameter", "backup_option", ParameterGuessType.PATH_GUESS + ) + self.assertEqual(c["required_parameter"], "backup_option") + c = { + "required_parameter": "", + "guess_path_parameters": True, + "guess_section_parameters": 
True, + } + define_or_guess2( + c, "required_parameter", "backup_option", ParameterGuessType.SECTION_GUESS + ) + self.assertEqual(c["required_parameter"], "backup_option") + + c = { + "required_parameter": "", + "guess_path_parameters": True, + "guess_section_parameters": False, + } + define_or_guess2( + c, "required_parameter", "backup_option", ParameterGuessType.PATH_GUESS + ) + self.assertEqual(c["required_parameter"], "backup_option") + c = { + "required_parameter": "", + "guess_path_parameters": True, + "guess_section_parameters": False, + } + self.assertRaises( + ParameterNotProvidedError, + define_or_guess2, + c, + "required_parameter", + "backup_option", + ParameterGuessType.SECTION_GUESS, + ) + + c = { + "required_parameter": "", + "guess_path_parameters": False, + "guess_section_parameters": True, + } + self.assertRaises( + ParameterNotProvidedError, + define_or_guess2, + c, + "required_parameter", + "backup_option", + ParameterGuessType.PATH_GUESS, + ) + c = { + "required_parameter": "", + "guess_path_parameters": False, + "guess_section_parameters": True, + } + define_or_guess2( + c, "required_parameter", "backup_option", ParameterGuessType.SECTION_GUESS + ) + self.assertEqual(c["required_parameter"], "backup_option") + + def test_get_file_names(self): + bash, settings, status = get_file_names("script_dir", "prefix") + self.assertEqual(bash, "script_dir/prefix.bash") + self.assertEqual(settings, "script_dir/prefix.settings") + self.assertEqual(status, "script_dir/prefix.status") + + # def test_check_status + + # def test_make_executable + + def test_add_dependencies(self): + dependencies: List[str] = [] + add_dependencies(dependencies, "script_dir", "prefix", "sub", 1980, 1990, 10) + self.assertEqual(dependencies, ["script_dir/prefix_sub_1980-1989-0010.status"]) + + dependencies = [] + add_dependencies(dependencies, "script_dir", "prefix", "sub", 1980, 1990, 2) + expected = [ + "script_dir/prefix_sub_1980-1981-0002.status", + "script_dir/prefix_sub_1982-1983-0002.status", + "script_dir/prefix_sub_1984-1985-0002.status", + "script_dir/prefix_sub_1986-1987-0002.status", + "script_dir/prefix_sub_1988-1989-0002.status", + ] + self.assertEqual(dependencies, expected) + + # def test_write_settings_file + + # def test_submit_script + + # def test_print_url + + +if __name__ == "__main__": + unittest.main() diff --git a/zppy/__main__.py b/zppy/__main__.py index adf8d067..4e871478 100644 --- a/zppy/__main__.py +++ b/zppy/__main__.py @@ -3,7 +3,7 @@ import importlib import io import os -from typing import List +from typing import Any, List, Tuple from configobj import ConfigObj from mache import MachineInfo @@ -17,12 +17,45 @@ from zppy.mpas_analysis import mpas_analysis from zppy.tc_analysis import tc_analysis from zppy.ts import ts -from zppy.utils import checkStatus, submitScript +from zppy.utils import check_status, submit_script -# FIXME: C901 'main' is too complex (19) -def main(): # noqa: C901 +def main(): + args = _get_args() + print( + "For help, please see https://e3sm-project.github.io/zppy. Ask questions at https://github.com/E3SM-Project/zppy/discussions/categories/q-a." 
+ ) + # Subdirectory where templates are located + template_dir: str = os.path.join(os.path.dirname(__file__), "templates") + # Read configuration file and validate it + default_config: str = os.path.join(template_dir, "default.ini") + user_config: ConfigObj = ConfigObj(args.config, configspec=default_config) + user_config, plugins = _handle_plugins(user_config, default_config, args) + config: ConfigObj = _handle_campaigns(user_config, default_config, template_dir) + # Validate + _validate_config(config) + # Add templateDir to config + config["default"]["templateDir"] = template_dir + # Output script directory + output = config["default"]["output"] + username = os.environ.get("USER") + output = output.replace("$USER", username) + script_dir = os.path.join(output, "post/scripts") + job_ids_file = os.path.join(script_dir, "jobids.txt") + try: + os.makedirs(script_dir) + except OSError as exc: + if exc.errno != errno.EEXIST: + raise OSError("Cannot create script directory") + pass + machine_info = _get_machine_info(config) + config = _determine_parameters(machine_info, config) + if args.last_year: + config["default"]["last_year"] = args.last_year + _launch_scripts(config, script_dir, job_ids_file, plugins) + +def _get_args(): # Command line parser parser = argparse.ArgumentParser( description="Launch E3SM post-processing tasks", usage="zppy -c " @@ -34,18 +67,12 @@ def main(): # noqa: C901 "-l", "--last-year", type=int, help="last year to process", required=False ) args = parser.parse_args() + return args - print( - "For help, please see https://e3sm-project.github.io/zppy. Ask questions at https://github.com/E3SM-Project/zppy/discussions/categories/q-a." - ) - - # Subdirectory where templates are located - templateDir = os.path.join(os.path.dirname(__file__), "templates") - - # Read configuration file and validate it - default_config = os.path.join(templateDir, "default.ini") - user_config = ConfigObj(args.config, configspec=default_config) +def _handle_plugins( + user_config: ConfigObj, default_config: str, args +) -> Tuple[ConfigObj, List[Any]]: # Load all external plugins. Build a list. 
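+    # A plugin is assumed to be an importable package that exposes a callable
+    # named after the package itself (invoked later, when tasks are launched)
+    # and that ships its own configuration defaults, which are appended to the
+    # configspec below.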
plugins = [] if "plugins" in user_config["default"].keys(): @@ -55,7 +82,7 @@ def main(): # noqa: C901 plugin_module = importlib.import_module(plugin_name) except BaseException: raise ValueError( - "Could not load external zppy plugin module {}".format(plugin_name) + f"Could not load external zppy plugin module {plugin_name}" ) # Path plugin_path = plugin_module.__path__[0] @@ -63,7 +90,6 @@ def main(): # noqa: C901 plugins.append( {"name": plugin_name, "module": plugin_module, "path": plugin_path} ) - # Read configuration files again, this time including all plugins with open(default_config) as f: default = f.read() @@ -75,44 +101,44 @@ def main(): # noqa: C901 with open(plugin_default_file) as f: default += "\n" + f.read() user_config = ConfigObj(args.config, configspec=io.StringIO(default)) + return user_config, plugins + +def _handle_campaigns( + user_config: ConfigObj, default_config: str, template_dir: str +) -> ConfigObj: # Handle 'campaign' option if "campaign" in user_config["default"]: campaign = user_config["default"]["campaign"] else: campaign = "none" if campaign != "none": - campaign_file = os.path.join(templateDir, "{}.cfg".format(campaign)) + campaign_file = os.path.join(template_dir, f"{campaign}.cfg") if not os.path.exists(campaign_file): - raise ValueError( - "{} does not appear to be a known campaign".format(campaign) - ) + raise ValueError(f"{campaign} does not appear to be a known campaign") campaign_config = ConfigObj(campaign_file, configspec=default_config) # merge such that user_config takes priority over campaign_config campaign_config.merge(user_config) config = campaign_config else: config = user_config + return config - # Validate - _validate_config(config) - # Add templateDir to config - config["default"]["templateDir"] = templateDir +def _validate_config(config): + validator = Validator() + + result = config.validate(validator) + if result is not True: + print("Validation results={}".format(result)) + raise ValueError( + "Configuration file validation failed. Parameters listed as false in the validation results have invalid values." + ) + else: + print("Configuration file validation passed.") - # Output script directory - output = config["default"]["output"] - username = os.environ.get("USER") - output = output.replace("$USER", username) - scriptDir = os.path.join(output, "post/scripts") - job_ids_file = os.path.join(scriptDir, "jobids.txt") - try: - os.makedirs(scriptDir) - except OSError as exc: - if exc.errno != errno.EEXIST: - raise OSError("Cannot create script directory") - pass +def _get_machine_info(config: ConfigObj) -> MachineInfo: if ("machine" not in config["default"]) or (config["default"]["machine"] == ""): if "E3SMU_MACHINE" in os.environ: # Use the machine identified by E3SM-Unified @@ -125,7 +151,10 @@ def main(): # noqa: C901 # If `machine` is set, then MachineInfo can bypass the # `discover_machine()` function. 
machine = config["default"]["machine"] - machine_info = MachineInfo(machine=machine) + return MachineInfo(machine=machine) + + +def _determine_parameters(machine_info: MachineInfo, config: ConfigObj) -> ConfigObj: default_machine = machine_info.machine ( default_account, @@ -177,37 +206,37 @@ def main(): # noqa: C901 config["default"][ "environment_commands" ] = f"source {unified_base}/load_latest_e3sm_unified_{machine}.sh" + return config - if args.last_year: - config["default"]["last_year"] = args.last_year +def _launch_scripts(config: ConfigObj, script_dir, job_ids_file, plugins) -> None: existing_bundles: List[Bundle] = [] # predefined bundles - existing_bundles = predefined_bundles(config, scriptDir, existing_bundles) + existing_bundles = predefined_bundles(config, script_dir, existing_bundles) # climo tasks - existing_bundles = climo(config, scriptDir, existing_bundles, job_ids_file) + existing_bundles = climo(config, script_dir, existing_bundles, job_ids_file) # time series tasks - existing_bundles = ts(config, scriptDir, existing_bundles, job_ids_file) + existing_bundles = ts(config, script_dir, existing_bundles, job_ids_file) # tc_analysis tasks - existing_bundles = tc_analysis(config, scriptDir, existing_bundles, job_ids_file) + existing_bundles = tc_analysis(config, script_dir, existing_bundles, job_ids_file) # e3sm_diags tasks - existing_bundles = e3sm_diags(config, scriptDir, existing_bundles, job_ids_file) + existing_bundles = e3sm_diags(config, script_dir, existing_bundles, job_ids_file) # mpas_analysis tasks - existing_bundles = mpas_analysis(config, scriptDir, existing_bundles, job_ids_file) + existing_bundles = mpas_analysis(config, script_dir, existing_bundles, job_ids_file) # global time series tasks existing_bundles = global_time_series( - config, scriptDir, existing_bundles, job_ids_file + config, script_dir, existing_bundles, job_ids_file ) # ilamb tasks - existing_bundles = ilamb(config, scriptDir, existing_bundles, job_ids_file) + existing_bundles = ilamb(config, script_dir, existing_bundles, job_ids_file) # zppy external plugins for plugin in plugins: @@ -215,34 +244,22 @@ def main(): # noqa: C901 plugin_func = getattr(plugin["module"], plugin["name"]) # Call plugin existing_bundles = plugin_func( - plugin["path"], config, scriptDir, existing_bundles, job_ids_file + plugin["path"], config, script_dir, existing_bundles, job_ids_file ) # Submit bundle jobs for b in existing_bundles: - skip = checkStatus(b.bundle_status) + skip = check_status(b.bundle_status) if skip: continue b.display_dependencies() b.render(config) if not b.dry_run: - submitScript( + submit_script( b.bundle_file, b.bundle_status, b.export, job_ids_file, - dependFiles=b.dependencies_external, + dependFiles=list(b.dependencies_external), + fail_on_dependency_skip=config["default"]["fail_on_dependency_skip"], ) - - -def _validate_config(config): - validator = Validator() - - result = config.validate(validator) - if result is not True: - print("Validation results={}".format(result)) - raise ValueError( - "Configuration file validation failed. Parameters listed as false in the validation results have invalid values." 
- ) - else: - print("Configuration file validation passed.") diff --git a/zppy/bundle.py b/zppy/bundle.py index 5fdde06c..c9c4fa7a 100644 --- a/zppy/bundle.py +++ b/zppy/bundle.py @@ -1,10 +1,10 @@ import os import os.path -from typing import List, Set +from typing import Any, Dict, List, Set -import jinja2 +from configobj import ConfigObj -from zppy.utils import getTasks, makeExecutable +from zppy.utils import get_tasks, initialize_template, make_executable # ----------------------------------------------------------------------------- @@ -36,15 +36,8 @@ def __init__(self, c): self.export: str = "NONE" - def render(self, config): - - # Initialize jinja2 template engine - templateLoader = jinja2.FileSystemLoader( - searchpath=config["default"]["templateDir"] - ) - templateEnv = jinja2.Environment(loader=templateLoader) - template = templateEnv.get_template("bundle.bash") - + def render(self, config) -> None: + template, _ = initialize_template(config, "bundle.bash") # Populate dictionary c = {} c["machine"] = config["default"]["machine"] @@ -64,16 +57,12 @@ def render(self, config): # Create script with open(self.bundle_file, "w") as f: f.write(template.render(**c)) - makeExecutable(self.bundle_file) - - return - - def add_task(self, scriptFile, dependFiles): + make_executable(self.bundle_file) + def add_task(self, script_file, depend_files) -> None: # Add tasks and dependencies - self.tasks.append(scriptFile) - self.dependencies.update(dependFiles) - + self.tasks.append(script_file) + self.dependencies.update(depend_files) # Sort through dependencies to determine in or out of bundle # Remove extensions before performing inclusion test. tasks = [os.path.splitext(t)[0] for t in self.tasks] @@ -85,7 +74,7 @@ def add_task(self, scriptFile, dependFiles): self.dependencies_external.add(dependency) # Useful for debugging - def display_dependencies(self): + def display_dependencies(self) -> None: print(f"Displaying dependencies for {self.bundle_name}") print("dependencies_internal:") if self.dependencies_internal: @@ -106,7 +95,13 @@ def display_dependencies(self): # ----------------------------------------------------------------------------- -def handle_bundles(c, scriptFile, export, dependFiles=[], existing_bundles=[]): +def handle_bundles( + c: Dict[str, Any], + script_file, + export, + dependFiles=[], + existing_bundles: List[Bundle] = [], +) -> List[Bundle]: bundle_name = c["bundle"] if bundle_name == "": return existing_bundles @@ -120,28 +115,26 @@ def handle_bundles(c, scriptFile, export, dependFiles=[], existing_bundles=[]): # So, the bundle does not already exist bundle = Bundle(c) existing_bundles.append(bundle) - bundle.add_task(scriptFile, dependFiles) + bundle.add_task(script_file, dependFiles) if export == "ALL": # If one task requires export="ALL", then the bundle script will need it as well bundle.export = export - return existing_bundles # ----------------------------------------------------------------------------- -def predefined_bundles(config, scriptDir, existing_bundles): - +def predefined_bundles( + config: ConfigObj, script_dir: str, existing_bundles: List[Bundle] +) -> List[Bundle]: # --- List of tasks --- - tasks = getTasks(config, "bundle") + tasks = get_tasks(config, "bundle") if len(tasks) == 0: return existing_bundles - # --- Create new bundles as needed --- for c in tasks: if c["subsection"] is not None: c["bundle"] = c["subsection"] - c["scriptDir"] = scriptDir + c["scriptDir"] = script_dir bundle = Bundle(c) existing_bundles.append(bundle) - return 
existing_bundles diff --git a/zppy/climo.py b/zppy/climo.py index 71d8a7f1..716d4ace 100644 --- a/zppy/climo.py +++ b/zppy/climo.py @@ -1,108 +1,79 @@ -import os -import pprint -import re +from typing import Any, Dict, List, Tuple -import jinja2 +from configobj import ConfigObj from zppy.bundle import handle_bundles from zppy.utils import ( - checkStatus, - getComponent, - getTasks, - getYears, - makeExecutable, - setMappingFile, - submitScript, + ParameterGuessType, + check_status, + define_or_guess, + get_file_names, + get_tasks, + get_years, + initialize_template, + make_executable, + set_component_and_prc_typ, + set_grid, + set_mapping_file, + submit_script, + write_settings_file, ) # ----------------------------------------------------------------------------- -def climo(config, scriptDir, existing_bundles, job_ids_file): +def climo(config: ConfigObj, script_dir: str, existing_bundles, job_ids_file): - # --- Initialize jinja2 template engine --- - templateLoader = jinja2.FileSystemLoader( - searchpath=config["default"]["templateDir"] - ) - templateEnv = jinja2.Environment(loader=templateLoader) - template = templateEnv.get_template("climo.bash") + template, _ = initialize_template(config, "climo.bash") # --- List of climo tasks --- - tasks = getTasks(config, "climo") + tasks: List[Dict[str, Any]] = get_tasks(config, "climo") if len(tasks) == 0: return existing_bundles # --- Generate and submit climo scripts --- for c in tasks: - - setMappingFile(c) - - # Grid name (if not explicitly defined) - # 'native' if no remapping - # or extracted from mapping filename - if c["grid"] == "": - if c["mapping_file"] == "": - c["grid"] = "native" - else: - tmp = os.path.basename(c["mapping_file"]) - # FIXME: W605 invalid escape sequence '\.' - tmp = re.sub("\.[^.]*\.nc$", "", tmp) # noqa: W605 - tmp = tmp.split("_") - if tmp[0] == "map": - c["grid"] = "%s_%s" % (tmp[-2], tmp[-1]) - else: - raise ValueError( - "Cannot extract target grid name from mapping file %s" - % (c["mapping_file"]) - ) - - # Output component (for directory structure) and procedure type for ncclimo - c["component"], c["prc_typ"] = getComponent( - c["input_component"], c["input_files"] - ) - + set_mapping_file(c) + set_grid(c) + set_component_and_prc_typ(c) + year_sets: List[Tuple[int, int]] = get_years(c["years"]) # Loop over year sets - year_sets = getYears(c["years"]) for s in year_sets: - c["yr_start"] = s[0] c["yr_end"] = s[1] if ("last_year" in c.keys()) and (c["yr_end"] > c["last_year"]): continue # Skip this year set - c["scriptDir"] = scriptDir - if c["subsection"]: - sub = c["subsection"] - else: - sub = c["grid"] - prefix = "climo_%s_%04d-%04d" % (sub, c["yr_start"], c["yr_end"]) + c["scriptDir"] = script_dir + sub: str = define_or_guess( + c, "subsection", "grid", ParameterGuessType.SECTION_GUESS + ) + prefix: str = f"climo_{sub}_{c['yr_start']:04d}-{c['yr_end']:04d}" print(prefix) c["prefix"] = prefix - scriptFile = os.path.join(scriptDir, "%s.bash" % (prefix)) - statusFile = os.path.join(scriptDir, "%s.status" % (prefix)) - settingsFile = os.path.join(scriptDir, "%s.settings" % (prefix)) - skip = checkStatus(statusFile) + bash_file, settings_file, status_file = get_file_names(script_dir, prefix) + skip: bool = check_status(status_file) if skip: continue - # Create script - with open(scriptFile, "w") as f: + with open(bash_file, "w") as f: f.write(template.render(**c)) - makeExecutable(scriptFile) - - with open(settingsFile, "w") as sf: - p = pprint.PrettyPrinter(indent=2, stream=sf) - p.pprint(c) - p.pprint(s) - + 
make_executable(bash_file) + write_settings_file(settings_file, c, s) export = "ALL" existing_bundles = handle_bundles( - c, scriptFile, export, existing_bundles=existing_bundles + c, bash_file, export, existing_bundles=existing_bundles ) if not c["dry_run"]: if c["bundle"] == "": # Submit job - submitScript(scriptFile, statusFile, export, job_ids_file) + submit_script( + bash_file, + status_file, + export, + job_ids_file, + fail_on_dependency_skip=c["fail_on_dependency_skip"], + ) else: - print("...adding to bundle '%s'" % (c["bundle"])) + print(f"...adding to bundle {c['bundle']}") print(f" environment_commands={c['environment_commands']}") diff --git a/zppy/e3sm_diags.py b/zppy/e3sm_diags.py index d874d8e3..2ae1607e 100644 --- a/zppy/e3sm_diags.py +++ b/zppy/e3sm_diags.py @@ -1,51 +1,50 @@ import os -import pprint -from typing import List +from typing import Any, Dict, List, Set, Tuple -import jinja2 +from configobj import ConfigObj from zppy.bundle import handle_bundles from zppy.utils import ( + ParameterGuessType, + ParameterNotProvidedError, add_dependencies, - checkStatus, - getTasks, - getYears, - makeExecutable, + check_required_parameters, + check_status, + define_or_guess, + define_or_guess2, + get_file_names, + get_tasks, + get_years, + initialize_template, + make_executable, print_url, - submitScript, + submit_script, + write_settings_file, ) # ----------------------------------------------------------------------------- -# FIXME: C901 'e3sm_diags' is too complex (20) -def e3sm_diags(config, scriptDir, existing_bundles, job_ids_file): # noqa: C901 +def e3sm_diags(config: ConfigObj, script_dir: str, existing_bundles, job_ids_file): - # Initialize jinja2 template engine - templateLoader = jinja2.FileSystemLoader( - searchpath=config["default"]["templateDir"] - ) - templateEnv = jinja2.Environment(loader=templateLoader) - template = templateEnv.get_template("e3sm_diags.bash") + template, _ = initialize_template(config, "e3sm_diags.bash") # --- List of e3sm_diags tasks --- - tasks = getTasks(config, "e3sm_diags") + tasks: List[Dict[str, Any]] = get_tasks(config, "e3sm_diags") if len(tasks) == 0: return existing_bundles # --- Generate and submit e3sm_diags scripts --- dependencies: List[str] = [] - for c in tasks: - - c["scriptDir"] = scriptDir - + check_parameters_for_bash(c) + c["scriptDir"] = script_dir if "ts_num_years" in c.keys(): c["ts_num_years"] = int(c["ts_num_years"]) - # Loop over year sets - year_sets = getYears(c["years"]) + year_sets: List[Tuple[int, int]] = get_years(c["years"]) + ref_year_sets: List[Tuple[int, int]] if ("ref_years" in c.keys()) and (c["ref_years"] != [""]): - ref_year_sets = getYears(c["ref_years"]) + ref_year_sets = get_years(c["ref_years"]) else: ref_year_sets = year_sets for s, rs in zip(year_sets, ref_year_sets): @@ -55,197 +54,29 @@ def e3sm_diags(config, scriptDir, existing_bundles, job_ids_file): # noqa: C901 continue # Skip this year set c["ref_year1"] = rs[0] c["ref_year2"] = rs[1] - if c["subsection"]: - c["sub"] = c["subsection"] - else: - c["sub"] = c["grid"] - # Make a guess for observation paths, if need be - if c["reference_data_path"] == "": - c[ - "reference_data_path" - ] = f"{c['diagnostics_base_path']}/observations/Atm/climatology/" - if ("tc_analysis" in c["sets"]) and (c["tc_obs"] == ""): - c[ - "tc_obs" - ] = f"{c['diagnostics_base_path']}/observations/Atm/tc-analysis/" - if ("ts_num_years" in c.keys()) and (c["obs_ts"] == ""): - c[ - "obs_ts" - ] = f"{c['diagnostics_base_path']}/observations/Atm/time-series/" - if 
c["run_type"] == "model_vs_obs": - prefix = "e3sm_diags_%s_%s_%04d-%04d" % ( - c["sub"], - c["tag"], - c["year1"], - c["year2"], - ) - elif c["run_type"] == "model_vs_model": - prefix = "e3sm_diags_%s_%s_%04d-%04d_vs_%04d-%04d" % ( - c["sub"], - c["tag"], - c["year1"], - c["year2"], - c["ref_year1"], - c["ref_year2"], - ) - reference_data_path = ( - c["reference_data_path"].split("/post")[0] + "/post" - ) - if ("diurnal_cycle" in c["sets"]) and ( - c["reference_data_path_climo_diurnal"] == "" - ): - c[ - "reference_data_path_climo_diurnal" - ] = f"{reference_data_path}/atm/{c['grid']}/clim_diurnal_8xdaily" - if ("tc_analysis" in c["sets"]) and (c["reference_data_path_tc"] == ""): - c[ - "reference_data_path_tc" - ] = f"{reference_data_path}/atm/tc-analysis_{c['ref_year1']}_{c['ref_year2']}" - if ("ts_num_years" in c.keys()) and (c["reference_data_path_ts"] == ""): - c[ - "reference_data_path_ts" - ] = f"{reference_data_path}/atm/{c['grid']}/ts/monthly" - if ("streamflow" in c["sets"]) and ( - c["reference_data_path_ts_rof"] == "" - ): - c[ - "reference_data_path_ts_rof" - ] = f"{reference_data_path}/rof/native/ts/monthly" - if c["gauges_path"] == "": - gauges_path_prefix = c["diagnostics_base_path"] - gauges_path_suffix = "observations/Atm/time-series/GSIM/GSIM_catchment_characteristics_all_1km2.csv" - c["gauges_path"] = os.path.join( - gauges_path_prefix, gauges_path_suffix - ) - if ("tropical_subseasonal" in c["sets"]) and ( - c["reference_data_path_ts_daily"] == "" - ): - c[ - "reference_data_path_ts_daily" - ] = f"{reference_data_path}/atm/{c['grid']}/ts/daily" - else: - raise ValueError("Invalid run_type={}".format(c["run_type"])) - if "diurnal_cycle" in c["sets"]: - if c["dc_obs_climo"] == "": - c["dc_obs_climo"] = c["reference_data_path"] - if ("streamflow" in c["sets"]) and (c["streamflow_obs_ts"] == ""): - c["streamflow_obs_ts"] = c["obs_ts"] - print(prefix) - c["prefix"] = prefix - scriptFile = os.path.join(scriptDir, "%s.bash" % (prefix)) - statusFile = os.path.join(scriptDir, "%s.status" % (prefix)) - settingsFile = os.path.join(scriptDir, "%s.settings" % (prefix)) - skip = checkStatus(statusFile) + check_and_define_parameters(c) + bash_file, settings_file, status_file = get_file_names( + script_dir, c["prefix"] + ) + skip: bool = check_status(status_file) if skip: continue - # Create script - with open(scriptFile, "w") as f: + with open(bash_file, "w") as f: f.write(template.render(**c)) - makeExecutable(scriptFile) - + make_executable(bash_file) # List of dependencies - depend_on_climo = set( - [ - "lat_lon", - "zonal_mean_xy", - "zonal_mean_2d", - "polar", - "cosp_histogram", - "meridional_mean_2d", - "annual_cycle_zonal_mean", - "zonal_mean_2d_stratosphere", - ] - ) - in_sets = set(c["sets"]) - # Check if any requested sets depend on climo: - if depend_on_climo & in_sets: - if "climo_subsection" in c.keys() and c["climo_subsection"] != "": - climo_sub = c["climo_subsection"] - else: - climo_sub = c["sub"] - dependencies.append( - os.path.join( - scriptDir, - "climo_%s_%04d-%04d.status" - % (climo_sub, c["year1"], c["year2"]), - ), - ) - if "diurnal_cycle" in c["sets"]: - dependencies.append( - os.path.join( - scriptDir, - "climo_%s_%04d-%04d.status" - % (c["climo_diurnal_subsection"], c["year1"], c["year2"]), - ) - ) - if "tc_analysis" in c["sets"]: - dependencies.append( - os.path.join( - scriptDir, - "tc_analysis_%04d-%04d.status" % (c["year1"], c["year2"]), - ) - ) + add_climo_dependencies(c, dependencies, script_dir) # Iterate from year1 to year2 incrementing by the 
number of years per time series file. if "ts_num_years" in c.keys(): for yr in range(c["year1"], c["year2"], c["ts_num_years"]): - start_yr = yr - end_yr = yr + c["ts_num_years"] - 1 - if "ts_subsection" in c.keys() and c["ts_subsection"] != "": - ts_sub = c["ts_subsection"] - else: - ts_sub = c["sub"] - - if ( - "ts_daily_subsection" in c.keys() - and c["ts_daily_subsection"] != "" - ): - ts_daily_sub = c["ts_daily_subsection"] - else: - ts_daily_sub = c["sub"] - if ( - ("enso_diags" in c["sets"]) - or ("qbo" in c["sets"]) - or ("area_mean_time_series" in c["sets"]) - ): - add_dependencies( - dependencies, - scriptDir, - "ts", - ts_sub, - start_yr, - end_yr, - c["ts_num_years"], - ) - if "streamflow" in c["sets"]: - add_dependencies( - dependencies, - scriptDir, - "ts", - "rof_monthly", - start_yr, - end_yr, - c["ts_num_years"], - ) - if "tropical_subseasonal" in c["sets"]: - add_dependencies( - dependencies, - scriptDir, - "ts", - ts_daily_sub, - start_yr, - end_yr, - c["ts_num_years"], - ) - with open(settingsFile, "w") as sf: - p = pprint.PrettyPrinter(indent=2, stream=sf) - p.pprint(c) - p.pprint(s) - + add_ts_dependencies(c, dependencies, script_dir, yr) + c["dependencies"] = dependencies + write_settings_file(settings_file, c, s) export = "ALL" existing_bundles = handle_bundles( c, - scriptFile, + bash_file, export, dependFiles=dependencies, existing_bundles=existing_bundles, @@ -253,12 +84,13 @@ def e3sm_diags(config, scriptDir, existing_bundles, job_ids_file): # noqa: C901 if not c["dry_run"]: if c["bundle"] == "": # Submit job - submitScript( - scriptFile, - statusFile, + submit_script( + bash_file, + status_file, export, job_ids_file, dependFiles=dependencies, + fail_on_dependency_skip=c["fail_on_dependency_skip"], ) # Due to a `socket.gaierror: [Errno -2] Name or service not known` error when running e3sm_diags with tc_analysis @@ -267,11 +99,219 @@ def e3sm_diags(config, scriptDir, existing_bundles, job_ids_file): # noqa: C901 # Note that this line should still be executed even if jobid == -1 # The later tc_analysis-using e3sm_diags tasks still depend on this task (and thus will also fail). # Add to the dependency list - dependencies.append(statusFile) + dependencies.append(status_file) else: - print("...adding to bundle '%s'" % (c["bundle"])) + print(f"...adding to bundle {c['bundle']}") print(f" environment_commands={c['environment_commands']}") print_url(c, "e3sm_diags") return existing_bundles + + +def check_parameters_for_bash(c: Dict[str, Any]) -> None: + # Check parameters that aren't used until e3sm_diags.bash is run + check_required_parameters(c, set(["tropical_subseasonal"]), "ref_end_yr") + check_required_parameters(c, set(["qbo"]), "ref_final_yr") + check_required_parameters(c, set(["enso_diags", "qbo"]), "ref_start_yr") + check_required_parameters(c, set(["diurnal_cycle"]), "climo_diurnal_frequency") + + +def check_mvm_only_parameters_for_bash(c: Dict[str, Any]) -> None: + # Check mvm-specific parameters that aren't used until e3sm_diags.bash is run. 
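+    # ("mvm" = model_vs_model.) Checking these now lets a missing parameter
+    # fail fast, at script-generation time, rather than partway through an
+    # e3sm_diags batch job.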
+ if c["diff_title"] == "": + raise ParameterNotProvidedError("diff_title") + if c["ref_name"] == "": + raise ParameterNotProvidedError("ref_name") + if c["short_ref_name"] == "": + raise ParameterNotProvidedError("short_ref_name") + + check_required_parameters( + c, + set(["enso_diags", "tropical_subseasonal", "streamflow", "tc_analysis"]), + "ref_final_yr", + ) + check_required_parameters( + c, set(["tropical_subseasonal", "streamflow", "tc_analysis"]), "ref_start_yr" + ) + ts_sets = set( + [ + "enso_diags", + "qbo", + "area_mean_time_series", + "tropical_subseasonal", + "streamflow", + ] + ) + check_required_parameters(c, ts_sets, "ts_num_years_ref") + check_required_parameters(c, ts_sets, "ts_subsection") + + +def check_and_define_parameters(c: Dict[str, Any]) -> None: + c["sub"] = define_or_guess( + c, "subsection", "grid", ParameterGuessType.SECTION_GUESS + ) + define_or_guess2( + c, + "reference_data_path", + f"{c['diagnostics_base_path']}/observations/Atm/climatology/", + ParameterGuessType.PATH_GUESS, + ) + if "tc_analysis" in c["sets"]: + define_or_guess2( + c, + "tc_obs", + f"{c['diagnostics_base_path']}/observations/Atm/tc-analysis/", + ParameterGuessType.PATH_GUESS, + ) + # TODO: do this based on sets, rather than by relying on the user setting ts_num_years + if "ts_num_years" in c.keys(): + define_or_guess2( + c, + "obs_ts", + f"{c['diagnostics_base_path']}/observations/Atm/time-series/", + ParameterGuessType.PATH_GUESS, + ) + prefix: str + if c["run_type"] == "model_vs_obs": + prefix = f"e3sm_diags_{c['sub']}_{c['tag']}_{c['year1']:04d}-{c['year2']:04d}" + if "diurnal_cycle" in c["sets"]: + define_or_guess2( + c, + "dc_obs_climo", + c["reference_data_path"], + ParameterGuessType.PATH_GUESS, + ) + if "streamflow" in c["sets"]: + define_or_guess2( + c, "streamflow_obs_ts", c["obs_ts"], ParameterGuessType.PATH_GUESS + ) + elif c["run_type"] == "model_vs_model": + check_mvm_only_parameters_for_bash(c) + prefix = f"e3sm_diags_{c['sub']}_{c['tag']}_{c['year1']:04d}-{c['year2']:04d}_vs_{c['ref_year1']:04d}-{c['ref_year2']:04d}" + reference_data_path = c["reference_data_path"].split("/post")[0] + "/post" + if "diurnal_cycle" in c["sets"]: + define_or_guess2( + c, + "reference_data_path_climo_diurnal", + f"{reference_data_path}/atm/{c['grid']}/clim_diurnal_8xdaily", + ParameterGuessType.PATH_GUESS, + ) + if ("tc_analysis" in c["sets"]) and (c["reference_data_path_tc"] == ""): + # We have to guess parameters here, + # because multiple year sets are defined in a single subtask. 
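+            # (Unlike the define_or_guess2 cases above, this value depends on
+            # ref_year1/ref_year2, which change with each year set.)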
+ c[ + "reference_data_path_tc" + ] = f"{reference_data_path}/atm/tc-analysis_{c['ref_year1']}_{c['ref_year2']}" + if set(["enso_diags", "qbo", "area_mean_time_series"]) & set(c["sets"]): + define_or_guess2( + c, + "reference_data_path_ts", + f"{reference_data_path}/atm/{c['grid']}/ts/monthly", + ParameterGuessType.PATH_GUESS, + ) + if "tropical_subseasonal" in c["sets"]: + define_or_guess2( + c, + "reference_data_path_ts_daily", + f"{reference_data_path}/atm/{c['grid']}/ts/daily", + ParameterGuessType.PATH_GUESS, + ) + if "streamflow" in c["sets"]: + define_or_guess2( + c, + "reference_data_path_ts_rof", + f"{reference_data_path}/rof/native/ts/monthly", + ParameterGuessType.PATH_GUESS, + ) + define_or_guess2( + c, + "gauges_path", + os.path.join( + c["diagnostics_base_path"], + "observations/Atm/time-series/GSIM/GSIM_catchment_characteristics_all_1km2.csv", + ), + ParameterGuessType.PATH_GUESS, + ) + else: + raise ValueError(f"Invalid run_type={c['run_type']}") + print(prefix) + c["prefix"] = prefix + + +def add_climo_dependencies( + c: Dict[str, Any], dependencies: List[str], script_dir: str +) -> None: + depend_on_climo: Set[str] = set( + [ + "lat_lon", + "zonal_mean_xy", + "zonal_mean_2d", + "polar", + "cosp_histogram", + "meridional_mean_2d", + "annual_cycle_zonal_mean", + "zonal_mean_2d_stratosphere", + ] + ) + # Check if any requested sets depend on climo: + status_suffix: str = f"_{c['year1']:04d}-{c['year2']:04d}.status" + if depend_on_climo & set(c["sets"]): + climo_sub = define_or_guess( + c, "climo_subsection", "sub", ParameterGuessType.SECTION_GUESS + ) + dependencies.append( + os.path.join(script_dir, f"climo_{climo_sub}{status_suffix}"), + ) + if "diurnal_cycle" in c["sets"]: + dependencies.append( + os.path.join( + script_dir, f"climo_{c['climo_diurnal_subsection']}{status_suffix}" + ) + ) + if "tc_analysis" in c["sets"]: + dependencies.append(os.path.join(script_dir, f"tc_analysis{status_suffix}")) + + +def add_ts_dependencies( + c: Dict[str, Any], dependencies: List[str], script_dir: str, yr: int +): + start_yr = yr + end_yr = yr + c["ts_num_years"] - 1 + ts_sub = define_or_guess( + c, "ts_subsection", "sub", ParameterGuessType.SECTION_GUESS + ) + ts_daily_sub = define_or_guess( + c, "ts_daily_subsection", "sub", ParameterGuessType.SECTION_GUESS + ) + depend_on_ts: Set[str] = set(["enso_diags", "qbo", "area_mean_time_series"]) + if depend_on_ts & set(c["sets"]): + add_dependencies( + dependencies, + script_dir, + "ts", + ts_sub, + start_yr, + end_yr, + c["ts_num_years"], + ) + if "streamflow" in c["sets"]: + add_dependencies( + dependencies, + script_dir, + "ts", + "rof_monthly", + start_yr, + end_yr, + c["ts_num_years"], + ) + if "tropical_subseasonal" in c["sets"]: + add_dependencies( + dependencies, + script_dir, + "ts", + ts_daily_sub, + start_yr, + end_yr, + c["ts_num_years"], + ) diff --git a/zppy/global_time_series.py b/zppy/global_time_series.py index 0ca682fd..674c8187 100644 --- a/zppy/global_time_series.py +++ b/zppy/global_time_series.py @@ -1,197 +1,75 @@ import os -import pprint -from typing import List - -import jinja2 +from typing import Any, Dict, List from zppy.bundle import handle_bundles from zppy.utils import ( add_dependencies, - checkStatus, - getTasks, - getYears, - makeExecutable, + check_status, + get_file_names, + get_tasks, + get_years, + initialize_template, + make_executable, print_url, - submitScript, + submit_script, + write_settings_file, ) # ----------------------------------------------------------------------------- -# FIXME: C901 'run' 
is too complex (19) -def global_time_series(config, scriptDir, existing_bundles, job_ids_file): # noqa: C901 +def global_time_series(config, script_dir, existing_bundles, job_ids_file): - # Initialize jinja2 template engine - templateLoader = jinja2.FileSystemLoader( - searchpath=config["default"]["templateDir"] - ) - templateEnv = jinja2.Environment(loader=templateLoader) - template = templateEnv.get_template("global_time_series.bash") + template, template_env = initialize_template(config, "global_time_series.bash") # --- List of global_time_series tasks --- - tasks = getTasks(config, "global_time_series") + tasks: List[Dict[str, Any]] = get_tasks(config, "global_time_series") if len(tasks) == 0: return existing_bundles # --- Generate and submit global_time_series scripts --- for c in tasks: - c["ts_num_years"] = int(c["ts_num_years"]) - # Loop over year sets - year_sets = getYears(c["years"]) + year_sets = get_years(c["years"]) for s in year_sets: c["year1"] = s[0] c["year2"] = s[1] if ("last_year" in c.keys()) and (c["year2"] > c["last_year"]): continue # Skip this year set - c["scriptDir"] = scriptDir - prefix = "global_time_series_%04d-%04d" % (c["year1"], c["year2"]) + c["scriptDir"] = script_dir + prefix: str = f"global_time_series_{c['year1']:04d}-{c['year2']:04d}" print(prefix) c["prefix"] = prefix - scriptFile = os.path.join(scriptDir, "%s.bash" % (prefix)) - statusFile = os.path.join(scriptDir, "%s.status" % (prefix)) - settingsFile = os.path.join(scriptDir, "%s.settings" % (prefix)) - skip = checkStatus(statusFile) + bash_file, settings_file, status_file = get_file_names(script_dir, prefix) + skip: bool = check_status(status_file) if skip: continue - - # Handle legacy parameter - if c["plot_names"]: - print("warning: plot_names for global_time_series is deprecated.") - print( - "Setting plot_names will override the new parameter, plots_original." - ) - c["plots_original"] = c["plot_names"] - - # Determine which components are needed - c["use_atm"] = False - c["use_ice"] = False - c["use_lnd"] = False - c["use_ocn"] = False - if c["plots_original"]: - c["use_atm"] = True - if c["atmosphere_only"]: - print( - "warning: atmosphere_only for global_time_series is deprecated." - ) - print( - "preferred method: remove the 3 ocean plots (change_ohc,max_moc,change_sea_level) from plots_original." 
- ) - has_original_ocn_plots = ( - ("change_ohc" in c["plots_original"]) - or ("max_moc" in c["plots_original"]) - or ("change_sea_level" in c["plots_original"]) - ) - if (not c["atmosphere_only"]) and has_original_ocn_plots: - c["use_ocn"] = True - else: - # For better string processing in global_time_series.bash - c["plots_original"] = "None" - if c["plots_atm"]: - c["use_atm"] = True - else: - # For better string processing in global_time_series.bash - c["plots_atm"] = "None" - if c["plots_ice"]: - c["use_ice"] = True - else: - # For better string processing in global_time_series.bash - c["plots_ice"] = "None" - if c["plots_lnd"]: - c["use_lnd"] = True - else: - # For better string processing in global_time_series.bash - c["plots_lnd"] = "None" - if c["plots_ocn"]: - c["use_ocn"] = True - else: - # For better string processing in global_time_series.bash - c["plots_ocn"] = "None" - + determine_components(c) # Load useful scripts - c["global_time_series_dir"] = os.path.join( - scriptDir, "{}_dir".format(prefix) - ) + c["global_time_series_dir"] = os.path.join(script_dir, f"{prefix}_dir") if not os.path.exists(c["global_time_series_dir"]): os.mkdir(c["global_time_series_dir"]) scripts = ["coupled_global.py", "readTS.py", "ocean_month.py"] for script in scripts: - script_template = templateEnv.get_template(script) + script_template = template_env.get_template(script) script_file = os.path.join(c["global_time_series_dir"], script) with open(script_file, "w") as f: f.write(script_template.render(**c)) - makeExecutable(script_file) - + make_executable(script_file) # Create script - with open(scriptFile, "w") as f: + with open(bash_file, "w") as f: f.write(template.render(**c)) - makeExecutable(scriptFile) - + make_executable(bash_file) # List of dependencies dependencies: List[str] = [] - # Add Time Series dependencies - if c["use_atm"]: - # Iterate from year1 to year2 incrementing by the number of years per time series file. - for yr in range(c["year1"], c["year2"], c["ts_num_years"]): - start_yr = yr - end_yr = yr + c["ts_num_years"] - 1 - add_dependencies( - dependencies, - scriptDir, - "ts", - "atm_monthly_glb", - start_yr, - end_yr, - c["ts_num_years"], - ) - if c["use_lnd"]: - for yr in range(c["year1"], c["year2"], c["ts_num_years"]): - start_yr = yr - end_yr = yr + c["ts_num_years"] - 1 - add_dependencies( - dependencies, - scriptDir, - "ts", - "lnd_monthly_glb", - start_yr, - end_yr, - c["ts_num_years"], - ) - if c["use_ocn"]: - # Add MPAS Analysis dependencies - ts_year_sets = getYears(c["ts_years"]) - climo_year_sets = getYears(c["climo_years"]) - if (not ts_year_sets) or (not climo_year_sets): - raise Exception( - "ts_years and climo_years must both be set for ocn plots." 
- ) - for ts_year_set, climo_year_set in zip(ts_year_sets, climo_year_sets): - c["ts_year1"] = ts_year_set[0] - c["ts_year2"] = ts_year_set[1] - c["climo_year1"] = climo_year_set[0] - c["climo_year2"] = climo_year_set[1] - dependencies.append( - os.path.join( - scriptDir, - "mpas_analysis_ts_%04d-%04d_climo_%04d-%04d.status" - % ( - c["ts_year1"], - c["ts_year2"], - c["climo_year1"], - c["climo_year2"], - ), - ) - ) - - with open(settingsFile, "w") as sf: - p = pprint.PrettyPrinter(indent=2, stream=sf) - p.pprint(c) - p.pprint(s) - + # Add Global Time Series dependencies + determine_and_add_dependencies(c, dependencies, script_dir) + c["dependencies"] = dependencies + write_settings_file(settings_file, c, s) export = "NONE" existing_bundles = handle_bundles( c, - scriptFile, + bash_file, export, dependFiles=dependencies, existing_bundles=existing_bundles, @@ -199,17 +77,117 @@ def global_time_series(config, scriptDir, existing_bundles, job_ids_file): # no if not c["dry_run"]: if c["bundle"] == "": # Submit job - submitScript( - scriptFile, - statusFile, + submit_script( + bash_file, + status_file, export, job_ids_file, dependFiles=dependencies, + fail_on_dependency_skip=c["fail_on_dependency_skip"], ) else: - print("...adding to bundle '%s'" % (c["bundle"])) + print(f"...adding to bundle {c['bundle']}") print(f" environment_commands={c['environment_commands']}") print_url(c, "global_time_series") return existing_bundles + + +def determine_components(c: Dict[str, Any]) -> None: + # Handle legacy parameter + if c["plot_names"]: + print("warning: plot_names for global_time_series is deprecated.") + print("Setting plot_names will override the new parameter, plots_original.") + c["plots_original"] = c["plot_names"] + # Determine which components are needed + c["use_atm"] = False + c["use_ice"] = False + c["use_lnd"] = False + c["use_ocn"] = False + if c["plots_original"]: + c["use_atm"] = True + if c["atmosphere_only"]: + print("warning: atmosphere_only for global_time_series is deprecated.") + print( + "preferred method: remove the 3 ocean plots (change_ohc,max_moc,change_sea_level) from plots_original." + ) + has_original_ocn_plots = ( + ("change_ohc" in c["plots_original"]) + or ("max_moc" in c["plots_original"]) + or ("change_sea_level" in c["plots_original"]) + ) + if (not c["atmosphere_only"]) and has_original_ocn_plots: + c["use_ocn"] = True + else: + # For better string processing in global_time_series.bash + c["plots_original"] = "None" + if c["plots_atm"]: + c["use_atm"] = True + else: + # For better string processing in global_time_series.bash + c["plots_atm"] = "None" + if c["plots_ice"]: + c["use_ice"] = True + else: + # For better string processing in global_time_series.bash + c["plots_ice"] = "None" + if c["plots_lnd"]: + c["use_lnd"] = True + else: + # For better string processing in global_time_series.bash + c["plots_lnd"] = "None" + if c["plots_ocn"]: + c["use_ocn"] = True + else: + # For better string processing in global_time_series.bash + c["plots_ocn"] = "None" + + +def determine_and_add_dependencies( + c: Dict[str, Any], dependencies: List[str], script_dir: str +) -> None: + if c["use_atm"]: + # Iterate from year1 to year2 incrementing by the number of years per time series file. 
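+        # For example, year1=1985, year2=1995, ts_num_years=5 adds
+        # dependencies on the atm_monthly_glb ts status files for
+        # 1985-1989 and 1990-1994.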
+ for yr in range(c["year1"], c["year2"], c["ts_num_years"]): + start_yr = yr + end_yr = yr + c["ts_num_years"] - 1 + add_dependencies( + dependencies, + script_dir, + "ts", + "atm_monthly_glb", + start_yr, + end_yr, + c["ts_num_years"], + ) + if c["use_lnd"]: + for yr in range(c["year1"], c["year2"], c["ts_num_years"]): + start_yr = yr + end_yr = yr + c["ts_num_years"] - 1 + add_dependencies( + dependencies, + script_dir, + "ts", + "lnd_monthly_glb", + start_yr, + end_yr, + c["ts_num_years"], + ) + if c["use_ocn"]: + # Add MPAS Analysis dependencies + ts_year_sets = get_years(c["ts_years"]) + climo_year_sets = get_years(c["climo_years"]) + if (not ts_year_sets) or (not climo_year_sets): + raise Exception("ts_years and climo_years must both be set for ocn plots.") + for ts_year_set, climo_year_set in zip(ts_year_sets, climo_year_sets): + c["ts_year1"] = ts_year_set[0] + c["ts_year2"] = ts_year_set[1] + c["climo_year1"] = climo_year_set[0] + c["climo_year2"] = climo_year_set[1] + dependencies.append( + os.path.join( + script_dir, + f"mpas_analysis_ts_{c['ts_year1']:04d}-{c['ts_year2']:04d}_climo_{c['climo_year1']:04d}-{c['climo_year2']:04d}.status", + ) + ) diff --git a/zppy/ilamb.py b/zppy/ilamb.py index f1928f8b..3d214f48 100644 --- a/zppy/ilamb.py +++ b/zppy/ilamb.py @@ -1,128 +1,125 @@ import os -import pprint -from typing import List +from typing import Any, Dict, List -import jinja2 +from configobj import ConfigObj from zppy.bundle import handle_bundles from zppy.utils import ( + ParameterGuessType, add_dependencies, - checkStatus, - getTasks, - getYears, - makeExecutable, + check_status, + define_or_guess2, + get_file_names, + get_tasks, + get_years, + initialize_template, + make_executable, print_url, - submitScript, + submit_script, + write_settings_file, ) # ----------------------------------------------------------------------------- -def ilamb(config, scriptDir, existing_bundles, job_ids_file): +def ilamb(config: ConfigObj, script_dir: str, existing_bundles, job_ids_file): - # Initialize jinja2 template engine - templateLoader = jinja2.FileSystemLoader( - searchpath=config["default"]["templateDir"] - ) - templateEnv = jinja2.Environment(loader=templateLoader) - template = templateEnv.get_template("ilamb.bash") + template, _ = initialize_template(config, "ilamb.bash") # --- List of ilamb tasks --- - tasks = getTasks(config, "ilamb") + tasks: List[Dict[str, Any]] = get_tasks(config, "ilamb") if len(tasks) == 0: return existing_bundles # --- Generate and submit ilamb scripts --- dependencies: List[str] = [] - for c in tasks: - if "ts_num_years" in c.keys(): c["ts_num_years"] = int(c["ts_num_years"]) - # Loop over year sets - year_sets = getYears(c["years"]) + year_sets = get_years(c["years"]) for s in year_sets: c["year1"] = s[0] c["year2"] = s[1] - c["scriptDir"] = scriptDir - if c["subsection"]: - c["sub"] = c["subsection"] - else: - c["sub"] = c["grid"] - - if c["ilamb_obs"] == "": - ilamb_obs_prefix = c["diagnostics_base_path"] - ilamb_obs_suffix = "ilamb_data" - c["ilamb_obs"] = os.path.join(ilamb_obs_prefix, ilamb_obs_suffix) - - # List of dependencies - add_dependencies( - dependencies, - scriptDir, - "ts", - c["ts_land_subsection"], - c["year1"], - c["year2"], - c["ts_num_years"], - ) - if not c["land_only"]: - add_dependencies( - dependencies, - scriptDir, - "ts", - c["ts_atm_subsection"], - c["year1"], - c["year2"], - c["ts_num_years"], - ) - - prefix = "ilamb_%04d-%04d" % ( - c["year1"], - c["year2"], + c["scriptDir"] = script_dir + define_or_guess2( + c, + "ilamb_obs", 
+ os.path.join(c["diagnostics_base_path"], "ilamb_data"), + ParameterGuessType.PATH_GUESS, ) + # List of dependencies + determine_and_add_dependencies(c, dependencies, script_dir) + prefix: str = f"ilamb_{c['year1']:04d}-{c['year2']:04d}" c["prefix"] = prefix print(prefix) - scriptFile = os.path.join(scriptDir, "%s.bash" % (prefix)) - statusFile = os.path.join(scriptDir, "%s.status" % (prefix)) - settingsFile = os.path.join(scriptDir, "%s.settings" % (prefix)) - skip = checkStatus(statusFile) + bash_file, settings_file, status_file = get_file_names(script_dir, prefix) + skip: bool = check_status(status_file) if skip: continue - # Create script - with open(scriptFile, "w") as f: + with open(bash_file, "w") as f: f.write(template.render(**c)) - makeExecutable(scriptFile) - - with open(settingsFile, "w") as sf: - p = pprint.PrettyPrinter(indent=2, stream=sf) - p.pprint(c) - p.pprint(s) + make_executable(bash_file) + c["dependencies"] = dependencies + write_settings_file(settings_file, c, s) # Note --export=All is needed to make sure the executable is copied and executed on the nodes. export = "ALL" existing_bundles = handle_bundles( c, - scriptFile, + bash_file, export, dependFiles=dependencies, existing_bundles=existing_bundles, ) if not c["dry_run"]: - if c["bundle"] == "": # Submit job - submitScript( - scriptFile, - statusFile, + submit_script( + bash_file, + status_file, export, job_ids_file, dependFiles=dependencies, + fail_on_dependency_skip=c["fail_on_dependency_skip"], ) else: - print("...adding to bundle '%s'" % (c["bundle"])) + print("...adding to bundle '{c['bundle']}'") print(f" environment_commands={c['environment_commands']}") print_url(c, "ilamb") return existing_bundles + + +def determine_and_add_dependencies( + c: Dict[str, Any], dependencies: List[str], script_dir: str +) -> None: + define_or_guess2( + c, "ts_land_subsection", "land_monthly", ParameterGuessType.SECTION_GUESS + ) + add_dependencies( + dependencies, + script_dir, + "ts", + c["ts_land_subsection"], + c["year1"], + c["year2"], + c["ts_num_years"], + ) + if not c["land_only"]: + define_or_guess2( + c, + "ts_atm_subsection", + "atm_monthly_180x360_aave", + ParameterGuessType.SECTION_GUESS, + ) + add_dependencies( + dependencies, + script_dir, + "ts", + c["ts_atm_subsection"], + c["year1"], + c["year2"], + c["ts_num_years"], + ) diff --git a/zppy/mpas_analysis.py b/zppy/mpas_analysis.py index 40a1c664..003d2ded 100644 --- a/zppy/mpas_analysis.py +++ b/zppy/mpas_analysis.py @@ -1,64 +1,52 @@ -import os -import pprint +from typing import Any, Dict, List, Tuple -import jinja2 +from configobj import ConfigObj from zppy.bundle import handle_bundles from zppy.utils import ( - checkStatus, - getTasks, - getYears, - makeExecutable, + check_status, + get_file_names, + get_tasks, + get_years, + initialize_template, + make_executable, print_url, - submitScript, + submit_script, + write_settings_file, ) # ----------------------------------------------------------------------------- -def mpas_analysis(config, scriptDir, existing_bundles, job_ids_file): +def mpas_analysis(config: ConfigObj, script_dir: str, existing_bundles, job_ids_file): - # Initialize jinja2 template engine - templateLoader = jinja2.FileSystemLoader( - searchpath=config["default"]["templateDir"] - ) - templateEnv = jinja2.Environment(loader=templateLoader) - template = templateEnv.get_template("mpas_analysis.bash") + template, _ = initialize_template(config, "mpas_analysis.bash") # --- List of mpas_analysis tasks --- - tasks = getTasks(config, 
"mpas_analysis") + tasks: List[Dict[str, Any]] = get_tasks(config, "mpas_analysis") if len(tasks) == 0: return existing_bundles # --- Generate and submit mpas_analysis scripts --- - # MPAS-Analysis uses a shared output directory, so only a single # job should run at once. To gracefully handle this, we make each - # MAPS-Analysis task dependant on all previous ones. This may not + # MPAS-Analysis task dependant on all previous ones. This may not # be 100% fool-proof, but should be a reasonable start dependencies = [] - for c in tasks: - - if config["mpas_analysis"]["shortTermArchive"]: - c["subdir_ocean"] = "/archive/ocn/hist" - c["subdir_ice"] = "/archive/ice/hist" - else: - c["subdir_ocean"] = "/run" - c["subdir_ice"] = "/run" - + set_subdirs(config, c) # Loop over year sets - ts_year_sets = getYears(c["ts_years"]) + ts_year_sets: List[Tuple[int, int]] = get_years(c["ts_years"]) + climo_year_sets: List[Tuple[int, int]] + enso_year_sets: List[Tuple[int, int]] if c["climo_years"] != [""]: - climo_year_sets = getYears(c["climo_years"]) + climo_year_sets = get_years(c["climo_years"]) else: climo_year_sets = ts_year_sets if c["enso_years"] != [""]: - enso_year_sets = getYears(c["enso_years"]) + enso_year_sets = get_years(c["enso_years"]) else: enso_year_sets = ts_year_sets - for s, rs, es in zip(ts_year_sets, climo_year_sets, enso_year_sets): - c["ts_year1"] = s[0] c["ts_year2"] = s[1] if ("last_year" in c.keys()) and (c["ts_year2"] > c["last_year"]): @@ -71,49 +59,32 @@ def mpas_analysis(config, scriptDir, existing_bundles, job_ids_file): c["enso_year2"] = es[1] if ("last_year" in c.keys()) and (c["enso_year2"] > c["last_year"]): continue # Skip this year set - c["scriptDir"] = scriptDir + c["scriptDir"] = script_dir + prefix_suffix: str = f"_ts_{c['ts_year1']:04d}-{c['ts_year2']:04d}_climo_{c['climo_year1']:04d}-{c['climo_year2']:04d}" + prefix: str if c["subsection"]: - prefix = "mpas_analysis_%s_ts_%04d-%04d_climo_%04d-%04d" % ( - c["subsection"], - c["ts_year1"], - c["ts_year2"], - c["climo_year1"], - c["climo_year2"], - ) + prefix = f"mpas_analysis_{c['subsection']}{prefix_suffix}" else: - prefix = "mpas_analysis_ts_%04d-%04d_climo_%04d-%04d" % ( - c["ts_year1"], - c["ts_year2"], - c["climo_year1"], - c["climo_year2"], - ) + prefix = f"mpas_analysis{prefix_suffix}" print(prefix) c["prefix"] = prefix - scriptFile = os.path.join(scriptDir, "%s.bash" % (prefix)) - statusFile = os.path.join(scriptDir, "%s.status" % (prefix)) - settingsFile = os.path.join(scriptDir, "%s.settings" % (prefix)) - + bash_file, settings_file, status_file = get_file_names(script_dir, prefix) # Check if we can skip because it completed successfully before - skip = checkStatus(statusFile) + skip: bool = check_status(status_file) if skip: # Add to the dependency list - dependencies.append(statusFile) + dependencies.append(status_file) continue - # Create script - with open(scriptFile, "w") as f: + with open(bash_file, "w") as f: f.write(template.render(**c)) - makeExecutable(scriptFile) - - with open(settingsFile, "w") as sf: - p = pprint.PrettyPrinter(indent=2, stream=sf) - p.pprint(c) - p.pprint(s) - + make_executable(bash_file) + c["dependencies"] = dependencies + write_settings_file(settings_file, c, s) export = "ALL" existing_bundles = handle_bundles( c, - scriptFile, + bash_file, export, dependFiles=dependencies, existing_bundles=existing_bundles, @@ -122,22 +93,32 @@ def mpas_analysis(config, scriptDir, existing_bundles, job_ids_file): if c["bundle"] == "": # Submit job - submitScript( - scriptFile, - 
statusFile, + submit_script( + bash_file, + status_file, export, job_ids_file, dependFiles=dependencies, + fail_on_dependency_skip=c["fail_on_dependency_skip"], ) # Note that this line should still be executed even if jobid == -1 # The later MPAS-Analysis tasks still depend on this task (and thus will also fail). # Add to the dependency list - dependencies.append(statusFile) + dependencies.append(status_file) else: - print("...adding to bundle '%s'" % (c["bundle"])) + print(f"...adding to bundle {c['bundle']}") print(f" environment_commands={c['environment_commands']}") print_url(c, "mpas_analysis") return existing_bundles + + +def set_subdirs(config: ConfigObj, c: Dict[str, Any]) -> None: + if config["mpas_analysis"]["shortTermArchive"]: + c["subdir_ocean"] = "/archive/ocn/hist" + c["subdir_ice"] = "/archive/ice/hist" + else: + c["subdir_ocean"] = "/run" + c["subdir_ice"] = "/run" diff --git a/zppy/tc_analysis.py b/zppy/tc_analysis.py index 52d7124c..b9346c8a 100644 --- a/zppy/tc_analysis.py +++ b/zppy/tc_analysis.py @@ -1,75 +1,65 @@ -import os -import pprint -from typing import List +from typing import Any, Dict, List, Tuple -import jinja2 +from configobj import ConfigObj from zppy.bundle import handle_bundles -from zppy.utils import checkStatus, getTasks, getYears, makeExecutable, submitScript +from zppy.utils import ( + check_status, + get_file_names, + get_tasks, + get_years, + initialize_template, + make_executable, + submit_script, + write_settings_file, +) # ----------------------------------------------------------------------------- -def tc_analysis(config, scriptDir, existing_bundles, job_ids_file): +def tc_analysis(config: ConfigObj, script_dir: str, existing_bundles, job_ids_file): - # Initialize jinja2 template engine - templateLoader = jinja2.FileSystemLoader( - searchpath=config["default"]["templateDir"] - ) - templateEnv = jinja2.Environment(loader=templateLoader) - template = templateEnv.get_template("tc_analysis.bash") + template, _ = initialize_template(config, "tc_analysis.bash") # --- List of tasks --- - tasks = getTasks(config, "tc_analysis") + tasks: List[Dict[str, Any]] = get_tasks(config, "tc_analysis") if len(tasks) == 0: return existing_bundles # --- Generate and submit scripts --- - # There is a `GenerateConnectivityFile: error while loading shared libraries: libnetcdf.so.11: cannot open shared object file: No such file or directory` error # when multiple year_sets are run simultaneously. Therefore, we will wait for the completion of one year_set before moving on to the next. 
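     # Concretely (illustrative sketch): each submitted year_set appends its .status file to
     # `dependencies`, and submit_script then launches the next year_set with
     # sbatch --dependency=afterok on the job IDs recorded in those status files.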
     dependencies: List[str] = []

     for c in tasks:
-
         # Loop over year sets
-        year_sets = getYears(c["years"])
+        year_sets: List[Tuple[int, int]] = get_years(c["years"])
         for s in year_sets:
             c["year1"] = s[0]
             c["year2"] = s[1]
             if ("last_year" in c.keys()) and (c["year2"] > c["last_year"]):
                 continue  # Skip this year set
-            c["scriptDir"] = scriptDir
+            c["scriptDir"] = script_dir
             if c["input_files"]:
                 c["atm_name"] = c["input_files"].split(".")[0]
             else:
                 raise ValueError("No value was given for `input_files`.")
-            prefix = "tc_analysis_%04d-%04d" % (
-                c["year1"],
-                c["year2"],
-            )
+            prefix = f"tc_analysis_{c['year1']:04d}-{c['year2']:04d}"
             print(prefix)
             c["prefix"] = prefix
-            scriptFile = os.path.join(scriptDir, "%s.bash" % (prefix))
-            statusFile = os.path.join(scriptDir, "%s.status" % (prefix))
-            settingsFile = os.path.join(scriptDir, "%s.settings" % (prefix))
-            skip = checkStatus(statusFile)
+            bash_file, settings_file, status_file = get_file_names(script_dir, prefix)
+            skip: bool = check_status(status_file)
             if skip:
                 continue
-            # Create script
-            with open(scriptFile, "w") as f:
+            with open(bash_file, "w") as f:
                 f.write(template.render(**c))
-            makeExecutable(scriptFile)
-
-            with open(settingsFile, "w") as sf:
-                p = pprint.PrettyPrinter(indent=2, stream=sf)
-                p.pprint(c)
-                p.pprint(s)
-
+            make_executable(bash_file)
+            c["dependencies"] = dependencies
+            write_settings_file(settings_file, c, s)
             export = "NONE"
             existing_bundles = handle_bundles(
                 c,
-                scriptFile,
+                bash_file,
                 export,
                 dependFiles=dependencies,
                 existing_bundles=existing_bundles,
@@ -77,20 +67,21 @@ def tc_analysis(config, scriptDir, existing_bundles, job_ids_file):
             if not c["dry_run"]:
                 if c["bundle"] == "":
                     # Submit job
-                    submitScript(
-                        scriptFile,
-                        statusFile,
+                    submit_script(
+                        bash_file,
+                        status_file,
                         export,
                         job_ids_file,
                         dependFiles=dependencies,
+                        fail_on_dependency_skip=c["fail_on_dependency_skip"],
                     )
                     # Note that this line should still be executed even if jobid == -1
                     # The later tc_analysis tasks still depend on this task (and thus will also fail).
                     # Add to the dependency list
-                    dependencies.append(statusFile)
+                    dependencies.append(status_file)
                 else:
-                    print("...adding to bundle '%s'" % (c["bundle"]))
+                    print(f"...adding to bundle {c['bundle']}")
                     print(f"  environment_commands={c['environment_commands']}")
diff --git a/zppy/templates/default.ini b/zppy/templates/default.ini
index 1e456999..9bf5bdce 100644
--- a/zppy/templates/default.ini
+++ b/zppy/templates/default.ini
@@ -25,10 +25,18 @@ e3sm_to_cmip_environment_commands = string(default="")
 # Set up the environment -- this is where you can tell zppy to use a custom conda environment.
 # To use a custom conda environment, you can set `environment_commands="source <path_to_conda>/conda.sh; conda activate <env_name>"`.
 environment_commands = string(default="")
+# If set to True, zppy will fail as soon as a job is unable to launch because of a missing dependency.
+# If set to False, zppy will launch other jobs, if possible.
+fail_on_dependency_skip = boolean(default=False)
 # The frequency of the data. Options include "monthly", "diurnal_8xdaily"
 frequency = string(default="monthly")
 # The grid to use
 grid = string(default="")
+# These two parameters enable zppy to guess path or section parameters.
+# This allows users to set fewer parameters, but with the risk of zppy choosing incorrect values for them.
+# Set to False for more transparency in path or section definitions.
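+# For example (illustrative): with section guessing enabled, an `[ilamb]` task may fall back to
+# ts_land_subsection = "land_monthly" when that parameter is left unset.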
+guess_path_parameters = boolean(default=True)
+guess_section_parameters = boolean(default=True)
 # The directory to be post-processed
 # NOTE: no default, must be provided by user
 input = string
@@ -128,6 +136,7 @@ scratch = string(default="")
 backend = string(default="mpl")
 cfg = string(default="")
 # Name of the frequency from `[climo]` to use for "diurnal_cycle" runs
+# Required for "diurnal_cycle" runs
 climo_diurnal_frequency = string(default="")
 # Name of the subsection of `[climo]` to use for "diurnal_cycle" runs
 climo_diurnal_subsection = string(default="")
@@ -154,16 +163,18 @@ output_format = string_list(default=list("png"))
 # See https://e3sm-project.github.io/e3sm_diags/_build/html/master/available-parameters.html
 output_format_subplot = string_list(default=list())
 # End year (i.e., the last year to use) for the reference data
+# Required for "tropical_subseasonal" runs
 ref_end_yr = string(default="")
 # Final year (i.e., the last available year) for the reference data
 # Required for "qbo" runs
-# Required for run_type="model_vs_model" "enso_diags"/"streamflow"/"tc_analysis" runs
+# Required for run_type="model_vs_model" "enso_diags"/"streamflow"/"tc_analysis"/"tropical_subseasonal" runs
 ref_final_yr = string(default="")
 # See https://e3sm-project.github.io/e3sm_diags/_build/html/master/available-parameters.html
 # Required for run_type="model_vs_model"
 ref_name = string(default="")
 # Start year for the reference data
-# Required for "enso_diags"/"qbo"/"streamflow"/"tc_analysis" runs
+# Required for "enso_diags"/"qbo" runs
+# Required for run_type="model_vs_model" "tropical_subseasonal"/"streamflow"/"tc_analysis" runs
 ref_start_yr = string(default="")
 # The ref years to run; "1:100:20" would mean process years 1-100 in 20-year increments
 # Recommended for run_type="model_vs_model"
@@ -181,32 +192,18 @@ reference_data_path_tc = string(default="")
 # `reference_data_path` but for "enso_diags"/"qbo"/"area_mean_time_series" runs
 # Required for run_type="model_vs_model" "enso_diags"/"qbo"/"area_mean_time_series" runs
 reference_data_path_ts = string(default="")
-# Required for "tropical_subseasonal" runs
+# Required for run_type="model_vs_model" "tropical_subseasonal" runs
 reference_data_path_ts_daily = string(default="")
 # `reference_data_path` but for "streamflow" runs
 # Required for run_type="model_vs_model" "streamflow" runs
 reference_data_path_ts_rof = string(default="")
 # See https://e3sm-project.github.io/e3sm_diags/_build/html/master/available-parameters.html
-# Some parameters are required if run_type="model_vs_model": `diff_title`, `ref_name`, `reference_data_path`, `short_ref_name`
-# Required for "model_vs_model" "area_mean_time_series" runs: `reference_data_path_ts`, `ts_num_years_ref`, `ts_subsection`
-# Required for "model_vs_model" "diurnal_cycle" runs: `reference_path_climo_diurnal`
-# Required for "model_vs_model" "enso_diags" runs: `ref_final_yr`, `ref_start_yr`, `reference_data_path_ts`, `ts_num_years_ref`, `ts_subsection`
-# Required for "model_vs_model" "qbo" runs: `reference_data_path_ts`, `ts_num_years_ref`, `ts_subsection`
-# Required for "model_vs_model" "streamflow" runs: `gauges_path`, `ref_final_yr`, `ref_start_yr`, `reference_data_path_ts_rof`, `ts_num_years_ref`, `ts_subsection`
-# Required for "model_vs_model" "tc_analysis" runs: `ref_final_yr`, `ref_start_yr`, `reference_diurnal_path_tc`
-# Required for "model_vs_model" "tropical_subseasonal" runs: `ref_final_yr`, `ref_start_yr`, `reference_data_path_ts_daily`, `ts_num_years_ref`, `ts_subsection`
+# Some additional parameters are required if run_type="model_vs_model".
+# Search for `Required for run_type="model_vs_model"` in this file.
 run_type = string(default="model_vs_obs")
 # The sets to run
-# All available sets (16) = "aerosol_aeronet","aerosol_budget","annual_cycle_zonal_mean","area_mean_time_series","cosp_histogram","diurnal_cycle","enso_diags","lat_lon","meridional_mean_2d","polar","qbo","streamflow","tc_analysis","zonal_mean_2d","zonal_mean_2d_stratosphere","zonal_mean_xy"
-#
-# A subset of these are provided as a default below. These 10 sets can be run as long as standard climo files are generated.
-#
-# The 6 additional sets can be included if the appropriate input is available: "area_mean_time_series","diurnal_cycle","enso_diags","qbo","streamflow","tc_analysis"
-# 3 of these require time-series data and for `obs_ts` and `ref_start_yr` to be set: "area_mean_time_series","enso_diags","qbo"
-# "diurnal_cycle" requires `climo_diurnal_subsection`, `climo_diurnal_frequency`, and `dc_obs_climo` to be set.
-# "qbo" requires `ref_final_yr` to be set.
-# "streamflow" requires `streamflow_obs_ts` to be set.
-# "tc_analysis" requires `tc_obs` to be set.
+# All available sets (17) = "aerosol_aeronet","aerosol_budget","annual_cycle_zonal_mean","area_mean_time_series","cosp_histogram","diurnal_cycle","enso_diags","lat_lon","meridional_mean_2d","polar","qbo","streamflow","tc_analysis","tropical_subseasonal","zonal_mean_2d","zonal_mean_2d_stratosphere","zonal_mean_xy"
+# To find the parameters required for a set, search for the set's name in this file.
 # The order of the `sets` list is the order the sets will show up in E3SM Diags.
 # `sets` below are ordered by 1) core or speciality and then 2) older to newer.
 sets = string_list(default=list("lat_lon","zonal_mean_xy","zonal_mean_2d","polar","cosp_histogram","meridional_mean_2d","annual_cycle_zonal_mean","zonal_mean_2d_stratosphere","aerosol_aeronet","aerosol_budget"))
@@ -225,10 +222,10 @@ tag = string(default="model_vs_obs")
 # Path to observation data for "tc_analysis" runs
 tc_obs = string(default="")
 # The years increment for reference data
-# Required for run_type="model_vs_model" "enso_diags"/"qbo"/"area_mean_time_series/streamflow" runs
+# Required for run_type="model_vs_model" "enso_diags"/"qbo"/"area_mean_time_series"/"tropical_subseasonal"/"streamflow" runs
 ts_num_years_ref = integer(default=5)
-# Name of the `[ts]` subtask for "enso_diags"/"qbo"/"area_mean_time_series"/"streamflow" runs
-# Required for run_type="model_vs_model" "enso_diags"/"qbo"/"area_mean_time_series"/"streamflow" runs
+# Name of the `[ts]` subtask to depend on
+# Required for run_type="model_vs_model" "enso_diags"/"qbo"/"area_mean_time_series"/"tropical_subseasonal"/"streamflow" runs
 ts_subsection = string(default="")
 # Required for "tropical_subseasonal" run
 ts_daily_subsection = string(default="")
@@ -335,8 +332,8 @@ cfg = string(default="ilamb/cmip.cfg")
 ilamb_obs = string(default="")
 # for land_only run
 land_only = boolean(default=False)
-ts_atm_subsection = string(default="atm_monthly_180x360_aave")
-ts_land_subsection = string(default="land_monthly")
+ts_atm_subsection = string(default="")
+ts_land_subsection = string(default="")
 # Name of the grid used by the relevant `[ts]` `atm` task
 ts_atm_grid = string(default="180x360_aave")
 # Name of the grid used by the relevant `[ts]` `land` task
diff --git a/zppy/templates/e3sm_diags.bash b/zppy/templates/e3sm_diags.bash
index b32eed54..b94dbd43 100644
--- a/zppy/templates/e3sm_diags.bash
+++ b/zppy/templates/e3sm_diags.bash
@@ -207,10 +207,12 @@ create_links_ts ${ts_dir_source} ${ts_dir_ref} ${ref_Y1} ${ref_Y2} 6
 {%- endif %}
 {%- endif %}

+{%- if "tropical_subseasonal" in sets %}
 ts_daily_dir={{ output }}/post/atm/{{ grid }}/ts/daily/{{ '%dyr' % (ts_num_years) }}
 {% if run_type == "model_vs_model" %}
 ts_daily_dir_ref={{ reference_data_path_ts_daily }}/{{ ts_num_years_ref }}yr
 {%- endif %}
+{%- endif %}

 {%- if "streamflow" in sets %}
 {% if run_type == "model_vs_obs" %}
@@ -227,9 +229,6 @@ create_links_ts_rof ${ts_rof_dir_source} ${ts_rof_dir_ref} ${ref_Y1} ${ref_Y2} 8
 {%- endif %}
 {%- endif %}

-{% if run_type == "model_vs_obs" %}
-ref_name={{ ref_name }}
-{%- endif %}
 {% if (run_type == "model_vs_model") and keep_mvm_case_name_in_fig %}
 ref_name={{ ref_name }}
 {%- endif %}
@@ -390,7 +389,7 @@ trop_param.short_ref_name = '{{ short_ref_name }}'
 ref_start_yr = {{ ref_start_yr }}
 ref_end_yr = {{ ref_end_yr }}
 trop_param.ref_start_yr = f'{ref_start_yr:04}'
-trop_param.ref_end_yr = f'{ref_final_yr:04}'
+trop_param.ref_end_yr = f'{ref_end_yr:04}'
 # Optionally, swap test and reference model
 if {{ swap_test_ref }}:
     trop_param.test_data_path, trop_param.reference_data_path = trop_param.reference_data_path, trop_param.test_data_path
@@ -479,7 +478,6 @@ params.append(dc_param)

 {%- if "streamflow" in sets %}
 streamflow_param = StreamflowParameter()
-streamflow_param.reference_data_path = '{{ streamflow_obs_ts }}'
 streamflow_param.test_data_path = '${ts_rof_dir_primary}'
 streamflow_param.test_name = short_name
 streamflow_param.test_start_yr = start_yr
diff --git a/zppy/templates/tc_analysis.bash b/zppy/templates/tc_analysis.bash
index 57740ae2..9a616dd9 100644
--- a/zppy/templates/tc_analysis.bash
+++ b/zppy/templates/tc_analysis.bash
@@ -90,6 +90,13 @@ cat ${result_dir}out.dat0* > ${result_dir}cyclones_${file_name}.txt
 StitchNodes --in_fmt "lon,lat,slp,wind" --in_connect ${result_dir}connect_CSne${res}_v2.dat --range 6.0 --mintime 6 --maxgap 1 --in ${result_dir}cyclones_${file_name}.txt --out ${result_dir}cyclones_stitch_${file_name}.dat --threshold "wind,>=,17.5,6;lat,<=,40.0,6;lat,>=,-40.0,6"
 rm ${result_dir}cyclones_${file_name}.txt

+# If cyclones_stitch file is empty, exit
+if ! [ -s ${result_dir}cyclones_stitch_${file_name}.dat ]; then
+  cd {{ scriptDir }}
+  echo 'ERROR (1)' > {{ prefix }}.status
+  exit 1
+fi
+
 # Generate histogram of detections
 HistogramNodes --in ${result_dir}cyclones_stitch_${file_name}.dat --iloncol 2 --ilatcol 3 --out ${result_dir}cyclones_hist_${file_name}.nc

diff --git a/zppy/ts.py b/zppy/ts.py
index d07a6fa8..eab2a3d1 100644
--- a/zppy/ts.py
+++ b/zppy/ts.py
@@ -1,118 +1,81 @@
-import os
-import pprint
-import re
+from typing import Any, Dict, List, Tuple

-import jinja2
+from configobj import ConfigObj

 from zppy.bundle import handle_bundles
 from zppy.utils import (
-    checkStatus,
-    getComponent,
-    getTasks,
-    getYears,
-    makeExecutable,
-    setMappingFile,
-    submitScript,
+    ParameterGuessType,
+    check_status,
+    define_or_guess,
+    get_file_names,
+    get_tasks,
+    get_years,
+    initialize_template,
+    make_executable,
+    set_component_and_prc_typ,
+    set_grid,
+    set_mapping_file,
+    submit_script,
+    write_settings_file,
 )

 # -----------------------------------------------------------------------------
-def ts(config, scriptDir, existing_bundles, job_ids_file):
+def ts(config: ConfigObj, script_dir: str, existing_bundles, job_ids_file):

-    # --- Initialize jinja2 template engine ---
-    templateLoader = jinja2.FileSystemLoader(
-        searchpath=config["default"]["templateDir"]
-    )
-    templateEnv = jinja2.Environment(loader=templateLoader)
-    template = templateEnv.get_template("ts.bash")
+    template, _ = initialize_template(config, "ts.bash")

     # --- List of tasks ---
-    tasks = getTasks(config, "ts")
+    tasks: List[Dict[str, Any]] = get_tasks(config, "ts")
     if len(tasks) == 0:
         return existing_bundles

     # --- Generate and submit ts scripts ---
     for c in tasks:
-
-        setMappingFile(c)
-
-        # Grid name (if not explicitly defined)
-        # 'native' if no remapping
-        # or extracted from mapping filename
-        if c["grid"] == "":
-            if c["mapping_file"] == "":
-                c["grid"] = "native"
-            elif c["mapping_file"] == "glb":
-                c["grid"] = "glb"
-            else:
-                tmp = os.path.basename(c["mapping_file"])
-                # FIXME: W605 invalid escape sequence '\.'
- tmp = re.sub("\.[^.]*\.nc$", "", tmp) # noqa: W605 - tmp = tmp.split("_") - if tmp[0] == "map": - c["grid"] = "%s_%s" % (tmp[-2], tmp[-1]) - else: - raise ValueError( - "Cannot extract target grid name from mapping file %s" - % (c["mapping_file"]) - ) - - # Output component (for directory structure) and procedure type for ncclimo - c["component"], c["prc_typ"] = getComponent( - c["input_component"], c["input_files"] - ) - + set_mapping_file(c) + set_grid(c) + set_component_and_prc_typ(c) c["cmor_tables_prefix"] = c["diagnostics_base_path"] - + year_sets: List[Tuple[int, int]] = get_years(c["years"]) # Loop over year sets - year_sets = getYears(c["years"]) for s in year_sets: - c["yr_start"] = s[0] c["yr_end"] = s[1] if ("last_year" in c.keys()) and (c["yr_end"] > c["last_year"]): continue # Skip this year set c["ypf"] = s[1] - s[0] + 1 - c["scriptDir"] = scriptDir - if c["subsection"]: - sub = c["subsection"] - else: - sub = c["grid"] - prefix = "ts_%s_%04d-%04d-%04d" % ( - sub, - c["yr_start"], - c["yr_end"], - c["ypf"], + c["scriptDir"] = script_dir + sub: str = define_or_guess( + c, "subsection", "grid", ParameterGuessType.SECTION_GUESS ) + prefix = f"ts_{sub}_{c['yr_start']:04d}-{c['yr_end']:04d}-{c['ypf']:04d}" print(prefix) c["prefix"] = prefix - scriptFile = os.path.join(scriptDir, "%s.bash" % (prefix)) - statusFile = os.path.join(scriptDir, "%s.status" % (prefix)) - settingsFile = os.path.join(scriptDir, "%s.settings" % (prefix)) - skip = checkStatus(statusFile) + bash_file, settings_file, status_file = get_file_names(script_dir, prefix) + skip: bool = check_status(status_file) if skip: continue - # Create script - with open(scriptFile, "w") as f: + with open(bash_file, "w") as f: f.write(template.render(**c)) - makeExecutable(scriptFile) - - with open(settingsFile, "w") as sf: - p = pprint.PrettyPrinter(indent=2, stream=sf) - p.pprint(c) - p.pprint(s) - + make_executable(bash_file) + write_settings_file(settings_file, c, s) export = "ALL" existing_bundles = handle_bundles( - c, scriptFile, export, existing_bundles=existing_bundles + c, bash_file, export, existing_bundles=existing_bundles ) if not c["dry_run"]: if c["bundle"] == "": # Submit job - submitScript(scriptFile, statusFile, export, job_ids_file) + submit_script( + bash_file, + status_file, + export, + job_ids_file, + fail_on_dependency_skip=c["fail_on_dependency_skip"], + ) else: - print("...adding to bundle '%s'" % (c["bundle"])) + print(f"...adding to bundle {c['bundle']}") print(f" environment_commands={c['environment_commands']}") print( diff --git a/zppy/utils.py b/zppy/utils.py index 96c3a180..7b8b204a 100644 --- a/zppy/utils.py +++ b/zppy/utils.py @@ -1,19 +1,96 @@ import os import os.path +import pprint +import re import shlex import stat import time +from enum import Enum from subprocess import PIPE, Popen -from typing import Any, Dict, List +from typing import Any, Dict, List, Set, Tuple -# ----------------------------------------------------------------------------- -# Process specified section and its sub-sections to build list of tasks -# -# If the section includes sub-sections, one task will be created for each -# sub-section and no task will be created for the main section. 
+import jinja2
+from configobj import ConfigObj
+
+
+# Classes #####################################################################
+class ParameterGuessType(Enum):
+    PATH_GUESS = 1
+    SECTION_GUESS = 2
+
+
+class ParameterNotProvidedError(RuntimeError):
+    pass
+
+
+class DependencySkipError(RuntimeError):
+    pass
+
+
+# Utilities for this file #####################################################
+
+
+def get_active_status(task: Dict[str, Any]) -> bool:
+    active: Any = task["active"]
+    if type(active) == bool:
+        return active
+    elif type(active) == str:
+        active_lower_case: str = active.lower()
+        if active_lower_case == "true":
+            return True
+        elif active_lower_case == "false":
+            return False
+        raise ValueError(f"Invalid value {active} for 'active'")
+    raise TypeError(f"Invalid type {type(active)} for 'active'")
+
+
+def get_guess_type_parameter(guess_type: ParameterGuessType) -> str:
+    guess_type_parameter: str
+    if guess_type == ParameterGuessType.PATH_GUESS:
+        guess_type_parameter = "guess_path_parameters"
+    elif guess_type == ParameterGuessType.SECTION_GUESS:
+        guess_type_parameter = "guess_section_parameters"
+    else:
+        raise ValueError(f"Invalid guess_type: {guess_type}")
+    return guess_type_parameter
+
+
+def get_url_message(c: Dict[str, Any], task: str) -> str:
+    base_path = c["web_portal_base_path"]
+    base_url = c["web_portal_base_url"]
+    www = c["www"]
+    case = c["case"]
+    url_msg: str
+    if www.startswith(base_path):
+        # TODO: python 3.9 introduces `removeprefix`
+        # This will begin with a "/"
+        www_suffix = www[len(base_path) :]
+        url_msg = f"URL: {base_url}{www_suffix}/{case}/{task}"
+    else:
+        url_msg = f"Could not determine URL from www={www}"
+    return url_msg
+
+
+# Beginning steps #############################################################
+
+# TODO: determine return type
+def initialize_template(config: ConfigObj, template_name: str) -> Tuple[Any, Any]:
+    # --- Initialize jinja2 template engine ---
+    template_loader = jinja2.FileSystemLoader(
+        searchpath=config["default"]["templateDir"]
+    )
+    template_env = jinja2.Environment(loader=template_loader)
+    template = template_env.get_template(template_name)
+    return template, template_env

-def getTasks(config, section_name):
+# TODO: type aliases require python 3.12 or higher
+# type TaskDict = Dict[str, Any]
+
+# Process specified section and its sub-sections to build the list of tasks.
+# If the section includes sub-sections, one task will be created for each
+# sub-section and no task will be created for the main section.
+def get_tasks(config: ConfigObj, section_name: str) -> List[Dict[str, Any]]:

     # mypy: resolves error: Need type annotation for "tasks" (hint: "tasks: List[<type>] = ...")
     tasks: List[Dict[str, Any]] = []
@@ -21,17 +98,18 @@ def getTasks(config, section_name):
     # Sanity check
     # flake8: resolves E713 test for membership should be 'not in'
     if section_name not in config:
-        print('WARNING: Skipping section not found = "%s"' % (section_name))
+        print(f'WARNING: Skipping section not found = "{section_name}"')
         return tasks

     # List of sub-sections
-    sub_section_names = config[section_name].sections
+    sub_section_names: List[str] = config[section_name].sections

     # Merge default with current section. Need to work with copies to avoid contamination
-    section_cfg = config["default"].copy()
+    section_cfg: Dict[str, Any] = config["default"].copy()
     section_cfg.update(config[section_name].copy())

     # Construct list of tasks
+    task: Dict[str, Any]
     if len(sub_section_names) == 0:

         # No sub-section, single task
@@ -46,21 +124,19 @@ def getTasks(config, section_name):
         tasks.append(task)

     else:
-
         # One task for each sub-section
         for sub_section_name in sub_section_names:
-
             # Merge current section with default
             task = config["default"].copy()
             task.update(config[section_name].copy())

             # Merge sub-section with section. Start with a dictionary copy of sub-section
-            tmp = config[section_name][sub_section_name].copy()
+            tmp: Dict[str, Any] = config[section_name][sub_section_name].copy()
             # Remove all empty fields (None). These will be inherited from section
-            sub = {k: v for k, v in tmp.items() if v is not None}
+            sub: Dict[str, Any] = {k: v for k, v in tmp.items() if v is not None}
             # Merge content of sub-section into section
             task.update(sub)

             # At this point, task will still include dictionary entries for
-            # all sub-sections. Remove them to clean-up
+            # all sub-sections. Remove them to clean up.
             for s in sub_section_names:
                 task.pop(s)
             # Finally, add name of subsection to dictionary
@@ -79,78 +155,53 @@ def getTasks(config, section_name):
     return tasks


-# -----------------------------------------------------------------------------
-def get_active_status(task):
-    active = task["active"]
-    if type(active) == bool:
-        return active
-    elif type(active) == str:
-        active_lower_case = active.lower()
-        if active_lower_case == "true":
-            return True
-        elif active_lower_case == "false":
-            return False
-        raise ValueError("Invalid value {} for 'active'".format(active))
-    raise TypeError("Invalid type {} for 'active'".format(type(active)))
+# `for c in tasks` steps ######################################################


-# -----------------------------------------------------------------------------
-# Return all year sets from a configuration given by a list of strings
-# "year_begin:year_end:year_freq"
-# "year_begin-year_end"
-
-
-def getYears(years_list):
-    if type(years_list) == str:
-        # This will be the case if years_list is missing a trailing comma
-        years_list = [years_list]
-    year_sets = []
-    for years in years_list:
-
-        if years.count(":") == 2:
-
-            year_begin, year_end, year_freq = years.split(":")
-            year_begin = int(year_begin)
-            year_end = int(year_end)
-            year_freq = int(year_freq)
-
-            year1 = year_begin
-            year2 = year1 + year_freq - 1
-            while year2 <= year_end:
-                year_sets.append((year1, year2))
-                year1 = year2 + 1
-                year2 = year1 + year_freq - 1
-
-        elif years.count("-") == 1:
-            year1, year2 = years.split("-")
-            year1 = int(year1)
-            year2 = int(year2)
-            year_sets.append((year1, year2))
-
-        elif years != "":
-            error_str = "Error interpreting years %s" % (years)
-            print(error_str)
-            raise ValueError(error_str)
-
-    return year_sets
-
+def set_mapping_file(c: Dict[str, Any]) -> None:
+    if c["mapping_file"] and (c["mapping_file"] != "glb"):
+        directory: str = os.path.dirname(c["mapping_file"])
+        if not directory:
+            # We use the mapping file from Mache's [diagnostics > base_path].
+            # However, new mapping files should be added to Mache's [sync > public_diags].
+            # These files will then be synced over.
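+            # For example (illustrative): a bare filename such as
+            #   mapping_file = "map_ne30pg2_to_cmip6_180x360_aave.20200201.nc"
+            # becomes <diagnostics_base_path>/maps/map_ne30pg2_to_cmip6_180x360_aave.20200201.nc below.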
+ c["mapping_file"] = os.path.join( + c["diagnostics_base_path"], "maps", c["mapping_file"] + ) -# ----------------------------------------------------------------------------- -# Return output component name and procedure type based on either -# input_component or input_files +def set_grid(c: Dict[str, Any]) -> None: + # Grid name (if not explicitly defined) + # 'native' if no remapping + # or extracted from mapping filename + if c["grid"] == "": + if c["mapping_file"] == "": + c["grid"] = "native" + elif c["mapping_file"] == "glb": + c["grid"] = "glb" + else: + tmp = os.path.basename(c["mapping_file"]) + # FIXME: W605 invalid escape sequence '\.' + tmp = re.sub("\.[^.]*\.nc$", "", tmp) # noqa: W605 + tmp = tmp.split("_") + if tmp[0] == "map": + c["grid"] = f"{tmp[-2]}_{tmp[-1]}" + else: + raise ValueError( + f"Cannot extract target grid name from mapping file {c['mapping_file']}" + ) + # If grid is defined, just use that -def getComponent(input_component, input_files): - if input_component != "": - tmp = input_component +# Output component (for directory structure) and procedure type for ncclimo +def set_component_and_prc_typ(c: Dict[str, Any]) -> None: + if c["input_component"] != "": + tmp = c["input_component"] else: - tmp = input_files.split(".")[0] - + tmp = c["input_files"].split(".")[0] + component: str # Default ncclim procedure type is "sgs" - prc_typ = "sgs" - - # Output component (for directory structure) and ncclimo procedure type + prc_typ: str = "sgs" if tmp in ("cam", "eam", "eamxx"): component = "atm" prc_typ = tmp @@ -166,37 +217,172 @@ def getComponent(input_component, input_files): component = "rof" else: raise ValueError( - f"Cannot extract output component name from {input_component} or {input_files}." + f"Cannot extract output component name from {c['input_component']} or {c['input_files']}." ) - - return component, prc_typ + c["component"] = component + c["prc_typ"] = prc_typ -# ----------------------------------------------------------------------------- +def check_required_parameters( + c: Dict[str, Any], sets_with_requirement: Set[str], relevant_parameter: str +) -> None: + requested_sets = set(c["sets"]) + if ( + (sets_with_requirement & requested_sets) + and (relevant_parameter in c.keys()) + and (c[relevant_parameter] == "") + ): + raise ParameterNotProvidedError(relevant_parameter) -def setMappingFile(c): - if c["mapping_file"] and (c["mapping_file"] != "glb"): - directory = os.path.dirname(c["mapping_file"]) - if not directory: - # We use the mapping file from Mache's [diagnostics > base_path]. - # However, new mapping files should be added to Mache's [sync > public_diags]. - # These files will then be synced over. 
- c["mapping_file"] = os.path.join( - c["diagnostics_base_path"], "maps", c["mapping_file"] +# Return all year sets from a configuration given by a list of strings +# "year_begin:year_end:year_freq" +# "year_begin-year_end" +def get_years(years_input) -> List[Tuple[int, int]]: + years_list: List[str] + if type(years_input) == str: + # This will be the case if years_list is missing a trailing comma + years_list = [years_input] + else: + years_list = years_input + year_sets: List[Tuple[int, int]] = [] + for years in years_list: + if years.count(":") == 2: + year_begin: int + year_end: int + year_freq: int + year_begin, year_end, year_freq = tuple( + map(lambda y: int(y), years.split(":")) ) + year1: int = year_begin + year2: int = year1 + year_freq - 1 + while year2 <= year_end: + year_sets.append((year1, year2)) + year1 = year2 + 1 + year2 = year1 + year_freq - 1 + elif years.count("-") == 1: + year1, year2 = tuple(map(lambda y: int(y), years.split("-"))) + year_sets.append((year1, year2)) + elif years != "": + error_str = f"Error interpreting years {years}" + print(error_str) + raise ValueError(error_str) + return year_sets + + +# `for s in year_sets` steps ################################################## + +# This returns a value +def define_or_guess( + c: Dict[str, Any], + first_choice_parameter: str, + second_choice_parameter: str, + guess_type: ParameterGuessType, +) -> Any: + # Determine which type of guess to use. + guess_type_parameter: str = get_guess_type_parameter(guess_type) + # Define a value, if possible. + value: Any + if (first_choice_parameter in c.keys()) and c[first_choice_parameter]: + value = c[first_choice_parameter] + elif c[guess_type_parameter]: + # first_choice_parameter isn't defined, + # so let's make a guess for the value. + value = c[second_choice_parameter] + else: + raise ParameterNotProvidedError(first_choice_parameter) + return value + + +# This updates the dict c +def define_or_guess2( + c: Dict[str, Any], + parameter: str, + backup_option: str, + guess_type: ParameterGuessType, +) -> None: + # Determine which type of guess to use. + guess_type_parameter: str = get_guess_type_parameter(guess_type) + # Define a value, if possible. 
+    if (parameter in c.keys()) and (c[parameter] == ""):
+        if c[guess_type_parameter]:
+            c[parameter] = backup_option
+        else:
+            raise ParameterNotProvidedError(parameter)
+
+
+def get_file_names(script_dir: str, prefix: str):
+    return tuple(
+        [
+            os.path.join(script_dir, f"{prefix}.{suffix}")
+            for suffix in ["bash", "settings", "status"]
+        ]
+    )
+
+
+def check_status(status_file: str) -> bool:
+    skip: bool = False
+    if os.path.isfile(status_file):
+        with open(status_file, "r") as f:
+            tmp: List[str] = f.read().split()
+            if tmp[0] in ("OK", "WAITING", "RUNNING"):
+                skip = True
+                print(f"...skipping because status file says '{tmp[0]}'")

+    return skip
+
+
+def make_executable(script_file: str) -> None:
+    st = os.stat(script_file)
+    os.chmod(script_file, st.st_mode | stat.S_IEXEC)


-# -----------------------------------------------------------------------------
-def submitScript(scriptFile, statusFile, export, job_ids_file, dependFiles=[]):
+def add_dependencies(
+    dependencies: List[str],
+    scriptDir: str,
+    prefix: str,
+    sub: str,
+    start_yr: int,
+    end_yr: int,
+    num_years: int,
+) -> None:
+    y1: int = start_yr
+    y2: int = start_yr + num_years - 1
+    while y2 <= end_yr:
+        dependencies.append(
+            os.path.join(
+                scriptDir, f"{prefix}_{sub}_{y1:04d}-{y2:04d}-{num_years:04d}.status"
+            )
+        )
+        y1 += num_years
+        y2 += num_years
+
+
+def write_settings_file(
+    settings_file: str, task_dict: Dict[str, Any], year_tuple: Tuple[int, int]
+):
+    with open(settings_file, "w") as sf:
+        p = pprint.PrettyPrinter(indent=2, stream=sf)
+        p.pprint(task_dict)
+        p.pprint(year_tuple)
+
+
+def submit_script(
+    script_file: str,
+    status_file: str,
+    export,
+    job_ids_file,
+    dependFiles: List[str] = [],
+    fail_on_dependency_skip: bool = False,
+):

     # id of submitted job, or -1 if not submitted
     jobid = None

     # Handle dependencies
-    dependIds = []
+    dependIds: List[int] = []
     for dependFile in dependFiles:
         if os.path.isfile(dependFile):
+            tmp: List[str]
             with open(dependFile, "r") as f:
                 tmp = f.read().split()
             if tmp[0] in ("OK"):
@@ -204,31 +390,37 @@ def submitScript(scriptFile, statusFile, export, job_ids_file, dependFiles=[]):
             elif tmp[0] in ("WAITING", "RUNNING"):
                 dependIds.append(int(tmp[1]))
             else:
-                print("...skipping because dependency says '%s'" % (tmp[0]))
+                skip_message = f"...skipping because dependency says '{tmp[0]}'"
+                if fail_on_dependency_skip:
+                    raise DependencySkipError(skip_message)
+                else:
+                    print(skip_message)
+                jobid = -1
+                break
+        else:
+            skip_message = f"...skipping because of dependency status file missing\n {dependFile}"
+            if fail_on_dependency_skip:
+                raise DependencySkipError(skip_message)
+            else:
+                print(skip_message)
             jobid = -1
             break
-        else:
-            print(
-                "...skipping because of dependency status file missing\n %s"
-                % (dependFile)
-            )
-            jobid = -1
-            break

     # If no exception occurred during dependency check, proceed with submission
     if jobid != -1:

         # Submit command
+        command: str
         if len(dependIds) == 0:
-            command = f"sbatch --export={export} {scriptFile}"
+            command = f"sbatch --export={export} {script_file}"
         else:
-            jobs = ""
+            jobs: str = ""
             for i in dependIds:
                 jobs += ":{:d}".format(i)
             # Note that `--dependency` does handle bundles even though it lists individual tasks, not bundles.
             # Since each task of a bundle lists "RUNNING <jobid>", the bundle's job ID will be included.
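             # e.g., two dependencies yield something like (illustrative):
             #   sbatch --export=ALL --dependency=afterok:1234:5678 ts_atm_monthly_180x360_aave_1985-1989-0005.bash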
             command = (
-                f"sbatch --export={export} --dependency=afterok{jobs} {scriptFile}"
+                f"sbatch --export={export} --dependency=afterok{jobs} {script_file}"
             )

         # Actual submission
@@ -238,7 +430,7 @@ def submitScript(scriptFile, statusFile, export, job_ids_file, dependFiles=[]):
         out = stdout.decode().strip()
         print(f"...{out}")
         if status != 0 or not out.startswith("Submitted batch job"):
-            error_str = f"Problem submitting script {scriptFile}"
+            error_str = f"Problem submitting script {script_file}"
             print(error_str)
             print(command)
             print(stderr)
@@ -255,68 +447,11 @@ def submitScript(scriptFile, statusFile, export, job_ids_file, dependFiles=[]):

     # Create status file if job has been submitted
     if jobid != -1:
-        with open(statusFile, "w") as f:
-            f.write("WAITING %d\n" % (jobid))
+        with open(status_file, "w") as f:
+            f.write(f"WAITING {jobid:d}\n")

     return jobid


-# -----------------------------------------------------------------------------
-def checkStatus(statusFile):
-
-    skip = False
-    if os.path.isfile(statusFile):
-        with open(statusFile, "r") as f:
-            tmp = f.read().split()
-            if tmp[0] in ("OK", "WAITING", "RUNNING"):
-                skip = True
-                print(f"...skipping because status file says '{tmp[0]}'")
-
-    return skip
-
-
-# -----------------------------------------------------------------------------
-def makeExecutable(scriptFile):
-
-    st = os.stat(scriptFile)
-    os.chmod(scriptFile, st.st_mode | stat.S_IEXEC)
-
-    return
-
-
-# -----------------------------------------------------------------------------
-def print_url(c, task):
-    base_path = c["web_portal_base_path"]
-    base_url = c["web_portal_base_url"]
-    www = c["www"]
-    case = c["case"]
-    if www.startswith(base_path):
-        # TODO: python 3.9 introduces `removeprefix`
-        # This will begin with a "/"
-        www_suffix = www[len(base_path) :]
-        print(f"URL: {base_url}{www_suffix}/{case}/{task}")
-    else:
-        print(f"Could not determine URL from www={www}")
-
-
-# -----------------------------------------------------------------------------
-def add_dependencies(
-    dependencies: List[str],
-    scriptDir: str,
-    prefix: str,
-    sub: str,
-    start_yr: int,
-    end_yr: int,
-    num_years: int,
-):
-    y1: int = start_yr
-    y2: int = start_yr + num_years - 1
-    while y2 <= end_yr:
-        dependencies.append(
-            os.path.join(
-                scriptDir,
-                "%s_%s_%04d-%04d-%04d.status" % (prefix, sub, y1, y2, num_years),
-            )
-        )
-        y1 += num_years
-        y2 += num_years
+def print_url(c: Dict[str, Any], task: str) -> None:
+    print(get_url_message(c, task))