diff --git a/.cirrus.yml b/.cirrus.yml
index 92b8d788e6..c9c1d71859 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -38,7 +38,7 @@ env:
# Conda packages to be installed.
CONDA_CACHE_PACKAGES: "nox pip"
# Git commit hash for iris test data.
- IRIS_TEST_DATA_VERSION: "2.5"
+ IRIS_TEST_DATA_VERSION: "2.7"
# Base directory for the iris-test-data.
IRIS_TEST_DATA_DIR: ${HOME}/iris-test-data
diff --git a/.github/deploy_key.scitools-docs.enc b/.github/deploy_key.scitools-docs.enc
deleted file mode 100644
index 165a7c1970..0000000000
--- a/.github/deploy_key.scitools-docs.enc
+++ /dev/null
@@ -1 +0,0 @@
-gAAAAABZSMeGIlHxHu4oCV_h8shbCRf1qJYoLO9Z0q9uKRDTlytoigzlvfxhN-9WMjc3Js1f1Zg55PfEpTOpL82p6QHF-gqW0k0qGjanO3lnQzM6EzIu3KyJPrVrL-O6edwoPMYKqwsNO3VQHNuEspsFKY0TbjnTPHc45SPU5LjEGX4c_SADSDcLDJm2rbrU2eVkT-gFHy_-ZzK0Di83WlDc79YzIkVe5BAn5PbWv3O9BROR4fJzecbjmWRT_rp1cqI_gaUpVcwTdRK3II9YnazBtW4h2WbCeTcySLD7N4o9K0P71SR6gG_XFbpML3Haf5IUdRi0qPBuvJ_4YVnnuJo6mhiIOJfUEcNj_bbLOYVzPmKyQMHvrPf_lK5JhdX6MUvqluhqHuc0i_z_j1O2y32lB7b1iiY6eE_BsNlXJHlOX1GiXkX0nZLI48p-D22jya44WshWSnVcoalcCDkdbvdFbpOscwXDR3nB-PCOmRUF_d1BlMbp1if-VP0yt3tJ_5yyCrqSRWwFusaibQTF6yoImetl7Am95hh2FjFDNkalHqtarnUv86w-26v1ukcTIjJ0iHzNbCK1m0VMkvE6uDeqRgIZnVKON5cesmM3YbulRrHpaOiSly_sMhLhfg5jTxAuOa319AQGoHEOcRLRUYdw2TQkDEiHGiUh_U4-nC7GTGDGcXyeBIa4ciuC2Qi0QXf9qyEGoIRcU8BP34LDNdtovJoZOBDzhr5Ajnu7yA3GB3TD_kiZrgm6agFuu7a51OMfjezhwGzUJ4X-empPctwm9woOJmPCTFqCvxB2VwVV0L6yngsTooyAHCi5st_AG-p5FIT3VZGx7EgCd68ze9XlRoACoe9XOdSFklbaSMGRbJlvKCPAA0zj4__PfIhlD8Cxwwjq_VXlSr_QxygIGZJlhkT46P9TroolgdipaBp1aQ3_PKHfgw5Y9ZqBKCZF5DOJejqUbfVKUp2JdqoX3yQBD0ByQFdfCuLvoiYcM2ofKdIMvel3Jwn0Nx4NYR2qg3h7FYti0jdrNlC89gnL4tKsf0DAGxZ1UYmqQMWJ3-GKCKrlKyeaHYB2djPRGP8VeoRZh_UorSNHU56KSztK_hTP6P0nFymRJRUSRBMKTaTfJf1aBlk9zJHSe9hOKwxyUNkwcTftGn5P0WNcnaTk3ecTVe-1QJKbPWwMBDzqQtTCsCizgN4UdQsmy4iMYq-LT2TC-JXXo0CPTNDybUj92wSa7KeKTvKnbN8DMZbGRdgy5BOSGw4hMIoIFSB-6tnBIvTntNfMT9ac9e9jKm47Q4qXpaeF3AsvBqxkMRQLaYVppPng6cA49VjJQDZ0gTdPKSSKZkApfeeQL0LLCGwzQ4C52TWK2NJSQ3pvRYI1F0taDQWopIiwFfox-OSYnOJECHkHjxaxhHQzVb3w47xKKZNXbLb-LV7QI-kGuKLfoqO1lq94cw1H-EVrXaGJcDDLjK2jRgdVfDyPsHMcW1oUDJqu8gQ6fCXYPbqJzdmFNFsc1hywHWCU7crV61D2QubwzbLRnP8053MvsMnbdhWtwocTlvvdG-qW6CiEA9Eanfpf0RW1W9oh6yQJ__0vS9UWswqq5ahkkpHY9LTE0US4L3xbFOrq7HgbA2jelTdPVfxo3BfUHuL8oKpFDTzgZi07gNmkhIZfpuXj2KFnm9XM31AsY6V2rXL0xSx-9rvi4FP0LK6V5vQ8OKI8aRPCDyzLUv2xnayMW4yaYg3GHD5yo7pIOswKc6GOEmetPnay3j0dVN3hfpkpfJWhss3vjZ2Zl0NmjJ7OuS25tjUGLy82A1yFSpL8mKRkHZJuMDZbd_Or6gaPVoVT_Otbkh-6pMZuDeOHOUfgey0Z374jCjRpyQ9k-Fpw8ykow8iIIQ088kC5CeQy6jRhD7mO3iR4-U1XKDJQNlNg1z_JYyDrwykp7FFN2sQn7RRYHIXx2iMrEDXdrdTrujMFN6omC13yDuXJukAgZb6zBBUTlonxRUBjUJWt2P-1sRRTsG8mr9EaE5K-xhR5Ust_37L3svNQ0vwLtPLIpWGZHhD8P_dYNR2RL4679xyzI8A7wLY82wFBHrcghAd4UtLJH9ul6IuS_CaVo-gbfowNRaQ0Zw7WHZGIXpZWEx1_zck6qDEaCY8TpQeciBWpH5uJDSYqdLdMwigdQEGzAJ1DHSWsyTrmOR7Lhwi9WqOzfWe4ahxAkAUH_Jdr_i-nGfl_x3OgQdHM7jWVMXDcXEmR0bkw-s0EKXCn20q2bxDkm5SUWkYtWAZ2aZRgo4wHOqGBcP99xZ25mq9uxtNOkLBF81lnVbn_4BAZBNnnKwwj4SafeIW4KR1ZOpnEI47sGUR6NhEk9VtJsv0zeZIv8VjRbNLh3QCxkNMue60SjJ48kjotZSX1RQJN0xwPftiABBf8MX9tyZe8emQvPeIcdQTSQPnYEUx22xZGeeJTNrZ9soQyP6mrkkRihp6o9tG7HT9QEVLGM19wAigwAAMMXGqdGzWwpar30JtJU94gAmIlwFUJqeO_fdJKFspnUyJ6gt5_oHsKNEV7Uz5EJwGpa94tlPJXjvZpu-wWQfu8U0trTU2mTCA0bmZIDID-Xk4vCW_SD4OVnsvWyga4QHSg3AqVTjnjlapAjsYcFjiOo2C_U3besloprpyuAwpTdn7zdfMHIJO0ckBFnXlk8XB3kT0YGrCpBvW6gYMXlnePVcr3wJehCvMg1Q9Dc5fVQUqt65zcjbgiudfzFGtTe9T4f1IttoAtrJgTN4W1mtbZzSK864I_ngaX5YWgZSinjkbocCCFEJDcbiXMnV7OWOZefqW6VZu4BZKEKlN9k2kH3UCECCK3uRAQIPn_48DgaVnAff2-fMADltiosSPJ_a3057acJP0cf-1QsJuV7r3zdzL3shgrMRjpSsSTCYdMhZ6disFGcJg7hJJvtH1FieZ76jps5FYi5lE8Ua9yBKlG4dCGuUBnikvpfy2FLMLFNn-iXLflu2oiBbcLvn_ReZUnFIR6KgGRN8xKEBaXATQVtb2E678GtQptK8PHP2DoAtbsIXUDn60YH04D9pEck8NnmWYAz7sWbiL6OKdaO7jQep4mt3CgkyFC0NCKP9zCbVNtmfHRVmHtckjgfHF-tK_v59KeAuwWPtm7ow2BjynAK42IGR9nWtQFRUZIboaND8UF76YGKFF7kOf_XTvoNrVTCRkD6b8KJy2IFfdoHP6WET9QLvwDSXgYLPlCX9z7aQ_lc57u5d_dGO-7NZ_Qbs69ByyIvQoztVBjw6fa7EzSwccqPfMQL_fiecNCng-r4gHaH6TlgSbfqQOISHxTtvmbym1no560ZsHfnQfuL6BCI8s6OoygxhOnQhaDqyOUVBut_x3VR_DKFMyUazXYNgLbRsdITaAvR-0gIx5TAX9n3A4HwHuiBZCtwRYaiJnW8FX9lk1Y_g5UHL2OC3rsNFui3aBLzAFhx58lALxnxhlUItuHHK9BgexnR2yCj2nOWLoWQzfFaf2_fpjEh_QBHTqUxdQZ8ighg_8lh6hmLbW4PcUxKX71RF
mikLyS3-idlzsiEomNlPNaVllRF21vE6dR-nZ6xsxzTvNB4wumP2irQ9mFBTN1WpiLMyNoEEucA2I848YHUfkZrjTG_dcCQNp7H_2gKdIsZ135lUEG6lYfhLMHTmP5uYxxx3Pipjp6wF2GFCsZPIlIPsgrhbSxqkWg1EOViHtpw6ypFKn7wQHHfnrnHkFWnrKbMARVBjJUB-FhK4b6qLU_k_MTMipemneMUFXlj3EkEhKM18MIHGkIOkwG5QtPYcjUAf_2sZlxSMVnh6sQ8kVwF6lfk_l8jhoO93HUTntZUSv7GrE3s80yJgII4Qw37AdgcJiAkoPn1-17HfSsAy6uRh5-OvrCtkDqQxfuJSyn_4pRMh6hZT7N9pI5limMXXn2nHnxU93UT3qU-smA8q0ECfvK3JwoaYy_llSx0wSBvpmxjLQ302sFYM5FVZ9zRbHuLCCZShVopiyMDLHVJe_1g9Ou1KL-h6RVZgg3Ttyb5m2KDfoHEVLeZkW81YLCsyo7uNb6SVRM-615TIVGT6Eq7oJ6wO2LMDKjEpHKFiOFpY2fpR8noM81UqgLddYfl_lei7RVjaNO98otqE4iSNtpgJgyhAx4CdYm__yQRSXhckR4K7yAhM9Kh5BLbQQnf2_0WS1sWTmNMZZNMfOSqmTCRVwcYvg4TDGOA-vZARbZW1M7npVMldV_SbvgcEZD6InY9c40eheRqS0YD2W2HEZIiNeLRw0y5WBcYuJIpXhI3ViTXx-frJnv0Mo9uwmuLbJmWFcn6RdIVcU68_oPZZlZD4Vm7SjikbuZKF1BF3lXamTTDIBcWiDLwuNDv2lUkURDCWa5WJsfUCfTAJ6PTe8=
\ No newline at end of file
diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml
index a8247a247b..04e26686ea 100644
--- a/.github/workflows/benchmark.yml
+++ b/.github/workflows/benchmark.yml
@@ -1,10 +1,11 @@
-# This is a basic workflow to help you get started with Actions
+# Use ASV to check for performance regressions in the last 24 hours' commits.
name: benchmark-check
on:
- # Triggers the workflow on push or pull request events but only for the master branch
- pull_request:
+ schedule:
+ # Runs every day at 23:00.
+ - cron: "0 23 * * *"
jobs:
benchmark:
@@ -22,13 +23,9 @@ jobs:
steps:
# Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
- - uses: actions/checkout@v2
-
- - name: Fetch the PR base branch too
- run: |
- git fetch --depth=1 origin ${{ github.event.pull_request.base.ref }}
- git branch _base FETCH_HEAD
- echo PR_BASE_SHA=$(git rev-parse _base) >> $GITHUB_ENV
+ - uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
- name: Install Nox
run: |
@@ -65,11 +62,56 @@ jobs:
run: |
echo "OVERRIDE_TEST_DATA_REPOSITORY=${GITHUB_WORKSPACE}/${IRIS_TEST_DATA_PATH}/test_data" >> $GITHUB_ENV
- - name: Run CI benchmarks
+ - name: Run overnight benchmarks
+ run: |
+ first_commit=$(git log --after="$(date -d "1 day ago" +"%Y-%m-%d") 23:00:00" --pretty=format:"%h" | tail -n 1)
+ if [ "$first_commit" != "" ]
+ then
+ nox --session="benchmarks(overnight)" -- $first_commit
+ fi
+
+ - name: Create issues for performance shifts
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
- mkdir --parents benchmarks/.asv
- set -o pipefail
- nox --session="benchmarks(ci compare)" | tee benchmarks/.asv/ci_compare.txt
+ if [ -d benchmarks/.asv/performance-shifts ]
+ then
+ cd benchmarks/.asv/performance-shifts
+ for commit_file in *
+ do
+ commit="${commit_file%.*}"
+ pr_number=$(git log "$commit"^! --oneline | grep -o "#[0-9]*" | tail -1 | cut -c 2-)
+ author=$(gh pr view $pr_number --json author -q '.["author"]["login"]' --repo $GITHUB_REPOSITORY)
+ merger=$(gh pr view $pr_number --json mergedBy -q '.["mergedBy"]["login"]' --repo $GITHUB_REPOSITORY)
+ # Find a valid assignee from author/merger/nothing.
+ if curl -s https://api.github.com/users/$author | grep -q "login"; then
+ assignee=$author
+ elif curl -s https://api.github.com/users/$merger | grep -q "login"; then
+ assignee=$merger
+ else
+ assignee=""
+ fi
+ title="Performance Shift(s): \`$commit\`"
+ body="
+ Benchmark comparison has identified performance shifts at commit \
+ $commit (#$pr_number). Please review the report below and \
+ take corrective/congratulatory action as appropriate \
+ :slightly_smiling_face:
+
+          <details>
+          <summary>Performance shift report</summary>
+
+          \`\`\`
+          $(cat $commit_file)
+          \`\`\`
+
+          </details>
+
+          Generated by GHA run [\`${{github.run_id}}\`](https://github.com/${{github.repository}}/actions/runs/${{github.run_id}})
+ "
+ gh issue create --title "$title" --body "$body" --assignee $assignee --label "Bot" --label "Type: Performance" --repo $GITHUB_REPOSITORY
+ done
+ fi
- name: Archive asv results
if: ${{ always() }}
@@ -78,4 +120,3 @@ jobs:
name: asv-report
path: |
benchmarks/.asv/results
- benchmarks/.asv/ci_compare.txt
diff --git a/.github/workflows/refresh-lockfiles.yml b/.github/workflows/refresh-lockfiles.yml
index b40c3ca446..614bd7bb65 100644
--- a/.github/workflows/refresh-lockfiles.yml
+++ b/.github/workflows/refresh-lockfiles.yml
@@ -2,7 +2,7 @@
# available packages and dependencies.
#
# Environment specifications are given as conda environment.yml files found in
-# `requirements/ci/py**.yml`. These state the pacakges required, the conda channels
+# `requirements/ci/py**.yml`. These state the packages required, the conda channels
# that the packages will be pulled from, and any versions of packages that need to be
# pinned at specific versions.
#
@@ -14,50 +14,14 @@ name: Refresh Lockfiles
on:
workflow_dispatch:
- inputs:
- clobber:
- description: |
- Force the workflow to run, potentially clobbering any commits already made to the branch.
- Enter "yes" or "true" to run.
- default: "no"
schedule:
# Run once a week on a Saturday night
- - cron: 1 0 * * 6
+ # N.B. "should" be quoted, according to
+ # https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#onschedule
+ - cron: "1 0 * * 6"
jobs:
-
- no_clobber:
- if: "github.repository == 'SciTools/iris'"
- runs-on: ubuntu-latest
- steps:
- # check if the auto-update-lockfiles branch exists. If it does, and someone other than
- # the lockfile bot has made the head commit, abort the workflow.
- # This job can be manually overridden by running directly from the github actions panel
- # (known as a "workflow_dispatch") and setting the `clobber` input to "yes".
- - uses: actions/script@v5.1.0
- with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- script: |
- if (context.eventName == "workflow_dispatch") {
- const clobber = context.payload.inputs.clobber || "no";
- if (["yes", "true", "y"].includes(clobber.trim().toLowerCase())) {
- core.info("Manual override, continuing workflow, potentially overwriting previous commits to auto-update-lockfiles");
- return
- }
- }
- github.repos.getBranch({...context.repo, branch: "auto-update-lockfiles"}).then(res => {
- const committer = res.data.commit.commit.committer;
- if (committer && committer.name === "Lockfile bot") {
- core.info("Lockfile bot was the last to push to auto-update-lockfiles. Continue.");
- } else {
- core.setFailed("New commits to auto-update-lockfiles since bot last ran. Abort!");
- }
- }).catch(err => {
- if (err.status === 404) {
- core.info("auto-update-lockfiles branch not found, continue");
- }
- })
gen_lockfiles:
# this is a matrix job: it splits to create new lockfiles for each
@@ -67,14 +31,13 @@ jobs:
# ref: https://tomasvotruba.com/blog/2020/11/16/how-to-make-dynamic-matrix-in-github-actions/
if: "github.repository == 'SciTools/iris'"
runs-on: ubuntu-latest
- needs: no_clobber
strategy:
matrix:
python: ['38']
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: install conda-lock
run: |
source $CONDA/bin/activate base
@@ -96,7 +59,7 @@ jobs:
needs: gen_lockfiles
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: get artifacts
uses: actions/download-artifact@v2
with:
@@ -109,7 +72,7 @@ jobs:
- name: Create Pull Request
id: cpr
- uses: peter-evans/create-pull-request@f22a7da129c901513876a2380e2dae9f8e145330
+ uses: peter-evans/create-pull-request@18f7dc018cc2cd597073088f7c7591b9d1c02672
with:
commit-message: Updated environment lockfiles
committer: "Lockfile bot "
@@ -119,6 +82,8 @@ jobs:
title: "[iris.ci] environment lockfiles auto-update"
body: |
Lockfiles updated to the latest resolvable environment.
+
+          If the CI test suite fails, create a new branch based off this pull request and add the required fixes to that branch.
labels: |
New: Pull Request
Bot
diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml
index f9bb09ce46..008fe56deb 100644
--- a/.github/workflows/stale.yml
+++ b/.github/workflows/stale.yml
@@ -1,16 +1,20 @@
# See https://github.com/actions/stale
name: Stale issues and pull-requests
+
on:
schedule:
- - cron: 0 0 * * *
+ # Run once a day
+ # N.B. "should" be quoted, according to
+ # https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#onschedule
+ - cron: "0 0 * * *"
jobs:
stale:
if: "github.repository == 'SciTools/iris'"
runs-on: ubuntu-latest
steps:
- - uses: actions/stale@v4.1.0
+ - uses: actions/stale@v5
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
@@ -59,11 +63,11 @@ jobs:
stale-pr-label: Stale
# Labels on issues exempted from stale.
- exempt-issue-labels: |
+ exempt-issue-labels:
"Status: Blocked,Status: Decision Required,Peloton 🚴♂️,Good First Issue"
# Labels on prs exempted from stale.
- exempt-pr-labels: |
+ exempt-pr-labels:
"Status: Blocked,Status: Decision Required,Peloton 🚴♂️,Good First Issue"
# Max number of operations per run.
diff --git a/benchmarks/README.md b/benchmarks/README.md
new file mode 100644
index 0000000000..6ea53c3ae8
--- /dev/null
+++ b/benchmarks/README.md
@@ -0,0 +1,97 @@
+# Iris Performance Benchmarking
+
+Iris uses an [Airspeed Velocity](https://github.com/airspeed-velocity/asv)
+(ASV) setup to benchmark performance. This is primarily designed to check for
+performance shifts between commits using statistical analysis, but can also
+be easily repurposed for manual comparative and scalability analyses.
+
+The benchmarks are automatically run overnight
+[by a GitHub Action](../.github/workflows/benchmark.yml), with any notable
+shifts in performance being flagged in a new GitHub issue.
+
+## Running benchmarks
+
+`asv ...` commands must be run from this directory. You will need to have ASV
+installed, as well as Nox (see
+[Benchmark environments](#benchmark-environments)).
+
+[Iris' noxfile](../noxfile.py) includes a `benchmarks` session that provides
+conveniences for setting up before benchmarking, and can also replicate the
+automated overnight run locally. See the session docstring for detail.
+
+### Environment variables
+
+* `OVERRIDE_TEST_DATA_REPOSITORY` - required - some benchmarks use
+`iris-test-data` content, and your local `site.cfg` is not available for
+benchmark scripts.
+* `DATA_GEN_PYTHON` - required - path to a Python executable that can be
+used to generate benchmark test objects/files; see
+[Data generation](#data-generation). The Nox session sets this automatically,
+but will defer to any value already set in the shell.
+* `BENCHMARK_DATA` - optional - path to a directory for benchmark synthetic
+test data, which the benchmark scripts will create if it doesn't already
+exist. Defaults to `<root>/benchmarks/.data/` if not set.
+* `ON_DEMAND_BENCHMARKS` - optional - when set (to any value): benchmarks
+decorated with `@on_demand_benchmark` are included in the ASV run. Usually
+coupled with the ASV `--bench` argument to only run the benchmark(s) of
+interest. Is set during the Nox `cperf` and `sperf` sessions.
+
+## Writing benchmarks
+
+[See the ASV docs](https://asv.readthedocs.io/) for full detail.
+
+### Data generation
+**Important:** be sure not to use the benchmarking environment to generate any
+test objects/files, as this environment changes with each commit being
+benchmarked, creating inconsistent benchmark 'conditions'. The
+[generate_data](./benchmarks/generate_data/__init__.py) module offers a
+solution; read more detail there.
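+
+As a minimal sketch of the pattern (the cube being generated here is purely
+illustrative):
+
+```python
+from .generate_data import BENCHMARK_DATA, REUSE_DATA, run_function_elsewhere
+
+
+def my_testfile():
+    """Create (or re-use) a cached NetCDF file for benchmarking."""
+
+    def _external(save_path_):
+        # Runs in the DATA_GEN_PYTHON environment, NOT the benchmarked one.
+        from iris import save
+        from iris.tests import stock
+
+        save(stock.simple_3d(), save_path_)
+
+    save_path = BENCHMARK_DATA / "my_testfile.nc"
+    if not REUSE_DATA or not save_path.is_file():
+        run_function_elsewhere(_external, save_path_=str(save_path))
+    return save_path
+```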
+
+### ASV re-run behaviour
+
+Note that ASV re-runs a benchmark multiple times between calls to its
+`setup()` routine.
+This is a problem for benchmarking certain Iris operations such as data
+realisation, since the data will no longer be lazy after the first run.
+Consider writing extra steps to restore objects' original state _within_ the
+benchmark itself.
+
+If adding steps to the benchmark will skew the result too much then re-running
+can be disabled by setting an attribute on the benchmark: `number = 1`. To
+maintain result accuracy this should be accompanied by increasing the number of
+repeats _between_ `setup()` calls using the `repeat` attribute.
+`warmup_time = 0` is also advisable since ASV performs independent re-runs to
+estimate run-time, and these will still be subject to the original problem.
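+
+For example, a sketch of a benchmark with re-running disabled
+(`make_lazy_cube()` is a hypothetical helper):
+
+```python
+class TimeDataRealisation:
+    # Run the benchmark body only once per setup() call ...
+    number = 1
+    # ... compensating with more repeats:
+    # (min repeats, max repeats, max total seconds).
+    repeat = (5, 30, 20.0)
+    # Skip warmup runs, which would also realise the data.
+    warmup_time = 0.0
+
+    def setup(self):
+        self.cube = make_lazy_cube()  # Hypothetical helper.
+
+    def time_realise(self):
+        # Only meaningful on the first access after setup() - hence the
+        # attributes above.
+        _ = self.cube.data
+```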
+
+### Scaling / non-Scaling Performance Differences
+
+When comparing performance between commits/file-type/whatever it can be helpful
+to know if the differences exist in scaling or non-scaling parts of the Iris
+functionality in question. This can be done using a size parameter, setting
+one value to be as small as possible (e.g. a scalar `Cube`), and the other to
+be significantly larger (e.g. a 1000x1000 `Cube`). Performance differences
+might only be seen for the larger value, or the smaller, or both, getting you
+closer to the root cause.
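+
+A sketch of this pattern (the helpers and operation are hypothetical):
+
+```python
+class TimeSomeOperation:
+    # One case as small as possible, one significantly larger.
+    params = [1, 1000 * 1000]
+    param_names = ["cube size"]
+
+    def setup(self, size):
+        self.cube = make_cube(size)  # Hypothetical helper.
+
+    def time_operation(self, size):
+        # A difference only at the large size suggests a scaling cost;
+        # a difference at both sizes suggests a fixed overhead.
+        some_operation(self.cube)  # Hypothetical operation under test.
+```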
+
+### On-demand benchmarks
+
+Some benchmarks provide useful insight but are inappropriate to be included in
+a benchmark run by default, e.g. those with long run-times or requiring a local
+file. These benchmarks should be decorated with `@on_demand_benchmark`
+(see [benchmarks init](./benchmarks/__init__.py)), which
+sets the benchmark to only be included in a run when the `ON_DEMAND_BENCHMARKS`
+environment variable is set. Examples include the CPerf and SPerf benchmark
+suites for the UK Met Office NG-VAT project.
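+
+A minimal sketch, assuming a module within [benchmarks](./benchmarks):
+
+```python
+from . import on_demand_benchmark
+
+
+@on_demand_benchmark
+class LocalFileSuite:
+    # Only included in a run when ON_DEMAND_BENCHMARKS is set, e.g. by
+    # the Nox `cperf` and `sperf` sessions.
+    def time_load(self):
+        ...
+```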
+
+## Benchmark environments
+
+We have disabled ASV's standard environment management, instead using an
+environment built using the same Nox scripts as Iris' test environments. This
+is done using ASV's plugin architecture - see
+[asv_delegated_conda.py](asv_delegated_conda.py) and the extra config items in
+[asv.conf.json](asv.conf.json).
+
+(ASV is written to control the environment(s) that benchmarks are run in -
+minimising external factors and also allowing it to compare between a matrix
+of dependencies (each in a separate environment). We have chosen to sacrifice
+these features in favour of testing each commit with its intended dependencies,
+controlled by Nox + lock-files).
diff --git a/benchmarks/asv.conf.json b/benchmarks/asv.conf.json
index 3468b2fca9..7337eaa8c7 100644
--- a/benchmarks/asv.conf.json
+++ b/benchmarks/asv.conf.json
@@ -5,6 +5,7 @@
"repo": "..",
"environment_type": "conda-delegated",
"show_commit_url": "http://github.com/scitools/iris/commit/",
+ "branches": ["upstream/main"],
"benchmark_dir": "./benchmarks",
"env_dir": ".asv/env",
diff --git a/benchmarks/benchmarks/__init__.py b/benchmarks/benchmarks/__init__.py
index 4a964a648d..765eb2195d 100644
--- a/benchmarks/benchmarks/__init__.py
+++ b/benchmarks/benchmarks/__init__.py
@@ -4,5 +4,126 @@
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Common code for benchmarks."""
+from functools import wraps
+from os import environ
+import resource
ARTIFICIAL_DIM_SIZE = int(10e3) # For all artificial cubes, coords etc.
+
+
+def disable_repeat_between_setup(benchmark_object):
+ """
+ Decorator for benchmarks where object persistence would be inappropriate.
+
+ E.g:
+ * Benchmarking data realisation
+ * Benchmarking Cube coord addition
+
+ Can be applied to benchmark classes/methods/functions.
+
+ https://asv.readthedocs.io/en/stable/benchmarks.html#timing-benchmarks
+
+ """
+ # Prevent repeat runs between setup() runs - object(s) will persist after 1st.
+ benchmark_object.number = 1
+ # Compensate for reduced certainty by increasing number of repeats.
+ # (setup() is run between each repeat).
+ # Minimum 5 repeats, run up to 30 repeats / 20 secs whichever comes first.
+ benchmark_object.repeat = (5, 30, 20.0)
+ # ASV uses warmup to estimate benchmark time before planning the real run.
+ # Prevent this, since object(s) will persist after first warmup run,
+ # which would give ASV misleading info (warmups ignore ``number``).
+ benchmark_object.warmup_time = 0.0
+
+ return benchmark_object
+
+
+class TrackAddedMemoryAllocation:
+ """
+ Context manager which measures by how much process resident memory grew,
+ during execution of its enclosed code block.
+
+    Obviously limited as to what it actually measures: it relies on the
+    current process not having significant unused (de-allocated) memory when
+    the tested codeblock runs, and is only reliable when the code allocates a
+    significant amount of new memory.
+
+ Example:
+ with TrackAddedMemoryAllocation() as mb:
+ initial_call()
+ other_call()
+ result = mb.addedmem_mb()
+
+ """
+
+ @staticmethod
+ def process_resident_memory_mb():
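+        # N.B. ru_maxrss is reported in kilobytes on Linux (bytes on Mac).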
+ return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024.0
+
+ def __enter__(self):
+ self.mb_before = self.process_resident_memory_mb()
+ return self
+
+ def __exit__(self, *_):
+ self.mb_after = self.process_resident_memory_mb()
+
+ def addedmem_mb(self):
+ """Return measured memory growth, in Mb."""
+ return self.mb_after - self.mb_before
+
+ @staticmethod
+ def decorator(changed_params: list = None):
+ """
+        Decorate a benchmark to track growth in resident memory during execution.
+
+ Intended for use on ASV ``track_`` benchmarks. Applies the
+ :class:`TrackAddedMemoryAllocation` context manager to the benchmark
+ code, sets the benchmark ``unit`` attribute to ``Mb``. Optionally
+ replaces the benchmark ``params`` attribute with ``changed_params`` -
+ useful to avoid testing very small memory volumes, where the results
+ are vulnerable to noise.
+
+ Parameters
+ ----------
+ changed_params : list
+ Replace the benchmark's ``params`` attribute with this list.
+
+ """
+ if changed_params:
+ # Must make a copy for re-use safety!
+ _changed_params = list(changed_params)
+ else:
+ _changed_params = None
+
+ def _inner_decorator(decorated_func):
+ @wraps(decorated_func)
+ def _inner_func(*args, **kwargs):
+ assert decorated_func.__name__[:6] == "track_"
+ # Run the decorated benchmark within the added memory context manager.
+ with TrackAddedMemoryAllocation() as mb:
+ decorated_func(*args, **kwargs)
+ return mb.addedmem_mb()
+
+ if _changed_params:
+ # Replace the params if replacement provided.
+ _inner_func.params = _changed_params
+ _inner_func.unit = "Mb"
+ return _inner_func
+
+ return _inner_decorator
+
+
+def on_demand_benchmark(benchmark_object):
+ """
+    Decorator. Disables these benchmark(s) unless the ON_DEMAND_BENCHMARKS env var is set.
+
+ For benchmarks that, for whatever reason, should not be run by default.
+ E.g:
+ * Require a local file
+ * Used for scalability analysis instead of commit monitoring.
+
+ Can be applied to benchmark classes/methods/functions.
+
+ """
+ if "ON_DEMAND_BENCHMARKS" in environ:
+ return benchmark_object
diff --git a/benchmarks/benchmarks/aux_factory.py b/benchmarks/benchmarks/aux_factory.py
index 270119da71..45bfa1b515 100644
--- a/benchmarks/benchmarks/aux_factory.py
+++ b/benchmarks/benchmarks/aux_factory.py
@@ -10,9 +10,10 @@
import numpy as np
-from benchmarks import ARTIFICIAL_DIM_SIZE
from iris import aux_factory, coords
+from . import ARTIFICIAL_DIM_SIZE
+
class FactoryCommon:
# TODO: once https://github.com/airspeed-velocity/asv/pull/828 is released:
diff --git a/benchmarks/benchmarks/coords.py b/benchmarks/benchmarks/coords.py
index fce7318d49..5cea1e1e2e 100644
--- a/benchmarks/benchmarks/coords.py
+++ b/benchmarks/benchmarks/coords.py
@@ -10,9 +10,10 @@
import numpy as np
-from benchmarks import ARTIFICIAL_DIM_SIZE
from iris import coords
+from . import ARTIFICIAL_DIM_SIZE, disable_repeat_between_setup
+
def setup():
"""General variables needed by multiple benchmark classes."""
@@ -92,6 +93,23 @@ def setup(self):
def create(self):
return coords.AuxCoord(**self.create_kwargs)
+ def time_points(self):
+ _ = self.component.points
+
+ def time_bounds(self):
+ _ = self.component.bounds
+
+
+@disable_repeat_between_setup
+class AuxCoordLazy(AuxCoord):
+ """Lazy equivalent of :class:`AuxCoord`."""
+
+ def setup(self):
+ super().setup()
+ self.create_kwargs["points"] = self.component.lazy_points()
+ self.create_kwargs["bounds"] = self.component.lazy_bounds()
+ self.setup_common()
+
class CellMeasure(CoordCommon):
def setup(self):
diff --git a/benchmarks/benchmarks/cperf/__init__.py b/benchmarks/benchmarks/cperf/__init__.py
new file mode 100644
index 0000000000..fb311c44dc
--- /dev/null
+++ b/benchmarks/benchmarks/cperf/__init__.py
@@ -0,0 +1,97 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the LGPL license.
+# See COPYING and COPYING.LESSER in the root of the repository for full
+# licensing details.
+"""
+Benchmarks for the CPerf scheme of the UK Met Office's NG-VAT project.
+
+CPerf = comparing performance working with data in UM versus LFRic formats.
+
+Files available from the UK Met Office:
+ moo ls moose:/adhoc/projects/avd/asv/data_for_nightly_tests/
+"""
+import numpy as np
+
+from iris import load_cube
+
+# TODO: remove uses of PARSE_UGRID_ON_LOAD once UGRID parsing is core behaviour.
+from iris.experimental.ugrid import PARSE_UGRID_ON_LOAD
+
+from ..generate_data import BENCHMARK_DATA
+from ..generate_data.ugrid import make_cubesphere_testfile
+
+# The data of the core test UM files has dtype=np.float32 shape=(1920, 2560)
+_UM_DIMS_YX = (1920, 2560)
+# The closest cubesphere size in terms of datapoints is sqrt(1920*2560 / 6)
+# This gives ~= 905, i.e. "C905"
+_N_CUBESPHERE_UM_EQUIVALENT = int(np.sqrt(np.prod(_UM_DIMS_YX) / 6))
+
+
+class SingleDiagnosticMixin:
+ """For use in any benchmark classes that work on a single diagnostic file."""
+
+ params = [
+ ["LFRic", "UM", "UM_lbpack0", "UM_netcdf"],
+ [False, True],
+ [False, True],
+ ]
+ param_names = ["file type", "height dim (len 71)", "time dim (len 3)"]
+
+ def setup(self, file_type, three_d, three_times):
+ if file_type == "LFRic":
+ # Generate an appropriate synthetic LFRic file.
+ if three_times:
+ n_times = 3
+ else:
+ n_times = 1
+
+ # Use a cubesphere size ~equivalent to our UM test data.
+ cells_per_panel_edge = _N_CUBESPHERE_UM_EQUIVALENT
+ create_kwargs = dict(c_size=cells_per_panel_edge, n_times=n_times)
+
+ if three_d:
+ create_kwargs["n_levels"] = 71
+
+ # Will re-use a file if already present.
+ file_path = make_cubesphere_testfile(**create_kwargs)
+
+ else:
+ # Locate the appropriate UM file.
+ if three_times:
+ # pa/pb003 files
+ numeric = "003"
+ else:
+ # pa/pb000 files
+ numeric = "000"
+
+ if three_d:
+ # theta diagnostic, N1280 file w/ 71 levels (1920, 2560, 71)
+ file_name = f"umglaa_pb{numeric}-theta"
+ else:
+ # surface_temp diagnostic, N1280 file (1920, 2560)
+ file_name = f"umglaa_pa{numeric}-surfacetemp"
+
+ file_suffices = {
+ "UM": "", # packed FF (WGDOS lbpack = 1)
+ "UM_lbpack0": ".uncompressed", # unpacked FF (lbpack = 0)
+ "UM_netcdf": ".nc", # UM file -> Iris -> NetCDF file
+ }
+ suffix = file_suffices[file_type]
+
+ file_path = (BENCHMARK_DATA / file_name).with_suffix(suffix)
+ if not file_path.exists():
+ message = "\n".join(
+ [
+ f"Expected local file not found: {file_path}",
+ "Available from the UK Met Office.",
+ ]
+ )
+ raise FileNotFoundError(message)
+
+ self.file_path = file_path
+ self.file_type = file_type
+
+ def load(self):
+ with PARSE_UGRID_ON_LOAD.context():
+ return load_cube(str(self.file_path))
diff --git a/benchmarks/benchmarks/cperf/equality.py b/benchmarks/benchmarks/cperf/equality.py
new file mode 100644
index 0000000000..47eb255513
--- /dev/null
+++ b/benchmarks/benchmarks/cperf/equality.py
@@ -0,0 +1,58 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the LGPL license.
+# See COPYING and COPYING.LESSER in the root of the repository for full
+# licensing details.
+"""
+Equality benchmarks for the CPerf scheme of the UK Met Office's NG-VAT project.
+"""
+from . import SingleDiagnosticMixin
+from .. import on_demand_benchmark
+
+
+class EqualityMixin(SingleDiagnosticMixin):
+ """
+ Uses :class:`SingleDiagnosticMixin` as the realistic case will be comparing
+ :class:`~iris.cube.Cube`\\ s that have been loaded from file.
+ """
+
+ # Cut down the parent parameters.
+ params = [["LFRic", "UM"]]
+
+ def setup(self, file_type, three_d=False, three_times=False):
+ super().setup(file_type, three_d, three_times)
+ self.cube = self.load()
+ self.other_cube = self.load()
+
+
+@on_demand_benchmark
+class CubeEquality(EqualityMixin):
+ """
+ Benchmark time and memory costs of comparing LFRic and UM
+ :class:`~iris.cube.Cube`\\ s.
+ """
+
+ def _comparison(self):
+ _ = self.cube == self.other_cube
+
+ def peakmem_eq(self, file_type):
+ self._comparison()
+
+ def time_eq(self, file_type):
+ self._comparison()
+
+
+@on_demand_benchmark
+class MeshEquality(EqualityMixin):
+ """Provides extra context for :class:`CubeEquality`."""
+
+ params = [["LFRic"]]
+
+ def _comparison(self):
+ _ = self.cube.mesh == self.other_cube.mesh
+
+ def peakmem_eq(self, file_type):
+ self._comparison()
+
+ def time_eq(self, file_type):
+ self._comparison()
diff --git a/benchmarks/benchmarks/cperf/load.py b/benchmarks/benchmarks/cperf/load.py
new file mode 100644
index 0000000000..04bb7e1a61
--- /dev/null
+++ b/benchmarks/benchmarks/cperf/load.py
@@ -0,0 +1,57 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the LGPL license.
+# See COPYING and COPYING.LESSER in the root of the repository for full
+# licensing details.
+"""
+File loading benchmarks for the CPerf scheme of the UK Met Office's NG-VAT project.
+"""
+from . import SingleDiagnosticMixin
+from .. import on_demand_benchmark
+
+
+@on_demand_benchmark
+class SingleDiagnosticLoad(SingleDiagnosticMixin):
+ def time_load(self, _, __, ___):
+ """
+        The 'real world comparison':
+ * UM coords are always realised (DimCoords).
+ * LFRic coords are not realised by default (MeshCoords).
+
+ """
+ cube = self.load()
+ assert cube.has_lazy_data()
+ # UM files load lon/lat as DimCoords, which are always realised.
+ expecting_lazy_coords = self.file_type == "LFRic"
+ for coord_name in "longitude", "latitude":
+ coord = cube.coord(coord_name)
+ assert coord.has_lazy_points() == expecting_lazy_coords
+ assert coord.has_lazy_bounds() == expecting_lazy_coords
+
+ def time_load_w_realised_coords(self, _, __, ___):
+ """A valuable extra comparison where both UM and LFRic coords are realised."""
+ cube = self.load()
+ for coord_name in "longitude", "latitude":
+ coord = cube.coord(coord_name)
+ # Don't touch actual points/bounds objects - permanent
+ # realisation plays badly with ASV's re-run strategy.
+ if coord.has_lazy_points():
+ coord.core_points().compute()
+ if coord.has_lazy_bounds():
+ coord.core_bounds().compute()
+
+
+@on_demand_benchmark
+class SingleDiagnosticRealise(SingleDiagnosticMixin):
+ # The larger files take a long time to realise.
+ timeout = 600.0
+
+ def setup(self, file_type, three_d, three_times):
+ super().setup(file_type, three_d, three_times)
+ self.loaded_cube = self.load()
+
+ def time_realise(self, _, __, ___):
+ # Don't touch loaded_cube.data - permanent realisation plays badly with
+ # ASV's re-run strategy.
+ assert self.loaded_cube.has_lazy_data()
+ self.loaded_cube.core_data().compute()
diff --git a/benchmarks/benchmarks/cperf/save.py b/benchmarks/benchmarks/cperf/save.py
new file mode 100644
index 0000000000..63eb5c25fb
--- /dev/null
+++ b/benchmarks/benchmarks/cperf/save.py
@@ -0,0 +1,47 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the LGPL license.
+# See COPYING and COPYING.LESSER in the root of the repository for full
+# licensing details.
+"""
+File saving benchmarks for the CPerf scheme of the UK Met Office's NG-VAT project.
+"""
+
+from iris import save
+
+from . import _N_CUBESPHERE_UM_EQUIVALENT, _UM_DIMS_YX
+from .. import TrackAddedMemoryAllocation, on_demand_benchmark
+from ..generate_data.ugrid import (
+ make_cube_like_2d_cubesphere,
+ make_cube_like_umfield,
+)
+
+
+@on_demand_benchmark
+class NetcdfSave:
+ """
+ Benchmark time and memory costs of saving ~large-ish data cubes to netcdf.
+ Parametrised by file type.
+
+ """
+
+ params = ["LFRic", "UM"]
+ param_names = ["data type"]
+
+ def setup(self, data_type):
+ if data_type == "LFRic":
+ self.cube = make_cube_like_2d_cubesphere(
+ n_cube=_N_CUBESPHERE_UM_EQUIVALENT, with_mesh=True
+ )
+ else:
+ self.cube = make_cube_like_umfield(_UM_DIMS_YX)
+
+ def _save_data(self, cube):
+ save(cube, "tmp.nc")
+
+ def time_save_data_netcdf(self, data_type):
+ self._save_data(self.cube)
+
+ @TrackAddedMemoryAllocation.decorator()
+ def track_addedmem_save_data_netcdf(self, data_type):
+ self._save_data(self.cube)
diff --git a/benchmarks/benchmarks/cube.py b/benchmarks/benchmarks/cube.py
index 3cfa6b248b..8a12391684 100644
--- a/benchmarks/benchmarks/cube.py
+++ b/benchmarks/benchmarks/cube.py
@@ -10,11 +10,13 @@
import numpy as np
-from benchmarks import ARTIFICIAL_DIM_SIZE
from iris import analysis, aux_factory, coords, cube
+from . import ARTIFICIAL_DIM_SIZE, disable_repeat_between_setup
+from .generate_data.stock import sample_meshcoord
-def setup():
+
+def setup(*params):
"""General variables needed by multiple benchmark classes."""
global data_1d
global data_2d
@@ -170,6 +172,44 @@ def setup(self):
self.setup_common()
+class MeshCoord:
+ params = [
+ 6, # minimal cube-sphere
+ int(1e6), # realistic cube-sphere size
+ ARTIFICIAL_DIM_SIZE, # To match size in :class:`AuxCoord`
+ ]
+ param_names = ["number of faces"]
+
+ def setup(self, n_faces):
+ mesh_kwargs = dict(
+ n_nodes=n_faces + 2, n_edges=n_faces * 2, n_faces=n_faces
+ )
+
+ self.mesh_coord = sample_meshcoord(sample_mesh_kwargs=mesh_kwargs)
+ self.data = np.zeros(n_faces)
+ self.cube_blank = cube.Cube(data=self.data)
+ self.cube = self.create()
+
+ def create(self):
+ return cube.Cube(
+ data=self.data, aux_coords_and_dims=[(self.mesh_coord, 0)]
+ )
+
+ def time_create(self, n_faces):
+ _ = self.create()
+
+ @disable_repeat_between_setup
+ def time_add(self, n_faces):
+ self.cube_blank.add_aux_coord(self.mesh_coord, 0)
+
+ @disable_repeat_between_setup
+ def time_remove(self, n_faces):
+ self.cube.remove_coord(self.mesh_coord)
+
+ def time_return(self, n_faces):
+ _ = self.cube
+
+
class Merge:
def setup(self):
self.cube_list = cube.CubeList()
diff --git a/benchmarks/benchmarks/experimental/__init__.py b/benchmarks/benchmarks/experimental/__init__.py
new file mode 100644
index 0000000000..f16e400bce
--- /dev/null
+++ b/benchmarks/benchmarks/experimental/__init__.py
@@ -0,0 +1,9 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the LGPL license.
+# See COPYING and COPYING.LESSER in the root of the repository for full
+# licensing details.
+"""
+Benchmark tests for the experimental module.
+
+"""
diff --git a/benchmarks/benchmarks/experimental/ugrid/__init__.py b/benchmarks/benchmarks/experimental/ugrid/__init__.py
new file mode 100644
index 0000000000..3e5f1ae440
--- /dev/null
+++ b/benchmarks/benchmarks/experimental/ugrid/__init__.py
@@ -0,0 +1,195 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the LGPL license.
+# See COPYING and COPYING.LESSER in the root of the repository for full
+# licensing details.
+"""
+Benchmark tests for the experimental.ugrid module.
+
+"""
+
+from copy import deepcopy
+
+import numpy as np
+
+from iris.experimental import ugrid
+
+from ... import ARTIFICIAL_DIM_SIZE, disable_repeat_between_setup
+from ...generate_data.stock import sample_mesh
+
+
+class UGridCommon:
+ """
+ A base class running a generalised suite of benchmarks for any ugrid object.
+ Object to be specified in a subclass.
+
+ ASV will run the benchmarks within this class for any subclasses.
+
+ ASV will not benchmark this class as setup() triggers a NotImplementedError.
+ (ASV has not yet released ABC/abstractmethod support - asv#838).
+
+ """
+
+ params = [
+ 6, # minimal cube-sphere
+ int(1e6), # realistic cube-sphere size
+ ]
+ param_names = ["number of faces"]
+
+ def setup(self, *params):
+ self.object = self.create()
+
+ def create(self):
+ raise NotImplementedError
+
+ def time_create(self, *params):
+    def time_create(self, *params):
+        """Create an instance of the benchmarked object. The create() method
+        is specified in the subclass."""
+ self.create()
+
+ def time_return(self, *params):
+ """Return an instance of the benchmarked object."""
+ _ = self.object
+
+
+class Connectivity(UGridCommon):
+ def setup(self, n_faces):
+        self.array = np.zeros([n_faces, 3], dtype=int)
+ super().setup(n_faces)
+
+ def create(self):
+ return ugrid.Connectivity(
+ indices=self.array, cf_role="face_node_connectivity"
+ )
+
+ def time_indices(self, n_faces):
+ _ = self.object.indices
+
+ def time_location_lengths(self, n_faces):
+ # Proofed against the Connectivity name change (633ed17).
+ if getattr(self.object, "src_lengths", False):
+ meth = self.object.src_lengths
+ else:
+ meth = self.object.location_lengths
+ _ = meth()
+
+ def time_validate_indices(self, n_faces):
+ self.object.validate_indices()
+
+
+@disable_repeat_between_setup
+class ConnectivityLazy(Connectivity):
+ """Lazy equivalent of :class:`Connectivity`."""
+
+ def setup(self, n_faces):
+ super().setup(n_faces)
+ self.array = self.object.lazy_indices()
+ self.object = self.create()
+
+
+class Mesh(UGridCommon):
+ def setup(self, n_faces, lazy=False):
+ ####
+ # Steal everything from the sample mesh for benchmarking creation of a
+ # brand new mesh.
+ source_mesh = sample_mesh(
+ n_nodes=n_faces + 2,
+ n_edges=n_faces * 2,
+ n_faces=n_faces,
+ lazy_values=lazy,
+ )
+
+ def get_coords_and_axes(location):
+ search_kwargs = {f"include_{location}s": True}
+ return [
+ (source_mesh.coord(axis=axis, **search_kwargs), axis)
+ for axis in ("x", "y")
+ ]
+
+ self.mesh_kwargs = dict(
+ topology_dimension=source_mesh.topology_dimension,
+ node_coords_and_axes=get_coords_and_axes("node"),
+ connectivities=source_mesh.connectivities(),
+ edge_coords_and_axes=get_coords_and_axes("edge"),
+ face_coords_and_axes=get_coords_and_axes("face"),
+ )
+ ####
+
+ super().setup(n_faces)
+
+ self.face_node = self.object.face_node_connectivity
+ self.node_x = self.object.node_coords.node_x
+ # Kwargs for reuse in search and remove methods.
+ self.connectivities_kwarg = dict(cf_role="edge_node_connectivity")
+ self.coords_kwarg = dict(include_faces=True)
+
+ # TODO: an opportunity for speeding up runtime if needed, since
+ # eq_object is not needed for all benchmarks. Just don't generate it
+ # within a benchmark - the execution time is large enough that it
+ # could be a significant portion of the benchmark - makes regressions
+ # smaller and could even pick up regressions in copying instead!
+ self.eq_object = deepcopy(self.object)
+
+ def create(self):
+ return ugrid.Mesh(**self.mesh_kwargs)
+
+ def time_add_connectivities(self, n_faces):
+ self.object.add_connectivities(self.face_node)
+
+ def time_add_coords(self, n_faces):
+ self.object.add_coords(node_x=self.node_x)
+
+ def time_connectivities(self, n_faces):
+ _ = self.object.connectivities(**self.connectivities_kwarg)
+
+ def time_coords(self, n_faces):
+ _ = self.object.coords(**self.coords_kwarg)
+
+ def time_eq(self, n_faces):
+ _ = self.object == self.eq_object
+
+ def time_remove_connectivities(self, n_faces):
+ self.object.remove_connectivities(**self.connectivities_kwarg)
+
+ def time_remove_coords(self, n_faces):
+ self.object.remove_coords(**self.coords_kwarg)
+
+
+@disable_repeat_between_setup
+class MeshLazy(Mesh):
+ """Lazy equivalent of :class:`Mesh`."""
+
+ def setup(self, n_faces, lazy=True):
+ super().setup(n_faces, lazy=lazy)
+
+
+class MeshCoord(UGridCommon):
+ # Add extra parameter value to match AuxCoord benchmarking.
+ params = UGridCommon.params + [ARTIFICIAL_DIM_SIZE]
+
+ def setup(self, n_faces, lazy=False):
+ self.mesh = sample_mesh(
+ n_nodes=n_faces + 2,
+ n_edges=n_faces * 2,
+ n_faces=n_faces,
+ lazy_values=lazy,
+ )
+
+ super().setup(n_faces)
+
+ def create(self):
+ return ugrid.MeshCoord(mesh=self.mesh, location="face", axis="x")
+
+ def time_points(self, n_faces):
+ _ = self.object.points
+
+ def time_bounds(self, n_faces):
+ _ = self.object.bounds
+
+
+@disable_repeat_between_setup
+class MeshCoordLazy(MeshCoord):
+ """Lazy equivalent of :class:`MeshCoord`."""
+
+ def setup(self, n_faces, lazy=True):
+ super().setup(n_faces, lazy=lazy)
diff --git a/benchmarks/benchmarks/experimental/ugrid/regions_combine.py b/benchmarks/benchmarks/experimental/ugrid/regions_combine.py
new file mode 100644
index 0000000000..0cac84d0a8
--- /dev/null
+++ b/benchmarks/benchmarks/experimental/ugrid/regions_combine.py
@@ -0,0 +1,253 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the LGPL license.
+# See COPYING and COPYING.LESSER in the root of the repository for full
+# licensing details.
+"""
+Benchmarks stages of operation of the function
+:func:`iris.experimental.ugrid.utils.recombine_submeshes`.
+
+Where possible benchmarks should be parameterised for two sizes of input data:
+ * minimal: enables detection of regressions in parts of the run-time that do
+ NOT scale with data size.
+ * large: large enough to exclusively detect regressions in parts of the
+ run-time that scale with data size. Aim for benchmark time ~20x
+ that of the minimal benchmark.
+
+"""
+import os
+
+import dask.array as da
+import numpy as np
+
+from iris import load, load_cube, save
+from iris.experimental.ugrid import PARSE_UGRID_ON_LOAD
+from iris.experimental.ugrid.utils import recombine_submeshes
+
+from ... import TrackAddedMemoryAllocation
+from ...generate_data.ugrid import make_cube_like_2d_cubesphere
+
+
+class MixinCombineRegions:
+ # Characterise time taken + memory-allocated, for various stages of combine
+ # operations on cubesphere-like test data.
+ params = [4, 500]
+ param_names = ["cubesphere-N"]
+ # For use on 'track_addedmem_..' type benchmarks - result is too noisy.
+ no_small_params = params[1:]
+
+ def _parametrised_cache_filename(self, n_cubesphere, content_name):
+ return f"cube_C{n_cubesphere}_{content_name}.nc"
+
+ def _make_region_cubes(self, full_mesh_cube):
+ """Make a fixed number of region cubes from a full meshcube."""
+ # Divide the cube into regions.
+ n_faces = full_mesh_cube.shape[-1]
+ # Start with a simple list of face indices
+        # first extend to a multiple of 5 (rounding up)
+        n_faces_5s = 5 * ((n_faces + 4) // 5)
+ i_faces = np.arange(n_faces_5s, dtype=int)
+ # reshape (5N,) to (N, 5)
+ i_faces = i_faces.reshape((n_faces_5s // 5, 5))
+ # reorder [2, 3, 4, 0, 1] within each block of 5
+ i_faces = np.concatenate([i_faces[:, 2:], i_faces[:, :2]], axis=1)
+ # flatten to get [2 3 4 0 1 (-) 8 9 10 6 7 (-) 13 14 15 11 12 ...]
+ i_faces = i_faces.flatten()
+        # reduce back to original length, wrap any overflows into valid range
+ i_faces = i_faces[:n_faces] % n_faces
+
+ # Divide into regions -- always slightly uneven, since 7 doesn't divide
+ n_regions = 7
+ n_facesperregion = n_faces // n_regions
+ i_face_regions = (i_faces // n_facesperregion) % n_regions
+ region_inds = [
+ np.where(i_face_regions == i_region)[0]
+ for i_region in range(n_regions)
+ ]
+ # NOTE: this produces 7 regions, with near-adjacent value ranges but
+ # with some points "moved" to an adjacent region.
+ # Also, region-0 is bigger (because of not dividing by 7).
+
+ # Finally, make region cubes with these indices.
+ region_cubes = [full_mesh_cube[..., inds] for inds in region_inds]
+ return region_cubes
+
+ def setup_cache(self):
+ """Cache all the necessary source data on disk."""
+
+ # Control dask, to minimise memory usage + allow largest data.
+ self.fix_dask_settings()
+
+ for n_cubesphere in self.params:
+ # Do for each parameter, since "setup_cache" is NOT parametrised
+ mesh_cube = make_cube_like_2d_cubesphere(
+ n_cube=n_cubesphere, with_mesh=True
+ )
+ # Save to files which include the parameter in the names.
+ save(
+ mesh_cube,
+ self._parametrised_cache_filename(n_cubesphere, "meshcube"),
+ )
+ region_cubes = self._make_region_cubes(mesh_cube)
+ save(
+ region_cubes,
+ self._parametrised_cache_filename(n_cubesphere, "regioncubes"),
+ )
+
+ def setup(
+ self, n_cubesphere, imaginary_data=True, create_result_cube=True
+ ):
+ """
+ The combine-tests "standard" setup operation.
+
+ Load the source cubes (full-mesh + region) from disk.
+ These are specific to the cubesize parameter.
+ The data is cached on disk rather than calculated, to avoid any
+ pre-loading of the process memory allocation.
+
+ If 'imaginary_data' is set (default), the region cubes data is replaced
+ with lazy data in the form of a da.zeros(). Otherwise, the region data
+ is lazy data from the files.
+
+ If 'create_result_cube' is set, create "self.combined_cube" containing
+ the (still lazy) result.
+
+ NOTE: various test classes override + extend this.
+
+ """
+
+ # Load source cubes (full-mesh and regions)
+ with PARSE_UGRID_ON_LOAD.context():
+ self.full_mesh_cube = load_cube(
+ self._parametrised_cache_filename(n_cubesphere, "meshcube")
+ )
+ self.region_cubes = load(
+ self._parametrised_cache_filename(n_cubesphere, "regioncubes")
+ )
+
+ # Remove all var-names from loaded cubes, which can otherwise cause
+ # problems. Also implement 'imaginary' data.
+ for cube in self.region_cubes + [self.full_mesh_cube]:
+ cube.var_name = None
+ for coord in cube.coords():
+ coord.var_name = None
+ if imaginary_data:
+ # Replace cube data (lazy file data) with 'imaginary' data.
+ # This has the same lazy-array attributes, but is allocated by
+ # creating chunks on demand instead of loading from file.
+ data = cube.lazy_data()
+ data = da.zeros(
+ data.shape, dtype=data.dtype, chunks=data.chunksize
+ )
+ cube.data = data
+
+ if create_result_cube:
+ self.recombined_cube = self.recombine()
+
+ # Fix dask usage mode for all the subsequent performance tests.
+ self.fix_dask_settings()
+
+ def fix_dask_settings(self):
+ """
+ Fix "standard" dask behaviour for time+space testing.
+
+ Currently this is single-threaded mode, with known chunksize,
+ which is optimised for space saving so we can test largest data.
+
+ """
+
+ import dask.config as dcfg
+
+ # Use single-threaded, to avoid process-switching costs and minimise memory usage.
+ # N.B. generally may be slower, but use less memory ?
+ dcfg.set(scheduler="single-threaded")
+        # Configure iris._lazy_data.as_lazy_data to aim for 128MiB chunks
+        dcfg.set({"array.chunk-size": "128Mib"})
+
+ def recombine(self):
+ # A handy general shorthand for the main "combine" operation.
+ result = recombine_submeshes(
+ self.full_mesh_cube,
+ self.region_cubes,
+ index_coord_name="i_mesh_face",
+ )
+ return result
+
+
+class CombineRegionsCreateCube(MixinCombineRegions):
+ """
+ Time+memory costs of creating a combined-regions cube.
+
+ The result is lazy, and we don't do the actual calculation.
+
+ """
+
+ def setup(self, n_cubesphere):
+ # In this case only, do *not* create the result cube.
+ # That is the operation we want to test.
+ super().setup(n_cubesphere, create_result_cube=False)
+
+ def time_create_combined_cube(self, n_cubesphere):
+ self.recombine()
+
+ @TrackAddedMemoryAllocation.decorator(MixinCombineRegions.no_small_params)
+ def track_addedmem_create_combined_cube(self, n_cubesphere):
+ self.recombine()
+
+
+class CombineRegionsComputeRealData(MixinCombineRegions):
+ """
+ Time+memory costs of computing combined-regions data.
+ """
+
+ def time_compute_data(self, n_cubesphere):
+ _ = self.recombined_cube.data
+
+ @TrackAddedMemoryAllocation.decorator(MixinCombineRegions.no_small_params)
+ def track_addedmem_compute_data(self, n_cubesphere):
+ _ = self.recombined_cube.data
+
+
+class CombineRegionsSaveData(MixinCombineRegions):
+ """
+ Test saving *only*, having replaced the input cube data with 'imaginary'
+ array data, so that input data is not loaded from disk during the save
+ operation.
+
+ """
+
+ def time_save(self, n_cubesphere):
+ # Save to disk, which must compute data + stream it to file.
+ save(self.recombined_cube, "tmp.nc")
+
+ @TrackAddedMemoryAllocation.decorator(MixinCombineRegions.no_small_params)
+ def track_addedmem_save(self, n_cubesphere):
+ save(self.recombined_cube, "tmp.nc")
+
+ def track_filesize_saved(self, n_cubesphere):
+ save(self.recombined_cube, "tmp.nc")
+ return os.path.getsize("tmp.nc") * 1.0e-6
+
+
+CombineRegionsSaveData.track_filesize_saved.unit = "Mb"
+
+
+class CombineRegionsFileStreamedCalc(MixinCombineRegions):
+ """
+ Test the whole cost of file-to-file streaming.
+ Uses the combined cube which is based on lazy data loading from the region
+ cubes on disk.
+ """
+
+ def setup(self, n_cubesphere):
+ # In this case only, do *not* replace the loaded regions data with
+ # 'imaginary' data, as we want to test file-to-file calculation+save.
+ super().setup(n_cubesphere, imaginary_data=False)
+
+ def time_stream_file2file(self, n_cubesphere):
+ # Save to disk, which must compute data + stream it to file.
+ save(self.recombined_cube, "tmp.nc")
+
+ @TrackAddedMemoryAllocation.decorator(MixinCombineRegions.no_small_params)
+ def track_addedmem_stream_file2file(self, n_cubesphere):
+ save(self.recombined_cube, "tmp.nc")
diff --git a/benchmarks/benchmarks/generate_data/__init__.py b/benchmarks/benchmarks/generate_data/__init__.py
index a56f2e4623..8874a2c214 100644
--- a/benchmarks/benchmarks/generate_data/__init__.py
+++ b/benchmarks/benchmarks/generate_data/__init__.py
@@ -16,12 +16,16 @@
benchmark sequence runs over two different Python versions.
"""
+from contextlib import contextmanager
from inspect import getsource
from os import environ
from pathlib import Path
from subprocess import CalledProcessError, check_output, run
from textwrap import dedent
+from iris._lazy_data import as_concrete_data
+from iris.fileformats import netcdf
+
#: Python executable used by :func:`run_function_elsewhere`, set via env
#: variable of same name. Must be path of Python within an environment that
#: includes Iris (including dependencies and test modules) and Mule.
@@ -92,3 +96,22 @@ def run_function_elsewhere(func_to_run, *args, **kwargs):
[DATA_GEN_PYTHON, "-c", python_string], capture_output=True, check=True
)
return result.stdout
+
+
+@contextmanager
+def load_realised():
+ """
+ Force NetCDF loading with realised arrays.
+
+    Needed because data is passed between the data generation and
+    benchmarking environments via file, but some benchmarks are only
+    meaningful if they start from realised arrays.
+ """
+ from iris.fileformats.netcdf import _get_cf_var_data as pre_patched
+
+ def patched(cf_var, filename):
+ return as_concrete_data(pre_patched(cf_var, filename))
+
+    netcdf._get_cf_var_data = patched
+    try:
+        yield netcdf
+    finally:
+        # Restore the original, even if the enclosed block raises.
+        netcdf._get_cf_var_data = pre_patched
diff --git a/benchmarks/benchmarks/generate_data/stock.py b/benchmarks/benchmarks/generate_data/stock.py
new file mode 100644
index 0000000000..bbc7dc0a63
--- /dev/null
+++ b/benchmarks/benchmarks/generate_data/stock.py
@@ -0,0 +1,156 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the LGPL license.
+# See COPYING and COPYING.LESSER in the root of the repository for full
+# licensing details.
+"""
+Wrappers for using :mod:`iris.tests.stock` methods for benchmarking.
+
+See :mod:`benchmarks.generate_data` for an explanation of this structure.
+"""
+
+from pathlib import Path
+
+from iris.experimental.ugrid import PARSE_UGRID_ON_LOAD, load_mesh
+
+from . import BENCHMARK_DATA, REUSE_DATA, load_realised, run_function_elsewhere
+
+
+def _create_file__xios_common(func_name, **kwargs):
+ def _external(func_name_, temp_file_dir, **kwargs_):
+ from iris.tests.stock import netcdf
+
+ func = getattr(netcdf, func_name_)
+ print(func(temp_file_dir, **kwargs_), end="")
+
+ args_hash = hash(str(kwargs))
+ save_path = (BENCHMARK_DATA / f"{func_name}_{args_hash}").with_suffix(
+ ".nc"
+ )
+ if not REUSE_DATA or not save_path.is_file():
+ # The xios functions take control of save location so need to move to
+ # a more specific name that allows re-use.
+ actual_path = run_function_elsewhere(
+ _external,
+ func_name_=func_name,
+ temp_file_dir=str(BENCHMARK_DATA),
+ **kwargs,
+ )
+ Path(actual_path.decode()).replace(save_path)
+ return save_path
+
+
+def create_file__xios_2d_face_half_levels(
+ temp_file_dir, dataset_name, n_faces=866, n_times=1
+):
+ """
+ Wrapper for :meth:`iris.tests.stock.netcdf.create_file__xios_2d_face_half_levels`.
+
+    N.B. this wrapper takes control of the save location, so the
+    `temp_file_dir` argument is ignored.
+
+ todo: is create_file__xios_2d_face_half_levels still appropriate now we can
+ properly save Mesh Cubes?
+ """
+
+ return _create_file__xios_common(
+ func_name="create_file__xios_2d_face_half_levels",
+ dataset_name=dataset_name,
+ n_faces=n_faces,
+ n_times=n_times,
+ )
+
+
+def create_file__xios_3d_face_half_levels(
+ temp_file_dir, dataset_name, n_faces=866, n_times=1, n_levels=38
+):
+ """
+ Wrapper for :meth:`iris.tests.stock.netcdf.create_file__xios_3d_face_half_levels`.
+
+    N.B. this wrapper takes control of the save location, so the
+    `temp_file_dir` argument is ignored.
+
+ todo: is create_file__xios_3d_face_half_levels still appropriate now we can
+ properly save Mesh Cubes?
+ """
+
+ return _create_file__xios_common(
+ func_name="create_file__xios_3d_face_half_levels",
+ dataset_name=dataset_name,
+ n_faces=n_faces,
+ n_times=n_times,
+ n_levels=n_levels,
+ )
+
+
+def sample_mesh(n_nodes=None, n_faces=None, n_edges=None, lazy_values=False):
+    """Wrapper for :meth:`iris.tests.stock.mesh.sample_mesh`."""
+
+ def _external(*args, **kwargs):
+ from iris.experimental.ugrid import save_mesh
+ from iris.tests.stock.mesh import sample_mesh
+
+ save_path_ = kwargs.pop("save_path")
+ # Always saving, so laziness is irrelevant. Use lazy to save time.
+ kwargs["lazy_values"] = True
+ new_mesh = sample_mesh(*args, **kwargs)
+ save_mesh(new_mesh, save_path_)
+
+ arg_list = [n_nodes, n_faces, n_edges]
+ args_hash = hash(str(arg_list))
+ save_path = (BENCHMARK_DATA / f"sample_mesh_{args_hash}").with_suffix(
+ ".nc"
+ )
+ if not REUSE_DATA or not save_path.is_file():
+ _ = run_function_elsewhere(
+ _external, *arg_list, save_path=str(save_path)
+ )
+ with PARSE_UGRID_ON_LOAD.context():
+ if not lazy_values:
+ # Realise everything.
+ with load_realised():
+ mesh = load_mesh(str(save_path))
+ else:
+ mesh = load_mesh(str(save_path))
+ return mesh
+
+
+def sample_meshcoord(sample_mesh_kwargs=None, location="face", axis="x"):
+ """
+ Wrapper for :meth:`iris.tests.stock.mesh.sample_meshcoord`.
+
+    Parameters deviate from the original, as a
+    :class:`iris.experimental.ugrid.Mesh` cannot be passed to the separate
+    Python instance - the Mesh must instead be generated there as well.
+
+ MeshCoords cannot be saved to file, so the _external method saves the
+ MeshCoord's Mesh, then the original Python instance loads in that Mesh and
+ regenerates the MeshCoord from there.
+ """
+
+ def _external(sample_mesh_kwargs_, save_path_):
+ from iris.experimental.ugrid import save_mesh
+ from iris.tests.stock.mesh import sample_mesh, sample_meshcoord
+
+ if sample_mesh_kwargs_:
+ input_mesh = sample_mesh(**sample_mesh_kwargs_)
+ else:
+ input_mesh = None
+        # Don't pass the location or axis arguments - only saving the Mesh at
+        # this stage.
+ new_meshcoord = sample_meshcoord(mesh=input_mesh)
+ save_mesh(new_meshcoord.mesh, save_path_)
+
+ args_hash = hash(str(sample_mesh_kwargs))
+ save_path = (
+ BENCHMARK_DATA / f"sample_mesh_coord_{args_hash}"
+ ).with_suffix(".nc")
+ if not REUSE_DATA or not save_path.is_file():
+ _ = run_function_elsewhere(
+ _external,
+ sample_mesh_kwargs_=sample_mesh_kwargs,
+ save_path_=str(save_path),
+ )
+ with PARSE_UGRID_ON_LOAD.context():
+ with load_realised():
+ source_mesh = load_mesh(str(save_path))
+ # Regenerate MeshCoord from its Mesh, which we saved.
+ return source_mesh.to_MeshCoord(location=location, axis=axis)
diff --git a/benchmarks/benchmarks/generate_data/ugrid.py b/benchmarks/benchmarks/generate_data/ugrid.py
new file mode 100644
index 0000000000..527b49a6bb
--- /dev/null
+++ b/benchmarks/benchmarks/generate_data/ugrid.py
@@ -0,0 +1,195 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the LGPL license.
+# See COPYING and COPYING.LESSER in the root of the repository for full
+# licensing details.
+"""
+Scripts for generating supporting data for UGRID-related benchmarking.
+"""
+from iris import load_cube as iris_loadcube
+from iris.experimental.ugrid import PARSE_UGRID_ON_LOAD
+
+from . import BENCHMARK_DATA, REUSE_DATA, load_realised, run_function_elsewhere
+from .stock import (
+ create_file__xios_2d_face_half_levels,
+ create_file__xios_3d_face_half_levels,
+)
+
+
+def generate_cube_like_2d_cubesphere(
+ n_cube: int, with_mesh: bool, output_path: str
+):
+ """
+ Construct and save to file an LFRIc cubesphere-like cube for a given
+ cubesphere size, *or* a simpler structured (UM-like) cube of equivalent
+ size.
+
+ NOTE: this function is *NEVER* called from within this actual package.
+ Instead, it is to be called via benchmarks.remote_data_generation,
+ so that it can use up-to-date facilities, independent of the ASV controlled
+ environment which contains the "Iris commit under test".
+ This means:
+    * it must be completely self-contained, i.e. it includes all its
+ own imports, and saves results to an output file.
+
+ """
+ from iris import save
+ from iris.tests.stock.mesh import sample_mesh, sample_mesh_cube
+
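+    # A C<n> cubesphere has n * n faces per panel, over 6 panels.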
+ n_face_nodes = n_cube * n_cube
+ n_faces = 6 * n_face_nodes
+
+    # Set n_nodes=n_faces and n_edges=2*n_faces.
+    # Not exact, but similar to a 'real' cubesphere.
+ n_nodes = n_faces
+ n_edges = 2 * n_faces
+ if with_mesh:
+ mesh = sample_mesh(
+ n_nodes=n_nodes, n_faces=n_faces, n_edges=n_edges, lazy_values=True
+ )
+ cube = sample_mesh_cube(mesh=mesh, n_z=1)
+ else:
+ cube = sample_mesh_cube(nomesh_faces=n_faces, n_z=1)
+
+ # Strip off the 'extra' aux-coord mapping the mesh, which sample-cube adds
+ # but which we don't want.
+ cube.remove_coord("mesh_face_aux")
+
+ # Save the result to a named file.
+ save(cube, output_path)
+
+
+def make_cube_like_2d_cubesphere(n_cube: int, with_mesh: bool):
+ """
+    Generate an LFRic cubesphere-like cube for a given cubesphere size,
+ *or* a simpler structured (UM-like) cube of equivalent size.
+
+ All the cube data, coords and mesh content are LAZY, and produced without
+ allocating large real arrays (to allow peak-memory testing).
+
+ NOTE: the actual cube generation is done in a stable Iris environment via
+ benchmarks.remote_data_generation, so it is all channeled via cached netcdf
+ files in our common testdata directory.
+
+ """
+ identifying_filename = (
+ f"cube_like_2d_cubesphere_C{n_cube}_Mesh={with_mesh}.nc"
+ )
+ filepath = BENCHMARK_DATA / identifying_filename
+ if not filepath.exists():
+ # Create the required testfile, by running the generation code remotely
+ # in a 'fixed' python environment.
+ run_function_elsewhere(
+ generate_cube_like_2d_cubesphere,
+ n_cube,
+ with_mesh=with_mesh,
+ output_path=str(filepath),
+ )
+
+ # File now *should* definitely exist: content is simply the desired cube.
+ with PARSE_UGRID_ON_LOAD.context():
+ cube = iris_loadcube(str(filepath))
+
+ # Ensure correct laziness.
+ _ = cube.data
+ for coord in cube.coords(mesh_coords=False):
+ assert not coord.has_lazy_points()
+ assert not coord.has_lazy_bounds()
+ if cube.mesh:
+ for coord in cube.mesh.coords():
+ assert coord.has_lazy_points()
+ for conn in cube.mesh.connectivities():
+ assert conn.has_lazy_indices()
+
+ return cube
+
+
+def make_cube_like_umfield(xy_dims):
+ """
+ Create a "UM-like" cube with lazy content, for save performance testing.
+
+ Roughly equivalent to a single current UM cube, to be compared with
+ a "make_cube_like_2d_cubesphere(n_cube=_N_CUBESPHERE_UM_EQUIVALENT)"
+ (see below).
+
+ Note: probably a bit over-simplified, as there is no time coord, but that
+    is equally true of our LFRic-style synthetic data.
+
+ Args:
+ * xy_dims (2-tuple):
+ Set the horizontal dimensions = n-lats, n-lons.
+
+ """
+
+ def _external(xy_dims_, save_path_):
+ from dask import array as da
+ import numpy as np
+
+ from iris import save
+ from iris.coords import DimCoord
+ from iris.cube import Cube
+
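+        # Prepend a single model level to the given (n-lats, n-lons) dims.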
+ nz, ny, nx = (1,) + xy_dims_
+
+        # Base data : note this is float32, not float64 as in LFRic/XIOS outputs.
+ lazy_data = da.zeros((nz, ny, nx), dtype=np.float32)
+ cube = Cube(lazy_data, long_name="structured_phenom")
+
+ # Add simple dim coords also.
+ z_dimco = DimCoord(np.arange(nz), long_name="level", units=1)
+ y_dimco = DimCoord(
+ np.linspace(-90.0, 90.0, ny),
+ standard_name="latitude",
+ units="degrees",
+ )
+ x_dimco = DimCoord(
+ np.linspace(-180.0, 180.0, nx),
+ standard_name="longitude",
+ units="degrees",
+ )
+ for idim, co in enumerate([z_dimco, y_dimco, x_dimco]):
+ cube.add_dim_coord(co, idim)
+
+ save(cube, save_path_)
+
+ save_path = (
+ BENCHMARK_DATA / f"make_cube_like_umfield_{xy_dims}"
+ ).with_suffix(".nc")
+ if not REUSE_DATA or not save_path.is_file():
+ _ = run_function_elsewhere(_external, xy_dims, str(save_path))
+ with PARSE_UGRID_ON_LOAD.context():
+ with load_realised():
+ cube = iris_loadcube(str(save_path))
+
+ return cube
+
+
+def make_cubesphere_testfile(c_size, n_levels=0, n_times=1):
+ """
+    Build a cubesphere testfile of the given c_size in a given directory,
+    with standard naming.
+    If n_levels > 0, the file is 3d, with the specified number of levels.
+ Return the file path.
+
+ todo: is create_file__xios... still appropriate now we can properly save
+ Mesh Cubes?
+
+ """
+ n_faces = 6 * c_size * c_size
+ stem_name = f"mesh_cubesphere_C{c_size}_t{n_times}"
+ kwargs = dict(
+ temp_file_dir=None,
+ dataset_name=stem_name, # N.B. function adds the ".nc" extension
+ n_times=n_times,
+ n_faces=n_faces,
+ )
+
+ three_d = n_levels > 0
+ if three_d:
+ kwargs["n_levels"] = n_levels
+ kwargs["dataset_name"] += f"_{n_levels}levels"
+ func = create_file__xios_3d_face_half_levels
+ else:
+ func = create_file__xios_2d_face_half_levels
+
+ file_path = func(**kwargs)
+ return file_path
diff --git a/benchmarks/benchmarks/import_iris.py b/benchmarks/benchmarks/import_iris.py
index 3e83ea8cfe..ad54c23122 100644
--- a/benchmarks/benchmarks/import_iris.py
+++ b/benchmarks/benchmarks/import_iris.py
@@ -3,240 +3,247 @@
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
-import sys
+from importlib import import_module, reload
class Iris:
- warmup_time = 0
- number = 1
- repeat = 10
-
- def setup(self):
- self.before = set(sys.modules.keys())
-
- def teardown(self):
- after = set(sys.modules.keys())
- diff = after - self.before
- for module in diff:
- sys.modules.pop(module)
+ @staticmethod
+ def _import(module_name):
+ """
+        We have experimented with adding sleep() commands into the imported
+ modules. The results reveal:
+
+ ASV avoids invoking `import x` if nothing gets called in the
+ benchmark (some imports were timed, but only those where calls
+ happened during import).
+
+ Using reload() is not identical to importing, but does produce
+ results that are very close to expected import times, so this is fine
+        for monitoring regressions.
+ It is also ideal for accurate repetitions, without the need to mess
+ with the ASV `number` attribute etc, since cached imports are not used
+ and the repetitions are therefore no faster than the first run.
+ """
+ mod = import_module(module_name)
+ reload(mod)
def time_iris(self):
- import iris
+ self._import("iris")
def time__concatenate(self):
- import iris._concatenate
+ self._import("iris._concatenate")
def time__constraints(self):
- import iris._constraints
+ self._import("iris._constraints")
def time__data_manager(self):
- import iris._data_manager
+ self._import("iris._data_manager")
def time__deprecation(self):
- import iris._deprecation
+ self._import("iris._deprecation")
def time__lazy_data(self):
- import iris._lazy_data
+ self._import("iris._lazy_data")
def time__merge(self):
- import iris._merge
+ self._import("iris._merge")
def time__representation(self):
- import iris._representation
+ self._import("iris._representation")
def time_analysis(self):
- import iris.analysis
+ self._import("iris.analysis")
def time_analysis__area_weighted(self):
- import iris.analysis._area_weighted
+ self._import("iris.analysis._area_weighted")
def time_analysis__grid_angles(self):
- import iris.analysis._grid_angles
+ self._import("iris.analysis._grid_angles")
def time_analysis__interpolation(self):
- import iris.analysis._interpolation
+ self._import("iris.analysis._interpolation")
def time_analysis__regrid(self):
- import iris.analysis._regrid
+ self._import("iris.analysis._regrid")
def time_analysis__scipy_interpolate(self):
- import iris.analysis._scipy_interpolate
+ self._import("iris.analysis._scipy_interpolate")
def time_analysis_calculus(self):
- import iris.analysis.calculus
+ self._import("iris.analysis.calculus")
def time_analysis_cartography(self):
- import iris.analysis.cartography
+ self._import("iris.analysis.cartography")
def time_analysis_geomerty(self):
- import iris.analysis.geometry
+ self._import("iris.analysis.geometry")
def time_analysis_maths(self):
- import iris.analysis.maths
+ self._import("iris.analysis.maths")
def time_analysis_stats(self):
- import iris.analysis.stats
+ self._import("iris.analysis.stats")
def time_analysis_trajectory(self):
- import iris.analysis.trajectory
+ self._import("iris.analysis.trajectory")
def time_aux_factory(self):
- import iris.aux_factory
+ self._import("iris.aux_factory")
def time_common(self):
- import iris.common
+ self._import("iris.common")
def time_common_lenient(self):
- import iris.common.lenient
+ self._import("iris.common.lenient")
def time_common_metadata(self):
- import iris.common.metadata
+ self._import("iris.common.metadata")
def time_common_mixin(self):
- import iris.common.mixin
+ self._import("iris.common.mixin")
def time_common_resolve(self):
- import iris.common.resolve
+ self._import("iris.common.resolve")
def time_config(self):
- import iris.config
+ self._import("iris.config")
def time_coord_categorisation(self):
- import iris.coord_categorisation
+ self._import("iris.coord_categorisation")
def time_coord_systems(self):
- import iris.coord_systems
+ self._import("iris.coord_systems")
def time_coords(self):
- import iris.coords
+ self._import("iris.coords")
def time_cube(self):
- import iris.cube
+ self._import("iris.cube")
def time_exceptions(self):
- import iris.exceptions
+ self._import("iris.exceptions")
def time_experimental(self):
- import iris.experimental
+ self._import("iris.experimental")
def time_fileformats(self):
- import iris.fileformats
+ self._import("iris.fileformats")
def time_fileformats__ff(self):
- import iris.fileformats._ff
+ self._import("iris.fileformats._ff")
def time_fileformats__ff_cross_references(self):
- import iris.fileformats._ff_cross_references
+ self._import("iris.fileformats._ff_cross_references")
def time_fileformats__pp_lbproc_pairs(self):
- import iris.fileformats._pp_lbproc_pairs
+ self._import("iris.fileformats._pp_lbproc_pairs")
def time_fileformats_structured_array_identification(self):
- import iris.fileformats._structured_array_identification
+ self._import("iris.fileformats._structured_array_identification")
def time_fileformats_abf(self):
- import iris.fileformats.abf
+ self._import("iris.fileformats.abf")
def time_fileformats_cf(self):
- import iris.fileformats.cf
+ self._import("iris.fileformats.cf")
def time_fileformats_dot(self):
- import iris.fileformats.dot
+ self._import("iris.fileformats.dot")
def time_fileformats_name(self):
- import iris.fileformats.name
+ self._import("iris.fileformats.name")
def time_fileformats_name_loaders(self):
- import iris.fileformats.name_loaders
+ self._import("iris.fileformats.name_loaders")
def time_fileformats_netcdf(self):
- import iris.fileformats.netcdf
+ self._import("iris.fileformats.netcdf")
def time_fileformats_nimrod(self):
- import iris.fileformats.nimrod
+ self._import("iris.fileformats.nimrod")
def time_fileformats_nimrod_load_rules(self):
- import iris.fileformats.nimrod_load_rules
+ self._import("iris.fileformats.nimrod_load_rules")
def time_fileformats_pp(self):
- import iris.fileformats.pp
+ self._import("iris.fileformats.pp")
def time_fileformats_pp_load_rules(self):
- import iris.fileformats.pp_load_rules
+ self._import("iris.fileformats.pp_load_rules")
def time_fileformats_pp_save_rules(self):
- import iris.fileformats.pp_save_rules
+ self._import("iris.fileformats.pp_save_rules")
def time_fileformats_rules(self):
- import iris.fileformats.rules
+ self._import("iris.fileformats.rules")
def time_fileformats_um(self):
- import iris.fileformats.um
+ self._import("iris.fileformats.um")
def time_fileformats_um__fast_load(self):
- import iris.fileformats.um._fast_load
+ self._import("iris.fileformats.um._fast_load")
def time_fileformats_um__fast_load_structured_fields(self):
- import iris.fileformats.um._fast_load_structured_fields
+ self._import("iris.fileformats.um._fast_load_structured_fields")
def time_fileformats_um__ff_replacement(self):
- import iris.fileformats.um._ff_replacement
+ self._import("iris.fileformats.um._ff_replacement")
def time_fileformats_um__optimal_array_structuring(self):
- import iris.fileformats.um._optimal_array_structuring
+ self._import("iris.fileformats.um._optimal_array_structuring")
def time_fileformats_um_cf_map(self):
- import iris.fileformats.um_cf_map
+ self._import("iris.fileformats.um_cf_map")
def time_io(self):
- import iris.io
+ self._import("iris.io")
def time_io_format_picker(self):
- import iris.io.format_picker
+ self._import("iris.io.format_picker")
def time_iterate(self):
- import iris.iterate
+ self._import("iris.iterate")
def time_palette(self):
- import iris.palette
+ self._import("iris.palette")
def time_plot(self):
- import iris.plot
+ self._import("iris.plot")
def time_quickplot(self):
- import iris.quickplot
+ self._import("iris.quickplot")
def time_std_names(self):
- import iris.std_names
+ self._import("iris.std_names")
def time_symbols(self):
- import iris.symbols
+ self._import("iris.symbols")
def time_tests(self):
- import iris.tests
+ self._import("iris.tests")
def time_time(self):
- import iris.time
+ self._import("iris.time")
def time_util(self):
- import iris.util
+ self._import("iris.util")
# third-party imports
def time_third_party_cartopy(self):
- import cartopy
+ self._import("cartopy")
def time_third_party_cf_units(self):
- import cf_units
+ self._import("cf_units")
def time_third_party_cftime(self):
- import cftime
+ self._import("cftime")
def time_third_party_matplotlib(self):
- import matplotlib
+ self._import("matplotlib")
def time_third_party_numpy(self):
- import numpy
+ self._import("numpy")
def time_third_party_scipy(self):
- import scipy
+ self._import("scipy")
diff --git a/benchmarks/benchmarks/iterate.py b/benchmarks/benchmarks/iterate.py
index 20422750ef..0a5415ac2b 100644
--- a/benchmarks/benchmarks/iterate.py
+++ b/benchmarks/benchmarks/iterate.py
@@ -9,9 +9,10 @@
"""
import numpy as np
-from benchmarks import ARTIFICIAL_DIM_SIZE
from iris import coords, cube, iterate
+from . import ARTIFICIAL_DIM_SIZE
+
def setup():
"""General variables needed by multiple benchmark classes."""
diff --git a/benchmarks/benchmarks/loading.py b/benchmarks/benchmarks/load/__init__.py
similarity index 97%
rename from benchmarks/benchmarks/loading.py
rename to benchmarks/benchmarks/load/__init__.py
index 4558c3b5cb..74a751a46b 100644
--- a/benchmarks/benchmarks/loading.py
+++ b/benchmarks/benchmarks/load/__init__.py
@@ -19,8 +19,8 @@
from iris.cube import Cube
from iris.fileformats.um import structured_um_loading
-from .generate_data import BENCHMARK_DATA, REUSE_DATA, run_function_elsewhere
-from .generate_data.um_files import create_um_files
+from ..generate_data import BENCHMARK_DATA, REUSE_DATA, run_function_elsewhere
+from ..generate_data.um_files import create_um_files
class LoadAndRealise:
diff --git a/benchmarks/benchmarks/load/ugrid.py b/benchmarks/benchmarks/load/ugrid.py
new file mode 100644
index 0000000000..8227a4c5a0
--- /dev/null
+++ b/benchmarks/benchmarks/load/ugrid.py
@@ -0,0 +1,128 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the LGPL license.
+# See COPYING and COPYING.LESSER in the root of the repository for full
+# licensing details.
+"""
+Mesh data loading benchmark tests.
+
+Where possible benchmarks should be parameterised for two sizes of input data:
+ * minimal: enables detection of regressions in parts of the run-time that do
+ NOT scale with data size.
+ * large: large enough to exclusively detect regressions in parts of the
+ run-time that scale with data size. Aim for benchmark time ~20x
+ that of the minimal benchmark.
+
+"""
+
+from iris import load_cube as iris_load_cube
+from iris.experimental.ugrid import PARSE_UGRID_ON_LOAD
+from iris.experimental.ugrid import load_mesh as iris_load_mesh
+
+from ..generate_data.stock import create_file__xios_2d_face_half_levels
+
+
+def synthetic_data(**kwargs):
+ # Ensure all uses of the synthetic data function use the common directory.
+ # File location is controlled by :mod:`generate_data`, hence temp_file_dir=None.
+ return create_file__xios_2d_face_half_levels(temp_file_dir=None, **kwargs)
+
+
+def load_cube(*args, **kwargs):
+ with PARSE_UGRID_ON_LOAD.context():
+ return iris_load_cube(*args, **kwargs)
+
+
+def load_mesh(*args, **kwargs):
+ with PARSE_UGRID_ON_LOAD.context():
+ return iris_load_mesh(*args, **kwargs)
+
+
+class BasicLoading:
+ params = [1, int(4.1e6)]
+ param_names = ["number of faces"]
+
+ def setup_common(self, **kwargs):
+ self.data_path = synthetic_data(**kwargs)
+
+ def setup(self, *args):
+ self.setup_common(dataset_name="Loading", n_faces=args[0])
+
+ def time_load_file(self, *args):
+ _ = load_cube(str(self.data_path))
+
+ def time_load_mesh(self, *args):
+ _ = load_mesh(str(self.data_path))
+
+
+class BasicLoadingTime(BasicLoading):
+ """Same as BasicLoading, but scaling over a time series - an unlimited dimension."""
+
+ param_names = ["number of time steps"]
+
+ def setup(self, *args):
+ self.setup_common(dataset_name="Loading", n_faces=1, n_times=args[0])
+
+
+class DataRealisation:
+ # Prevent repeat runs between setup() runs - data won't be lazy after 1st.
+ number = 1
+ # Compensate for reduced certainty by increasing number of repeats.
+ repeat = (10, 10, 10.0)
+ # Prevent ASV running its warmup, which ignores `number` and would
+ # therefore get a false idea of typical run time since the data would stop
+ # being lazy.
+ warmup_time = 0.0
+ timeout = 300.0
+
+ params = [1, int(4e6)]
+ param_names = ["number of faces"]
+
+ def setup_common(self, **kwargs):
+ data_path = synthetic_data(**kwargs)
+ self.cube = load_cube(str(data_path))
+
+ def setup(self, *args):
+ self.setup_common(dataset_name="Realisation", n_faces=args[0])
+
+ def time_realise_data(self, *args):
+ assert self.cube.has_lazy_data()
+ _ = self.cube.data[0]
+
+
+class DataRealisationTime(DataRealisation):
+ """Same as DataRealisation, but scaling over a time series - an unlimited dimension."""
+
+ param_names = ["number of time steps"]
+
+ def setup(self, *args):
+ self.setup_common(
+ dataset_name="Realisation", n_faces=1, n_times=args[0]
+ )
+
+
+class Callback:
+ params = [1, int(4.5e6)]
+ param_names = ["number of faces"]
+
+ def setup_common(self, **kwargs):
+ def callback(cube, field, filename):
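+            # Thin the cube along its leading dimension - a token workload.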
+ return cube[::2]
+
+ self.data_path = synthetic_data(**kwargs)
+ self.callback = callback
+
+ def setup(self, *args):
+ self.setup_common(dataset_name="Loading", n_faces=args[0])
+
+ def time_load_file_callback(self, *args):
+ _ = load_cube(str(self.data_path), callback=self.callback)
+
+
+class CallbackTime(Callback):
+ """Same as Callback, but scaling over a time series - an unlimited dimension."""
+
+ param_names = ["number of time steps"]
+
+ def setup(self, *args):
+ self.setup_common(dataset_name="Loading", n_faces=1, n_times=args[0])
diff --git a/benchmarks/benchmarks/mixin.py b/benchmarks/benchmarks/mixin.py
index e78b150438..bec5518eee 100644
--- a/benchmarks/benchmarks/mixin.py
+++ b/benchmarks/benchmarks/mixin.py
@@ -10,10 +10,11 @@
import numpy as np
-from benchmarks import ARTIFICIAL_DIM_SIZE
from iris import coords
from iris.common.metadata import AncillaryVariableMetadata
+from . import ARTIFICIAL_DIM_SIZE
+
LONG_NAME = "air temperature"
STANDARD_NAME = "air_temperature"
VAR_NAME = "air_temp"
diff --git a/benchmarks/benchmarks/plot.py b/benchmarks/benchmarks/plot.py
index 24899776dc..75195c86e9 100644
--- a/benchmarks/benchmarks/plot.py
+++ b/benchmarks/benchmarks/plot.py
@@ -10,9 +10,10 @@
import matplotlib
import numpy as np
-from benchmarks import ARTIFICIAL_DIM_SIZE
from iris import coords, cube, plot
+from . import ARTIFICIAL_DIM_SIZE
+
matplotlib.use("agg")
diff --git a/benchmarks/benchmarks/regridding.py b/benchmarks/benchmarks/regridding.py
index 6db33aa192..c315119c11 100644
--- a/benchmarks/benchmarks/regridding.py
+++ b/benchmarks/benchmarks/regridding.py
@@ -25,16 +25,31 @@ def setup(self) -> None:
)
self.cube = iris.load_cube(cube_file_path)
+ # Prepare a tougher cube and chunk it
+ chunked_cube_file_path = tests.get_data_path(
+ ["NetCDF", "regrid", "regrid_xyt.nc"]
+ )
+ self.chunked_cube = iris.load_cube(chunked_cube_file_path)
+
+ # Chunked data makes the regridder run repeatedly
+ self.cube.data = self.cube.lazy_data().rechunk((1, -1, -1))
+
template_file_path = tests.get_data_path(
["NetCDF", "regrid", "regrid_template_global_latlon.nc"]
)
self.template_cube = iris.load_cube(template_file_path)
- # Chunked data makes the regridder run repeatedly
- self.cube.data = self.cube.lazy_data().rechunk((1, -1, -1))
+ # Prepare a regridding scheme
+ self.scheme_area_w = AreaWeighted()
def time_regrid_area_w(self) -> None:
# Regrid the cube onto the template.
- out = self.cube.regrid(self.template_cube, AreaWeighted())
+ out = self.cube.regrid(self.template_cube, self.scheme_area_w)
# Realise the data
out.data
+
+ def time_regrid_area_w_new_grid(self) -> None:
+ # Regrid the chunked cube
+ out = self.chunked_cube.regrid(self.template_cube, self.scheme_area_w)
+ # Realise data
+ out.data
diff --git a/benchmarks/benchmarks/save.py b/benchmarks/benchmarks/save.py
new file mode 100644
index 0000000000..730b63294d
--- /dev/null
+++ b/benchmarks/benchmarks/save.py
@@ -0,0 +1,57 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the LGPL license.
+# See COPYING and COPYING.LESSER in the root of the repository for full
+# licensing details.
+"""
+File saving benchmarks.
+
+Where possible benchmarks should be parameterised for two sizes of input data:
+ * minimal: enables detection of regressions in parts of the run-time that do
+ NOT scale with data size.
+ * large: large enough to exclusively detect regressions in parts of the
+ run-time that scale with data size. Aim for benchmark time ~20x
+ that of the minimal benchmark.
+
+"""
+from iris import save
+from iris.experimental.ugrid import save_mesh
+
+from . import TrackAddedMemoryAllocation
+from .generate_data.ugrid import make_cube_like_2d_cubesphere
+
+
+class NetcdfSave:
+ params = [[1, 600], [False, True]]
+ param_names = ["cubesphere-N", "is_unstructured"]
+    # For use on 'track_addedmem_..' type benchmarks, where small-parameter
+    # results are too noisy.
+ no_small_params = [[600], [True]]
+
+ def setup(self, n_cubesphere, is_unstructured):
+ self.cube = make_cube_like_2d_cubesphere(
+ n_cube=n_cubesphere, with_mesh=is_unstructured
+ )
+
+ def _save_data(self, cube, do_copy=True):
+ if do_copy:
+            # Copy the cube, to avoid distorting the results by changing it,
+            # because we know that older Iris code realises lazy coords.
+ cube = cube.copy()
+ save(cube, "tmp.nc")
+
+ def _save_mesh(self, cube):
+ # In this case, we are happy that the mesh is *not* modified
+ save_mesh(cube.mesh, "mesh.nc")
+
+ def time_netcdf_save_cube(self, n_cubesphere, is_unstructured):
+ self._save_data(self.cube)
+
+ def time_netcdf_save_mesh(self, n_cubesphere, is_unstructured):
+ if is_unstructured:
+ self._save_mesh(self.cube)
+
+ @TrackAddedMemoryAllocation.decorator(no_small_params)
+ def track_addedmem_netcdf_save(self, n_cubesphere, is_unstructured):
+ # Don't need to copy the cube here since track_ benchmarks don't
+ # do repeats between self.setup() calls.
+ self._save_data(self.cube, do_copy=False)
diff --git a/benchmarks/benchmarks/sperf/__init__.py b/benchmarks/benchmarks/sperf/__init__.py
new file mode 100644
index 0000000000..696c8ef4df
--- /dev/null
+++ b/benchmarks/benchmarks/sperf/__init__.py
@@ -0,0 +1,39 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the LGPL license.
+# See COPYING and COPYING.LESSER in the root of the repository for full
+# licensing details.
+"""
+Benchmarks for the SPerf scheme of the UK Met Office's NG-VAT project.
+
+SPerf = assessing performance against a series of increasingly large LFRic
+datasets.
+"""
+from iris import load_cube
+
+# TODO: remove uses of PARSE_UGRID_ON_LOAD once UGRID parsing is core behaviour.
+from iris.experimental.ugrid import PARSE_UGRID_ON_LOAD
+
+from ..generate_data.ugrid import make_cubesphere_testfile
+
+
+class FileMixin:
+ """For use in any benchmark classes that work on a file."""
+
+ params = [
+ [12, 384, 640, 960, 1280, 1668],
+ [1, 36, 72],
+ [1, 3, 36, 72],
+ ]
+ param_names = ["cubesphere_C", "N levels", "N time steps"]
+    # cubesphere_C: notation refers to faces along each panel edge.
+    # e.g. C1 is 6 faces, 8 nodes.
+
+ def setup(self, c_size, n_levels, n_times):
+ self.file_path = make_cubesphere_testfile(
+ c_size=c_size, n_levels=n_levels, n_times=n_times
+ )
+
+ def load_cube(self):
+ with PARSE_UGRID_ON_LOAD.context():
+ return load_cube(str(self.file_path))
diff --git a/benchmarks/benchmarks/sperf/combine_regions.py b/benchmarks/benchmarks/sperf/combine_regions.py
new file mode 100644
index 0000000000..fd2c95c7fc
--- /dev/null
+++ b/benchmarks/benchmarks/sperf/combine_regions.py
@@ -0,0 +1,250 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the LGPL license.
+# See COPYING and COPYING.LESSER in the root of the repository for full
+# licensing details.
+"""
+Region combine benchmarks for the SPerf scheme of the UK Met Office's NG-VAT project.
+"""
+import os.path
+
+from dask import array as da
+import numpy as np
+
+from iris import load, load_cube, save
+from iris.experimental.ugrid import PARSE_UGRID_ON_LOAD
+from iris.experimental.ugrid.utils import recombine_submeshes
+
+from .. import TrackAddedMemoryAllocation, on_demand_benchmark
+from ..generate_data.ugrid import make_cube_like_2d_cubesphere
+
+
+class Mixin:
+ # Characterise time taken + memory-allocated, for various stages of combine
+ # operations on cubesphere-like test data.
+ timeout = 180.0
+ params = [100, 200, 300, 500, 1000, 1668]
+ param_names = ["cubesphere_C"]
+ # Fix result units for the tracking benchmarks.
+ unit = "Mb"
+
+ def _parametrised_cache_filename(self, n_cubesphere, content_name):
+ return f"cube_C{n_cubesphere}_{content_name}.nc"
+
+ def _make_region_cubes(self, full_mesh_cube):
+ """Make a fixed number of region cubes from a full meshcube."""
+ # Divide the cube into regions.
+ n_faces = full_mesh_cube.shape[-1]
+ # Start with a simple list of face indices
+        # first round up to a multiple of 5
+        n_faces_5s = 5 * ((n_faces + 4) // 5)
+ i_faces = np.arange(n_faces_5s, dtype=int)
+ # reshape (5N,) to (N, 5)
+ i_faces = i_faces.reshape((n_faces_5s // 5, 5))
+ # reorder [2, 3, 4, 0, 1] within each block of 5
+ i_faces = np.concatenate([i_faces[:, 2:], i_faces[:, :2]], axis=1)
+ # flatten to get [2 3 4 0 1 (-) 8 9 10 6 7 (-) 13 14 15 11 12 ...]
+ i_faces = i_faces.flatten()
+        # reduce back to original length, wrap any overflows into valid range
+ i_faces = i_faces[:n_faces] % n_faces
+
+        # Divide into regions -- always slightly uneven, since 7 does not
+        # divide n_faces exactly.
+ n_regions = 7
+ n_facesperregion = n_faces // n_regions
+ i_face_regions = (i_faces // n_facesperregion) % n_regions
+ region_inds = [
+ np.where(i_face_regions == i_region)[0]
+ for i_region in range(n_regions)
+ ]
+ # NOTE: this produces 7 regions, with near-adjacent value ranges but
+ # with some points "moved" to an adjacent region.
+ # Also, region-0 is bigger (because of not dividing by 7).
+
+ # Finally, make region cubes with these indices.
+ region_cubes = [full_mesh_cube[..., inds] for inds in region_inds]
+ return region_cubes
+
+ def setup_cache(self):
+ """Cache all the necessary source data on disk."""
+
+ # Control dask, to minimise memory usage + allow largest data.
+ self.fix_dask_settings()
+
+ for n_cubesphere in self.params:
+ # Do for each parameter, since "setup_cache" is NOT parametrised
+ mesh_cube = make_cube_like_2d_cubesphere(
+ n_cube=n_cubesphere, with_mesh=True
+ )
+ # Save to files which include the parameter in the names.
+ save(
+ mesh_cube,
+ self._parametrised_cache_filename(n_cubesphere, "meshcube"),
+ )
+ region_cubes = self._make_region_cubes(mesh_cube)
+ save(
+ region_cubes,
+ self._parametrised_cache_filename(n_cubesphere, "regioncubes"),
+ )
+
+ def setup(
+ self, n_cubesphere, imaginary_data=True, create_result_cube=True
+ ):
+ """
+ The combine-tests "standard" setup operation.
+
+ Load the source cubes (full-mesh + region) from disk.
+        These are specific to the cubesphere-size parameter.
+ The data is cached on disk rather than calculated, to avoid any
+ pre-loading of the process memory allocation.
+
+        If 'imaginary_data' is set (default), the region cubes' data is replaced
+ with lazy data in the form of a da.zeros(). Otherwise, the region data
+ is lazy data from the files.
+
+ If 'create_result_cube' is set, create "self.combined_cube" containing
+ the (still lazy) result.
+
+ NOTE: various test classes override + extend this.
+
+ """
+
+ # Load source cubes (full-mesh and regions)
+ with PARSE_UGRID_ON_LOAD.context():
+ self.full_mesh_cube = load_cube(
+ self._parametrised_cache_filename(n_cubesphere, "meshcube")
+ )
+ self.region_cubes = load(
+ self._parametrised_cache_filename(n_cubesphere, "regioncubes")
+ )
+
+ # Remove all var-names from loaded cubes, which can otherwise cause
+ # problems. Also implement 'imaginary' data.
+ for cube in self.region_cubes + [self.full_mesh_cube]:
+ cube.var_name = None
+ for coord in cube.coords():
+ coord.var_name = None
+ if imaginary_data:
+ # Replace cube data (lazy file data) with 'imaginary' data.
+ # This has the same lazy-array attributes, but is allocated by
+ # creating chunks on demand instead of loading from file.
+ data = cube.lazy_data()
+ data = da.zeros(
+ data.shape, dtype=data.dtype, chunks=data.chunksize
+ )
+ cube.data = data
+
+ if create_result_cube:
+ self.recombined_cube = self.recombine()
+
+ # Fix dask usage mode for all the subsequent performance tests.
+ self.fix_dask_settings()
+
+ def fix_dask_settings(self):
+ """
+ Fix "standard" dask behaviour for time+space testing.
+
+ Currently this is single-threaded mode, with known chunksize,
+        which is optimised to save space, so that we can test the largest data.
+
+ """
+
+ import dask.config as dcfg
+
+ # Use single-threaded, to avoid process-switching costs and minimise memory usage.
+        # N.B. this may generally be slower, but should use less memory.
+ dcfg.set(scheduler="single-threaded")
+        # Configure iris._lazy_data.as_lazy_data to aim for 128Mib chunks.
+ dcfg.set({"array.chunk-size": "128Mib"})
+
+ def recombine(self):
+ # A handy general shorthand for the main "combine" operation.
+ result = recombine_submeshes(
+ self.full_mesh_cube,
+ self.region_cubes,
+ index_coord_name="i_mesh_face",
+ )
+ return result
+
+
+@on_demand_benchmark
+class CreateCube(Mixin):
+ """
+ Time+memory costs of creating a combined-regions cube.
+
+ The result is lazy, and we don't do the actual calculation.
+
+ """
+
+ def setup(
+ self, n_cubesphere, imaginary_data=True, create_result_cube=False
+ ):
+ # In this case only, do *not* create the result cube.
+ # That is the operation we want to test.
+ super().setup(n_cubesphere, imaginary_data, create_result_cube)
+
+ def time_create_combined_cube(self, n_cubesphere):
+ self.recombine()
+
+ @TrackAddedMemoryAllocation.decorator()
+ def track_addedmem_create_combined_cube(self, n_cubesphere):
+ self.recombine()
+
+
+@on_demand_benchmark
+class ComputeRealData(Mixin):
+ """
+ Time+memory costs of computing combined-regions data.
+ """
+
+ def time_compute_data(self, n_cubesphere):
+ _ = self.recombined_cube.data
+
+ @TrackAddedMemoryAllocation.decorator()
+ def track_addedmem_compute_data(self, n_cubesphere):
+ _ = self.recombined_cube.data
+
+
+@on_demand_benchmark
+class SaveData(Mixin):
+ """
+ Test saving *only*, having replaced the input cube data with 'imaginary'
+ array data, so that input data is not loaded from disk during the save
+ operation.
+
+ """
+
+ def time_save(self, n_cubesphere):
+ # Save to disk, which must compute data + stream it to file.
+ save(self.recombined_cube, "tmp.nc")
+
+ @TrackAddedMemoryAllocation.decorator()
+ def track_addedmem_save(self, n_cubesphere):
+ save(self.recombined_cube, "tmp.nc")
+
+ def track_filesize_saved(self, n_cubesphere):
+ save(self.recombined_cube, "tmp.nc")
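+        # Convert bytes to Mb, matching the fixed 'unit' declared above.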
+ return os.path.getsize("tmp.nc") * 1.0e-6
+
+
+@on_demand_benchmark
+class FileStreamedCalc(Mixin):
+ """
+ Test the whole cost of file-to-file streaming.
+    Uses the combined cube, which is based on lazy data loaded from the
+    region cubes on disk.
+ """
+
+ def setup(
+ self, n_cubesphere, imaginary_data=False, create_result_cube=True
+ ):
+ # In this case only, do *not* replace the loaded regions data with
+ # 'imaginary' data, as we want to test file-to-file calculation+save.
+ super().setup(n_cubesphere, imaginary_data, create_result_cube)
+
+ def time_stream_file2file(self, n_cubesphere):
+ # Save to disk, which must compute data + stream it to file.
+ save(self.recombined_cube, "tmp.nc")
+
+ @TrackAddedMemoryAllocation.decorator()
+ def track_addedmem_stream_file2file(self, n_cubesphere):
+ save(self.recombined_cube, "tmp.nc")
diff --git a/benchmarks/benchmarks/sperf/equality.py b/benchmarks/benchmarks/sperf/equality.py
new file mode 100644
index 0000000000..85c73ab92b
--- /dev/null
+++ b/benchmarks/benchmarks/sperf/equality.py
@@ -0,0 +1,36 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the LGPL license.
+# See COPYING and COPYING.LESSER in the root of the repository for full
+# licensing details.
+"""
+Equality benchmarks for the SPerf scheme of the UK Met Office's NG-VAT project.
+"""
+from . import FileMixin
+from .. import on_demand_benchmark
+
+
+@on_demand_benchmark
+class CubeEquality(FileMixin):
+ """
+ Benchmark time and memory costs of comparing :class:`~iris.cube.Cube`\\ s
+ with attached :class:`~iris.experimental.ugrid.mesh.Mesh`\\ es.
+
+ Uses :class:`FileMixin` as the realistic case will be comparing
+ :class:`~iris.cube.Cube`\\ s that have been loaded from file.
+
+ """
+
+    # Cut down the parent parameters.
+ params = [FileMixin.params[0]]
+
+ def setup(self, c_size, n_levels=1, n_times=1):
+ super().setup(c_size, n_levels, n_times)
+ self.cube = self.load_cube()
+ self.other_cube = self.load_cube()
+
+ def peakmem_eq(self, n_cube):
+ _ = self.cube == self.other_cube
+
+ def time_eq(self, n_cube):
+ _ = self.cube == self.other_cube
diff --git a/benchmarks/benchmarks/sperf/load.py b/benchmarks/benchmarks/sperf/load.py
new file mode 100644
index 0000000000..c1d1db43a9
--- /dev/null
+++ b/benchmarks/benchmarks/sperf/load.py
@@ -0,0 +1,32 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the LGPL license.
+# See COPYING and COPYING.LESSER in the root of the repository for full
+# licensing details.
+"""
+File loading benchmarks for the SPerf scheme of the UK Met Office's NG-VAT project.
+"""
+from . import FileMixin
+from .. import on_demand_benchmark
+
+
+@on_demand_benchmark
+class Load(FileMixin):
+ def time_load_cube(self, _, __, ___):
+ _ = self.load_cube()
+
+
+@on_demand_benchmark
+class Realise(FileMixin):
+ # The larger files take a long time to realise.
+ timeout = 600.0
+
+ def setup(self, c_size, n_levels, n_times):
+ super().setup(c_size, n_levels, n_times)
+ self.loaded_cube = self.load_cube()
+
+ def time_realise_cube(self, _, __, ___):
+ # Don't touch loaded_cube.data - permanent realisation plays badly with
+ # ASV's re-run strategy.
+ assert self.loaded_cube.has_lazy_data()
+ self.loaded_cube.core_data().compute()
diff --git a/benchmarks/benchmarks/sperf/save.py b/benchmarks/benchmarks/sperf/save.py
new file mode 100644
index 0000000000..62c84a2619
--- /dev/null
+++ b/benchmarks/benchmarks/sperf/save.py
@@ -0,0 +1,56 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the LGPL license.
+# See COPYING and COPYING.LESSER in the root of the repository for full
+# licensing details.
+"""
+File saving benchmarks for the SPerf scheme of the UK Met Office's NG-VAT project.
+"""
+import os.path
+
+from iris import save
+from iris.experimental.ugrid import save_mesh
+
+from .. import TrackAddedMemoryAllocation, on_demand_benchmark
+from ..generate_data.ugrid import make_cube_like_2d_cubesphere
+
+
+@on_demand_benchmark
+class NetcdfSave:
+ """
+ Benchmark time and memory costs of saving ~large-ish data cubes to netcdf.
+
+ """
+
+ params = [[1, 100, 200, 300, 500, 1000, 1668], [False, True]]
+ param_names = ["cubesphere_C", "is_unstructured"]
+ # Fix result units for the tracking benchmarks.
+ unit = "Mb"
+
+ def setup(self, n_cubesphere, is_unstructured):
+ self.cube = make_cube_like_2d_cubesphere(
+ n_cube=n_cubesphere, with_mesh=is_unstructured
+ )
+
+ def _save_cube(self, cube):
+ save(cube, "tmp.nc")
+
+ def _save_mesh(self, cube):
+ save_mesh(cube.mesh, "mesh.nc")
+
+ def time_save_cube(self, n_cubesphere, is_unstructured):
+ self._save_cube(self.cube)
+
+ @TrackAddedMemoryAllocation.decorator()
+ def track_addedmem_save_cube(self, n_cubesphere, is_unstructured):
+ self._save_cube(self.cube)
+
+ def time_save_mesh(self, n_cubesphere, is_unstructured):
+ if is_unstructured:
+ self._save_mesh(self.cube)
+
+ # The filesizes make a good reference point for the 'addedmem' memory
+ # usage results.
+ def track_filesize_save_cube(self, n_cubesphere, is_unstructured):
+ self._save_cube(self.cube)
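+        # Convert the reported size from bytes to Mb (matching 'unit').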
+ return os.path.getsize("tmp.nc") * 1.0e-6
diff --git a/docs/src/common_links.inc b/docs/src/common_links.inc
index 67fc493e3e..ce7f498d80 100644
--- a/docs/src/common_links.inc
+++ b/docs/src/common_links.inc
@@ -38,6 +38,7 @@
.. _using git: https://docs.github.com/en/github/using-git
.. _requirements/ci/: https://github.com/SciTools/iris/tree/main/requirements/ci
.. _CF-UGRID: https://ugrid-conventions.github.io/ugrid-conventions/
+.. _issues on GitHub: https://github.com/SciTools/iris/issues?q=is%3Aopen+is%3Aissue+sort%3Areactions-%2B1-desc
.. comment
diff --git a/docs/src/conf.py b/docs/src/conf.py
index bb7357c9b5..a1490f6a31 100644
--- a/docs/src/conf.py
+++ b/docs/src/conf.py
@@ -321,6 +321,15 @@ def _dotv(version):
"theme_override.css",
]
+# this allows for using datatables: https://datatables.net/
+html_css_files = [
+ "https://cdn.datatables.net/1.10.23/css/jquery.dataTables.min.css",
+]
+
+html_js_files = [
+ "https://cdn.datatables.net/1.10.23/js/jquery.dataTables.min.js",
+]
+
# url link checker. Some links work but report as broken, lets ignore them.
# See https://www.sphinx-doc.org/en/1.2/config.html#options-for-the-linkcheck-builder
linkcheck_ignore = [
@@ -335,6 +344,7 @@ def _dotv(version):
"https://software.ac.uk/how-cite-software",
"http://www.esrl.noaa.gov/psd/data/gridded/conventions/cdc_netcdf_standard.shtml",
"http://www.nationalarchives.gov.uk/doc/open-government-licence",
+ "https://www.metoffice.gov.uk/",
]
# list of sources to exclude from the build.
diff --git a/docs/src/developers_guide/asv_example_images/commits.png b/docs/src/developers_guide/asv_example_images/commits.png
new file mode 100644
index 0000000000..4e0d695322
Binary files /dev/null and b/docs/src/developers_guide/asv_example_images/commits.png differ
diff --git a/docs/src/developers_guide/asv_example_images/comparison.png b/docs/src/developers_guide/asv_example_images/comparison.png
new file mode 100644
index 0000000000..e146d30696
Binary files /dev/null and b/docs/src/developers_guide/asv_example_images/comparison.png differ
diff --git a/docs/src/developers_guide/asv_example_images/scalability.png b/docs/src/developers_guide/asv_example_images/scalability.png
new file mode 100644
index 0000000000..260c3ef536
Binary files /dev/null and b/docs/src/developers_guide/asv_example_images/scalability.png differ
diff --git a/docs/src/developers_guide/contributing_benchmarks.rst b/docs/src/developers_guide/contributing_benchmarks.rst
new file mode 100644
index 0000000000..65bc9635b6
--- /dev/null
+++ b/docs/src/developers_guide/contributing_benchmarks.rst
@@ -0,0 +1,62 @@
+.. include:: ../common_links.inc
+
+.. _contributing.benchmarks:
+
+Benchmarking
+============
+Iris includes architecture for benchmarking performance and other metrics of
+interest. This is done using the `Airspeed Velocity`_ (ASV) package.
+
+Full detail on the setup and how to run or write benchmarks is in
+`benchmarks/README.md`_ in the Iris repository.
+
+Continuous Integration
+----------------------
+The primary purpose of `Airspeed Velocity`_, and Iris' specific benchmarking
+setup, is to monitor for performance changes using statistical comparison
+between commits, and this forms part of Iris' continuous integration.
+
+Accurately assessing performance takes longer than functionality pass/fail
+tests, so the benchmark suite is not automatically run against open pull
+requests; instead it is **run overnight against each of the commits of the
+previous day** to check if any commit has introduced performance shifts.
+Detected shifts are reported in a new Iris GitHub issue.
+
+If a pull request author/reviewer suspects their changes may cause performance
+shifts, a convenience is available (currently via Nox) to replicate the
+overnight benchmark run but comparing the current ``HEAD`` with a requested
+branch (e.g. ``upstream/main``). Read more in `benchmarks/README.md`_.
+
+Other Uses
+----------
+Even when not statistically comparing commits, ASV's accurate execution time
+results - recorded using a sophisticated system of repeats - have other
+applications.
+
+* Absolute numbers can be interpreted provided they are recorded on a
+ dedicated resource.
+* Results for a series of commits can be visualised for an intuitive
+ understanding of when and why changes occurred.
+
+ .. image:: asv_example_images/commits.png
+ :width: 300
+
+* Parameterised benchmarks make it easy to visualise:
+
+ * Comparisons
+
+ .. image:: asv_example_images/comparison.png
+ :width: 300
+
+ * Scalability
+
+ .. image:: asv_example_images/scalability.png
+ :width: 300
+
+Nor is this limited to execution times: ASV can also measure memory demand,
+and even arbitrary numbers (e.g. file size, regridding accuracy), although
+without the repetition logic that execution timing has.
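+
+For example, a minimal sketch of a parameterised benchmark, following ASV's
+``params`` / ``param_names`` conventions (the class and names here are
+illustrative only, not part of the Iris suite):
+
+.. code-block:: python
+
+    class ExampleSuite:
+        params = [1, 1_000_000]
+        param_names = ["n_points"]
+
+        def setup(self, n_points):
+            # Runs before each timing; setup cost is excluded from results.
+            self.data = list(range(n_points))
+
+        def time_sum(self, n_points):
+            # ASV times each "time_*" method once per parameter value.
+            sum(self.data)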
+
+
+.. _Airspeed Velocity: https://github.com/airspeed-velocity/asv
+.. _benchmarks/README.md: https://github.com/SciTools/iris/blob/main/benchmarks/README.md
diff --git a/docs/src/developers_guide/contributing_ci_tests.rst b/docs/src/developers_guide/contributing_ci_tests.rst
index 0257ff7cff..46848166b3 100644
--- a/docs/src/developers_guide/contributing_ci_tests.rst
+++ b/docs/src/developers_guide/contributing_ci_tests.rst
@@ -72,14 +72,11 @@ New lockfiles are generated automatically each week to ensure that Iris continue
tested against the latest available version of its dependencies.
Each week the yaml files in ``requirements/ci`` are resolved by a GitHub Action.
If the resolved environment has changed, a pull request is created with the new lock files.
-The CI test suite will run on this pull request and fixes for failed tests can be pushed to
-the ``auto-update-lockfiles`` branch to be included in the PR.
-Once a developer has pushed to this branch, the auto-update process will not run again until
-the PR is merged, to prevent overwriting developer commits.
-The auto-updater can still be invoked manually in this situation by going to the `GitHub Actions`_
-page for the workflow, and manually running using the "Run Workflow" button.
-By default, this will also not override developer commits. To force an update, you must
-confirm "yes" in the "Run Worflow" prompt.
+The CI test suite will run on this pull request. If the tests fail, a developer
+will need to create a new branch based off the ``auto-update-lockfiles`` branch
+and add the required fixes to this new branch. If the fixes are made to the
+``auto-update-lockfiles`` branch, these will be overwritten the next time the
+GitHub Action is run.
.. _skipping Cirrus-CI tasks:
diff --git a/docs/src/developers_guide/contributing_getting_involved.rst b/docs/src/developers_guide/contributing_getting_involved.rst
index 1858d5ca9f..05e51eeba5 100644
--- a/docs/src/developers_guide/contributing_getting_involved.rst
+++ b/docs/src/developers_guide/contributing_getting_involved.rst
@@ -37,7 +37,7 @@ If you are new to using GitHub we recommend reading the
`Governance `_
section of the `SciTools`_ ogranization web site.
-.. _GitHub getting started: https://docs.github.com/en/github/getting-started-with-github
+.. _GitHub getting started: https://docs.github.com/en/github/getting-started-with-github
.. toctree::
@@ -50,7 +50,7 @@ If you are new to using GitHub we recommend reading the
contributing_documentation
contributing_codebase_index
contributing_changes
- release
+ release
.. toctree::
@@ -62,5 +62,6 @@ If you are new to using GitHub we recommend reading the
../whatsnew/index
../techpapers/index
../copyright
+ ../voted_issues
diff --git a/docs/src/developers_guide/contributing_testing_index.rst b/docs/src/developers_guide/contributing_testing_index.rst
index c5cf1b997b..7c6eb1b3cc 100644
--- a/docs/src/developers_guide/contributing_testing_index.rst
+++ b/docs/src/developers_guide/contributing_testing_index.rst
@@ -11,3 +11,4 @@ Testing
imagehash_index
contributing_running_tests
contributing_ci_tests
+ contributing_benchmarks
diff --git a/docs/src/index.rst b/docs/src/index.rst
index 872726e673..1a3ad38394 100644
--- a/docs/src/index.rst
+++ b/docs/src/index.rst
@@ -150,7 +150,3 @@ The legacy support resources:
:hidden:
whatsnew/index
-
-
-.. todolist::
-
diff --git a/docs/src/voted_issues.rst b/docs/src/voted_issues.rst
new file mode 100644
index 0000000000..edc1c860a2
--- /dev/null
+++ b/docs/src/voted_issues.rst
@@ -0,0 +1,55 @@
+.. include:: common_links.inc
+
+.. _voted_issues:
+
+Voted Issues
+============
+
+You can help us to prioritise development of new features by leaving a 👍
+reaction on the header (not subsequent comments) of any issue.
+
+.. tip:: We suggest you subscribe to the issue so you will be updated.
+ When viewing the issue there is a **Notifications**
+ section where you can select to subscribe.
+
+Below is a sorted table of all issues that have 1 or more 👍 from our GitHub
+project. Please note that there is more development activity than is shown in
+the table below.
+
+.. _voted-issues.json: https://github.com/scitools/voted_issues/blob/main/voted-issues.json
+
+.. raw:: html
+
+   <table id="voted-issues" class="display" style="width:100%">
+     <thead>
+       <tr>
+         <th>👍</th>
+         <th>Issue</th>
+         <th>Author</th>
+         <th>Title</th>
+       </tr>
+     </thead>
+     <!-- Table rows are populated from voted-issues.json via DataTables. -->
+   </table>
+
+.. note:: The data in this table is updated daily and is sourced from
+ `voted-issues.json`_.
+ For the latest data please see the `issues on GitHub`_.
+   Note that the list on GitHub does not show the number of votes 👍,
+   only the total number of comments for the whole issue.
\ No newline at end of file
diff --git a/docs/src/whatsnew/3.2.rst b/docs/src/whatsnew/3.2.rst
new file mode 100644
index 0000000000..ef3764daa5
--- /dev/null
+++ b/docs/src/whatsnew/3.2.rst
@@ -0,0 +1,384 @@
+.. include:: ../common_links.inc
+
+v3.2 (15 Feb 2022)
+******************
+
+This document explains the changes made to Iris for this release
+(:doc:`View all changes `.)
+
+
+.. dropdown:: :opticon:`report` v3.2.0 Release Highlights
+ :container: + shadow
+ :title: text-primary text-center font-weight-bold
+ :body: bg-light
+ :animate: fade-in
+ :open:
+
+ The highlights for this minor release of Iris include:
+
+ * We've added experimental support for
+ :ref:`Meshes `, which can now be loaded and
+ attached to a cube. Mesh support is based on the `CF-UGRID`_ model.
+ * We've also dropped support for ``Python 3.7``.
+
+ And finally, get in touch with us on :issue:`GitHub` if you have
+ any issues or feature requests for improving Iris. Enjoy!
+
+
+📢 Announcements
+================
+
+#. Welcome to `@wjbenfold`_, `@tinyendian`_, `@larsbarring`_, `@bsherratt`_ and
+ `@aaronspring`_ who made their first contributions to Iris. The first of
+ many we hope!
+#. Congratulations to `@wjbenfold`_ who has become a core developer for Iris! 🎉
+
+
+✨ Features
+===========
+
+#. `@bjlittle`_, `@pp-mo`_, `@trexfeathers`_ and `@stephenworsley`_ added
+ support for :ref:`unstructured meshes `. This involved
+ adding a data model (:pull:`3968`, :pull:`4014`, :pull:`4027`, :pull:`4036`,
+ :pull:`4053`, :pull:`4439`) and API (:pull:`4063`, :pull:`4064`), and
+ supporting representation (:pull:`4033`, :pull:`4054`) of data on meshes.
+ Most of this new API can be found in :mod:`iris.experimental.ugrid`. The key
+ objects introduced are :class:`iris.experimental.ugrid.mesh.Mesh`,
+ :class:`iris.experimental.ugrid.mesh.MeshCoord` and
+ :obj:`iris.experimental.ugrid.load.PARSE_UGRID_ON_LOAD`.
+ A :class:`~iris.experimental.ugrid.mesh.Mesh` contains a full description of a UGRID
+ type mesh. :class:`~iris.experimental.ugrid.mesh.MeshCoord`\ s are coordinates that
+ reference and represent a :class:`~iris.experimental.ugrid.mesh.Mesh` for use
+ on a :class:`~iris.cube.Cube`. :class:`~iris.cube.Cube`\ s are also given the
+ property :attr:`~iris.cube.Cube.mesh` which returns a
+ :class:`~iris.experimental.ugrid.mesh.Mesh` if one is attached to the
+ :class:`~iris.cube.Cube` via a :class:`~iris.experimental.ugrid.mesh.MeshCoord`.
+
+#. `@trexfeathers`_ added support for loading unstructured mesh data from netcdf data,
+ for files using the `CF-UGRID`_ conventions.
+ The context manager :obj:`~iris.experimental.ugrid.load.PARSE_UGRID_ON_LOAD`
+ provides a way to load UGRID files so that :class:`~iris.cube.Cube`\ s can be
+ returned with a :class:`~iris.experimental.ugrid.mesh.Mesh` attached.
+ (:pull:`4058`).
+
+#. `@pp-mo`_ added support to save cubes with :ref:`meshes ` to netcdf
+ files, using the `CF-UGRID`_ conventions.
+ The existing :meth:`iris.save` function now does this, when saving cubes with meshes.
+ A routine :meth:`iris.experimental.ugrid.save.save_mesh` allows saving
+ :class:`~iris.experimental.ugrid.mesh.Mesh` objects to netcdf *without* any associated data
+ (i.e. not attached to cubes).
+ (:pull:`4318` and :pull:`4339`).
+
+#. `@trexfeathers`_ added :meth:`iris.experimental.ugrid.mesh.Mesh.from_coords`
+ for inferring a :class:`~iris.experimental.ugrid.mesh.Mesh` from an
+ appropriate collection of :class:`iris.coords.Coord`\ s.
+
+#. `@larsbarring`_ updated :func:`~iris.util.equalise_attributes` to return a list of dictionaries
+ containing the attributes removed from each :class:`~iris.cube.Cube`. (:pull:`4357`)
+
+#. `@trexfeathers`_ enabled streaming of **all** lazy arrays when saving to
+ NetCDF files (was previously just :class:`~iris.cube.Cube`
+ :attr:`~iris.cube.Cube.data`). This is
+ important given the much greater size of
+ :class:`~iris.coords.AuxCoord` :attr:`~iris.coords.AuxCoord.points` and
+ :class:`~iris.experimental.ugrid.mesh.Connectivity`
+ :attr:`~iris.experimental.ugrid.mesh.Connectivity.indices` under the
+ :ref:`mesh model `. (:pull:`4375`)
+
+#. `@bsherratt`_ added a ``threshold`` parameter to
+ :meth:`~iris.cube.Cube.intersection` (:pull:`4363`)
+
+#. `@wjbenfold`_ added test data to ci benchmarks so that it is accessible to
+ benchmark scripts. Also added a regridding benchmark that uses this data
+ (:pull:`4402`)
+
+#. `@pp-mo`_ updated to the latest CF Standard Names Table ``v78`` (21 Sept 2021).
+ (:issue:`4479`, :pull:`4483`)
+
+#. `@SimonPeatman`_ added support for filenames in the form of a :class:`~pathlib.PurePath`
+ in :func:`~iris.load`, :func:`~iris.load_cube`, :func:`~iris.load_cubes`,
+ :func:`~iris.load_raw` and :func:`~iris.save` (:issue:`3411`, :pull:`3917`).
+ Support for :class:`~pathlib.PurePath` is yet to be implemented across the rest
+ of Iris (:issue:`4523`).
+
+#. `@pp-mo`_ removed broken tooling for deriving Iris metadata translations
+   from `Metarelate`_. From now on we intend to manage phenomenon translation
+ in Iris itself. (:pull:`4484`)
+
+#. `@pp-mo`_ improved printout of various cube data component objects :
+ :class:`~iris.coords.Coord`, :class:`~iris.coords.CellMeasure`,
+ :class:`~iris.coords.AncillaryVariable`,
+ :class:`~iris.experimental.ugrid.mesh.MeshCoord` and
+ :class:`~iris.experimental.ugrid.mesh.Mesh`.
+ These now all provide a more controllable ``summary()`` method, and
+ more convenient and readable ``str()`` and ``repr()`` output in the style of
+ the :class:`iris.cube.Cube`.
+ They also no longer realise lazy data. (:pull:`4499`).
+
+
+🐛 Bugs Fixed
+=============
+
+#. `@rcomer`_ fixed :meth:`~iris.cube.Cube.intersection` for special cases where
+ one cell's bounds align with the requested maximum and negative minimum, fixing
+ :issue:`4221`. (:pull:`4278`)
+
+#. `@bsherratt`_ fixed further edge cases in
+ :meth:`~iris.cube.Cube.intersection`, including :issue:`3698` (:pull:`4363`)
+
+#. `@tinyendian`_ fixed the error message produced by :meth:`~iris.cube.CubeList.concatenate_cube`
+ when a cube list contains cubes with different names, which will no longer report
+ "Cube names differ: var1 != var1" if var1 appears multiple times in the list
+ (:issue:`4342`, :pull:`4345`)
+
+#. `@larsbarring`_ fixed :class:`~iris.coord_systems.GeogCS` to handle spherical ellipsoid
+ parameter inverse_flattening = 0 (:issue:`4146`, :pull:`4348`)
+
+#. `@pdearnshaw`_ fixed an error in the call to :class:`cftime.datetime` in
+ :mod:`~iris.fileformats.pp_save_rules` that prevented the saving to PP of climate
+ means for DJF (:pull:`4391`)
+
+#. `@wjbenfold`_ improved the error message for failure of :meth:`~iris.cube.CubeList.concatenate`
+ to indicate that the value of a scalar coordinate may be mismatched, rather than the metadata
+ (:issue:`4096`, :pull:`4387`)
+
+#. `@bsherratt`_ fixed a regression to the NAME file loader introduced in 3.0.4,
+ as well as some long-standing bugs with vertical coordinates and number
+ formats. (:pull:`4411`)
+
+#. `@rcomer`_ fixed :meth:`~iris.cube.Cube.subset` to always return ``None`` if
+ no value match is found. (:pull:`4417`)
+
+#. `@wjbenfold`_ changed :meth:`iris.util.points_step` to stop it from warning
+ when applied to a single point (:issue:`4250`, :pull:`4367`)
+
+#. `@trexfeathers`_ changed :class:`~iris.coords._DimensionalMetadata` and
+ :class:`~iris.experimental.ugrid.Connectivity` equality methods to preserve
+ array laziness, allowing efficient comparisons even with larger-than-memory
+ objects. (:pull:`4439`)
+
+#. `@rcomer`_ modified :meth:`~iris.cube.Cube.aggregated_by` to calculate new
+ coordinate bounds using minimum and maximum for unordered coordinates,
+ fixing :issue:`1528`. (:pull:`4315`)
+
+#. `@wjbenfold`_ changed how a delayed unit conversion is performed on a cube
+ so that a cube with lazy data awaiting a unit conversion can be pickled.
+ (:issue:`4354`, :pull:`4377`)
+
+#. `@pp-mo`_ fixed a bug in netcdf loading, whereby *any* rotated latlon coordinate
+ was mistakenly interpreted as a latitude, usually resulting in two 'latitude's
+ instead of one latitude and one longitude.
+ (:issue:`4460`, :pull:`4470`)
+
+#. `@wjbenfold`_ stopped :meth:`iris.coord_systems.GeogCS.as_cartopy_projection`
+ from assuming the globe to be the Earth (:issue:`4408`, :pull:`4497`)
+
+#. `@rcomer`_ corrected the ``long_name`` mapping from UM stash code ``m01s09i215``
+ to indicate cloud fraction greater than 7.9 oktas, rather than 7.5
+ (:issue:`3305`, :pull:`4535`)
+
+#. `@lbdreyer`_ fixed a bug in :class:`iris.io.load_http` which was missing an import
+ (:pull:`4580`)
+
+
+💣 Incompatible Changes
+=======================
+
+#. N/A
+
+
+🚀 Performance Enhancements
+===========================
+
+#. `@wjbenfold`_ resolved an issue that previously caused regridding with lazy
+ data to take significantly longer than with real data. Benchmark
+ :class:`benchmarks.HorizontalChunkedRegridding` shows a time decrease
+ from >10s to 625ms. (:issue:`4280`, :pull:`4400`)
+
+#. `@bjlittle`_ included an optimisation to :class:`~iris.cube.Cube.coord_dims`
+ to avoid unnecessary processing whenever a coordinate instance that already
+ exists within the cube is provided. (:pull:`4549`)
+
+
+🔥 Deprecations
+===============
+
+#. `@wjbenfold`_ removed :mod:`iris.experimental.equalise_cubes`. In ``v3.0``
+ the experimental ``equalise_attributes`` functionality was moved to the
+ :mod:`iris.util.equalise_attributes` function. Since then, calling the
+ :func:`iris.experimental.equalise_cubes.equalise_attributes` function raised
+ an exception. (:issue:`3528`, :pull:`4496`)
+
+#. `@wjbenfold`_ deprecated :func:`iris.util.approx_equal` in preference for
+ :func:`math.isclose`. The :func:`~iris.util.approx_equal` function will be
+ removed in a future release of Iris. (:pull:`4514`)
+
+#. `@wjbenfold`_ deprecated :mod:`iris.experimental.raster` as it is not
+ believed to still be in use. The deprecation warnings invite users to contact
+ the Iris Developers if this isn't the case. (:pull:`4525`)
+
+#. `@wjbenfold`_ deprecated :mod:`iris.fileformats.abf` and
+ :mod:`iris.fileformats.dot` as they are not believed to still be in use. The
+ deprecation warnings invite users to contact the Iris Developers if this
+ isn't the case. (:pull:`4515`)
+
+#. `@wjbenfold`_ removed the :func:`iris.util.as_compatible_shape` function,
+ which was deprecated in ``v3.0``. Instead use
+ :class:`iris.common.resolve.Resolve`. For example, rather than calling
+ ``as_compatible_shape(src_cube, target_cube)`` replace with
+ ``Resolve(src_cube, target_cube)(target_cube.core_data())``. (:pull:`4513`)
+
+#. `@wjbenfold`_ deprecated :func:`iris.analysis.maths.intersection_of_cubes` in
+ preference for :meth:`iris.cube.CubeList.extract_overlapping`. The
+ :func:`~iris.analysis.maths.intersection_of_cubes` function will be removed in
+ a future release of Iris. (:pull:`4541`)
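+
+   A minimal sketch of the preferred approach (cubes and coordinate name are
+   illustrative)::
+
+       from iris.cube import CubeList
+
+       overlapping = CubeList([cube_a, cube_b]).extract_overlapping("time")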
+
+#. `@pp-mo`_ deprecated :mod:`iris.experimental.regrid_conservative`. This is
+   now replaced by `iris-esmf-regrid`_. (:pull:`4551`)
+
+#. `@pp-mo`_ deprecated everything in :mod:`iris.experimental.regrid`.
+   Most features have an exact preferred alternative, as suggested, *except*
+   :class:`iris.experimental.regrid.ProjectedUnstructuredLinear`: it has no
+   identical equivalent, but :class:`iris.analysis.UnstructuredNearest` is
+   suggested as a close (though possibly slower) substitute. (:pull:`4548`)
+
+
+🔗 Dependencies
+===============
+
+#. `@bjlittle`_ introduced the ``cartopy >=0.20`` minimum pin.
+ (:pull:`4331`)
+
+#. `@trexfeathers`_ introduced the ``cf-units >=3`` and ``nc-time-axis >=1.3``
+ minimum pins. (:pull:`4356`)
+
+#. `@bjlittle`_ introduced the ``numpy >=1.19`` minimum pin, in
+ accordance with `NEP-29`_ deprecation policy. (:pull:`4386`)
+
+#. `@bjlittle`_ dropped support for ``Python 3.7``, as per the `NEP-29`_
+ backwards compatibility and deprecation policy schedule. (:pull:`4481`)
+
+
+📚 Documentation
+================
+
+#. `@rcomer`_ updated the "Plotting Wind Direction Using Quiver" Gallery
+ example. (:pull:`4120`)
+
+#. `@trexfeathers`_ included `Iris GitHub Discussions`_ in
+ :ref:`get involved `. (:pull:`4307`)
+
+#. `@wjbenfold`_ improved readability in :ref:`userguide interpolation
+ section `. (:pull:`4314`)
+
+#. `@wjbenfold`_ added an explanation of the absence of the ``|`` operator for
+   :class:`iris.Constraint` to :ref:`userguide loading section
+   ` and to the API reference documentation. (:pull:`4321`)
+
+#. `@trexfeathers`_ added more detail on making `iris-test-data`_ available
+ during :ref:`developer_running_tests`. (:pull:`4359`)
+
+#. `@lbdreyer`_ added a section to the release documentation outlining the role
+ of the :ref:`release_manager`. (:pull:`4413`)
+
+#. `@trexfeathers`_ encouraged contributors to include type hinting in code
+ they are working on - :ref:`code_formatting`. (:pull:`4390`)
+
+#. `@wjbenfold`_ updated Cartopy documentation links to point to the renamed
+ :class:`cartopy.mpl.geoaxes.GeoAxes`. (:pull:`4464`)
+
+#. `@wjbenfold`_ clarified behaviour of :func:`iris.load` in :ref:`userguide
+ loading section `. (:pull:`4462`)
+
+#. `@bjlittle`_ migrated readthedocs to use mambaforge for `faster documentation building`_.
+ (:pull:`4476`)
+
+#. `@wjbenfold`_ contributed `@alastair-gemmell`_'s :ref:`step-by-step guide to
+   contributing to the docs `.
+   (:pull:`4461`)
+
+#. `@pp-mo`_ improved and corrected docstrings of
+   :class:`iris.analysis.PointInCell`, making clear what calculation is
+   actually performed. (:pull:`4548`)
+
+#. `@pp-mo`_ removed reference in docstring of
+ :class:`iris.analysis.UnstructuredNearest` to the obsolete (deprecated)
+ :class:`iris.experimental.regrid.ProjectedUnstructuredNearest`.
+ (:pull:`4548`)
+
+
+💼 Internal
+===========
+
+#. `@trexfeathers`_ set the linkcheck to ignore
+ http://www.nationalarchives.gov.uk/doc/open-government-licence since this
+ always works locally, but never within CI. (:pull:`4307`)
+
+#. `@wjbenfold`_ made the netCDF integration tests skip ``TestConstrainedLoad``
+   if the test data is missing (:pull:`4319`)
+
+#. `@wjbenfold`_ excluded ``Good First Issue`` labelled issues from being
+ marked stale. (:pull:`4317`)
+
+#. `@tkknight`_ added additional make targets, including ``html-noapi`` and
+   ``html-quick``, for reducing the time of the documentation build. These
+   are intended for development purposes only. For more information see
+   :ref:`contributing.documentation.building`. (:pull:`4333`)
+
+#. `@rcomer`_ modified the ``animation`` test to prevent it throwing a warning
+ that sometimes interferes with unrelated tests. (:pull:`4330`)
+
+#. `@rcomer`_ removed a now redundant workaround in :func:`~iris.plot.contourf`.
+ (:pull:`4349`)
+
+#. `@trexfeathers`_ refactored :mod:`iris.experimental.ugrid` into sub-modules.
+ (:pull:`4347`).
+
+#. `@bjlittle`_ enabled the `sort-all`_ `pre-commit`_ hook to automatically
+ sort ``__all__`` entries into alphabetical order. (:pull:`4353`)
+
+#. `@rcomer`_ modified a NetCDF saver test to prevent it triggering a numpy
+ deprecation warning. (:issue:`4374`, :pull:`4376`)
+
+#. `@akuhnregnier`_ removed the addition of the period from
+   :func:`~iris.analysis.cartography.wrap_lons` and updated affected tests
+ using ``assertArrayAllClose`` following :issue:`3993`.
+ (:pull:`4421`)
+
+#. `@rcomer`_ updated some tests to work with Matplotlib v3.5. (:pull:`4428`)
+
+#. `@rcomer`_ applied minor fixes to some regridding tests. (:pull:`4432`)
+
+#. `@lbdreyer`_ corrected the license PyPI classifier. (:pull:`4435`)
+
+#. `@aaronspring`_ replaced ``dask`` with
+   ``dask-core`` in the testing environments, reducing the number of
+   dependencies installed for testing. (:pull:`4434`)
+
+#. `@wjbenfold`_ prevented GitHub Actions runs in forks (:issue:`4441`,
+ :pull:`4444`)
+
+#. `@wjbenfold`_ fixed tests for hybrid formulae that weren't being found by
+ nose (:issue:`4431`, :pull:`4450`)
+
+.. comment
Whatsnew author names (@github name) in alphabetical order. Note that
core dev names are automatically included by the common_links.inc:
+
+.. _@aaronspring: https://github.com/aaronspring
+.. _@akuhnregnier: https://github.com/akuhnregnier
+.. _@bsherratt: https://github.com/bsherratt
+.. _@larsbarring: https://github.com/larsbarring
+.. _@pdearnshaw: https://github.com/pdearnshaw
+.. _@SimonPeatman: https://github.com/SimonPeatman
+.. _@tinyendian: https://github.com/tinyendian
+
+.. comment
+ Whatsnew resources in alphabetical order:
+
+.. _NEP-29: https://numpy.org/neps/nep-0029-deprecation_policy.html
+.. _Metarelate: http://www.metarelate.net/
+.. _UGRID: http://ugrid-conventions.github.io/ugrid-conventions/
.. _iris-esmf-regrid: https://github.com/SciTools-incubator/iris-esmf-regrid
+.. _faster documentation building: https://docs.readthedocs.io/en/stable/guides/conda.html#making-builds-faster-with-mamba
+.. _sort-all: https://github.com/aio-libs/sort-all
diff --git a/docs/src/whatsnew/dev.rst b/docs/src/whatsnew/dev.rst
index 27ed876a20..bb37e39c45 100644
--- a/docs/src/whatsnew/dev.rst
+++ b/docs/src/whatsnew/dev.rst
@@ -31,13 +31,19 @@ This document explains the changes made to Iris for this release
✨ Features
===========
-#. N/A
+#. `@wjbenfold`_ added support for ``false_easting`` and ``false_northing`` to
+ :class:`~iris.coord_system.Mercator`. (:issue:`3107`, :pull:`4524`)
🐛 Bugs Fixed
=============
-#. N/A
+#. `@rcomer`_ reverted part of the change from :pull:`3906` so that
+ :func:`iris.plot.plot` no longer defaults to placing a "Y" coordinate (e.g.
+ latitude) on the y-axis of the plot. (:issue:`4493`, :pull:`4601`)
+
+#. `@rcomer`_ enabled passing of scalar objects to :func:`~iris.plot.plot` and
+ :func:`~iris.plot.scatter`. (:pull:`4616`)
💣 Incompatible Changes
@@ -61,19 +67,25 @@ This document explains the changes made to Iris for this release
🔗 Dependencies
===============
-#. N/A
+#. `@rcomer`_ introduced the ``nc-time-axis >=1.4`` minimum pin, reflecting that
+ we no longer use the deprecated :class:`nc_time_axis.CalendarDateTime`
+ when plotting against time coordinates. (:pull:`4584`)
📚 Documentation
================
-#. N/A
+#. `@tkknight`_ added a page to show the issues that have been voted for. See
+ :ref:`voted_issues`. (:issue:`3307`, :pull:`4617`)
💼 Internal
===========
-#. N/A
+#. `@trexfeathers`_ and `@pp-mo`_ finished implementing a mature benchmarking
+ infrastructure (see :ref:`contributing.benchmarks`), building on 2 hard
+ years of lessons learned 🎉. (:pull:`4477`, :pull:`4562`, :pull:`4571`,
+ :pull:`4583`, :pull:`4621`)
.. comment
diff --git a/docs/src/whatsnew/index.rst b/docs/src/whatsnew/index.rst
index 51f03e8d8f..7e0829da5b 100644
--- a/docs/src/whatsnew/index.rst
+++ b/docs/src/whatsnew/index.rst
@@ -11,6 +11,7 @@ Iris versions.
:maxdepth: 1
dev.rst
+ 3.2.rst
3.1.rst
3.0.rst
2.4.rst
diff --git a/lib/iris/__init__.py b/lib/iris/__init__.py
index 713c163deb..3e847acad7 100644
--- a/lib/iris/__init__.py
+++ b/lib/iris/__init__.py
@@ -44,6 +44,10 @@
standard library function :func:`os.path.expanduser` and
module :mod:`fnmatch` for more details.
+ .. warning::
+
+ If supplying a URL, only OPeNDAP Data Sources are supported.
+
* constraints:
Either a single constraint, or an iterable of constraints.
Each constraint can be either a string, an instance of
@@ -287,6 +291,7 @@ def load(uris, constraints=None, callback=None):
* uris:
One or more filenames/URIs, as a string or :class:`pathlib.PurePath`.
+ If supplying a URL, only OPeNDAP Data Sources are supported.
Kwargs:
@@ -315,6 +320,7 @@ def load_cube(uris, constraint=None, callback=None):
* uris:
One or more filenames/URIs, as a string or :class:`pathlib.PurePath`.
+ If supplying a URL, only OPeNDAP Data Sources are supported.
Kwargs:
@@ -354,6 +360,7 @@ def load_cubes(uris, constraints=None, callback=None):
* uris:
One or more filenames/URIs, as a string or :class:`pathlib.PurePath`.
+ If supplying a URL, only OPeNDAP Data Sources are supported.
Kwargs:
@@ -399,6 +406,7 @@ def load_raw(uris, constraints=None, callback=None):
* uris:
One or more filenames/URIs, as a string or :class:`pathlib.PurePath`.
+ If supplying a URL, only OPeNDAP Data Sources are supported.
Kwargs:
diff --git a/lib/iris/coord_systems.py b/lib/iris/coord_systems.py
index 2f875bb159..311ed35f44 100644
--- a/lib/iris/coord_systems.py
+++ b/lib/iris/coord_systems.py
@@ -1083,6 +1083,8 @@ def __init__(
longitude_of_projection_origin=None,
ellipsoid=None,
standard_parallel=None,
+ false_easting=None,
+ false_northing=None,
):
"""
Constructs a Mercator coord system.
@@ -1098,6 +1100,12 @@ def __init__(
* standard_parallel:
The latitude where the scale is 1. Defaults to 0.0 .
+ * false_easting:
+ X offset from the planar origin in metres. Defaults to 0.0.
+
+ * false_northing:
+ Y offset from the planar origin in metres. Defaults to 0.0.
+
"""
#: True longitude of planar origin in degrees.
self.longitude_of_projection_origin = _arg_default(
@@ -1110,12 +1118,20 @@ def __init__(
#: The latitude where the scale is 1.
self.standard_parallel = _arg_default(standard_parallel, 0)
+ #: X offset from the planar origin in metres.
+ self.false_easting = _arg_default(false_easting, 0)
+
+ #: Y offset from the planar origin in metres.
+ self.false_northing = _arg_default(false_northing, 0)
+
def __repr__(self):
res = (
"Mercator(longitude_of_projection_origin="
"{self.longitude_of_projection_origin!r}, "
"ellipsoid={self.ellipsoid!r}, "
- "standard_parallel={self.standard_parallel!r})"
+ "standard_parallel={self.standard_parallel!r}, "
+ "false_easting={self.false_easting!r}, "
+ "false_northing={self.false_northing!r})"
)
return res.format(self=self)
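A minimal sketch of the extended constructor (values illustrative, mirroring
the unit tests further below):

    from iris.coord_systems import GeogCS, Mercator

    merc = Mercator(
        longitude_of_projection_origin=90.0,
        ellipsoid=GeogCS(semi_major_axis=6377563.396,
                         semi_minor_axis=6356256.909),
        standard_parallel=14.0,
        false_easting=13.0,
        false_northing=12.0,
    )
    crs = merc.as_cartopy_crs()  # the offsets carry through to cartopy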
@@ -1126,6 +1142,8 @@ def as_cartopy_crs(self):
central_longitude=self.longitude_of_projection_origin,
globe=globe,
latitude_true_scale=self.standard_parallel,
+ false_easting=self.false_easting,
+ false_northing=self.false_northing,
)
def as_cartopy_projection(self):
diff --git a/lib/iris/fileformats/_nc_load_rules/actions.py b/lib/iris/fileformats/_nc_load_rules/actions.py
index d286abbf3d..4c5184deb1 100644
--- a/lib/iris/fileformats/_nc_load_rules/actions.py
+++ b/lib/iris/fileformats/_nc_load_rules/actions.py
@@ -18,7 +18,7 @@
3) Iris-specific info is (still) stored in additional properties created on
the engine object :
- engine.cf_var, .cube, .cube_parts, .requires, .rule_triggered, .filename
+ engine.cf_var, .cube, .cube_parts, .requires, .rules_triggered, .filename
Our "rules" are just action routines.
The top-level 'run_actions' routine decides which actions to call, based on the
@@ -78,7 +78,7 @@ def inner(engine, *args, **kwargs):
# but also may vary depending on whether it successfully
# triggered, and if so what it matched.
rule_name = _default_rulenamesfunc(func.__name__)
- engine.rule_triggered.add(rule_name)
+ engine.rules_triggered.add(rule_name)
func._rulenames_func = _default_rulenamesfunc
return inner
diff --git a/lib/iris/fileformats/_nc_load_rules/helpers.py b/lib/iris/fileformats/_nc_load_rules/helpers.py
index a5b507d583..198daeceea 100644
--- a/lib/iris/fileformats/_nc_load_rules/helpers.py
+++ b/lib/iris/fileformats/_nc_load_rules/helpers.py
@@ -440,10 +440,13 @@ def build_mercator_coordinate_system(engine, cf_grid_var):
longitude_of_projection_origin = getattr(
cf_grid_var, CF_ATTR_GRID_LON_OF_PROJ_ORIGIN, None
)
+ standard_parallel = getattr(
+ cf_grid_var, CF_ATTR_GRID_STANDARD_PARALLEL, None
+ )
+ false_easting = getattr(cf_grid_var, CF_ATTR_GRID_FALSE_EASTING, None)
+ false_northing = getattr(cf_grid_var, CF_ATTR_GRID_FALSE_NORTHING, None)
# Iris currently only supports Mercator projections with specific
- # values for false_easting, false_northing,
- # scale_factor_at_projection_origin and standard_parallel. These are
- # checked elsewhere.
+ # values of scale_factor_at_projection_origin. This is checked elsewhere.
ellipsoid = None
if (
@@ -454,7 +457,11 @@ def build_mercator_coordinate_system(engine, cf_grid_var):
ellipsoid = iris.coord_systems.GeogCS(major, minor, inverse_flattening)
cs = iris.coord_systems.Mercator(
- longitude_of_projection_origin, ellipsoid=ellipsoid
+ longitude_of_projection_origin,
+ ellipsoid=ellipsoid,
+ standard_parallel=standard_parallel,
+ false_easting=false_easting,
+ false_northing=false_northing,
)
return cs
@@ -1244,27 +1251,10 @@ def has_supported_mercator_parameters(engine, cf_name):
is_valid = True
cf_grid_var = engine.cf_var.cf_group[cf_name]
- false_easting = getattr(cf_grid_var, CF_ATTR_GRID_FALSE_EASTING, None)
- false_northing = getattr(cf_grid_var, CF_ATTR_GRID_FALSE_NORTHING, None)
scale_factor_at_projection_origin = getattr(
cf_grid_var, CF_ATTR_GRID_SCALE_FACTOR_AT_PROJ_ORIGIN, None
)
- standard_parallel = getattr(
- cf_grid_var, CF_ATTR_GRID_STANDARD_PARALLEL, None
- )
- if false_easting is not None and false_easting != 0:
- warnings.warn(
- "False eastings other than 0.0 not yet supported "
- "for Mercator projections"
- )
- is_valid = False
- if false_northing is not None and false_northing != 0:
- warnings.warn(
- "False northings other than 0.0 not yet supported "
- "for Mercator projections"
- )
- is_valid = False
if (
scale_factor_at_projection_origin is not None
and scale_factor_at_projection_origin != 1
@@ -1274,12 +1264,6 @@ def has_supported_mercator_parameters(engine, cf_name):
"Mercator projections"
)
is_valid = False
- if standard_parallel is not None and standard_parallel != 0:
- warnings.warn(
- "Standard parallels other than 0.0 not yet "
- "supported for Mercator projections"
- )
- is_valid = False
return is_valid
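A minimal sketch of a grid-mapping variable that now validates (attribute
values illustrative, mirroring the updated unit tests below):

    from unittest import mock

    cf_grid_var = mock.Mock(
        spec=[],
        longitude_of_projection_origin=-90,
        false_easting=15,
        false_northing=10,
        scale_factor_at_projection_origin=1,
        semi_major_axis=6377563.396,
        semi_minor_axis=6356256.909,
    )
    # has_supported_mercator_parameters(engine, cf_name) would now be True.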
diff --git a/lib/iris/fileformats/netcdf.py b/lib/iris/fileformats/netcdf.py
index 73a137b4af..80f213dbc2 100644
--- a/lib/iris/fileformats/netcdf.py
+++ b/lib/iris/fileformats/netcdf.py
@@ -498,7 +498,7 @@ def _actions_activation_stats(engine, cf_name):
print("Rules Triggered:")
- for rule in sorted(list(engine.rule_triggered)):
+ for rule in sorted(list(engine.rules_triggered)):
print("\t%s" % rule)
print("Case Specific Facts:")
@@ -570,13 +570,21 @@ def _get_cf_var_data(cf_var, filename):
return as_lazy_data(proxy, chunks=chunks)
-class OrderedAddableList(list):
- # Used purely in actions debugging, to accumulate a record of which actions
- # were activated.
- # It replaces a set, so as to record the ordering of operations, with
- # possible repeats, and it also numbers the entries.
- # Actions routines invoke the 'add' method, which thus effectively converts
- # a set.add into a list.append.
+class _OrderedAddableList(list):
+ """
+ A custom container object for actions recording.
+
+ Used purely in actions debugging, to accumulate a record of which actions
+ were activated.
+
+ It replaces a set, so as to preserve the ordering of operations, with
+ possible repeats, and it also numbers the entries.
+
+ The actions routines invoke an 'add' method, so this effectively replaces
+ a set.add with a list.append.
+
+ """
+
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._n_add = 0
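A minimal sketch of the recording behaviour described above (rule name
illustrative; the 'add' method numbers each entry internally):

    record = _OrderedAddableList()
    record.add("fc_provides_grid_mapping")
    record.add("fc_provides_grid_mapping")  # repeats are kept, unlike a set
    assert len(record) == 2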
@@ -602,7 +610,7 @@ def _load_cube(engine, cf, cf_var, filename):
engine.cube = cube
engine.cube_parts = {}
engine.requires = {}
- engine.rule_triggered = OrderedAddableList()
+ engine.rules_triggered = _OrderedAddableList()
engine.filename = filename
# Assert all the case-specific facts.
@@ -825,12 +833,12 @@ def inner(cf_datavar):
def load_cubes(filenames, callback=None, constraints=None):
"""
- Loads cubes from a list of NetCDF filenames/URLs.
+ Loads cubes from a list of NetCDF filenames/OPeNDAP URLs.
Args:
* filenames (string/list):
- One or more NetCDF filenames/DAP URLs to load from.
+ One or more NetCDF filenames/OPeNDAP URLs to load from.
Kwargs:
@@ -2553,10 +2561,8 @@ def add_ellipsoid(ellipsoid):
cf_var_grid.longitude_of_projection_origin = (
cs.longitude_of_projection_origin
)
- # The Mercator class has implicit defaults for certain
- # parameters
- cf_var_grid.false_easting = 0.0
- cf_var_grid.false_northing = 0.0
+ cf_var_grid.false_easting = cs.false_easting
+ cf_var_grid.false_northing = cs.false_northing
cf_var_grid.scale_factor_at_projection_origin = 1.0
# lcc
diff --git a/lib/iris/io/__init__.py b/lib/iris/io/__init__.py
index 034fa4baab..8d5a2e05d2 100644
--- a/lib/iris/io/__init__.py
+++ b/lib/iris/io/__init__.py
@@ -216,7 +216,7 @@ def load_files(filenames, callback, constraints=None):
def load_http(urls, callback):
"""
- Takes a list of urls and a callback function, and returns a generator
+ Takes a list of OPeNDAP URLs and a callback function, and returns a generator
of Cubes from the given URLs.
.. note::
@@ -226,11 +226,11 @@ def load_http(urls, callback):
"""
# Create default dict mapping iris format handler to its associated filenames
+ from iris.fileformats import FORMAT_AGENT
+
handler_map = collections.defaultdict(list)
for url in urls:
- handling_format_spec = iris.fileformats.FORMAT_AGENT.get_spec(
- url, None
- )
+ handling_format_spec = FORMAT_AGENT.get_spec(url, None)
handler_map[handling_format_spec].append(url)
# Call each iris format handler with the appropriate filenames
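A minimal sketch of OPeNDAP loading (URL taken from the tests below):

    import iris

    url = "http://geoport.whoi.edu:80/thredds/dodsC/bathy/gom15"
    cubes = iris.load(url)  # an http URI dispatches to iris.io.load_http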
diff --git a/lib/iris/plot.py b/lib/iris/plot.py
index 0e9645c783..d886ac1cf9 100644
--- a/lib/iris/plot.py
+++ b/lib/iris/plot.py
@@ -591,7 +591,7 @@ def _fixup_dates(coord, values):
r = [datetime.datetime(*date) for date in dates]
else:
try:
- import nc_time_axis
+ import nc_time_axis # noqa: F401
except ImportError:
msg = (
"Cannot plot against time in a non-gregorian "
@@ -603,12 +603,10 @@ def _fixup_dates(coord, values):
raise IrisError(msg)
r = [
- nc_time_axis.CalendarDateTime(
- cftime.datetime(*date, calendar=coord.units.calendar),
- coord.units.calendar,
- )
+ cftime.datetime(*date, calendar=coord.units.calendar)
for date in dates
]
+
values = np.empty(len(r), dtype=object)
values[:] = r
return values
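A minimal sketch of the new behaviour (coordinate values illustrative;
assumes nc_time_axis is installed, as the import guard above requires):

    import cftime
    from cf_units import Unit
    from iris.coords import AuxCoord
    from iris.plot import _fixup_dates

    unit = Unit("hours since 2000-02-25 00:00:00", calendar="360_day")
    coord = AuxCoord([1, 3, 6], "time", units=unit)
    result = _fixup_dates(coord, coord.points)
    assert isinstance(result[0], cftime.datetime)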
@@ -654,13 +652,13 @@ def _get_plot_objects(args):
u_object, v_object = args[:2]
u, v = _uv_from_u_object_v_object(u_object, v_object)
args = args[2:]
- if len(u) != len(v):
+ if u.size != v.size:
msg = (
"The x and y-axis objects are not compatible. They should "
"have equal sizes but got ({}: {}) and ({}: {})."
)
raise ValueError(
- msg.format(u_object.name(), len(u), v_object.name(), len(v))
+ msg.format(u_object.name(), u.size, v_object.name(), v.size)
)
else:
# single argument
@@ -675,7 +673,7 @@ def _get_plot_objects(args):
if (
isinstance(v_object, iris.cube.Cube)
and isinstance(u_object, iris.coords.Coord)
- and iris.util.guess_coord_axis(u_object) in ["Y", "Z"]
+ and iris.util.guess_coord_axis(u_object) == "Z"
):
u_object, v_object = v_object, u_object
u, v = v, u
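A minimal sketch of the newly supported scalar case (mirrors the new unit
tests below; assumes Matplotlib is available):

    import iris.cube
    import iris.plot as iplt

    cube_x = iris.cube.Cube(1)
    cube_y = iris.cube.Cube(2)
    iplt.plot(cube_x, cube_y)  # both sizes are 1, so no ValueError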
diff --git a/lib/iris/tests/integration/plot/test_netcdftime.py b/lib/iris/tests/integration/plot/test_netcdftime.py
index 340f37dda7..9f0baeda35 100644
--- a/lib/iris/tests/integration/plot/test_netcdftime.py
+++ b/lib/iris/tests/integration/plot/test_netcdftime.py
@@ -18,10 +18,6 @@
from iris.coords import AuxCoord
-if tests.NC_TIME_AXIS_AVAILABLE:
- from nc_time_axis import CalendarDateTime
-
-
# Run tests in no graphics mode if matplotlib is not available.
if tests.MPL_AVAILABLE:
import iris.plot as iplt
@@ -48,9 +44,8 @@ def test_360_day_calendar(self):
)
for atime in times
]
- expected_ydata = np.array(
- [CalendarDateTime(time, calendar) for time in times]
- )
+
+ expected_ydata = times
(line1,) = iplt.plot(time_coord)
result_ydata = line1.get_ydata()
self.assertArrayEqual(expected_ydata, result_ydata)
diff --git a/lib/iris/tests/results/coord_systems/Mercator.xml b/lib/iris/tests/results/coord_systems/Mercator.xml
index e8036ef824..db3ccffec7 100644
--- a/lib/iris/tests/results/coord_systems/Mercator.xml
+++ b/lib/iris/tests/results/coord_systems/Mercator.xml
@@ -1,2 +1,2 @@
-
+
diff --git a/lib/iris/tests/results/imagerepo.json b/lib/iris/tests/results/imagerepo.json
index 79560a5365..6a997c38b4 100644
--- a/lib/iris/tests/results/imagerepo.json
+++ b/lib/iris/tests/results/imagerepo.json
@@ -684,7 +684,10 @@
"https://scitools.github.io/test-iris-imagehash/images/v4/8bfe956b7c01c2f26300929dfc1e3c6690736f91817e3b0c84be6be5d1603ed1.png"
],
"iris.tests.test_plot.TestPlot.test_y.0": [
- "https://scitools.github.io/test-iris-imagehash/images/v4/8ff99c067e01e7166101c9c6b04396b5cd4e2f0993163de9c4fe7b79207e36a1.png"
+ "https://scitools.github.io/test-iris-imagehash/images/v4/8fe896266f068d873b83cb71e435725cd07c607ad07e70fcd0007a7881fe7ab8.png",
+ "https://scitools.github.io/test-iris-imagehash/images/v4/8fe896066f068d873b83cb71e435725cd07c607ad07c70fcd0007af881fe7bb8.png",
+ "https://scitools.github.io/test-iris-imagehash/images/v4/8fe896366f0f8d93398bcb71e435f24ed074646ed07670acf010726d81f2798c.png",
+ "https://scitools.github.io/test-iris-imagehash/images/v4/aff8946c7a14c99fb193d263e42432d8d00c2d27944a3f8dc5223ef703ff6b90.png"
],
"iris.tests.test_plot.TestPlot.test_z.0": [
"https://scitools.github.io/test-iris-imagehash/images/v4/8fffc1dc7e019c70f001b70ee4386de1814e7938837b6a7f84d07c9f15b02f21.png"
@@ -874,7 +877,10 @@
"https://scitools.github.io/test-iris-imagehash/images/v4/82ff950b7f81c0d6620199bcfc5e986695734da1816e1b2c85be2b65d96276d1.png"
],
"iris.tests.test_plot.TestQuickplotPlot.test_y.0": [
- "https://scitools.github.io/test-iris-imagehash/images/v4/a3f9bc067e01c6166009c9c6b5439ee5cd4e0d2993361de9ccf65b79887636a9.png"
+ "https://scitools.github.io/test-iris-imagehash/images/v4/a7ffb6067f008d87339bc973e435d86ef034c87ad07c586cd001da69897e5838.png",
+ "https://scitools.github.io/test-iris-imagehash/images/v4/a7ffb6067f008d87339bc973e435d86ef034c87ad07cd86cd001da68897e58a8.png",
+ "https://scitools.github.io/test-iris-imagehash/images/v4/a7efb6367f008d97338fc973e435d86ef030c86ed070d86cd030d86d89f0d82c.png",
+ "https://scitools.github.io/test-iris-imagehash/images/v4/a2fbb46e7f10c99f2013d863e46498dcd06c0d2798421fa5dd221e7789ff6f10.png"
],
"iris.tests.test_plot.TestQuickplotPlot.test_z.0": [
"https://scitools.github.io/test-iris-imagehash/images/v4/a3ffc1de7e009c7030019786f438cde3810fd93c9b734a778ce47c9799b02731.png"
diff --git a/lib/iris/tests/results/netcdf/netcdf_merc.cml b/lib/iris/tests/results/netcdf/netcdf_merc.cml
index 02fc4e7c34..5e17400158 100644
--- a/lib/iris/tests/results/netcdf/netcdf_merc.cml
+++ b/lib/iris/tests/results/netcdf/netcdf_merc.cml
@@ -53,15 +53,15 @@
45.5158, 45.9993]]" shape="(192, 192)" standard_name="longitude" units="Unit('degrees')" value_type="float32" var_name="lon"/>
-
-
+
-
-
+
diff --git a/lib/iris/tests/results/netcdf/netcdf_merc_false.cml b/lib/iris/tests/results/netcdf/netcdf_merc_false.cml
new file mode 100644
index 0000000000..d916f5f753
--- /dev/null
+++ b/lib/iris/tests/results/netcdf/netcdf_merc_false.cml
@@ -0,0 +1,33 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/lib/iris/tests/test_load.py b/lib/iris/tests/test_load.py
index 86ff2f1ece..d21b40ee26 100644
--- a/lib/iris/tests/test_load.py
+++ b/lib/iris/tests/test_load.py
@@ -12,6 +12,9 @@
import iris.tests as tests # isort:skip
import pathlib
+from unittest import mock
+
+import netCDF4
import iris
import iris.io
@@ -148,19 +151,20 @@ def test_path_object(self):
self.assertEqual(len(cubes), 1)
-class TestOpenDAP(tests.IrisTest):
- def test_load(self):
- # Check that calling iris.load_* with a http URI triggers a call to
- # ``iris.io.load_http``
+class TestOPeNDAP(tests.IrisTest):
+ def setUp(self):
+ self.url = "http://geoport.whoi.edu:80/thredds/dodsC/bathy/gom15"
- url = "http://geoport.whoi.edu:80/thredds/dodsC/bathy/gom15"
+ def test_load_http_called(self):
+ # Check that calling iris.load_* with an http URI triggers a call to
+ # ``iris.io.load_http``
class LoadHTTPCalled(Exception):
pass
def new_load_http(passed_urls, *args, **kwargs):
self.assertEqual(len(passed_urls), 1)
- self.assertEqual(url, passed_urls[0])
+ self.assertEqual(self.url, passed_urls[0])
raise LoadHTTPCalled()
try:
@@ -174,11 +178,28 @@ def new_load_http(passed_urls, *args, **kwargs):
iris.load_cubes,
]:
with self.assertRaises(LoadHTTPCalled):
- fn(url)
+ fn(self.url)
finally:
iris.io.load_http = orig
+ def test_netCDF_Dataset_call(self):
+ # Check that load_http calls netCDF4.Dataset and supplies the expected URL.
+
+ # To avoid making a request to an OPeNDAP server in a test, instead
+ # mock the call to netCDF4.Dataset so that it returns a dataset for a
+ # local file.
+ filename = tests.get_data_path(
+ ("NetCDF", "global", "xyt", "SMALL_total_column_co2.nc")
+ )
+ fake_dataset = netCDF4.Dataset(filename)
+
+ with mock.patch(
+ "netCDF4.Dataset", return_value=fake_dataset
+ ) as dataset_loader:
+ next(iris.io.load_http([self.url], callback=None))
+ dataset_loader.assert_called_with(self.url, mode="r")
+
if __name__ == "__main__":
tests.main()
diff --git a/lib/iris/tests/test_netcdf.py b/lib/iris/tests/test_netcdf.py
index 2c22c6d088..8cdbe27257 100644
--- a/lib/iris/tests/test_netcdf.py
+++ b/lib/iris/tests/test_netcdf.py
@@ -218,6 +218,16 @@ def test_load_merc_grid(self):
)
self.assertCML(cube, ("netcdf", "netcdf_merc.cml"))
+ def test_load_merc_false_en_grid(self):
+ # Test loading a single CF-netCDF file with a Mercator grid_mapping that
+ # includes false easting and northing
+ cube = iris.load_cube(
+ tests.get_data_path(
+ ("NetCDF", "mercator", "false_east_north_merc.nc")
+ )
+ )
+ self.assertCML(cube, ("netcdf", "netcdf_merc_false.cml"))
+
def test_load_stereographic_grid(self):
# Test loading a single CF-netCDF file with a stereographic
# grid_mapping.
diff --git a/lib/iris/tests/unit/coord_systems/test_Mercator.py b/lib/iris/tests/unit/coord_systems/test_Mercator.py
index 33efaef9da..8a37a8fcc5 100644
--- a/lib/iris/tests/unit/coord_systems/test_Mercator.py
+++ b/lib/iris/tests/unit/coord_systems/test_Mercator.py
@@ -29,7 +29,8 @@ def test_repr(self):
"Mercator(longitude_of_projection_origin=90.0, "
"ellipsoid=GeogCS(semi_major_axis=6377563.396, "
"semi_minor_axis=6356256.909), "
- "standard_parallel=0.0)"
+ "standard_parallel=0.0, "
+ "false_easting=0.0, false_northing=0.0)"
)
self.assertEqual(expected, repr(self.tm))
@@ -38,16 +39,23 @@ class Test_init_defaults(tests.IrisTest):
def test_set_optional_args(self):
# Check that setting the optional (non-ellipse) args works.
crs = Mercator(
- longitude_of_projection_origin=27, standard_parallel=157.4
+ longitude_of_projection_origin=27,
+ standard_parallel=157.4,
+ false_easting=13,
+ false_northing=12,
)
self.assertEqualAndKind(crs.longitude_of_projection_origin, 27.0)
self.assertEqualAndKind(crs.standard_parallel, 157.4)
+ self.assertEqualAndKind(crs.false_easting, 13.0)
+ self.assertEqualAndKind(crs.false_northing, 12.0)
def _check_crs_defaults(self, crs):
# Check for property defaults when no kwargs options were set.
# NOTE: except ellipsoid, which is done elsewhere.
self.assertEqualAndKind(crs.longitude_of_projection_origin, 0.0)
self.assertEqualAndKind(crs.standard_parallel, 0.0)
+ self.assertEqualAndKind(crs.false_easting, 0.0)
+ self.assertEqualAndKind(crs.false_northing, 0.0)
def test_no_optional_args(self):
# Check expected defaults with no optional args.
@@ -57,7 +65,10 @@ def test_no_optional_args(self):
def test_optional_args_None(self):
# Check expected defaults with optional args=None.
crs = Mercator(
- longitude_of_projection_origin=None, standard_parallel=None
+ longitude_of_projection_origin=None,
+ standard_parallel=None,
+ false_easting=None,
+ false_northing=None,
)
self._check_crs_defaults(crs)
@@ -77,6 +88,8 @@ def test_extra_kwargs(self):
# converted to a cartopy CRS.
longitude_of_projection_origin = 90.0
true_scale_lat = 14.0
+ false_easting = 13
+ false_northing = 12
ellipsoid = GeogCS(
semi_major_axis=6377563.396, semi_minor_axis=6356256.909
)
@@ -85,6 +98,8 @@ def test_extra_kwargs(self):
longitude_of_projection_origin,
ellipsoid=ellipsoid,
standard_parallel=true_scale_lat,
+ false_easting=false_easting,
+ false_northing=false_northing,
)
expected = ccrs.Mercator(
@@ -95,6 +110,8 @@ def test_extra_kwargs(self):
ellipse=None,
),
latitude_true_scale=true_scale_lat,
+ false_easting=false_easting,
+ false_northing=false_northing,
)
res = merc_cs.as_cartopy_crs()
@@ -113,6 +130,8 @@ def test_simple(self):
def test_extra_kwargs(self):
longitude_of_projection_origin = 90.0
true_scale_lat = 14.0
+ false_easting = 13
+ false_northing = 12
ellipsoid = GeogCS(
semi_major_axis=6377563.396, semi_minor_axis=6356256.909
)
@@ -121,6 +140,8 @@ def test_extra_kwargs(self):
longitude_of_projection_origin,
ellipsoid=ellipsoid,
standard_parallel=true_scale_lat,
+ false_easting=false_easting,
+ false_northing=false_northing,
)
expected = ccrs.Mercator(
@@ -131,6 +152,8 @@ def test_extra_kwargs(self):
ellipse=None,
),
latitude_true_scale=true_scale_lat,
+ false_easting=false_easting,
+ false_northing=false_northing,
)
res = merc_cs.as_cartopy_projection()
diff --git a/lib/iris/tests/unit/fileformats/nc_load_rules/helpers/test_has_supported_mercator_parameters.py b/lib/iris/tests/unit/fileformats/nc_load_rules/helpers/test_has_supported_mercator_parameters.py
index dfe2895f29..1b9857c0be 100644
--- a/lib/iris/tests/unit/fileformats/nc_load_rules/helpers/test_has_supported_mercator_parameters.py
+++ b/lib/iris/tests/unit/fileformats/nc_load_rules/helpers/test_has_supported_mercator_parameters.py
@@ -28,7 +28,7 @@ def _engine(cf_grid_var, cf_name):
class TestHasSupportedMercatorParameters(tests.IrisTest):
- def test_valid(self):
+ def test_valid_base(self):
cf_name = "mercator"
cf_grid_var = mock.Mock(
spec=[],
@@ -45,85 +45,50 @@ def test_valid(self):
self.assertTrue(is_valid)
- def test_invalid_scale_factor(self):
- # Iris does not yet support scale factors other than one for
- # Mercator projections
+ def test_valid_false_easting_northing(self):
cf_name = "mercator"
cf_grid_var = mock.Mock(
spec=[],
- longitude_of_projection_origin=0,
- false_easting=0,
- false_northing=0,
- scale_factor_at_projection_origin=0.9,
+ longitude_of_projection_origin=-90,
+ false_easting=15,
+ false_northing=10,
+ scale_factor_at_projection_origin=1,
semi_major_axis=6377563.396,
semi_minor_axis=6356256.909,
)
engine = _engine(cf_grid_var, cf_name)
- with warnings.catch_warnings(record=True) as warns:
- warnings.simplefilter("always")
- is_valid = has_supported_mercator_parameters(engine, cf_name)
+ is_valid = has_supported_mercator_parameters(engine, cf_name)
- self.assertFalse(is_valid)
- self.assertEqual(len(warns), 1)
- self.assertRegex(str(warns[0]), "Scale factor")
+ self.assertTrue(is_valid)
- def test_invalid_standard_parallel(self):
- # Iris does not yet support standard parallels other than zero for
- # Mercator projections
+ def test_valid_standard_parallel(self):
cf_name = "mercator"
cf_grid_var = mock.Mock(
spec=[],
- longitude_of_projection_origin=0,
+ longitude_of_projection_origin=-90,
false_easting=0,
false_northing=0,
- standard_parallel=30,
- semi_major_axis=6377563.396,
- semi_minor_axis=6356256.909,
- )
- engine = _engine(cf_grid_var, cf_name)
-
- with warnings.catch_warnings(record=True) as warns:
- warnings.simplefilter("always")
- is_valid = has_supported_mercator_parameters(engine, cf_name)
-
- self.assertFalse(is_valid)
- self.assertEqual(len(warns), 1)
- self.assertRegex(str(warns[0]), "Standard parallel")
-
- def test_invalid_false_easting(self):
- # Iris does not yet support false eastings other than zero for
- # Mercator projections
- cf_name = "mercator"
- cf_grid_var = mock.Mock(
- spec=[],
- longitude_of_projection_origin=0,
- false_easting=100,
- false_northing=0,
- scale_factor_at_projection_origin=1,
+ standard_parallel=15,
semi_major_axis=6377563.396,
semi_minor_axis=6356256.909,
)
engine = _engine(cf_grid_var, cf_name)
- with warnings.catch_warnings(record=True) as warns:
- warnings.simplefilter("always")
- is_valid = has_supported_mercator_parameters(engine, cf_name)
+ is_valid = has_supported_mercator_parameters(engine, cf_name)
- self.assertFalse(is_valid)
- self.assertEqual(len(warns), 1)
- self.assertRegex(str(warns[0]), "False easting")
+ self.assertTrue(is_valid)
- def test_invalid_false_northing(self):
- # Iris does not yet support false northings other than zero for
+ def test_invalid_scale_factor(self):
+ # Iris does not yet support scale factors other than one for
# Mercator projections
cf_name = "mercator"
cf_grid_var = mock.Mock(
spec=[],
longitude_of_projection_origin=0,
false_easting=0,
- false_northing=100,
- scale_factor_at_projection_origin=1,
+ false_northing=0,
+ scale_factor_at_projection_origin=0.9,
semi_major_axis=6377563.396,
semi_minor_axis=6356256.909,
)
@@ -135,7 +100,7 @@ def test_invalid_false_northing(self):
self.assertFalse(is_valid)
self.assertEqual(len(warns), 1)
- self.assertRegex(str(warns[0]), "False northing")
+ self.assertRegex(str(warns[0]), "Scale factor")
if __name__ == "__main__":
diff --git a/lib/iris/tests/unit/plot/test__fixup_dates.py b/lib/iris/tests/unit/plot/test__fixup_dates.py
index 157780dcae..1ad5c87691 100644
--- a/lib/iris/tests/unit/plot/test__fixup_dates.py
+++ b/lib/iris/tests/unit/plot/test__fixup_dates.py
@@ -23,6 +23,7 @@ def test_gregorian_calendar(self):
unit = Unit("hours since 2000-04-13 00:00:00", calendar="gregorian")
coord = AuxCoord([1, 3, 6], "time", units=unit)
result = _fixup_dates(coord, coord.points)
+ self.assertIsInstance(result[0], datetime.datetime)
expected = [
datetime.datetime(2000, 4, 13, 1),
datetime.datetime(2000, 4, 13, 3),
@@ -34,6 +35,7 @@ def test_gregorian_calendar_sub_second(self):
unit = Unit("seconds since 2000-04-13 00:00:00", calendar="gregorian")
coord = AuxCoord([1, 1.25, 1.5], "time", units=unit)
result = _fixup_dates(coord, coord.points)
+ self.assertIsInstance(result[0], datetime.datetime)
expected = [
datetime.datetime(2000, 4, 13, 0, 0, 1),
datetime.datetime(2000, 4, 13, 0, 0, 1),
@@ -52,9 +54,7 @@ def test_360_day_calendar(self):
cftime.datetime(2000, 2, 29, calendar=calendar),
cftime.datetime(2000, 2, 30, calendar=calendar),
]
- self.assertArrayEqual(
- [cdt.datetime for cdt in result], expected_datetimes
- )
+ self.assertArrayEqual(result, expected_datetimes)
@tests.skip_nc_time_axis
def test_365_day_calendar(self):
@@ -67,9 +67,7 @@ def test_365_day_calendar(self):
cftime.datetime(2000, 2, 25, 1, 0, calendar=calendar),
cftime.datetime(2000, 2, 25, 2, 30, calendar=calendar),
]
- self.assertArrayEqual(
- [cdt.datetime for cdt in result], expected_datetimes
- )
+ self.assertArrayEqual(result, expected_datetimes)
@tests.skip_nc_time_axis
def test_360_day_calendar_attribute(self):
diff --git a/lib/iris/tests/unit/plot/test__get_plot_objects.py b/lib/iris/tests/unit/plot/test__get_plot_objects.py
new file mode 100644
index 0000000000..8586faa756
--- /dev/null
+++ b/lib/iris/tests/unit/plot/test__get_plot_objects.py
@@ -0,0 +1,45 @@
+# Copyright Iris contributors
+#
+# This file is part of Iris and is released under the LGPL license.
+# See COPYING and COPYING.LESSER in the root of the repository for full
+# licensing details.
+"""Unit tests for the `iris.plot._get_plot_objects` function."""
+
+# Import iris.tests first so that some things can be initialised before
+# importing anything else.
+import iris.tests as tests # isort:skip
+
+import iris.cube
+
+if tests.MPL_AVAILABLE:
+ from iris.plot import _get_plot_objects
+
+
+@tests.skip_plot
+class Test__get_plot_objects(tests.IrisTest):
+ def test_scalar(self):
+ cube1 = iris.cube.Cube(1)
+ cube2 = iris.cube.Cube(1)
+ expected = (cube1, cube2, 1, 1, ())
+ result = _get_plot_objects((cube1, cube2))
+ self.assertTupleEqual(expected, result)
+
+ def test_mismatched_size_first_scalar(self):
+ cube1 = iris.cube.Cube(1)
+ cube2 = iris.cube.Cube([1, 42])
+ with self.assertRaisesRegex(
+ ValueError, "x and y-axis objects are not compatible"
+ ):
+ _get_plot_objects((cube1, cube2))
+
+ def test_mismatched_size_second_scalar(self):
+ cube1 = iris.cube.Cube(1)
+ cube2 = iris.cube.Cube([1, 42])
+ with self.assertRaisesRegex(
+ ValueError, "x and y-axis objects are not compatible"
+ ):
+ _get_plot_objects((cube2, cube1))
+
+
+if __name__ == "__main__":
+ tests.main()
diff --git a/noxfile.py b/noxfile.py
index 0600540c5b..00a866f814 100755
--- a/noxfile.py
+++ b/noxfile.py
@@ -5,9 +5,12 @@
"""
+from datetime import datetime
import hashlib
import os
from pathlib import Path
+from tempfile import NamedTemporaryFile
+from typing import Literal
import nox
from nox.logger import logger
@@ -289,31 +292,66 @@ def linkcheck(session: nox.sessions.Session):
)
-@nox.session(python=PY_VER, venv_backend="conda")
+@nox.session
@nox.parametrize(
- ["ci_mode"],
- [True, False],
- ids=["ci compare", "full"],
+ "run_type",
+ ["overnight", "branch", "cperf", "sperf", "custom"],
+ ids=["overnight", "branch", "cperf", "sperf", "custom"],
)
-def benchmarks(session: nox.sessions.Session, ci_mode: bool):
+def benchmarks(
+ session: nox.sessions.Session,
+ run_type: Literal["overnight", "branch", "cperf", "sperf", "custom"],
+):
"""
Perform Iris performance benchmarks (using Airspeed Velocity).
+ All run types require a single Nox positional argument (e.g.
+ ``nox --session="foo" -- my_pos_arg``) - detailed in the parameters
+ section - and can optionally accept a series of further arguments that will
+ be added to the session's ASV command.
+
Parameters
----------
session: object
A `nox.sessions.Session` object.
- ci_mode: bool
- Run a cut-down selection of benchmarks, comparing the current commit to
- the last commit for performance regressions.
-
- Notes
- -----
- ASV is set up to use ``nox --session=tests --install-only`` to prepare
- the benchmarking environment. This session environment must use a Python
- version that is also available for ``--session=tests``.
+ run_type: {"overnight", "branch", "cperf", "sperf", "custom"}
+ * ``overnight``: benchmarks all commits between the input **first
+ commit** to ``HEAD``, comparing each to its parent for performance
+ shifts. If a commit causes shifts, the output is saved to a file:
+ ``.asv/performance-shifts/``. Designed for checking the
+ previous 24 hours' commits, typically in a scheduled script.
+ * ``branch``: Performs the same operations as ``overnight``, but always
+ on two commits only - ``HEAD``, and ``HEAD``'s merge-base with the
+ input **base branch**. Output from this run is never saved to a file.
+ Designed for testing if the active branch's changes cause performance
+ shifts - anticipating what would be caught by ``overnight`` once
+ merged.
+ **For maximum accuracy, avoid using the machine that is running this
+ session. Run time could be >1 hour for the full benchmark suite.**
+ * ``cperf``: Run the on-demand CPerf suite of benchmarks (part of the
+ UK Met Office NG-VAT project) for the ``HEAD`` of ``upstream/main``
+ only, and publish the results to the input **publish directory**,
+ within a unique subdirectory for this run.
+ * ``sperf``: As with CPerf, but for the SPerf suite.
+ * ``custom``: run ASV with the input **ASV sub-command**, without any
+ preset arguments - must all be supplied by the user. So just like
+ running ASV manually, with the convenience of re-using the session's
+ scripted setup steps.
+
+ Examples
+ --------
+ * ``nox --session="benchmarks(overnight)" -- a1b23d4``
+ * ``nox --session="benchmarks(branch)" -- upstream/main``
+ * ``nox --session="benchmarks(branch)" -- upstream/mesh-data-model``
+ * ``nox --session="benchmarks(branch)" -- upstream/main --bench=regridding``
+ * ``nox --session="benchmarks(cperf)" -- my_publish_dir``
+ * ``nox --session="benchmarks(custom)" -- continuous a1b23d4 HEAD --quick``
"""
+ # The threshold beyond which shifts are 'notable'. See ``asv compare`` docs
+ # for more.
+ COMPARE_FACTOR = 1.2
+
session.install("asv", "nox")
data_gen_var = "DATA_GEN_PYTHON"
@@ -327,12 +365,12 @@ def benchmarks(session: nox.sessions.Session, ci_mode: bool):
"nox",
"--session=tests",
"--install-only",
- f"--python={session.python}",
+ f"--python={_PY_VERSION_LATEST}",
)
# Find the environment built above, set it to be the data generation
# environment.
data_gen_python = next(
- Path(".nox").rglob(f"tests*/bin/python{session.python}")
+ Path(".nox").rglob(f"tests*/bin/python{_PY_VERSION_LATEST}")
).resolve()
session.env[data_gen_var] = data_gen_python
@@ -360,25 +398,124 @@ def benchmarks(session: nox.sessions.Session, ci_mode: bool):
# Skip over setup questions for a new machine.
session.run("asv", "machine", "--yes")
- def asv_exec(*sub_args: str) -> None:
- run_args = ["asv", *sub_args]
- session.run(*run_args)
-
- if ci_mode:
- # If on a PR: compare to the base (target) branch.
- # Else: compare to previous commit.
- previous_commit = os.environ.get("PR_BASE_SHA", "HEAD^1")
- try:
- asv_exec(
- "continuous",
- "--factor=1.2",
- previous_commit,
- "HEAD",
- "--attribute",
- "rounds=4",
+ # All run types require one Nox posarg.
+ run_type_arg = {
+ "overnight": "first commit",
+ "branch": "base branch",
+ "cperf": "publish directory",
+ "sperf": "publish directory",
+ "custom": "ASV sub-command",
+ }
+ if run_type not in run_type_arg.keys():
+ message = f"Unsupported run-type: {run_type}"
+ raise NotImplementedError(message)
+ if not session.posargs:
+ message = (
+ f"Missing mandatory first Nox session posarg: "
+ f"{run_type_arg[run_type]}"
+ )
+ raise ValueError(message)
+ first_arg = session.posargs[0]
+ # Optional extra arguments to be passed down to ASV.
+ asv_args = session.posargs[1:]
+
+ def asv_compare(*commits):
+ """Run through a list of commits comparing each one to the next."""
+ commits = [commit[:8] for commit in commits]
+ shifts_dir = Path(".asv") / "performance-shifts"
+ for i in range(len(commits) - 1):
+ before = commits[i]
+ after = commits[i + 1]
+ asv_command_ = f"asv compare {before} {after} --factor={COMPARE_FACTOR} --split"
+ session.run(*asv_command_.split(" "))
+
+ if run_type == "overnight":
+ # Record performance shifts.
+ # Run the command again but limited to only showing performance
+ # shifts.
+ shifts = session.run(
+ *asv_command_.split(" "), "--only-changed", silent=True
+ )
+ if shifts:
+ # Write the shifts report to a file.
+ # Dir is used by .github/workflows/benchmarks.yml,
+ # but not cached - intended to be discarded after run.
+ shifts_dir.mkdir(exist_ok=True, parents=True)
+ shifts_path = (shifts_dir / after).with_suffix(".txt")
+ with shifts_path.open("w") as shifts_file:
+ shifts_file.write(shifts)
+
+ # Common ASV arguments for all run_types except `custom`.
+ asv_harness = (
+ "asv run {posargs} --attribute rounds=4 --interleave-rounds --strict "
+ "--show-stderr"
+ )
+
+ if run_type == "overnight":
+ first_commit = first_arg
+ commit_range = f"{first_commit}^^.."
+ asv_command = asv_harness.format(posargs=commit_range)
+ session.run(*asv_command.split(" "), *asv_args)
+
+ # git rev-list --first-parent is the command ASV uses.
+ git_command = f"git rev-list --first-parent {commit_range}"
+ commit_string = session.run(
+ *git_command.split(" "), silent=True, external=True
+ )
+ commit_list = commit_string.rstrip().split("\n")
+ asv_compare(*reversed(commit_list))
+
+ elif run_type == "branch":
+ base_branch = first_arg
+ git_command = f"git merge-base HEAD {base_branch}"
+ merge_base = session.run(
+ *git_command.split(" "), silent=True, external=True
+ )[:8]
+
+ with NamedTemporaryFile("w") as hashfile:
+ hashfile.writelines([merge_base, "\n", "HEAD"])
+ hashfile.flush()
+ commit_range = f"HASHFILE:{hashfile.name}"
+ asv_command = asv_harness.format(posargs=commit_range)
+ session.run(*asv_command.split(" "), *asv_args)
+
+ asv_compare(merge_base, "HEAD")
+
+ elif run_type in ("cperf", "sperf"):
+ publish_dir = Path(first_arg)
+ if not publish_dir.is_dir():
+ message = (
+ f"Input 'publish directory' is not a directory: {publish_dir}"
)
- finally:
- asv_exec("compare", previous_commit, "HEAD")
+ raise NotADirectoryError(message)
+ publish_subdir = (
+ publish_dir
+ / f"{run_type}_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
+ )
+ publish_subdir.mkdir()
+
+ # Activate on demand benchmarks (C/SPerf are deactivated for 'standard' runs).
+ session.env["ON_DEMAND_BENCHMARKS"] = "True"
+ commit_range = "upstream/main^!"
+
+ asv_command = (
+ asv_harness.format(posargs=commit_range) + f" --bench={run_type}"
+ )
+ session.run(*asv_command.split(" "), *asv_args)
+
+ asv_command = f"asv publish {commit_range} --html-dir={publish_subdir}"
+ session.run(*asv_command.split(" "))
+
+ # Print completion message.
+ location = Path().cwd() / ".asv"
+ print(
+ f'New ASV results for "{run_type}".\n'
+ f'See "{publish_subdir}",'
+ f'\n html in "{location / "html"}".'
+ f'\n or JSON files under "{location / "results"}".'
+ )
+
else:
- # f5ceb808 = first commit supporting nox --install-only .
- asv_exec("run", "f5ceb808..HEAD")
+ asv_subcommand = first_arg
+ assert run_type == "custom"
+ session.run("asv", asv_subcommand, *asv_args)
diff --git a/requirements/ci/nox.lock/py38-linux-64.lock b/requirements/ci/nox.lock/py38-linux-64.lock
index caf6a739b3..a612138dfa 100644
--- a/requirements/ci/nox.lock/py38-linux-64.lock
+++ b/requirements/ci/nox.lock/py38-linux-64.lock
@@ -1,6 +1,6 @@
# Generated by conda-lock.
# platform: linux-64
-# input_hash: 0b8e98b045b5545a96321ab961f5e97fe2da8aa929328cc8df2d4d5f33ed8159
+# input_hash: fc890d56b881193a2422ceb96d07b1b2bb857890e1d48fb24a765ec2f886d4d2
@EXPLICIT
https://conda.anaconda.org/conda-forge/linux-64/_libgcc_mutex-0.1-conda_forge.tar.bz2#d7c89558ba9fa0495403155b64376d81
https://conda.anaconda.org/conda-forge/linux-64/ca-certificates-2021.10.8-ha878542_0.tar.bz2#575611b8a84f45960e87722eeb51fa26
@@ -9,20 +9,20 @@ https://conda.anaconda.org/conda-forge/noarch/font-ttf-inconsolata-3.000-h77eed3
https://conda.anaconda.org/conda-forge/noarch/font-ttf-source-code-pro-2.038-h77eed37_0.tar.bz2#4d59c254e01d9cde7957100457e2d5fb
https://conda.anaconda.org/conda-forge/noarch/font-ttf-ubuntu-0.83-hab24e00_0.tar.bz2#19410c3df09dfb12d1206132a1d357c5
https://conda.anaconda.org/conda-forge/linux-64/ld_impl_linux-64-2.36.1-hea4e1c9_2.tar.bz2#bd4f2e711b39af170e7ff15163fe87ee
-https://conda.anaconda.org/conda-forge/linux-64/libgfortran5-11.2.0-h5c6108e_12.tar.bz2#f547bf125ab234cec9c89491b262fc2f
-https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-ng-11.2.0-he4da1e4_12.tar.bz2#7ff3b832ba5e6918c0d026976359d065
+https://conda.anaconda.org/conda-forge/linux-64/libgfortran5-11.2.0-h5c6108e_13.tar.bz2#b62e87134ec17e1180cfcb3951624db4
+https://conda.anaconda.org/conda-forge/linux-64/libstdcxx-ng-11.2.0-he4da1e4_13.tar.bz2#573a74710fad22a27da784cc238150b9
https://conda.anaconda.org/conda-forge/linux-64/mpi-1.0-mpich.tar.bz2#c1fcff3417b5a22bbc4cf6e8c23648cf
https://conda.anaconda.org/conda-forge/linux-64/mysql-common-8.0.28-ha770c72_0.tar.bz2#56594fdd5a80774a80d546fbbccf2c03
https://conda.anaconda.org/conda-forge/noarch/fonts-conda-forge-1-0.tar.bz2#f766549260d6815b0c52253f1fb1bb29
-https://conda.anaconda.org/conda-forge/linux-64/libgfortran-ng-11.2.0-h69a702a_12.tar.bz2#33c165be455015cc74e8d857182f3f58
-https://conda.anaconda.org/conda-forge/linux-64/libgomp-11.2.0-h1d223b6_12.tar.bz2#763c5ec8116d984b4a33342236d7da36
+https://conda.anaconda.org/conda-forge/linux-64/libgfortran-ng-11.2.0-h69a702a_13.tar.bz2#a3a07a89af69d1eada078695b42e4961
+https://conda.anaconda.org/conda-forge/linux-64/libgomp-11.2.0-h1d223b6_13.tar.bz2#8e91f1f21417c9ab1265240ee4f9db1e
https://conda.anaconda.org/conda-forge/linux-64/_openmp_mutex-4.5-1_gnu.tar.bz2#561e277319a41d4f24f5c05a9ef63c04
https://conda.anaconda.org/conda-forge/noarch/fonts-conda-ecosystem-1-0.tar.bz2#fee5683a3f04bd15cbd8318b096a27ab
-https://conda.anaconda.org/conda-forge/linux-64/libgcc-ng-11.2.0-h1d223b6_12.tar.bz2#d34efbb8d7d6312c816b4bb647b818b1
+https://conda.anaconda.org/conda-forge/linux-64/libgcc-ng-11.2.0-h1d223b6_13.tar.bz2#63eaf0f146cc80abd84743d48d667da4
https://conda.anaconda.org/conda-forge/linux-64/alsa-lib-1.2.3-h516909a_0.tar.bz2#1378b88874f42ac31b2f8e4f6975cb7b
https://conda.anaconda.org/conda-forge/linux-64/bzip2-1.0.8-h7f98852_4.tar.bz2#a1fd65c7ccbf10880423d82bca54eb54
https://conda.anaconda.org/conda-forge/linux-64/c-ares-1.18.1-h7f98852_0.tar.bz2#f26ef8098fab1f719c91eb760d63381a
-https://conda.anaconda.org/conda-forge/linux-64/expat-2.4.4-h9c3ff4c_0.tar.bz2#3cedab1fd76644efd516e1b271f2da95
+https://conda.anaconda.org/conda-forge/linux-64/expat-2.4.6-h27087fc_0.tar.bz2#90dec9e76bc164857cc200f81e981dab
https://conda.anaconda.org/conda-forge/linux-64/fribidi-1.0.10-h36c2ea0_0.tar.bz2#ac7bc6a654f8f41b352b38f4051135f8
https://conda.anaconda.org/conda-forge/linux-64/geos-3.10.2-h9c3ff4c_0.tar.bz2#fe9a66a351bfa7a84c3108304c7bcba5
https://conda.anaconda.org/conda-forge/linux-64/giflib-5.2.1-h36c2ea0_2.tar.bz2#626e68ae9cc5912d6adb79d318cf962d
@@ -30,13 +30,13 @@ https://conda.anaconda.org/conda-forge/linux-64/graphite2-1.3.13-h58526e2_1001.t
https://conda.anaconda.org/conda-forge/linux-64/icu-69.1-h9c3ff4c_0.tar.bz2#e0773c9556d588b062a4e1424a6a02fa
https://conda.anaconda.org/conda-forge/linux-64/jbig-2.1-h7f98852_2003.tar.bz2#1aa0cee79792fa97b7ff4545110b60bf
https://conda.anaconda.org/conda-forge/linux-64/jpeg-9e-h7f98852_0.tar.bz2#5c214edc675a7fb7cbb34b1d854e5141
+https://conda.anaconda.org/conda-forge/linux-64/keyutils-1.6.1-h166bdaf_0.tar.bz2#30186d27e2c9fa62b45fb1476b7200e3
https://conda.anaconda.org/conda-forge/linux-64/lerc-3.0-h9c3ff4c_0.tar.bz2#7fcefde484980d23f0ec24c11e314d2e
https://conda.anaconda.org/conda-forge/linux-64/libbrotlicommon-1.0.9-h7f98852_6.tar.bz2#b0f44f63f7d771d7670747a1dd5d5ac1
-https://conda.anaconda.org/conda-forge/linux-64/libdeflate-1.8-h7f98852_0.tar.bz2#91d22aefa665265e8e31988b15145c8a
+https://conda.anaconda.org/conda-forge/linux-64/libdeflate-1.10-h7f98852_0.tar.bz2#ffa3a757a97e851293909b49f49f28fb
https://conda.anaconda.org/conda-forge/linux-64/libev-4.33-h516909a_1.tar.bz2#6f8720dff19e17ce5d48cfe7f3d2f0a3
https://conda.anaconda.org/conda-forge/linux-64/libffi-3.4.2-h7f98852_5.tar.bz2#d645c6d2ac96843a2bfaccd2d62b3ac3
https://conda.anaconda.org/conda-forge/linux-64/libiconv-1.16-h516909a_0.tar.bz2#5c0f338a513a2943c659ae619fca9211
-https://conda.anaconda.org/conda-forge/linux-64/libllvm13-13.0.0-hf817b99_0.tar.bz2#b10bb2ebebfffa8800fa80ad3285719e
https://conda.anaconda.org/conda-forge/linux-64/libmo_unpack-3.1.2-hf484d3e_1001.tar.bz2#95f32a6a5a666d33886ca5627239f03d
https://conda.anaconda.org/conda-forge/linux-64/libnsl-2.0.0-h7f98852_0.tar.bz2#39b1328babf85c7c3a61636d9cd50206
https://conda.anaconda.org/conda-forge/linux-64/libogg-1.3.4-h7f98852_1.tar.bz2#6e8cc2173440d77708196c5b93771680
@@ -47,7 +47,7 @@ https://conda.anaconda.org/conda-forge/linux-64/libuuid-2.32.1-h7f98852_1000.tar
https://conda.anaconda.org/conda-forge/linux-64/libwebp-base-1.2.2-h7f98852_1.tar.bz2#46cf26ecc8775a0aab300ea1821aaa3c
https://conda.anaconda.org/conda-forge/linux-64/libzlib-1.2.11-h36c2ea0_1013.tar.bz2#dcddf696ff5dfcab567100d691678e18
https://conda.anaconda.org/conda-forge/linux-64/lz4-c-1.9.3-h9c3ff4c_1.tar.bz2#fbe97e8fa6f275d7c76a09e795adc3e6
-https://conda.anaconda.org/conda-forge/linux-64/mpich-3.4.3-h846660c_100.tar.bz2#1bb747e2de717cb9a6501d72539d6556
+https://conda.anaconda.org/conda-forge/linux-64/mpich-4.0.1-h846660c_100.tar.bz2#4b85205b094808088bb0862e08251653
https://conda.anaconda.org/conda-forge/linux-64/ncurses-6.3-h9c3ff4c_0.tar.bz2#fb31bcb7af058244479ca635d20f0f4a
https://conda.anaconda.org/conda-forge/linux-64/nspr-4.32-h9c3ff4c_1.tar.bz2#29ded371806431b0499aaee146abfc3e
https://conda.anaconda.org/conda-forge/linux-64/openssl-1.1.1l-h7f98852_0.tar.bz2#de7b38a1542dbe6f41653a8ae71adc53
@@ -68,30 +68,32 @@ https://conda.anaconda.org/conda-forge/linux-64/gettext-0.19.8.1-h73d1719_1008.t
https://conda.anaconda.org/conda-forge/linux-64/libblas-3.9.0-13_linux64_openblas.tar.bz2#8a4038563ed92dfa622bd72c0d8f31d3
https://conda.anaconda.org/conda-forge/linux-64/libbrotlidec-1.0.9-h7f98852_6.tar.bz2#c7c03a2592cac92246a13a0732bd1573
https://conda.anaconda.org/conda-forge/linux-64/libbrotlienc-1.0.9-h7f98852_6.tar.bz2#28bfe0a70154e6881da7bae97517c948
-https://conda.anaconda.org/conda-forge/linux-64/libclang-13.0.0-default_hc23dcda_0.tar.bz2#7b140452b5bc91e46410b84807307249
https://conda.anaconda.org/conda-forge/linux-64/libedit-3.1.20191231-he28a2e2_2.tar.bz2#4d331e44109e3f0e19b4cb8f9b82f3e1
https://conda.anaconda.org/conda-forge/linux-64/libevent-2.1.10-h9b69904_4.tar.bz2#390026683aef81db27ff1b8570ca1336
+https://conda.anaconda.org/conda-forge/linux-64/libllvm13-13.0.1-hf817b99_2.tar.bz2#47da3ce0d8b2e65ccb226c186dd91eba
https://conda.anaconda.org/conda-forge/linux-64/libvorbis-1.3.7-h9c3ff4c_0.tar.bz2#309dec04b70a3cc0f1e84a4013683bc0
https://conda.anaconda.org/conda-forge/linux-64/libxcb-1.13-h7f98852_1004.tar.bz2#b3653fdc58d03face9724f602218a904
https://conda.anaconda.org/conda-forge/linux-64/readline-8.1-h46c0cb4_0.tar.bz2#5788de3c8d7a7d64ac56c784c4ef48e6
+https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.12-h27826a3_0.tar.bz2#5b8c42eb62e9fc961af70bdd6a26e168
https://conda.anaconda.org/conda-forge/linux-64/udunits2-2.2.28-hc3e0081_0.tar.bz2#d4c341e0379c31e9e781d4f204726867
https://conda.anaconda.org/conda-forge/linux-64/xorg-libsm-1.2.3-hd9c2040_1000.tar.bz2#9e856f78d5c80d5a78f61e72d1d473a3
https://conda.anaconda.org/conda-forge/linux-64/zlib-1.2.11-h36c2ea0_1013.tar.bz2#cf7190238072a41e9579e4476a6a60b8
https://conda.anaconda.org/conda-forge/linux-64/zstd-1.5.2-ha95c52a_0.tar.bz2#5222b231b1ef49a7f60d40b363469b70
https://conda.anaconda.org/conda-forge/linux-64/brotli-bin-1.0.9-h7f98852_6.tar.bz2#9e94bf16f14c78a36561d5019f490d22
https://conda.anaconda.org/conda-forge/linux-64/hdf4-4.2.15-h10796ff_3.tar.bz2#21a8d66dc17f065023b33145c42652fe
+https://conda.anaconda.org/conda-forge/linux-64/krb5-1.19.2-h3790be6_4.tar.bz2#dbbd32092ee31aab0f2d213e8f9f1b40
https://conda.anaconda.org/conda-forge/linux-64/libcblas-3.9.0-13_linux64_openblas.tar.bz2#b17676dbd6688396c3a3076259fb7907
-https://conda.anaconda.org/conda-forge/linux-64/libglib-2.70.2-h174f98d_1.tar.bz2#d03a54631298fd1ab732ff65f6ed3a07
+https://conda.anaconda.org/conda-forge/linux-64/libclang-13.0.1-default_hc23dcda_0.tar.bz2#8cebb0736cba83485b13dc10d242d96d
+https://conda.anaconda.org/conda-forge/linux-64/libglib-2.70.2-h174f98d_4.tar.bz2#d44314ffae96b17657fbf3f8e47b04fc
https://conda.anaconda.org/conda-forge/linux-64/liblapack-3.9.0-13_linux64_openblas.tar.bz2#018b80e8f21d8560ae4961567e3e00c9
-https://conda.anaconda.org/conda-forge/linux-64/libnghttp2-1.46.0-h812cca2_0.tar.bz2#507fa47e9075f889af8e8b72925379be
+https://conda.anaconda.org/conda-forge/linux-64/libnghttp2-1.47.0-h727a467_0.tar.bz2#a22567abfea169ff8048506b1ca9b230
https://conda.anaconda.org/conda-forge/linux-64/libpng-1.6.37-h21135ba_2.tar.bz2#b6acf807307d033d4b7e758b4f44b036
https://conda.anaconda.org/conda-forge/linux-64/libssh2-1.10.0-ha56f1ee_2.tar.bz2#6ab4eaa11ff01801cffca0a27489dc04
-https://conda.anaconda.org/conda-forge/linux-64/libtiff-4.3.0-h6f004c6_2.tar.bz2#34fda41ca84e67232888c9a885903055
+https://conda.anaconda.org/conda-forge/linux-64/libtiff-4.3.0-h542a066_3.tar.bz2#1a0efb4dfd880b0376da8e1ba39fa838
https://conda.anaconda.org/conda-forge/linux-64/libxml2-2.9.12-h885dcf4_1.tar.bz2#d1355eaa48f465782f228275a0a69771
https://conda.anaconda.org/conda-forge/linux-64/libzip-1.8.0-h4de3113_1.tar.bz2#175a746a43d42c053b91aa765fbc197d
https://conda.anaconda.org/conda-forge/linux-64/mysql-libs-8.0.28-hfa10184_0.tar.bz2#aac17542e50a474e2e632878dc696d50
https://conda.anaconda.org/conda-forge/linux-64/sqlite-3.37.0-h9cd32fc_0.tar.bz2#eb66fc098824d25518a79e83d12a81d6
-https://conda.anaconda.org/conda-forge/linux-64/tk-8.6.11-h27826a3_1.tar.bz2#84e76fb280e735fec1efd2d21fd9cb27
https://conda.anaconda.org/conda-forge/linux-64/xorg-libx11-1.7.2-h7f98852_0.tar.bz2#12a61e640b8894504326aadafccbb790
https://conda.anaconda.org/conda-forge/linux-64/atk-1.0-2.36.0-h3371d22_4.tar.bz2#661e1ed5d92552785d9f8c781ce68685
https://conda.anaconda.org/conda-forge/linux-64/brotli-1.0.9-h7f98852_6.tar.bz2#612385c4a83edb0619fe911d9da317f4
@@ -100,7 +102,8 @@ https://conda.anaconda.org/conda-forge/linux-64/freetype-2.10.4-h0708190_1.tar.b
https://conda.anaconda.org/conda-forge/linux-64/gdk-pixbuf-2.42.6-h04a7f16_0.tar.bz2#b24a1e18325a6e8f8b6b4a2ec5860ce2
https://conda.anaconda.org/conda-forge/linux-64/gstreamer-1.18.5-h9f60fe5_3.tar.bz2#511aa83cdfcc0132380db5daf2f15f27
https://conda.anaconda.org/conda-forge/linux-64/gts-0.7.6-h64030ff_2.tar.bz2#112eb9b5b93f0c02e59aea4fd1967363
-https://conda.anaconda.org/conda-forge/linux-64/krb5-1.19.2-hcc1bbae_3.tar.bz2#e29650992ae593bc05fc93722483e5c3
+https://conda.anaconda.org/conda-forge/linux-64/libcurl-7.81.0-h2574ce0_0.tar.bz2#1f8655741d0269ca6756f131522da1e8
+https://conda.anaconda.org/conda-forge/linux-64/libpq-14.2-hd57d9b9_0.tar.bz2#91b38e297e1cc79f88f7cbf7bdb248e0
https://conda.anaconda.org/conda-forge/linux-64/libwebp-1.2.2-h3452ae3_0.tar.bz2#c363665b4aabe56aae4f8981cff5b153
https://conda.anaconda.org/conda-forge/linux-64/libxkbcommon-1.0.3-he3ba5ed_0.tar.bz2#f9dbabc7e01c459ed7a1d1d64b206e9b
https://conda.anaconda.org/conda-forge/linux-64/nss-3.74-hb5efdd6_0.tar.bz2#136876ca50177058594f6c2944e95c40
@@ -109,28 +112,29 @@ https://conda.anaconda.org/conda-forge/linux-64/xorg-libxext-1.3.4-h7f98852_1.ta
https://conda.anaconda.org/conda-forge/linux-64/xorg-libxrender-0.9.10-h7f98852_1003.tar.bz2#f59c1242cc1dd93e72c2ee2b360979eb
https://conda.anaconda.org/conda-forge/noarch/alabaster-0.7.12-py_0.tar.bz2#2489a97287f90176ecdc3ca982b4b0a0
https://conda.anaconda.org/conda-forge/noarch/cfgv-3.3.1-pyhd8ed1ab_0.tar.bz2#ebb5f5f7dc4f1a3780ef7ea7738db08c
-https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-2.0.11-pyhd8ed1ab_0.tar.bz2#e51530e33440ea8044edb0076cb40a0f
+https://conda.anaconda.org/conda-forge/noarch/charset-normalizer-2.0.12-pyhd8ed1ab_0.tar.bz2#1f5b32dabae0f1893ae3283dac7f799e
https://conda.anaconda.org/conda-forge/noarch/cloudpickle-2.0.0-pyhd8ed1ab_0.tar.bz2#3a8fc8b627d5fb6af827e126a10a86c6
https://conda.anaconda.org/conda-forge/noarch/colorama-0.4.4-pyh9f0ad1d_0.tar.bz2#c08b4c1326b880ed44f3ffb04803332f
+https://conda.anaconda.org/conda-forge/linux-64/curl-7.81.0-h2574ce0_0.tar.bz2#3a95d393b490f82aa406f1892fad84d9
https://conda.anaconda.org/conda-forge/noarch/cycler-0.11.0-pyhd8ed1ab_0.tar.bz2#a50559fad0affdbb33729a68669ca1cb
https://conda.anaconda.org/conda-forge/noarch/distlib-0.3.4-pyhd8ed1ab_0.tar.bz2#7b50d840543d9cdae100e91582c33035
-https://conda.anaconda.org/conda-forge/noarch/filelock-3.4.2-pyhd8ed1ab_1.tar.bz2#d3f5797d3f9625c64860c93fc4359e64
-https://conda.anaconda.org/conda-forge/linux-64/fontconfig-2.13.94-ha180cfb_0.tar.bz2#c534c5248da4913002473919d76d0161
-https://conda.anaconda.org/conda-forge/noarch/fsspec-2022.1.0-pyhd8ed1ab_0.tar.bz2#188e095f4dc38887bb48b065734b9e8d
+https://conda.anaconda.org/conda-forge/noarch/filelock-3.6.0-pyhd8ed1ab_0.tar.bz2#6e03ca6c7b47a4152a2b12c6eee3bd32
+https://conda.anaconda.org/conda-forge/linux-64/fontconfig-2.13.96-ha180cfb_0.tar.bz2#d190a1c55c84ba1c9a33484a38ece029
+https://conda.anaconda.org/conda-forge/noarch/fsspec-2022.2.0-pyhd8ed1ab_0.tar.bz2#f31e31092035d427b05233ab924c7613
https://conda.anaconda.org/conda-forge/linux-64/gst-plugins-base-1.18.5-hf529b03_3.tar.bz2#524a9f1718bac53a6cf4906bcc51d044
+https://conda.anaconda.org/conda-forge/linux-64/hdf5-1.12.1-mpi_mpich_h08b82f9_4.tar.bz2#975d5635b158c1b3c5c795f9d0a430a1
https://conda.anaconda.org/conda-forge/noarch/idna-3.3-pyhd8ed1ab_0.tar.bz2#40b50b8b030f5f2f22085c062ed013dd
https://conda.anaconda.org/conda-forge/noarch/imagesize-1.3.0-pyhd8ed1ab_0.tar.bz2#be807e7606fff9436e5e700f6bffb7c6
https://conda.anaconda.org/conda-forge/noarch/iris-sample-data-2.4.0-pyhd8ed1ab_0.tar.bz2#18ee9c07cf945a33f92caf1ee3d23ad9
-https://conda.anaconda.org/conda-forge/linux-64/libcurl-7.81.0-h2574ce0_0.tar.bz2#1f8655741d0269ca6756f131522da1e8
-https://conda.anaconda.org/conda-forge/linux-64/libpq-14.1-hd57d9b9_1.tar.bz2#a7024916bfdf33a014a0cc803580c9a1
https://conda.anaconda.org/conda-forge/noarch/locket-0.2.0-py_2.tar.bz2#709e8671651c7ec3d1ad07800339ff1d
https://conda.anaconda.org/conda-forge/noarch/munkres-1.1.4-pyh9f0ad1d_0.tar.bz2#2ba8498c1018c1e9c61eb99b973dfe19
https://conda.anaconda.org/conda-forge/noarch/nose-1.3.7-py_1006.tar.bz2#382019d5f8e9362ef6f60a8d4e7bce8f
https://conda.anaconda.org/conda-forge/noarch/olefile-0.46-pyh9f0ad1d_1.tar.bz2#0b2e68acc8c78c8cc392b90983481f58
-https://conda.anaconda.org/conda-forge/noarch/platformdirs-2.3.0-pyhd8ed1ab_0.tar.bz2#7bc119135be2a43e1701432399d8c28a
+https://conda.anaconda.org/conda-forge/noarch/platformdirs-2.5.1-pyhd8ed1ab_0.tar.bz2#d5df87964a39f67c46a5448f4e78d9b6
+https://conda.anaconda.org/conda-forge/linux-64/proj-8.2.1-h277dcde_0.tar.bz2#f2ceb1be6565c35e2db0ac948754751d
https://conda.anaconda.org/conda-forge/noarch/pycparser-2.21-pyhd8ed1ab_0.tar.bz2#076becd9e05608f8dc72757d5f3a91ff
https://conda.anaconda.org/conda-forge/noarch/pyparsing-3.0.7-pyhd8ed1ab_0.tar.bz2#727e2216d9c47455d8ddc060eb2caad9
-https://conda.anaconda.org/conda-forge/noarch/pyshp-2.1.3-pyh44b312d_0.tar.bz2#2d1867b980785eb44b8122184d8b42a6
+https://conda.anaconda.org/conda-forge/noarch/pyshp-2.2.0-pyhd8ed1ab_0.tar.bz2#2aa546be05be34b8e1744afd327b623f
https://conda.anaconda.org/conda-forge/linux-64/python_abi-3.8-2_cp38.tar.bz2#bfbb29d517281e78ac53e48d21e6e860
https://conda.anaconda.org/conda-forge/noarch/pytz-2021.3-pyhd8ed1ab_0.tar.bz2#7e4f811bff46a5a6a7e0094921389395
https://conda.anaconda.org/conda-forge/noarch/six-1.16.0-pyh6c4a22f_0.tar.bz2#e5f25f8dbc060e9a8d912e432202afc2
@@ -149,75 +153,72 @@ https://conda.anaconda.org/conda-forge/noarch/babel-2.9.1-pyh44b312d_0.tar.bz2#7
https://conda.anaconda.org/conda-forge/linux-64/cairo-1.16.0-ha00ac49_1009.tar.bz2#d1dff57b8731c245d3247b46d002e1c9
https://conda.anaconda.org/conda-forge/linux-64/certifi-2021.10.8-py38h578d9bd_1.tar.bz2#52a6cee65a5d10ed1c3f0af24fb48dd3
https://conda.anaconda.org/conda-forge/linux-64/cffi-1.15.0-py38h3931269_0.tar.bz2#9c491a90ae11d08ca97326a0ed876f3a
-https://conda.anaconda.org/conda-forge/linux-64/curl-7.81.0-h2574ce0_0.tar.bz2#3a95d393b490f82aa406f1892fad84d9
https://conda.anaconda.org/conda-forge/linux-64/docutils-0.16-py38h578d9bd_3.tar.bz2#a7866449fb9e5e4008a02df276549d34
-https://conda.anaconda.org/conda-forge/linux-64/hdf5-1.12.1-mpi_mpich_h9c45103_3.tar.bz2#4f1a733e563d27b98010b62888e149c9
-https://conda.anaconda.org/conda-forge/linux-64/importlib-metadata-4.10.1-py38h578d9bd_0.tar.bz2#26da12e39b1b93e82fb865e967d0cbe0
+https://conda.anaconda.org/conda-forge/linux-64/importlib-metadata-4.11.2-py38h578d9bd_0.tar.bz2#0c1ffd6807cbf6c15456c49ca9baa668
https://conda.anaconda.org/conda-forge/linux-64/kiwisolver-1.3.2-py38h1fd1430_1.tar.bz2#085365abfe53d5d13bb68b1dda0b439e
https://conda.anaconda.org/conda-forge/linux-64/libgd-2.3.3-h3cfcdeb_1.tar.bz2#37d7568c595f0cfcd0c493f5ca0344ab
-https://conda.anaconda.org/conda-forge/linux-64/markupsafe-2.0.1-py38h497a2fe_1.tar.bz2#1ef7b5f4826ca48a15e2cd98a5c3436d
+https://conda.anaconda.org/conda-forge/linux-64/libnetcdf-4.8.1-mpi_mpich_h319fa22_1.tar.bz2#7583fbaea3648f692c0c019254bc196c
+https://conda.anaconda.org/conda-forge/linux-64/markupsafe-2.1.0-py38h0a891b7_1.tar.bz2#60eff55f2a845f35e58bd0be235fe4b7
https://conda.anaconda.org/conda-forge/linux-64/mpi4py-3.1.3-py38he865349_0.tar.bz2#b1b3d6847a68251a1465206ab466b475
-https://conda.anaconda.org/conda-forge/linux-64/numpy-1.22.2-py38h6ae9a64_0.tar.bz2#065a900932f904e0182acfcfadc467e3
+https://conda.anaconda.org/conda-forge/linux-64/numpy-1.22.3-py38h05e7239_0.tar.bz2#90b4ee61abb81fb3f3995ec9d4c734f0
https://conda.anaconda.org/conda-forge/noarch/packaging-21.3-pyhd8ed1ab_0.tar.bz2#71f1ab2de48613876becddd496371c85
https://conda.anaconda.org/conda-forge/noarch/partd-1.2.0-pyhd8ed1ab_0.tar.bz2#0c32f563d7f22e3a34c95cad8cc95651
https://conda.anaconda.org/conda-forge/linux-64/pillow-6.2.1-py38hd70f55b_1.tar.bz2#80d719bee2b77a106b199150c0829107
https://conda.anaconda.org/conda-forge/noarch/pockets-0.9.1-py_0.tar.bz2#1b52f0c42e8077e5a33e00fe72269364
-https://conda.anaconda.org/conda-forge/linux-64/proj-8.2.1-h277dcde_0.tar.bz2#f2ceb1be6565c35e2db0ac948754751d
https://conda.anaconda.org/conda-forge/linux-64/pyqt5-sip-4.19.18-py38h709712a_8.tar.bz2#11b72f5b1cc15427c89232321172a0bc
https://conda.anaconda.org/conda-forge/linux-64/pysocks-1.7.1-py38h578d9bd_4.tar.bz2#9c4bbee6f682f2fc7d7803df3996e77e
https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.8.2-pyhd8ed1ab_0.tar.bz2#dd999d1cc9f79e67dbb855c8924c7984
-https://conda.anaconda.org/conda-forge/linux-64/python-xxhash-2.0.2-py38h497a2fe_1.tar.bz2#977d03222271270ea8fe35388bf13752
+https://conda.anaconda.org/conda-forge/linux-64/python-xxhash-3.0.0-py38h0a891b7_0.tar.bz2#12eaa8cbfedfbf7879e5653467b03c94
https://conda.anaconda.org/conda-forge/linux-64/pyyaml-6.0-py38h497a2fe_3.tar.bz2#131de7d638aa59fb8afbce59f1a8aa98
https://conda.anaconda.org/conda-forge/linux-64/qt-5.12.9-ha98a1a1_5.tar.bz2#9b27fa0b1044a2119fb1b290617fe06f
-https://conda.anaconda.org/conda-forge/linux-64/setuptools-60.7.1-py38h578d9bd_0.tar.bz2#8bf9c51a7e371df1673de909c1f46e6c
+https://conda.anaconda.org/conda-forge/linux-64/setuptools-60.9.3-py38h578d9bd_0.tar.bz2#864b832ea94d9c0b37ddfbbb8adb42f1
https://conda.anaconda.org/conda-forge/linux-64/tornado-6.1-py38h497a2fe_2.tar.bz2#63b3b55c98b4239134e0be080f448944
https://conda.anaconda.org/conda-forge/linux-64/unicodedata2-14.0.0-py38h497a2fe_0.tar.bz2#8da7787169411910df2a62dc8ef533e0
-https://conda.anaconda.org/conda-forge/linux-64/virtualenv-20.13.0-py38h578d9bd_0.tar.bz2#561081f4a30990533541979c9ee84732
+https://conda.anaconda.org/conda-forge/linux-64/virtualenv-20.13.3-py38h578d9bd_0.tar.bz2#4f2dd671de7a8666acdc51a9dd6d4324
https://conda.anaconda.org/conda-forge/linux-64/brotlipy-0.7.0-py38h497a2fe_1003.tar.bz2#9189b42c42b9c87b2b2068cbe31901a8
-https://conda.anaconda.org/conda-forge/linux-64/cftime-1.5.2-py38h6c62de6_0.tar.bz2#73892e60ccea826c7f7a2215e48d22cf
+https://conda.anaconda.org/conda-forge/linux-64/cftime-1.6.0-py38h3ec907f_0.tar.bz2#35411e5fc8dd523f9e68316847e6a25b
https://conda.anaconda.org/conda-forge/linux-64/cryptography-36.0.1-py38h3e25421_0.tar.bz2#acc14d0d71dbf74f6a15f2456951b6cf
-https://conda.anaconda.org/conda-forge/noarch/dask-core-2022.1.1-pyhd8ed1ab_0.tar.bz2#7968db84df10b74d9792d66d7da216df
+https://conda.anaconda.org/conda-forge/noarch/dask-core-2022.2.1-pyhd8ed1ab_0.tar.bz2#0cb751f07e68fda1d631a02faa66f0de
https://conda.anaconda.org/conda-forge/linux-64/fonttools-4.29.1-py38h497a2fe_0.tar.bz2#121e02be214af4980911bb2cbd5b2742
-https://conda.anaconda.org/conda-forge/linux-64/harfbuzz-3.3.1-hb4a5f5f_0.tar.bz2#abe529a4b140720078f0febe1b6014a4
+https://conda.anaconda.org/conda-forge/linux-64/harfbuzz-3.4.0-hb4a5f5f_0.tar.bz2#42190c4597593e9742513d7b39b02c49
https://conda.anaconda.org/conda-forge/noarch/jinja2-3.0.3-pyhd8ed1ab_0.tar.bz2#036d872c653780cb26e797e2e2f61b4c
-https://conda.anaconda.org/conda-forge/linux-64/libnetcdf-4.8.1-mpi_mpich_h319fa22_1.tar.bz2#7583fbaea3648f692c0c019254bc196c
https://conda.anaconda.org/conda-forge/linux-64/mo_pack-0.2.0-py38h6c62de6_1006.tar.bz2#829b1209dfadd431a11048d6eeaf5bef
+https://conda.anaconda.org/conda-forge/linux-64/netcdf-fortran-4.5.4-mpi_mpich_h1364a43_0.tar.bz2#b6ba4f487ef9fd5d353ff277df06d133
https://conda.anaconda.org/conda-forge/noarch/nodeenv-1.6.0-pyhd8ed1ab_0.tar.bz2#0941325bf48969e2b3b19d0951740950
-https://conda.anaconda.org/conda-forge/linux-64/pandas-1.4.0-py38h43a58ef_0.tar.bz2#23427f52c81076594a95c006ebf7552e
-https://conda.anaconda.org/conda-forge/noarch/pip-22.0.3-pyhd8ed1ab_0.tar.bz2#45dedae69a0ea21cb8566d04b2ca5536
+https://conda.anaconda.org/conda-forge/linux-64/pandas-1.4.1-py38h43a58ef_0.tar.bz2#1083ebe2edc30e4fb9568d1f66e3588b
+https://conda.anaconda.org/conda-forge/noarch/pip-22.0.4-pyhd8ed1ab_0.tar.bz2#b1239ce8ef2a1eec485c398a683c5bff
https://conda.anaconda.org/conda-forge/noarch/pygments-2.11.2-pyhd8ed1ab_0.tar.bz2#caef60540e2239e27bf62569a5015e3b
https://conda.anaconda.org/conda-forge/linux-64/pyproj-3.3.0-py38h5383654_1.tar.bz2#5b600e019fa7c33be73bdb626236936b
https://conda.anaconda.org/conda-forge/linux-64/pyqt-impl-5.12.3-py38h0ffb2e6_8.tar.bz2#acfc7625a212c27f7decdca86fdb2aba
https://conda.anaconda.org/conda-forge/linux-64/python-stratify-0.2.post0-py38h6c62de6_1.tar.bz2#a350e3f4ca899e95122f66806e048858
https://conda.anaconda.org/conda-forge/linux-64/pywavelets-1.2.0-py38h6c62de6_1.tar.bz2#2953d3fc0113fc6ffb955a5b72811fb0
-https://conda.anaconda.org/conda-forge/linux-64/scipy-1.7.3-py38h56a6a73_0.tar.bz2#2d318049369bb52d2687b0ac2be82751
+https://conda.anaconda.org/conda-forge/linux-64/scipy-1.8.0-py38h56a6a73_1.tar.bz2#86073932d9e675c5929376f6f8b79b97
https://conda.anaconda.org/conda-forge/linux-64/shapely-1.8.0-py38h596eeab_5.tar.bz2#ec3b783081e14a9dc0eb5ce609649728
https://conda.anaconda.org/conda-forge/noarch/sphinxcontrib-napoleon-0.7-py_0.tar.bz2#0bc25ff6f2e34af63ded59692df5f749
https://conda.anaconda.org/conda-forge/linux-64/ukkonen-1.0.1-py38h1fd1430_1.tar.bz2#c494f75082f9c052944fda1b22c83336
https://conda.anaconda.org/conda-forge/linux-64/cf-units-3.0.1-py38h6c62de6_2.tar.bz2#350322b046c129e5802b79358a1343f7
-https://conda.anaconda.org/conda-forge/noarch/identify-2.4.8-pyhd8ed1ab_0.tar.bz2#d4d25c0b7c1a7a1b0442e061fdd49260
+https://conda.anaconda.org/conda-forge/linux-64/esmf-8.2.0-mpi_mpich_h4975321_100.tar.bz2#56f5c650937b1667ad0a557a0dff3bc4
+https://conda.anaconda.org/conda-forge/noarch/identify-2.4.11-pyhd8ed1ab_0.tar.bz2#979d7dfda4d04702391e80158c322039
https://conda.anaconda.org/conda-forge/noarch/imagehash-4.2.1-pyhd8ed1ab_0.tar.bz2#01cc8698b6e1a124dc4f585516c27643
https://conda.anaconda.org/conda-forge/linux-64/matplotlib-base-3.5.1-py38hf4fb855_0.tar.bz2#47cf0cab2ae368e1062e75cfbc4277af
-https://conda.anaconda.org/conda-forge/linux-64/netcdf-fortran-4.5.4-mpi_mpich_h1364a43_0.tar.bz2#b6ba4f487ef9fd5d353ff277df06d133
https://conda.anaconda.org/conda-forge/linux-64/netcdf4-1.5.8-nompi_py38h2823cc8_101.tar.bz2#1dfe1cdee4532c72f893955259eb3de9
-https://conda.anaconda.org/conda-forge/linux-64/pango-1.50.3-h9967ed3_0.tar.bz2#37f1c68380bc5dfe0f5bb2655e207a73
+https://conda.anaconda.org/conda-forge/linux-64/pango-1.50.5-h4dcc4a0_0.tar.bz2#56ce3e3bec0d5c9e6db22083a3ef5e13
https://conda.anaconda.org/conda-forge/noarch/pyopenssl-22.0.0-pyhd8ed1ab_0.tar.bz2#1d7e241dfaf5475e893d4b824bb71b44
https://conda.anaconda.org/conda-forge/linux-64/pyqtchart-5.12-py38h7400c14_8.tar.bz2#78a2a6cb4ef31f997c1bee8223a9e579
https://conda.anaconda.org/conda-forge/linux-64/pyqtwebengine-5.12.1-py38h7400c14_8.tar.bz2#857894ea9c5e53c962c3a0932efa71ea
https://conda.anaconda.org/conda-forge/linux-64/cartopy-0.20.2-py38ha217159_3.tar.bz2#d7461e191f7a0522e4709612786bdf4e
-https://conda.anaconda.org/conda-forge/linux-64/esmf-8.2.0-mpi_mpich_h4975321_100.tar.bz2#56f5c650937b1667ad0a557a0dff3bc4
+https://conda.anaconda.org/conda-forge/linux-64/esmpy-8.2.0-mpi_mpich_py38h9147699_101.tar.bz2#5a9de1dec507b6614150a77d1aabf257
https://conda.anaconda.org/conda-forge/linux-64/gtk2-2.24.33-h90689f9_2.tar.bz2#957a0255ab58aaf394a91725d73ab422
https://conda.anaconda.org/conda-forge/linux-64/librsvg-2.52.5-h0a9e6e8_2.tar.bz2#aa768fdaad03509a97df37f81163346b
https://conda.anaconda.org/conda-forge/noarch/nc-time-axis-1.4.0-pyhd8ed1ab_0.tar.bz2#9113b4e4fa2fa4a7f129c71a6f319475
https://conda.anaconda.org/conda-forge/linux-64/pre-commit-2.17.0-py38h578d9bd_0.tar.bz2#839ac9dba9a6126c9532781a9ea4506b
https://conda.anaconda.org/conda-forge/linux-64/pyqt-5.12.3-py38h578d9bd_8.tar.bz2#88368a5889f31dff922a2d57bbfc3f5b
https://conda.anaconda.org/conda-forge/noarch/urllib3-1.26.8-pyhd8ed1ab_1.tar.bz2#53f1387c68c21cecb386e2cde51b3f7c
-https://conda.anaconda.org/conda-forge/linux-64/esmpy-8.2.0-mpi_mpich_py38h9147699_101.tar.bz2#5a9de1dec507b6614150a77d1aabf257
-https://conda.anaconda.org/conda-forge/linux-64/graphviz-2.50.0-h8e749b2_2.tar.bz2#8c20fd968c8b6af73444b1199d5fb0cb
+https://conda.anaconda.org/conda-forge/linux-64/graphviz-3.0.0-h5abf519_0.tar.bz2#e5521af56c6e927397ca9851eecb2f48
https://conda.anaconda.org/conda-forge/linux-64/matplotlib-3.5.1-py38h578d9bd_0.tar.bz2#0d78be9cf1c400ba8e3077cf060492f1
https://conda.anaconda.org/conda-forge/noarch/requests-2.27.1-pyhd8ed1ab_0.tar.bz2#7c1c427246b057b8fa97200ecdb2ed62
https://conda.anaconda.org/conda-forge/noarch/sphinx-4.4.0-pyh6c4a22f_1.tar.bz2#a9025d14c2a609e0d895ad3e75b5369c
-https://conda.anaconda.org/conda-forge/noarch/sphinx-copybutton-0.4.0-pyhd8ed1ab_0.tar.bz2#80fd2cc25ad45911b4e42d5b91593e2f
+https://conda.anaconda.org/conda-forge/noarch/sphinx-copybutton-0.5.0-pyhd8ed1ab_0.tar.bz2#4c969cdd5191306c269490f7ff236d9c
https://conda.anaconda.org/conda-forge/noarch/sphinx-gallery-0.10.1-pyhd8ed1ab_0.tar.bz2#4918585fe5e5341740f7e63c61743efb
https://conda.anaconda.org/conda-forge/noarch/sphinx-panels-0.6.0-pyhd8ed1ab_0.tar.bz2#6eec6480601f5d15babf9c3b3987f34a
https://conda.anaconda.org/conda-forge/noarch/sphinx_rtd_theme-1.0.0-pyhd8ed1ab_0.tar.bz2#9f633f2f2869184e31acfeae95b24345
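(Note on the hunk above: this file is a conda "explicit" lockfile — one fully pinned conda-forge package URL per line, each with a trailing MD5 hash — so CI can rebuild the environment byte-for-byte without re-running the solver. Assuming the usual conda workflow, such a file is consumed with something like `conda create --name <env> --file <this lockfile>`; the environment name and path here are placeholders, not part of this patch.)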
diff --git a/requirements/ci/py38.yml b/requirements/ci/py38.yml
index 320895526a..91cd9d3f5f 100644
--- a/requirements/ci/py38.yml
+++ b/requirements/ci/py38.yml
@@ -25,7 +25,7 @@ dependencies:
- graphviz
- iris-sample-data >=2.4.0
- mo_pack
- - nc-time-axis >=1.3
+ - nc-time-axis >=1.4
- pandas
- pip
- python-stratify
diff --git a/setup.cfg b/setup.cfg
index 1d3fb8b7c9..ecdcad85b2 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -2,7 +2,7 @@
author = SciTools Developers
author_email = scitools-iris-dev@googlegroups.com
classifiers =
- Development Status :: 5 Production/Stable
+ Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)
Operating System :: MacOS
@@ -11,7 +11,6 @@ classifiers =
Operating System :: Unix
Programming Language :: Python
Programming Language :: Python :: 3 :: Only
- Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: Implementation :: CPython
Topic :: Scientific/Engineering
@@ -82,7 +81,7 @@ test =
requests
all =
mo_pack
- nc-time-axis>=1.3
+ nc-time-axis>=1.4
pandas
stratify
%(docs)s
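
A note on the `%(docs)s` entry retained at the end of the `all` extra above: setuptools reads setup.cfg through configparser-style `%(key)s` interpolation, so `all` expands to its own requirements plus everything listed under the `docs` extra. A minimal, runnable sketch of that mechanism (the requirement lists are abbreviated for illustration and are not the project's full extras):

    # Demonstrates configparser "%(key)s" interpolation, the mechanism
    # setuptools relies on when "%(docs)s" appears inside setup.cfg extras.
    from configparser import ConfigParser
    from textwrap import dedent

    cfg = ConfigParser()
    cfg.read_string(dedent("""\
        [options.extras_require]
        docs =
            sphinx
            sphinx_rtd_theme
        all =
            mo_pack
            nc-time-axis>=1.4
            %(docs)s
        """))

    # "all" now contains its own pins followed by the whole "docs" list.
    print(cfg["options.extras_require"]["all"])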