From 7d33c40d5339234b586663094d616c33855d9ed3 Mon Sep 17 00:00:00 2001 From: Wes Bonelli Date: Wed, 10 Aug 2022 12:56:54 -0400 Subject: [PATCH] test: update pytest framework (#1493) * use pytest-benchmark's builtin profiling capability instead of manual implementation * remove requires_exe(mf6) from test_mf6.py tests that don't run models/simulations * add @requires_spatial_reference marker to conftest.py (for tests depending on spatialreference.org) * try both importlib.import_module and pkg_resources.get_distribution in @requires_pkg marker * mark test_lgr.py::test_simple_lgr_model_from_scratch as flaky (occasional forrtl error (65): floating invalid) * split test_export.py::test_polygon_from_ij into network-bound and non-network-bound cases * add comments to flaky tests with links to potentially similar issues * add timeouts to CI jobs (10min for build, lint, & smoke, 45min for test, 90min for daily jobs) * remove unneeded markers from pytest.ini * match profiling/benchmarking test files in pytest.ini * mark get-modflow tests as flaky (https://github.com/modflowpy/flopy/pull/1489#issuecomment-1209777379) * cache benchmark results in daily CI and compare with prior runs * various tidying/cleanup --- .github/workflows/commit.yml | 11 ++-- .github/workflows/daily.yml | 46 ++++++++++++---- DEVELOPER.md | 35 ++++++++---- autotest/conftest.py | 70 ++++++++++++------------ autotest/pytest.ini | 4 +- autotest/regression/test_lgr.py | 4 +- autotest/regression/test_mf6.py | 71 ++++++++++++------------ autotest/test_binarygrid_util.py | 3 + autotest/test_example_scripts.py | 1 - autotest/test_export.py | 91 ++++++++++++++++++++++++------- autotest/test_grid.py | 6 +- autotest/test_lgr.py | 4 ++ autotest/test_mf6.py | 30 +++------- autotest/test_modpathfile.py | 94 +------------------------------- autotest/test_plot.py | 4 +- autotest/test_scripts.py | 5 ++ autotest/test_usg.py | 3 +- 17 files changed, 243 insertions(+), 239 deletions(-) diff --git a/.github/workflows/commit.yml b/.github/workflows/commit.yml index 9264d2ac10..9a6b33d207 100644 --- a/.github/workflows/commit.yml +++ b/.github/workflows/commit.yml @@ -18,6 +18,7 @@ jobs: defaults: run: shell: bash + timeout-minutes: 10 steps: - name: Checkout repo @@ -50,13 +51,13 @@ jobs: run: | twine check --strict dist/* - lint: name: Lint runs-on: ubuntu-latest defaults: run: shell: bash + timeout-minutes: 10 steps: - name: Checkout repo @@ -106,14 +107,13 @@ jobs: run: | pylint --jobs=2 --errors-only --exit-zero ./flopy - - smoke: name: Smoke runs-on: ubuntu-latest defaults: run: shell: bash + timeout-minutes: 10 steps: - name: Checkout repo @@ -185,7 +185,6 @@ jobs: directory: ./autotest file: coverage.xml - test: name: Test needs: smoke @@ -204,6 +203,7 @@ jobs: path: ~/.cache/pip - os: macos-latest path: ~/Library/Caches/pip + timeout-minutes: 45 steps: - name: Checkout repo @@ -290,6 +290,7 @@ jobs: defaults: run: shell: pwsh + timeout-minutes: 45 steps: - name: Checkout repo @@ -302,7 +303,7 @@ jobs: uses: actions/cache@v2.1.0 with: path: ~/conda_pkgs_dir - key: ${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.run-type }}-${{ hashFiles('etc/environment.yml', 'flopy') }} + key: ${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.run-type }}-${{ hashFiles('etc/environment.yml') }} # Standard python fails on windows without GDAL installation # Using custom bash shell ("shell: bash -l {0}") with Miniconda diff --git a/.github/workflows/daily.yml b/.github/workflows/daily.yml index 7015e65f18..0fb77f983b 100644 --- 
a/.github/workflows/daily.yml +++ b/.github/workflows/daily.yml @@ -25,6 +25,7 @@ jobs: defaults: run: shell: bash + timeout-minutes: 90 steps: - name: Checkout repo @@ -90,7 +91,6 @@ jobs: file: coverage.xml examples: - name: Example scripts & notebooks runs-on: ${{ matrix.os }} strategy: @@ -110,6 +110,7 @@ jobs: defaults: run: shell: bash + timeout-minutes: 90 steps: - name: Checkout repo @@ -194,6 +195,7 @@ jobs: defaults: run: shell: bash + timeout-minutes: 90 steps: - name: Checkout repo @@ -230,14 +232,23 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - name: Run tests + - name: Load cached benchmark results (for comparison) + uses: actions/cache@v2.1.0 + with: + path: ./autotest/.benchmarks + key: benchmark-${{ matrix.os }}-${{ matrix.python-version }} }} + + - name: Run benchmarks working-directory: ./autotest run: | - pytest -v --cov=flopy --cov-report=xml --durations=0 --benchmark-only --benchmark-autosave --keep-failed=.failed + pytest -v --durations=0 \ + --cov=flopy --cov-report=xml \ + --benchmark-only --benchmark-autosave --benchmark-compare --benchmark-compare-fail=mean:25% \ + --keep-failed=.failed env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - name: Upload failed test outputs + - name: Upload failed benchmark outputs uses: actions/upload-artifact@v2 if: failure() with: @@ -279,6 +290,7 @@ jobs: defaults: run: shell: pwsh + timeout-minutes: 90 steps: - name: Checkout repo @@ -291,7 +303,7 @@ jobs: uses: actions/cache@v2.1.0 with: path: ~/conda_pkgs_dir - key: ${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.run-type }}-${{ hashFiles('etc/environment.yml', 'flopy') }} + key: ${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.run-type }}-${{ hashFiles('etc/environment.yml') }} # Standard python fails on windows without GDAL installation # Using custom bash shell ("shell: bash -l {0}") with Miniconda @@ -362,6 +374,7 @@ jobs: defaults: run: shell: pwsh + timeout-minutes: 90 steps: - name: Checkout repo @@ -374,7 +387,7 @@ jobs: uses: actions/cache@v2.1.0 with: path: ~/conda_pkgs_dir - key: ${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.run-type }}-${{ hashFiles('etc/environment.yml', 'flopy') }} + key: ${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.run-type }}-${{ hashFiles('etc/environment.yml') }} # Standard python fails on windows without GDAL installation # Using custom bash shell ("shell: bash -l {0}") with Miniconda @@ -410,7 +423,6 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - name: Upload failed test outputs uses: actions/upload-artifact@v2 if: failure() @@ -446,6 +458,7 @@ jobs: defaults: run: shell: pwsh + timeout-minutes: 90 steps: - name: Checkout repo @@ -458,7 +471,7 @@ jobs: uses: actions/cache@v2.1.0 with: path: ~/conda_pkgs_dir - key: ${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.run-type }}-${{ hashFiles('etc/environment.yml', 'flopy') }} + key: ${{ runner.os }}-${{ matrix.python-version }}-${{ matrix.run-type }}-${{ hashFiles('etc/environment.yml') }} # Standard python fails on windows without GDAL installation # Using custom bash shell ("shell: bash -l {0}") with Miniconda @@ -487,14 +500,23 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - name: Run tests + - name: Load cached benchmark results (for comparison) + uses: actions/cache@v2.1.0 + with: + path: ./autotest/.benchmarks + key: benchmark-${{ runner.os }}-${{ matrix.python-version }} }} + + - name: Run benchmarks working-directory: ./autotest run: | - pytest -v --cov=flopy --cov-report=xml --durations=0 
--benchmark-only --benchmark-autosave --keep-failed=.failed + pytest -v --durations=0 \ + --cov=flopy --cov-report=xml \ + --benchmark-only --benchmark-autosave --benchmark-compare --benchmark-compare-fail=mean:25% \ + --keep-failed=.failed env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - name: Upload failed test outputs + - name: Upload failed benchmark outputs uses: actions/upload-artifact@v2 if: failure() with: @@ -505,7 +527,7 @@ jobs: - name: Upload benchmark results uses: actions/upload-artifact@v2 with: - name: benchmark-${{ matrix.os }}-${{ matrix.python-version }} + name: benchmark-${{ runner.os }}-${{ matrix.python-version }} path: | ./autotest/.benchmarks/**/*.json diff --git a/DEVELOPER.md b/DEVELOPER.md index 92f7a5447c..ee3b2bd4db 100644 --- a/DEVELOPER.md +++ b/DEVELOPER.md @@ -192,8 +192,6 @@ Markers are a `pytest` feature that can be used to select subsets of tests. Mark - `slow`: tests that don't complete in a few seconds - `example`: exercise scripts, tutorials and notebooks - `regression`: tests that compare multiple results -- `benchmark`: test that gather runtime statistics -- `profile`: tests measuring performance in detail Markers can be used with the `-m <marker>` option. For example, to run only fast tests: @@ -221,9 +219,20 @@ This will retain the test directories created by the test, which allows files to There is also a `--keep-failed <path>` option which preserves the outputs of failed tests in the given location, however this option is only compatible with function-scoped temporary directories (the `tmpdir` fixture defined in `conftest.py`). -### Benchmarking +### Performance testing -Benchmarking is accomplished with [`pytest-benchmark`](https://pytest-benchmark.readthedocs.io/en/latest/index.html). Any test function can be turned into a benchmark by requesting the `benchmark` fixture (i.e. declaring a `benchmark` argument), which can be used to wrap any function call. For instance: +Performance testing is accomplished with [`pytest-benchmark`](https://pytest-benchmark.readthedocs.io/en/latest/index.html). + +To allow optional separation of performance from correctness concerns, performance test files may be named either as typical test files or may match any of the following patterns: + +- `benchmark_*.py` +- `profile_*.py` +- `*_profile*.py` +- `*_benchmark*.py` + +#### Benchmarking + +Any test function can be turned into a benchmark by requesting the `benchmark` fixture (i.e. declaring a `benchmark` argument), which can be used to wrap any function call. For instance: ```python def test_benchmark(benchmark): @@ -251,25 +260,27 @@ Rather than alter an existing function call to use this syntax, a lambda can be ```python def test_benchmark(benchmark): - def sleep_1s(): + def sleep_s(s): import time - time.sleep(1) + time.sleep(s) return True - assert benchmark(lambda: sleep_1s()) + assert benchmark(lambda: sleep_s(1)) ``` This can be convenient when the function call is complicated or passes many arguments. -To control the number of repetitions and rounds (repetitions of repetitions) use `benchmark.pedantic`, e.g. `benchmark.pedantic(some_function(), iterations=1, rounds=1)`. +Benchmarked functions are repeated several times (the number of iterations depending on the test's runtime, with faster tests generally getting more reps) to compute summary statistics. To control the number of repetitions and rounds (repetitions of repetitions) use `benchmark.pedantic`, e.g. `benchmark.pedantic(some_function, iterations=1, rounds=1)`.
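For instance, a minimal sketch (hypothetical test name; `time.sleep` is timed purely for illustration) of `pedantic` usage, which takes the callable itself plus any arguments:

```python
def test_benchmark_pedantic(benchmark):
    import time

    # run exactly one round of a single iteration, passing arguments explicitly
    benchmark.pedantic(time.sleep, args=(0.1,), iterations=1, rounds=1)
```

Fixing `iterations` and `rounds` like this trades statistical robustness for a predictable runtime.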
+ +Benchmarking is incompatible with `pytest-xdist` and is disabled automatically when tests are run in parallel. When tests are not run in parallel, benchmarking is enabled by default. Benchmarks can be disabled with the `--benchmark-disable` flag. -Benchmarked functions are repeated several times (the number of iterations depending on the test's runtime, with faster tests generally getting more reps) to compute summary statistics. Benchmarking is incompatible with `pytest-xdist` and is disabled automatically when tests are run in parallel. When tests are not run in parallel, benchmarking is enabled by default. Benchmarks can be disabled with the `--benchmark-disable` flag. +Benchmark results are only printed to `stdout` by default. To save results to a JSON file, use `--benchmark-autosave`. This will create a `.benchmarks` folder in the current working location (if you're running tests, this should be `autotest/.benchmarks`). -Benchmark results are only printed to stdout by default. To save results to a JSON file, use `--benchmark-autosave`. This will create a `.benchmarks` folder in the current working location (if you're running tests, this should appear at `autotest/.benchmarks`). +#### Profiling -### Profiling +Profiling is [distinct](https://stackoverflow.com/a/39381805/6514033) from benchmarking in evaluating a program's call stack in detail, while benchmarking just invokes a function repeatedly and computes summary statistics. Profiling is also accomplished with `pytest-benchmark`: use the `--benchmark-cprofile` option when running tests which use the `benchmark` fixture described above. The option's value is the column to sort results by. For instance, to sort by total time, use `--benchmark-cprofile="tottime"`. See the `pytest-benchmark` [docs](https://pytest-benchmark.readthedocs.io/en/stable/usage.html#commandline-options) for more information. -Profiling is [distinct](https://stackoverflow.com/a/39381805/6514033) from benchmarking in considering program behavior in detail, while benchmarking just invokes functions repeatedly and computes summary statistics. Profiling test files may be named either as typical test files or matching `profile_*.py` or `*_profile*.py`. Functions marked with the `profile` marker are considered profiling tests and will not run unless `pytest` is invoked with the `--profile` (short `-P`) flag. +By default, `pytest-benchmark` will only print profiling results to `stdout`. If the `--benchmark-autosave` flag is provided, performance profile data will be included in the JSON files written to the `.benchmarks` save directory as described in the benchmarking section above. ### Writing tests diff --git a/autotest/conftest.py b/autotest/conftest.py index 93988d64ba..8368a3c841 100644 --- a/autotest/conftest.py +++ b/autotest/conftest.py @@ -1,3 +1,5 @@ +import importlib +import io import os import pkg_resources import socket @@ -151,7 +153,7 @@ def is_github_rate_limited() -> Optional[bool]: """ try: with request.urlopen( - "https://api.github.com/users/octocat" + "https://api.github.com/users/octocat" ) as response: remaining = int(response.headers["x-ratelimit-remaining"]) if remaining < 10: @@ -166,17 +168,27 @@ def is_github_rate_limited() -> Optional[bool]: _has_exe_cache = {} _has_pkg_cache = {} + def has_exe(exe): if exe not in _has_exe_cache: _has_exe_cache[exe] = bool(which(exe)) return _has_exe_cache[exe] + def has_pkg(pkg): if pkg not in _has_pkg_cache: + + # for some dependencies, package name and import name are different + # (e.g. 
pyshp/shapefile, mfpymake/pymake, python-dateutil/dateutil) + # pkg_resources expects package name, importlib expects import name try: - _has_pkg_cache[pkg] = bool(pkg_resources.get_distribution(pkg)) - except pkg_resources.DistributionNotFound: - _has_pkg_cache[pkg] = False + _has_pkg_cache[pkg] = bool(importlib.import_module(pkg)) + except ModuleNotFoundError: + try: + _has_pkg_cache[pkg] = bool(pkg_resources.get_distribution(pkg)) + except pkg_resources.DistributionNotFound: + _has_pkg_cache[pkg] = False + return _has_pkg_cache[pkg] @@ -185,7 +197,7 @@ def requires_exe(*exes): return pytest.mark.skipif( missing, reason=f"missing executable{'s' if len(missing) != 1 else ''}: " + - ", ".join(missing), + ", ".join(missing), ) @@ -194,7 +206,7 @@ def requires_pkg(*pkgs): return pytest.mark.skipif( missing, reason=f"missing package{'s' if len(missing) != 1 else ''}: " + - ", ".join(missing), + ", ".join(missing), ) @@ -231,6 +243,12 @@ def excludes_branch(branch): reason="github.com is required.") +requires_spatial_reference = pytest.mark.skipif( + not is_connected("spatialreference.org"), + reason="spatialreference.org is required." +) + + # example data fixtures @pytest.fixture(scope="session") @@ -257,9 +275,9 @@ def example_shapefiles(example_data_path) -> List[Path]: @pytest.fixture(scope="function") def tmpdir(tmpdir_factory, request) -> Path: - node = request.node.name\ - .replace("/", "_")\ - .replace("\\", "_")\ + node = request.node.name \ + .replace("/", "_") \ + .replace("\\", "_") \ .replace(":", "_") temp = Path(tmpdir_factory.mktemp(node)) yield Path(temp) @@ -276,7 +294,7 @@ def tmpdir(tmpdir_factory, request) -> Path: @pytest.fixture(scope="class") def class_tmpdir(tmpdir_factory, request) -> Path: assert ( - request.cls is not None + request.cls is not None ), "Class-scoped temp dir fixture must be used on class" temp = Path(tmpdir_factory.mktemp(request.cls.__name__)) yield temp @@ -329,9 +347,9 @@ def pytest_addoption(parser): action="store", default=None, help="Move the contents of temporary test directories to correspondingly named subdirectories at the given " - "location after tests complete. This option can be used to exclude test results from automatic cleanup, " - "e.g. for manual inspection. The provided path is created if it does not already exist. An error is " - "thrown if any matching files already exist.", + "location after tests complete. This option can be used to exclude test results from automatic cleanup, " + "e.g. for manual inspection. The provided path is created if it does not already exist. An error is " + "thrown if any matching files already exist.", ) parser.addoption( @@ -360,21 +378,6 @@ def pytest_addoption(parser): help="Run only smoke tests (should complete in <1 minute)." ) - parser.addoption( - "-P", - "--profile", - action="store_true", - default=False, - help="Run performance profiling tests (skipped otherwise)." 
- ) - - parser.addoption( - "--profile-autosave", - action="store_true", - default=False, - help="Store performance profiling results in a folder called .profile in the current working directory", - ) - def pytest_configure(config): config.addinivalue_line( @@ -398,15 +401,14 @@ def pytest_runtest_setup(item): if smoke and (slow or example or regression): pytest.skip() - # performance profiling mutually excludes normal tests - should_profile = item.config.getoption("--profile") - is_profiletest = any(item.iter_markers(name="profile")) - if (is_profiletest and not should_profile) or (not is_profiletest and should_profile): - pytest.skip() - def pytest_report_header(config): """Header for pytest to show versions of packages.""" + + # if we ever drop support for python 3.7, could use importlib.metadata instead? + # or importlib_metadata backport: https://importlib-metadata.readthedocs.io/en/latest/ + # pkg_resources discouraged: https://setuptools.pypa.io/en/latest/pkg_resources.html + processed = set() flopy_pkg = pkg_resources.get_distribution("flopy") lines = [] diff --git a/autotest/pytest.ini b/autotest/pytest.ini index 0e993ce2cb..6566da0901 100644 --- a/autotest/pytest.ini +++ b/autotest/pytest.ini @@ -2,12 +2,12 @@ python_files = test_*.py profile_*.py + benchmark_*.py *_test*.py *_profile*.py + *_benchmark*.py markers = slow: tests that don't complete in a few seconds example: exercise scripts, tutorials and notebooks regression: tests that compare multiple results - benchmark: test that gather runtime statistics - profile: tests measuring performance in detail meta: run by other tests (e.g. testing fixtures) \ No newline at end of file diff --git a/autotest/regression/test_lgr.py b/autotest/regression/test_lgr.py index 040182ed86..346ee7cc0f 100644 --- a/autotest/regression/test_lgr.py +++ b/autotest/regression/test_lgr.py @@ -47,7 +47,7 @@ def test_simplelgr(tmpdir, example_data_path): # get the namefiles of the parent and child namefiles = lgr.get_namefiles() assert ( - len(namefiles) == 2 + len(namefiles) == 2 ), f"get_namefiles returned {len(namefiles)} items instead of 2" tpth = dirname(namefiles[0]) @@ -72,4 +72,4 @@ def test_simplelgr(tmpdir, example_data_path): pth0 = join(ws, "ex3_child.nam") pth1 = join(model_ws2, "ex3_child.nam") success = pymake.compare_heads(pth0, pth1) - assert success, "child heads do not match" \ No newline at end of file + assert success, "child heads do not match" diff --git a/autotest/regression/test_mf6.py b/autotest/regression/test_mf6.py index 19335bf185..1c895b40e1 100644 --- a/autotest/regression/test_mf6.py +++ b/autotest/regression/test_mf6.py @@ -350,7 +350,7 @@ def test_np001(tmpdir, example_data_path): sim.set_all_data_external() sim.write_simulation() assert ( - sim.simulation_data.max_columns_of_data == dis_package.ncol.get_data() + sim.simulation_data.max_columns_of_data == dis_package.ncol.get_data() ) # test package file with relative path to simulation path wel_path = os.path.join(ws, "well_folder", f"{model_name}.wel") @@ -462,12 +462,12 @@ def test_np001(tmpdir, example_data_path): summary = ".".join(chk[0].summary_array.desc) assert "drn_1 package: invalid BC index" in summary assert ( - "npf package: vertical hydraulic conductivity values below " - "checker threshold of 1e-11" in summary + "npf package: vertical hydraulic conductivity values below " + "checker threshold of 1e-11" in summary ) assert ( - "npf package: horizontal hydraulic conductivity values above " - "checker threshold of 100000.0" in summary + "npf package: 
horizontal hydraulic conductivity values above " + "checker threshold of 100000.0" in summary ) data_invalid = False try: @@ -502,10 +502,10 @@ def test_np001(tmpdir, example_data_path): for line in fd: line_lst = line.strip().split() if ( - len(line) > 2 - and line_lst[0] == "0" - and line_lst[1] == "0" - and line_lst[2] == "0" + len(line) > 2 + and line_lst[0] == "0" + and line_lst[1] == "0" + and line_lst[2] == "0" ): found_cellid = True assert found_cellid @@ -810,12 +810,12 @@ def test_np002(tmpdir, example_data_path): chk = sim.check() summary = ".".join(chk[0].summary_array.desc) assert ( - "sto package: specific storage values below " - "checker threshold of 1e-06" in summary + "sto package: specific storage values below " + "checker threshold of 1e-06" in summary ) assert ( - "sto package: specific yield values above " - "checker threshold of 0.5" in summary + "sto package: specific yield values above " + "checker threshold of 0.5" in summary ) assert "Not a number" in summary model.remove_package("chd_2") @@ -1471,10 +1471,10 @@ def test005_create_tests_advgw_tidal(tmpdir, example_data_path): col_max = 6 for col in range(0, col_max): if ( - (row == 3 and col == 5) - or (row == 2 and col == 4) - or (row == 1 and col == 3) - or (row == 0 and col == 2) + (row == 3 and col == 5) + or (row == 2 and col == 4) + or (row == 1 and col == 3) + or (row == 0 and col == 2) ): mult = 0.5 else: @@ -1578,10 +1578,10 @@ def test005_create_tests_advgw_tidal(tmpdir, example_data_path): col_min = 6 for col in range(col_min, 10): if ( - (row == 0 and col == 9) - or (row == 1 and col == 8) - or (row == 2 and col == 7) - or (row == 3 and col == 6) + (row == 0 and col == 9) + or (row == 1 and col == 8) + or (row == 2 and col == 7) + or (row == 3 and col == 6) ): mult = 0.5 else: @@ -1943,9 +1943,9 @@ def test035_create_tests_fhb(tmpdir, example_data_path): ) time = model.modeltime assert ( - time.steady_state[0] == False - and time.steady_state[1] == False - and time.steady_state[2] == False + time.steady_state[0] == False + and time.steady_state[1] == False + and time.steady_state[2] == False ) wel_period = {0: [((0, 1, 0), "flow")]} wel_package = ModflowGwfwel( @@ -3514,10 +3514,10 @@ def test005_advgw_tidal(tmpdir, example_data_path): model = sim.get_model(model_name) time = model.modeltime assert ( - time.steady_state[0] == True - and time.steady_state[1] == False - and time.steady_state[2] == False - and time.steady_state[3] == False + time.steady_state[0] == True + and time.steady_state[1] == False + and time.steady_state[2] == False + and time.steady_state[3] == False ) ghb = model.get_package("ghb") obs = ghb.obs @@ -3976,14 +3976,14 @@ def test006_2models_mvr(tmpdir, example_data_path): model = sim.get_model(model_name) for package in model_package_check: assert ( - package in model.package_type_dict - or package in sim.package_type_dict - ) == (package in load_only or f"{package}6" in load_only) + package in model.package_type_dict + or package in sim.package_type_dict + ) == (package in load_only or f"{package}6" in load_only) assert (len(sim._exchange_files) > 0) == ( - "gwf6-gwf6" in load_only or "gwf-gwf" in load_only + "gwf6-gwf6" in load_only or "gwf-gwf" in load_only ) assert (len(sim._ims_files) > 0) == ( - "ims6" in load_only or "ims" in load_only + "ims6" in load_only or "ims" in load_only ) # load package by name @@ -4012,7 +4012,6 @@ def test006_2models_mvr(tmpdir, example_data_path): @pytest.mark.slow @pytest.mark.regression def test001e_uzf_3lay(tmpdir, example_data_path): - # 
init paths test_ex_name = "test001e_UZF_3lay" model_name = "gwf_1" @@ -4073,7 +4072,7 @@ def test001e_uzf_3lay(tmpdir, example_data_path): model = sim.get_model() for package in model_package_check: assert (package in model.package_type_dict) == ( - package in load_only or f"{package}6" in load_only + package in load_only or f"{package}6" in load_only ) # test running a runnable load_only case sim = MFSimulation.load( @@ -4343,4 +4342,4 @@ def test027_timeseriestest(tmpdir, example_data_path): files1=expected_head_file_b, files2=head_new, htol=10.0, - ) \ No newline at end of file + ) diff --git a/autotest/test_binarygrid_util.py b/autotest/test_binarygrid_util.py index c8e304c6fc..25090fad35 100644 --- a/autotest/test_binarygrid_util.py +++ b/autotest/test_binarygrid_util.py @@ -3,6 +3,8 @@ import pytest from matplotlib import pyplot as plt +from flaky import flaky + from flopy.discretization import StructuredGrid, UnstructuredGrid, VertexGrid from flopy.mf6.utils import MfGrdFile @@ -81,6 +83,7 @@ def test_mfgrddisv_MfGrdFile(mfgrd_test_path): assert isinstance(mg, VertexGrid), f"invalid grid type ({type(mg)})" +@flaky def test_mfgrddisv_modelgrid(mfgrd_test_path): fn = mfgrd_test_path / "flow.disv.grb" mg = VertexGrid.from_binary_grid_file(fn, verbose=True) diff --git a/autotest/test_example_scripts.py b/autotest/test_example_scripts.py index 8f31ebb783..dc665e831a 100644 --- a/autotest/test_example_scripts.py +++ b/autotest/test_example_scripts.py @@ -1,7 +1,6 @@ import re from functools import reduce from os import linesep -from pathlib import Path import pytest diff --git a/autotest/test_export.py b/autotest/test_export.py index 3322b48f9f..026412f6a9 100644 --- a/autotest/test_export.py +++ b/autotest/test_export.py @@ -14,7 +14,7 @@ get_example_data_path, has_pkg, requires_exe, - requires_pkg, + requires_pkg, requires_spatial_reference, ) from flopy.discretization import StructuredGrid, UnstructuredGrid from flopy.export import NetCdf @@ -831,6 +831,62 @@ def test_polygon_from_ij(tmpdir): assert geoms[0].type == "Polygon" assert np.abs(geoms[0].bounds[-1] - 5169292.893203464) < 1e-4 + + +@requires_pkg("pyproj") +@requires_spatial_reference +def test_polygon_from_ij_with_epsg(tmpdir): + ws = str(tmpdir) + m = Modflow("toy_model", model_ws=ws) + + botm = np.zeros((2, 10, 10)) + botm[0, :, :] = 1.5 + botm[1, 5, 5] = 4 # negative layer thickness! 
+ botm[1, 6, 6] = 4 + dis = ModflowDis( + nrow=10, ncol=10, nlay=2, delr=100, delc=100, top=3, botm=botm, model=m + ) + + fname = os.path.join(ws, "toy.model.nc") + ncdf = NetCdf(fname, m) + ncdf.write() + + fname = os.path.join(ws, "toy_model_two.nc") + m.export(fname) + + fname = os.path.join(ws, "toy_model_dis.nc") + dis.export(fname) + + mg = m.modelgrid + mg.set_coord_info( + xoff=mg._xul_to_xll(600000.0, -45.0), + yoff=mg._yul_to_yll(5170000, -45.0), + angrot=-45.0, + proj4="EPSG:26715", + ) + + recarray = np.array( + [ + (0, 5, 5, 0.1, True, "s0"), + (1, 4, 5, 0.2, False, "s1"), + (0, 7, 8, 0.3, True, "s2"), + ], + dtype=[ + ("k", "= 4 +@flaky @requires_github def test_get_modflow(tmpdir): try: @@ -167,6 +171,7 @@ def test_get_modflow(tmpdir): assert all(exe in actual for exe in expected) +@flaky @requires_github def test_get_nightly(tmpdir): try: diff --git a/autotest/test_usg.py b/autotest/test_usg.py index 30e3c25a15..076f7abf5d 100644 --- a/autotest/test_usg.py +++ b/autotest/test_usg.py @@ -312,9 +312,10 @@ def test_usg_lak(tmpdir, mfusg_rch_evt_model_path): assert success +# occasional forrtl: error (72): floating overflow +@flaky @requires_exe("mfusg") @pytest.mark.slow -@flaky(max_runs=3) # occasional forrtl: error (72): floating overflow def test_freyburg_usg(tmpdir, freyberg_usg_model_path): # test mfusg model with rch nrchop 3 / freyburg.usg print("testing usg nrchop 3: freyburg.usg.nam")
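For reference, a minimal sketch (hypothetical test name, using the same `flaky` package imported above) of the retry pattern applied throughout this patch; the bare `@flaky` form relies on the plugin defaults, while explicit limits can be passed as shown:

```python
import pytest
from flaky import flaky


# rerun an intermittently failing test up to 3 times, passing if any single run succeeds
@flaky(max_runs=3, min_passes=1)
@pytest.mark.slow
def test_occasionally_flaky(tmpdir):
    ...  # placeholder for an assertion that may fail transiently (network, solver, etc.)
```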