diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml
index 869cee99fd..40cd801537 100644
--- a/.github/workflows/checks.yml
+++ b/.github/workflows/checks.yml
@@ -52,30 +52,14 @@ jobs:
         run: |
           black --check .
 
-  pylint:
-    name: Code linting (pylint)
-    runs-on: ubuntu-latest
-    needs: [code-formatting]
-    steps:
-      - uses: actions/checkout@v3
-      - uses: actions/setup-python@v4
-        with:
-          python-version: '3.8'
-      - name: Install dev dependencies
-        run: |
-          pip install -r requirements-dev.txt
-          pip list
-      - name: Run pylint
-        run: |
-          pylint watertap
-
   tests:
-    name: Tests (py${{ matrix.python-version }}/${{ matrix.os }})
+    name: Tests (py${{ matrix.python-version }}/${{ matrix.os }}/rep=${{ matrix.rep }})
     runs-on: ${{ matrix.os-version }}
     needs: [code-formatting]
     strategy:
       fail-fast: false
       matrix:
+        rep: ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
         python-version:
           - '3.8'
           - '3.9'
@@ -83,15 +67,9 @@ jobs:
           - '3.11'
         os:
           - linux
-          - win64
-          # - macos
         include:
           - os: linux
             os-version: ubuntu-20.04
-          - os: win64
-            os-version: windows-2019
-          # - os: macos
-          #   os-version: macos-10.15
           - python-version: '3.8'
             # limit uploading coverage report for a single Python version in the matrix
             cov_report: true
@@ -110,9 +88,6 @@ jobs:
           echo '::group::Output of "pip install" commands'
           pip install -r requirements-dev.txt
           echo '::endgroup::'
-          echo '::group::Output of "pip install -U ray" command'
-          pip install -U ray
-          echo '::endgroup::'
           echo '::group::Display installed packages'
           conda list
           pip list
@@ -125,159 +100,8 @@ jobs:
         if: matrix.cov_report
         run: |
-          echo PYTEST_ADDOPTS="$PYTEST_ADDOPTS --cov-report=xml" >> $GITHUB_ENV
-      - name: Test with pytest
-        run: |
-          pytest --pyargs watertap
-      - name: Upload coverage report to Codecov
-        if: matrix.cov_report
-        uses: codecov/codecov-action@v2
-      - name: Test documentation code
-        run: |
-          make -C docs doctest -d
-      # TODO: this should be moved to a dedicated job/workflow
-      # until then, we can leave this here as a reminder
-      - name: Test documentation links
-        if: 'false'
-        run: |
-          make -C docs linkcheck -d
-
-  user-mode-pytest:
-    name: pytest (user mode) (py${{ matrix.python-version }}/${{ matrix.os }})
-    runs-on: ${{ matrix.os-version }}
-    needs: [code-formatting]
-    strategy:
-      fail-fast: false
-      matrix:
-        python-version:
-          - '3.8'
-          - '3.11'
-        os:
-          - linux
-          - win64
-        include:
-          - os: linux
-            os-version: ubuntu-20.04
-          - os: win64
-            os-version: windows-2019
-    steps:
-      - name: Set up Python ${{ matrix.python-version }}
-        uses: conda-incubator/setup-miniconda@v2
-        with:
-          activate-environment: watertap
-          python-version: ${{ matrix.python-version }}
-      - name: Define install URL (default)
-        env:
-          _repo_full_name: watertap-org/watertap
-          _ref_to_install: main
-        run: |
-          echo "_install_url=https://github.com/${_repo_full_name}/archive/${_ref_to_install}.zip" >> $GITHUB_ENV
-      - name: Define install URL (for PRs)
-        if: github.event.pull_request
-        env:
-          _repo_full_name: ${{ github.event.pull_request.head.repo.full_name }}
-          _ref_to_install: ${{ github.event.pull_request.head.sha }}
-        run:
-          echo "_install_url=https://github.com/${_repo_full_name}/archive/${_ref_to_install}.zip" >> $GITHUB_ENV
-      - name: Install watertap and testing dependencies
-        run: |
-          echo '::group::Output of "pip install" commands'
-          pip install "watertap @ ${_install_url}" pytest
-          echo '::endgroup::'
-          echo '::group::Display installed packages'
-          conda list
-          pip list
-          pip show idaes-pse
-          echo '::endgroup::'
-          echo '::group::Output of "idaes get-extensions" command'
-          idaes get-extensions --verbose
-          echo '::endgroup::'
-      - name: Run pytest
-        run: |
-          pytest --pyargs watertap
-
-  notebooks:
-    name: Test notebooks (py${{ matrix.python-version }}/${{ matrix.os }})
-    runs-on: ${{ matrix.os-version }}
-    needs: [code-formatting]
-    strategy:
-      fail-fast: false
-      matrix:
-        python-version:
-          - '3.8'
-          - '3.11'
-        os:
-          - linux
-          - win64
-        include:
-          - os: linux
-            os-version: ubuntu-20.04
-          - os: win64
-            os-version: windows-2019
-    steps:
-      - uses: actions/checkout@v3
-      - name: Set up Python ${{ matrix.python-version }}
-        uses: conda-incubator/setup-miniconda@v2
-        with:
-          activate-environment: watertap-dev
-          python-version: ${{ matrix.python-version }}
-      - name: Install dependencies
-        run: |
-          echo '::group::Output of "conda install" commands'
-          conda install --quiet --yes pip setuptools wheel pandoc
-          echo '::endgroup::'
-          echo '::group::Output of "pip install" commands'
-          pip install -r requirements-dev.txt
-          echo '::endgroup::'
-          echo '::group::Display installed packages'
-          conda list
-          pip list
-          pip show idaes-pse
-          echo '::endgroup::'
-          echo '::group::Output of "idaes get-extensions" command'
-          idaes get-extensions --verbose
-          echo '::endgroup::'
-      - name: Run pytest with nbmake
-        run:
-          pytest --nbmake **/*.ipynb
-
-  macos:
-    name: macOS setup (EXPERIMENTAL)
-    runs-on: macos-11
-    needs: [code-formatting]
-    strategy:
-      fail-fast: false
-      matrix:
-        python-version:
-          - '3.8'
-    steps:
-      - uses: actions/checkout@v3
-      - name: Set up Python ${{ matrix.python-version }}
-        uses: conda-incubator/setup-miniconda@v2
-        with:
-          activate-environment: watertap
-          python-version: ${{ matrix.python-version }}
-      - name: Install WaterTAP (dev) without idaes get-extensions
-        run: |
-          echo '::group::Output of "conda install" commands'
-          conda install --quiet --yes pip=21.1 wheel setuptools pandoc
-          echo '::endgroup::'
-          echo '::group::Output of "pip install" commands'
-          pip install -r requirements-dev.txt
-          echo '::endgroup::'
-          echo '::group::Display installed packages'
-          conda list
-          pip list
-          pip show pyomo idaes-pse
-          echo '::endgroup::'
-      - name: Install Ipopt from conda-forge
-        run:
-          conda install --quiet --yes -c conda-forge ipopt=3.14.11
-      - name: Build Pyomo extensions
-        run: |
-          conda install --quiet --yes cmake
-          # some failures are expected, but this should succeed as long as pynumero is built correctly
-          pyomo build-extensions || python -c "from pyomo.contrib.pynumero.asl import AmplInterface; exit(0) if AmplInterface.available() else exit(1)"
-      - name: Run pytest
+          echo PYTEST_ADDOPTS="$PYTEST_ADDOPTS --cov-report=xml --cov-append" >> $GITHUB_ENV
+      - name: Test with pytest (tools)
         run: |
-          pytest --pyargs watertap -k 'not (nf_dspmde.nf_ui or nf_dspmde.nf_with_bypass_ui)'
+          timeout -s SIGINT 3m pytest --pyargs watertap -m 'tools' --verbose --capture=no --full-trace
+        timeout-minutes: 5
diff --git a/.github/workflows/mpi4py-test.yml b/.github/workflows/mpi4py-test.yml
deleted file mode 100644
index 8ad8fdea16..0000000000
--- a/.github/workflows/mpi4py-test.yml
+++ /dev/null
@@ -1,73 +0,0 @@
-# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
-# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
-
-name: MPI tests
-
-on:
-  push:
-    branches: [main]
-  pull_request:
-    branches: [main]
-
-concurrency:
-  # NOTE: the value of `group` should be chosen carefully,
-  # otherwise we might end up over- or under-canceling workflow runs
-  # github.head_ref is only defined for pull request events
-  # so, if it's not present (i.e. event was triggered by push)
-  # we use github.ref instead
-  group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
-  cancel-in-progress: true
-
-env:
-  # --color=yes needed for colorized output to be shown in GHA logs
-  # --pyargs watertap is needed to be able to define CLI options in watertap/conftest.py
-  PYTEST_ADDOPTS: "--color=yes"
-  PIP_PROGRESS_BAR: "off"
-
-jobs:
-  build:
-
-    runs-on: ${{ matrix.os-version }}
-    defaults:
-      run:
-        shell: bash -l {0}
-    strategy:
-      fail-fast: false
-      matrix:
-        python-version: [3.8]
-        os:
-          - linux
-          - win64
-          # - macos
-        include:
-          - os: linux
-            os-version: ubuntu-20.04
-          - os: win64
-            os-version: windows-2019
-          # - os: macos
-          #   os-version: macos-10.15
-
-    steps:
-      - uses: actions/checkout@v3
-      - name: Set up Python ${{ matrix.python-version }}
-        uses: conda-incubator/setup-miniconda@v2
-        with:
-          auto-update-conda: true
-          python-version: ${{ matrix.python-version }}
-      - name: Install dependencies
-        run: |
-          conda install --quiet --yes -c conda-forge mpi4py
-          python -m pip install --upgrade pip
-          pip install -r requirements-dev.txt
-          idaes get-extensions --verbose
-      - name: Conda info
-        run: conda info
-      - name: Test parallel pytest w/ MPI
-        run: |
-          mpiexec -n 2 coverage run --parallel-mode -m mpi4py -m pytest watertap/tools/parameter_sweep/tests/test*parameter_sweep.py watertap/tools/analysis_tools/loop_tool/tests/test*loop_tool.py --no-cov
-          # single report
-          coverage combine
-          # convert to XML
-          coverage xml
-      - name: Upload coverage report to Codecov
-        uses: codecov/codecov-action@v2
diff --git a/watertap/conftest.py b/watertap/conftest.py
index 92f472ae8d..29763a5ef6 100644
--- a/watertap/conftest.py
+++ b/watertap/conftest.py
@@ -10,9 +10,10 @@
 # "https://github.com/watertap-org/watertap/"
 #################################################################################
 import contextlib
+import datetime
 import enum
 from pathlib import Path
-from typing import Container, Optional, Callable
+from typing import Container, Optional, Callable, List
 
 import pytest
 from _pytest.nodes import Item
@@ -20,6 +21,15 @@
 from _pytest.config.argparsing import Parser
 
 
+START = datetime.datetime.now()
+
+
+def _get_elapsed_seconds(end=None) -> float:
+    end = end or datetime.datetime.now()
+    delta = end - START
+    return delta.total_seconds()
+
+
 class MarkerSpec(enum.Enum):
     unit = "Quick tests that do not require a solver, must run in < 2 s"
     component = "Quick tests that may require a solver"
@@ -29,6 +39,7 @@ class MarkerSpec(enum.Enum):
     requires_idaes_solver = (
         "Tests that require a solver from the IDEAS extensions to pass"
     )
+    tools = "Tests pertaining to WaterTAP tools"
 
     @property
     def description(self) -> str:
@@ -83,3 +94,28 @@ def pytest_addoption(parser: Parser):
         default=False,
         dest="edb_no_mock",
     )
+
+
+def pytest_collection_modifyitems(items: List[Item]):
+    for item in items:
+        if "watertap/tools" in str(item.path):
+            item.add_marker("tools")
+
+
+@pytest.hookimpl(hookwrapper=True)
+def pytest_runtest_protocol(item):
+    import psutil
+
+    this_process = psutil.Process()
+
+    print(f"START {item}")
+    print(f"{_get_elapsed_seconds()=}")
+    print("processes before:")
+    for proc in this_process.children(recursive=True):
+        print(f"\t{proc}")
+    yield
+    print(f"\nprocesses after:")
+    for proc in this_process.children(recursive=True):
+        print(f"\t{proc}")
+    print(f"{_get_elapsed_seconds()=}")
+    print(f"END {item}")
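For reference, the standalone sketch below (not part of the diff) shows what the psutil-based snapshot added to conftest.py is meant to surface: a worker process that is not shut down cleanly keeps appearing in psutil.Process().children() after its work is done. The _square and main names are made up for the example, and it assumes psutil is installed, the same requirement the new pytest_runtest_protocol hook introduces.

# Illustrative only: what the per-test child-process snapshot looks for.
import multiprocessing

import psutil


def _square(x):
    return x * x


def main():
    this_process = psutil.Process()
    print("children before:", this_process.children(recursive=True))

    pool = multiprocessing.Pool(2)
    print("children while the pool is open:", this_process.children(recursive=True))
    print(pool.map(_square, range(4)))

    # without close()/join(), the workers would linger as child processes
    pool.close()
    pool.join()
    print("children after:", this_process.children(recursive=True))


if __name__ == "__main__":
    main()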
diff --git a/watertap/tools/parallel/multiprocessing_parallel_manager.py b/watertap/tools/parallel/multiprocessing_parallel_manager.py
index c01a81554a..42377a8144 100644
--- a/watertap/tools/parallel/multiprocessing_parallel_manager.py
+++ b/watertap/tools/parallel/multiprocessing_parallel_manager.py
@@ -10,28 +10,33 @@
 # "https://github.com/watertap-org/watertap/"
 #################################################################################
 
-import numpy
+import functools
+import logging
+import multiprocessing
 
+import numpy
 
 from watertap.tools.parallel.results import LocalResults
 from watertap.tools.parallel.parallel_manager import (
-    parallelActor,
+    build_and_execute,
     ParallelManager,
 )
 
-import multiprocessing
+
+_logger = logging.getLogger(__name__)
 
 
 class MultiprocessingParallelManager(ParallelManager):
-    def __init__(self, number_of_subprocesses=1, **kwargs):
+    def __init__(
+        self,
+        number_of_subprocesses=1,
+        **kwargs,
+    ):
         self.max_number_of_subprocesses = number_of_subprocesses
 
         # this will be updated when child processes are kicked off
         self.actual_number_of_subprocesses = None
 
-        # Future -> (process number, parameters). Used to keep track of the process number and parameters for
-        # all in-progress futures
-        self.running_futures = dict()
-
     def is_root_process(self):
         return True
@@ -82,40 +87,19 @@ def scatter(
         # split the parameters prameters for async run
         self.expected_samples = len(all_parameters)
         divided_parameters = numpy.array_split(all_parameters, self.expected_samples)
-        # create queues, run queue will be used to store paramters we want to run
-        # and return_queue is used to store results
-        self.run_queue = multiprocessing.Queue()
-        self.return_queue = multiprocessing.Queue()
-        for i, param in enumerate(divided_parameters):
-            # print(param)
-            self.run_queue.put([i, param])
-        # setup multiprocessing actors
-        self.actors = []
-
-        for cpu in range(self.actual_number_of_subprocesses):
-            self.actors.append(
-                multiprocessing.Process(
-                    target=multiProcessingActor,
-                    args=(
-                        self.run_queue,
-                        self.return_queue,
-                        do_build,
-                        do_build_kwargs,
-                        do_execute,
-                        divided_parameters[0],
-                    ),
-                )
-            )
-            self.actors[-1].start()
-
-    def gather(self):
-        results = []
-        # collect result from the actors
-        while len(results) < self.expected_samples:
-            if self.return_queue.empty() == False:
-                i, values, result = self.return_queue.get()
+        build_and_execute_this_run = functools.partial(
+            build_and_execute, do_build, do_build_kwargs, do_execute
         )
-                results.append(LocalResults(i, values, result))
+        actor = functools.partial(multiProcessingActor, build_and_execute_this_run)
+
+        self._pool = multiprocessing.Pool(self.actual_number_of_subprocesses)
+        self._results = self._pool.map_async(actor, enumerate(divided_parameters))
+        self._pool.close()
+
+    def gather(self):
+        results = self._results.get()
         # sort the results by the process number to keep a deterministic ordering
         results.sort(key=lambda result: result.process_number)
         return results
@@ -124,20 +108,11 @@ def results_from_local_tree(self, results):
         return results
 
 
-# This function is used for running the actors in multprocessing
 def multiProcessingActor(
-    queue,
-    return_queue,
-    do_build,
-    do_build_kwargs,
-    do_execute,
-    local_parameters,
+    build_and_execute_this_run,
+    i_local_parameters,
 ):
-    actor = parallelActor(do_build, do_build_kwargs, do_execute, local_parameters)
-    while True:
-        if queue.empty():
-            break
-        else:
-            i, local_parameters = queue.get()
-            result = actor.execute(local_parameters)
-            return_queue.put([i, local_parameters, result])
+    i, local_parameters = i_local_parameters
+    return LocalResults(
+        i, local_parameters, build_and_execute_this_run(local_parameters)
+    )
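For reference, a self-contained sketch (not WaterTAP code; run_one, execute, and all_parameters are illustrative names) of the Pool-based pattern the rewritten MultiprocessingParallelManager relies on: functools.partial binds the execution callable, Pool.map_async fans the indexed parameters out to the workers, and sorting by the carried index restores a deterministic ordering, mirroring what gather() does with process_number.

# Illustrative only: the map_async pattern used by the rewritten manager.
import functools
import multiprocessing


def run_one(execute, indexed_parameters):
    # each work item carries its index so ordering can be restored later
    i, parameters = indexed_parameters
    return (i, execute(parameters))


def execute(parameters):
    return sum(parameters)


def main():
    all_parameters = [[1, 2], [3, 4], [5, 6]]
    actor = functools.partial(run_one, execute)

    pool = multiprocessing.Pool(2)
    async_results = pool.map_async(actor, list(enumerate(all_parameters)))
    pool.close()

    results = async_results.get()  # blocks until every work item has finished
    results.sort(key=lambda pair: pair[0])  # deterministic ordering, as in gather()
    print(results)


if __name__ == "__main__":
    main()

Note that map_async already returns results in input order, so the explicit sort is redundant in this toy example; it is kept to mirror the defensive ordering in gather().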