Disentangle comms from solvers
Old `parallelComm` made heavy assumptions about the availability of Trilinos.
Comms (particularly the parallel ones) are intimately tied to the solver
in use, so they should be instantiated with the solver.

No matter what, we need mpi4py, so use it (and only it) to determine the
number of processes in order to pick a solver.

Addresses usnistgov#644
guyer committed Sep 24, 2019
1 parent 409d7bd commit cf62c2b
Showing 11 changed files with 280 additions and 220 deletions.
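
The selection logic the commit message describes is small enough to sketch on its own: if mpi4py imports, ask `MPI.COMM_WORLD` for its size; otherwise assume a serial run. This is an illustration of the approach, not the file itself (see the rewritten `fipy/solvers/__init__.py` below).

    # Sketch of the process-count probe the commit message describes.
    try:
        from mpi4py import MPI
        nproc = MPI.COMM_WORLD.size   # number of MPI ranks the script was launched with
    except ImportError:
        nproc = 1                     # no mpi4py, so treat the run as serial

    # Serial-only backends (pysparse, scipy, pyamg) can then refuse to load when
    # nproc > 1, leaving a parallel-capable backend (petsc, trilinos) to be picked.
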
53 changes: 38 additions & 15 deletions fipy/solvers/__init__.py
@@ -1,6 +1,7 @@
from __future__ import unicode_literals
import os

from fipy.tools.parser import _parseSolver
from fipy.tools import parallelComm as _parallelComm

from fipy.solvers.solver import *
__all__ = list(solver.__all__)
@@ -9,45 +10,66 @@

_desired_solver = _parseSolver()

import os
if _desired_solver is None and 'FIPY_SOLVERS' in os.environ:
_desired_solver = os.environ['FIPY_SOLVERS'].lower()
del os

exceptions = []
try:
from mpi4py import MPI
_Nproc = MPI.COMM_WORLD.size
del MPI
except ImportError:
_Nproc = 1

_exceptions = []

class SerialSolverError(Exception):
def __init__(self, solver):
super(SerialSolverError, self).__init__(solver + ' does not run in parallel')

solver = None

from fipy.tools.comms.dummyComm import DummyComm
serialComm, parallelComm = DummyComm(), DummyComm()

if solver is None and _desired_solver in ["pysparse", None]:
try:
if _parallelComm.Nproc > 1:
if _Nproc > 1:
raise SerialSolverError('pysparse')
from fipy.solvers.pysparse import *
__all__.extend(pysparse.__all__)
from fipy.matrices.pysparseMatrix import _PysparseMeshMatrix
_MeshMatrix = _PysparseMeshMatrix
solver = "pysparse"
except Exception as inst:
exceptions.append(inst)
_exceptions.append("%s: %s" % ("pysparse", inst))

if solver is None and _desired_solver in ["petsc", None]:
try:
if _Nproc > 1:
raise SerialSolverError('petsc')
from fipy.solvers.petsc import *
__all__.extend(petsc.__all__)

from fipy.matrices.petscMatrix import _PETScMeshMatrix
_MeshMatrix = _PETScMeshMatrix
solver = "petsc"
except Exception as inst:
exceptions.append(inst)
_exceptions.append("%s: %s" % ("petsc", inst))

if solver is None and _desired_solver in ["trilinos", "no-pysparse", None]:
try:
from fipy.solvers.trilinos import *
__all__.extend(trilinos.__all__)

from fipy.solvers.trilinos.comms.serialEpetraCommWrapper import SerialEpetraCommWrapper
serialComm = SerialEpetraCommWrapper()

if _Nproc > 1:
from fipy.solvers.trilinos.comms.parallelEpetraCommWrapper import ParallelEpetraCommWrapper
parallelComm = ParallelEpetraCommWrapper()
else:
parallelComm = SerialEpetraCommWrapper()

if _desired_solver != "no-pysparse":
try:
@@ -63,31 +85,31 @@ def __init__(self, solver):
_MeshMatrix = _TrilinosMeshMatrix
solver = "no-pysparse"
except Exception as inst:
exceptions.append(inst)
_exceptions.append("%s: %s" % ("trilinos", inst))

if solver is None and _desired_solver in ["scipy", None]:
try:
if _parallelComm.Nproc > 1:
if _Nproc > 1:
raise SerialSolverError('scipy')
from fipy.solvers.scipy import *
__all__.extend(scipy.__all__)
from fipy.matrices.scipyMatrix import _ScipyMeshMatrix
_MeshMatrix = _ScipyMeshMatrix
solver = "scipy"
except Exception as inst:
exceptions.append(inst)
_exceptions.append("%s: %s" % ("scipy", inst))

if solver is None and _desired_solver in ["pyamg", None]:
try:
if _parallelComm.Nproc > 1:
if _Nproc > 1:
raise SerialSolverError('pyamg')
from fipy.solvers.pyAMG import *
__all__.extend(pyAMG.__all__)
from fipy.matrices.scipyMatrix import _ScipyMeshMatrix
_MeshMatrix = _ScipyMeshMatrix
solver = "pyamg"
except Exception as inst:
exceptions.append(inst)
_exceptions.append("%s: %s" % ("pyamg", inst))

if solver is None and _desired_solver in ["pyamgx", None]:
try:
@@ -99,14 +121,14 @@ def __init__(self, solver):
_MeshMatrix = _ScipyMeshMatrix
solver = "pyamgx"
except Exception as inst:
exceptions.append(inst)
_exceptions.append("%s: %s" % ("pyamgx", inst))

if solver is None:
if _desired_solver is None:
raise ImportError('Unable to load a solver: %s' % [str(e) for e in exceptions])
raise ImportError('Unable to load a solver: %s' % [str(e) for e in _exceptions])
else:
if len(exceptions) > 0:
raise ImportError('Unable to load solver %s: %s' % (_desired_solver, [str(e) for e in exceptions]))
if len(_exceptions) > 0:
raise ImportError('Unable to load solver %s: %s' % (_desired_solver, [str(e) for e in _exceptions]))
else:
raise ImportError('Unknown solver package %s' % _desired_solver)

@@ -121,3 +143,4 @@ def __init__(self, solver):
test=lambda: solver != 'pyamgx',
why="the PyAMGX solver is being used.",
skipWarning=True)
del register_skipper
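
With the rewritten `fipy/solvers/__init__.py`, a backend is still chosen the same way from user code: the solver named on the command line or in `FIPY_SOLVERS`, otherwise the first importable package in the pysparse → petsc → trilinos → scipy → pyamg → pyamgx chain (skipping serial-only packages when more than one MPI rank is detected). A minimal sketch, assuming the requested backend is installed; "scipy" is just an example value.

    import os
    os.environ["FIPY_SOLVERS"] = "scipy"   # must be set before fipy.solvers is imported

    from fipy.solvers import solver, serialComm, parallelComm
    print(solver)                          # "scipy" if the import chain above succeeded
    print(type(parallelComm).__name__)     # DummyComm for the non-Trilinos backends in this diff
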
71 changes: 44 additions & 27 deletions fipy/solvers/trilinos/__init__.py
@@ -1,31 +1,48 @@
from __future__ import unicode_literals
def _dealWithTrilinosImportPathologies():
## The import scipy statement is added to allow PyTrilinos to run
## without throwing a segmentation fault. This is caused by weird
## behavior in scipy and PyTrilinos depending on the order in which
## modules are imported

try:
import scipy
except:
pass

# The fact that I have to do the following manipulation with the current
# directory is really, really bad.
import os
current_working_directory_path = os.getcwd()
from PyTrilinos import ML # Gets around strange Trilinos import-order bugs.
os.chdir(current_working_directory_path)
# When run in MPI mode, the first Trilinos import makes the "current directory"
# be the directory with the executable file that's being run. As best I can
# tell, this happens in MPI_Init, deep in Trilinos. Possibly because "current
# directory" not well-defined in MPI between processors?

# This fix relies on this being the FIRST place to import any Trilinos module.
# The only way to import Trilinos things should be to do "from fipy.solvers
# import *" and have it automatically import Trilinos via this file.

_dealWithTrilinosImportPathologies()
## The import scipy statement is added to allow PyTrilinos to run
## without throwing a segmentation fault. This is caused by weird
## behavior in scipy and PyTrilinos depending on the order in which
## modules are imported

try:
import scipy
except:
pass

# The fact that I have to do the following manipulation with the current
# directory is really, really bad.
import os
current_working_directory_path = os.getcwd()
from PyTrilinos import ML # Gets around strange Trilinos import-order bugs.
os.chdir(current_working_directory_path)
# When run in MPI mode, the first Trilinos import makes the "current directory"
# be the directory with the executable file that's being run. As best I can
# tell, this happens in MPI_Init, deep in Trilinos. Possibly because "current
# directory" not well-defined in MPI between processors?

# This fix relies on this being the FIRST place to import any Trilinos module.
# The only way to import Trilinos things should be to do "from fipy.solvers
# import *" and have it automatically import Trilinos via this file.

del scipy
del os
del ML

from PyTrilinos import Epetra

import platform
if platform.dist()[0] == 'debian':
import PyTrilinos
if '10.0.4' in PyTrilinos.version():
try:
from mpi4py import MPI
del MPI
except ImportError:
raise Exception("Could not import mpi4py. The package mpi4py is a required package if you are using Trilinos on a Debian platform with Trilinos version 10.0.4 due to a Trilinos bug (see <http://matforge.org/fipy/ticket/420>). Try installing using 'easy_install mpi4py'.")
del PyTrilinos

del platform
del Epetra

from fipy.solvers.trilinos.preconditioners import *

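The working-directory juggling around the `PyTrilinos.ML` import is unchanged by this commit; it only moves from a helper function to module level. Purely as an illustrative sketch, the same guard could be phrased as a context manager (the commit itself restores the directory inline):

    import os
    from contextlib import contextmanager

    @contextmanager
    def preserved_cwd():
        """Restore the current working directory after a block that may chdir()."""
        saved = os.getcwd()
        try:
            yield
        finally:
            os.chdir(saved)

    # with preserved_cwd():
    #     from PyTrilinos import ML   # under MPI this import can change the cwd
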
Empty file.
88 changes: 88 additions & 0 deletions fipy/solvers/trilinos/comms/epetraCommWrapper.py
@@ -0,0 +1,88 @@
#!/usr/bin/env python

## -*-Pyth-*-
# ###################################################################
# FiPy - Python-based finite volume PDE solver
#
# FILE: "epetraCommWrapper.py"
#
# Author: Jonathan Guyer <guyer@nist.gov>
# Author: Daniel Wheeler <daniel.wheeler@nist.gov>
# Author: James Warren <jwarren@nist.gov>
# mail: NIST
# www: http://www.ctcms.nist.gov/fipy/
#
# ========================================================================
# This software was developed at the National Institute of Standards
# and Technology by employees of the Federal Government in the course
# of their official duties. Pursuant to title 17 Section 105 of the
# United States Code this software is not subject to copyright
# protection and is in the public domain. FiPy is an experimental
# system. NIST assumes no responsibility whatsoever for its use by
# other parties, and makes no guarantees, expressed or implied, about
# its quality, reliability, or any other characteristic. We would
# appreciate acknowledgement if the software is used.
#
# This software can be redistributed and/or modified freely
# provided that any derivative works bear some notice that they are
# derived from it, and any modified versions bear some notice that
# they have been modified.
# ========================================================================
# See the file "license.terms" for information on usage and redistribution
# of this file, and for a DISCLAIMER OF ALL WARRANTIES.
#
# ###################################################################
##

__docformat__ = 'restructuredtext'

from PyTrilinos import Epetra

from fipy.tools import numerix
from fipy.tools.comms.abstractCommWrapper import AbstractCommWrapper

__all__ = ["EpetraCommWrapper"]

class EpetraCommWrapper(AbstractCommWrapper):
"""MPI Communicator wrapper
Encapsulates capabilities needed for Epetra.
Some capabilities are not parallel.
"""

def __init__(self):
self.epetra_comm = Epetra.PyComm()
super(EpetraCommWrapper, self).__init__()

@property
def procID(self):
return self.epetra_comm.MyPID()

@property
def Nproc(self):
return self.epetra_comm.NumProc()

def Barrier(self):
self.epetra_comm.Barrier()

def sum(self, a, axis=None):
summed = numerix.array(a).sum(axis=axis)
shape = summed.shape
if shape == ():
summed = summed.reshape((1,))
parallelSummed = self.epetra_comm.SumAll(summed)
if shape == ():
parallelSummed = parallelSummed.reshape(())
return parallelSummed

def __setstate__(self, dict):
self.__init__()

def Norm2(self, vec):
return vec.Norm2()

def MaxAll(self, vec):
return self.epetra_comm.MaxAll(numerix.array(vec))

def MinAll(self, vec):
return self.epetra_comm.MinAll(numerix.array(vec))
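
A hypothetical usage sketch of the wrapper above, assuming PyTrilinos is installed and the script is launched under MPI when more than one rank is wanted:

    from fipy.solvers.trilinos.comms.epetraCommWrapper import EpetraCommWrapper

    comm = EpetraCommWrapper()
    print(comm.procID, "of", comm.Nproc)   # rank and size as reported by Epetra
    total = comm.sum([1., 2., 3.])         # local sum, then SumAll across ranks
    comm.Barrier()                         # synchronize before continuing
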
fipy/solvers/trilinos/comms/parallelEpetraCommWrapper.py
@@ -1,39 +1,37 @@
from __future__ import unicode_literals
from fipy.tools.comms.commWrapper import CommWrapper
from mpi4py import MPI

from fipy.tools import numerix
from fipy.solvers.trilinos.comms.epetraCommWrapper import EpetraCommWrapper

__all__ = ["Mpi4pyCommWrapper"]
__all__ = ["ParallelEpetraCommWrapper"]
from future.utils import text_to_native_str
__all__ = [text_to_native_str(n) for n in __all__]

class Mpi4pyCommWrapper(CommWrapper):
class ParallelEpetraCommWrapper(EpetraCommWrapper):
"""MPI Communicator wrapper
Encapsulates capabilities needed for both Epetra and mpi4py.
"""

def __init__(self, Epetra, MPI):
self.MPI = MPI
self.mpi4py_comm = self.MPI.COMM_WORLD
CommWrapper.__init__(self, Epetra)


def __init__(self):
self.mpi4py_comm = MPI.COMM_WORLD
super(ParallelEpetraCommWrapper, self).__init__()

def __setstate__(self, dict):
from PyTrilinos import Epetra
from mpi4py import MPI
self.__init__(Epetra=Epetra, MPI=MPI)

self.__init__()

def all(self, a, axis=None):
return self.mpi4py_comm.allreduce(a.all(axis=axis), op=self.MPI.LAND)
return self.mpi4py_comm.allreduce(a.all(axis=axis), op=MPI.LAND)

def any(self, a, axis=None):
return self.mpi4py_comm.allreduce(a.any(axis=axis), op=self.MPI.LOR)
return self.mpi4py_comm.allreduce(a.any(axis=axis), op=MPI.LOR)

def allclose(self, a, b, rtol=1.e-5, atol=1.e-8):
return self.mpi4py_comm.allreduce(numerix.allclose(a, b, rtol=rtol, atol=atol), op=self.MPI.LAND)
return self.mpi4py_comm.allreduce(numerix.allclose(a, b, rtol=rtol, atol=atol), op=MPI.LAND)

def allequal(self, a, b):
return self.mpi4py_comm.allreduce(numerix.allequal(a, b), op=self.MPI.LAND)
return self.mpi4py_comm.allreduce(numerix.allequal(a, b), op=MPI.LAND)

def bcast(self, obj, root=0):
return self.mpi4py_comm.bcast(obj=obj, root=root)
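The reductions above use mpi4py's `allreduce` with logical operators rather than Epetra; the same pattern works on its own. A minimal sketch, assuming mpi4py is installed and the script is started with `mpirun`:

    from mpi4py import MPI
    import numpy as np

    comm = MPI.COMM_WORLD
    local = np.arange(3) + comm.rank                              # different data on every rank
    all_nonneg = comm.allreduce((local >= 0).all(), op=MPI.LAND)  # logical AND across ranks
    any_large = comm.allreduce((local > 1).any(), op=MPI.LOR)     # logical OR across ranks
    shared = comm.bcast(obj={"from": "rank 0"}, root=0)           # same object on every rank
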
16 changes: 16 additions & 0 deletions fipy/solvers/trilinos/comms/serialEpetraCommWrapper.py
@@ -0,0 +1,16 @@
from fipy.tools import numerix
from fipy.solvers.trilinos.comms.epetraCommWrapper import EpetraCommWrapper

__all__ = ["SerialEpetraCommWrapper"]

class SerialEpetraCommWrapper(EpetraCommWrapper):
@property
def procID(self):
return 0

@property
def Nproc(self):
return 1

def Norm2(self, vec):
return numerix.L2norm(numerix.asarray(vec))
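
In the serial wrapper there is no Epetra vector to delegate to, so `Norm2` simply computes an L2 norm through `numerix`. A tiny equivalent in plain numpy, only to show what that fallback amounts to:

    import numpy as np

    def l2norm(vec):
        """Euclidean norm of a 1-D array, the serial stand-in for Epetra's Norm2()."""
        return np.sqrt(np.sum(np.asarray(vec, dtype=float) ** 2))

    assert abs(l2norm([3., 4.]) - 5.) < 1e-12
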