
Commit

Merge pull request #17 from mscheltienne/docstrings
Format Solver.py docstring to match numpy convention
m-reuter authored Feb 23, 2023
2 parents 2b7a7f4 + 78c61b7 commit 5316fab
Showing 12 changed files with 370 additions and 433 deletions.
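The PR title refers to the numpydoc convention for docstrings: a short summary line, then underlined "Parameters" and "Returns" sections with "name : type" entries. A minimal sketch of that layout, using a hypothetical function rather than anything taken from Solver.py:

def example_mass_matrix(tria, lump=False):
    """Compute a sparse mass matrix for a triangle mesh.

    Parameters
    ----------
    tria : TriaMesh
        Input triangle mesh.
    lump : bool, default False
        If True, lump the mass matrix onto its diagonal.

    Returns
    -------
    scipy.sparse.csc_matrix
        Symmetric mass matrix of shape (n_vertices, n_vertices).
    """
    ...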
44 changes: 13 additions & 31 deletions lapy/Conformal.py
@@ -18,6 +18,7 @@

from .Solver import Solver
from .TriaMesh import TriaMesh
from .utils._imports import import_optional_dependency


def spherical_conformal_map(tria):
@@ -71,14 +72,12 @@ def spherical_conformal_map(tria):
+ sparse.csc_matrix(((1, 1, 1), (fixed, fixed)), shape=(nv, nv))
)

# find embedding of the bigtria (bounary condition later)
# find embedding of the bigtria (boundary condition later)
# arbitrarily set first two points
x0, y0, x1, y1 = 0, 0, 1, 0
a = tria.v[p1, :] - tria.v[p0, :]
b = tria.v[p2, :] - tria.v[p0, :]
sin1 = np.linalg.norm(np.cross(a, b)) / (
np.linalg.norm(a) * np.linalg.norm(b)
)
sin1 = np.linalg.norm(np.cross(a, b)) / (np.linalg.norm(a) * np.linalg.norm(b))
ori_h = np.linalg.norm(b) * sin1
ratio = np.sqrt(((x0 - x1) ** 2 + (y0 - y1) ** 2)) / np.linalg.norm(a)
y2 = ori_h * ratio # compute the coordinates of the third vertex
@@ -108,9 +107,7 @@ def spherical_conformal_map(tria):

# find the index of the southernmost triangle
index = np.argsort(
np.abs(z[tria.t[:, 0]])
+ np.abs(z[tria.t[:, 1]])
+ np.abs(z[tria.t[:, 2]])
np.abs(z[tria.t[:, 0]]) + np.abs(z[tria.t[:, 1]]) + np.abs(z[tria.t[:, 2]])
)
inner = index[0]
if inner == bigtri:
@@ -361,30 +358,19 @@ def linear_beltrami_solver(tria, mu, landmark, target):
v11 = (af * uxv1 * uxv1 + 2 * bf * uxv1 * uyv1 + gf * uyv1 * uyv1) / area2
v22 = (af * uxv2 * uxv2 + 2 * bf * uxv2 * uyv2 + gf * uyv2 * uyv2) / area2
v01 = (
af * uxv1 * uxv0
+ bf * uxv1 * uyv0
+ bf * uxv0 * uyv1
+ gf * uyv1 * uyv0
af * uxv1 * uxv0 + bf * uxv1 * uyv0 + bf * uxv0 * uyv1 + gf * uyv1 * uyv0
) / area2
v12 = (
af * uxv2 * uxv1
+ bf * uxv2 * uyv1
+ bf * uxv1 * uyv2
+ gf * uyv2 * uyv1
af * uxv2 * uxv1 + bf * uxv2 * uyv1 + bf * uxv1 * uyv2 + gf * uyv2 * uyv1
) / area2
v20 = (
af * uxv0 * uxv2
+ bf * uxv0 * uyv2
+ bf * uxv2 * uyv0
+ gf * uyv0 * uyv2
af * uxv0 * uxv2 + bf * uxv0 * uyv2 + bf * uxv2 * uyv0 + gf * uyv0 * uyv2
) / area2

# create symmetric A
i = np.column_stack((t0, t1, t2, t0, t1, t1, t2, t2, t0)).reshape(-1)
j = np.column_stack((t0, t1, t2, t1, t0, t2, t1, t0, t2)).reshape(-1)
dat = np.column_stack(
(v00, v11, v22, v01, v01, v12, v12, v20, v20)
).reshape(-1)
dat = np.column_stack((v00, v11, v22, v01, v01, v12, v12, v20, v20)).reshape(-1)
nv = tria.v.shape[0]
A = sparse.csc_matrix((dat, (i, j)), shape=(nv, nv), dtype=complex)

@@ -413,18 +399,14 @@ def linear_beltrami_solver(tria, mu, landmark, target):


def sparse_symmetric_solve(A, b, use_cholmod=True):
from scipy.sparse.linalg import splu

if use_cholmod:
try:
from sksparse.cholmod import cholesky
except ImportError:
use_cholmod = False
if use_cholmod:
sksparse = import_optional_dependency("sksparse", raise_error=use_cholmod)
if sksparse is not None:
print("Solver: Cholesky decomposition from scikit-sparse cholmod ...")
chol = cholesky(A)
chol = sksparse.cholmod.cholesky(A)
x = chol(b)
else:
from scipy.sparse.linalg import splu

print("Solver: spsolve (LU decomposition) ...")
lu = splu(A)
x = lu.solve(b)
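The sparse_symmetric_solve change above swaps the local try/except around sksparse for the shared import_optional_dependency helper while keeping the same behaviour: use CHOLMOD's sparse Cholesky when scikit-sparse is installed, otherwise fall back to SciPy's sparse LU. A self-contained sketch of that fallback pattern (the function name and the toy system are illustrative, not lapy code):

import numpy as np
from scipy import sparse
from scipy.sparse.linalg import splu


def solve_symmetric(A, b, use_cholmod=True):
    # Prefer the sparse Cholesky factorization from scikit-sparse (CHOLMOD);
    # fall back to SciPy's sparse LU when the optional dependency is absent.
    if use_cholmod:
        try:
            from sksparse.cholmod import cholesky
            return cholesky(A.tocsc())(b)  # calling the factor solves A x = b
        except ImportError:
            pass
    return splu(A.tocsc()).solve(b)


# toy symmetric positive definite system: 1D Laplacian plus a diagonal shift
n = 5
A = sparse.diags([-1.0, 2.5, -1.0], [-1, 0, 1], shape=(n, n), format="csc")
b = np.ones(n)
x = solve_symmetric(A, b)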
66 changes: 26 additions & 40 deletions lapy/DiffGeo.py
@@ -3,6 +3,7 @@

from .Solver import Solver
from .TriaMesh import TriaMesh
from .utils._imports import import_optional_dependency


def compute_gradient(geom, vfunc):
@@ -27,9 +28,7 @@ def compute_rotated_f(geom, vfunc):
if type(geom).__name__ == "TriaMesh":
return tria_compute_rotated_f(geom, vfunc)
else:
raise ValueError(
'Geometry type "' + type(geom).__name__ + '" not implemented'
)
raise ValueError('Geometry type "' + type(geom).__name__ + '" not implemented')


def compute_geodesic_f(geom, vfunc):
@@ -177,9 +176,7 @@ def tria_compute_divergence(tria, tfunc):
dat = np.column_stack((x0, x1, x2)).reshape(-1)
# convert back to nparray 1D
vfunc = np.squeeze(
np.asarray(
0.5 * sparse.csc_matrix((dat, (i, j))).todense(), dtype=tfunc.dtype
)
np.asarray(0.5 * sparse.csc_matrix((dat, (i, j))).todense(), dtype=tfunc.dtype)
)
return vfunc

@@ -226,9 +223,7 @@ def tria_compute_divergence2(tria, tfunc):
i = np.column_stack((tria.t[:, 0], tria.t[:, 1], tria.t[:, 2])).reshape(-1)
j = np.zeros((3 * len(tria.t), 1), dtype=int).reshape(-1)
dat = np.column_stack((x0, x1, x2)).reshape(-1)
vfunc = np.squeeze(
np.asarray(0.5 * sparse.csc_matrix((dat, (i, j))).todense())
)
vfunc = np.squeeze(np.asarray(0.5 * sparse.csc_matrix((dat, (i, j))).todense()))
return vfunc


@@ -280,38 +275,31 @@ def tria_mean_curvature_flow(
steps. It will normalize surface area of the mesh and translate the barycenter
to the origin. Closed meshes will map to the unit sphere.
"""
if use_cholmod:
try:
from sksparse.cholmod import cholesky
except ImportError:
use_cholmod = False

if not use_cholmod:
# Note, it would be better to do sparse Cholesky (CHOLMOD)
# as it can be 5-6 times faster
from scipy.sparse.linalg import spsolve
sksparse = import_optional_dependency("sksparse", raise_error=use_cholmod)
# pre-normalize
trianorm = TriaMesh(tria.v, tria.t)
trianorm.normalize_()
# compute fixed A
lump = True # for computation here and inside loop
fem = Solver(trianorm, lump)
a_mat = fem.stiffness
if use_cholmod:
print("Solver: Cholesky decomposition from scikit-sparse cholmod ...")
else:
print("Solver: spsolve (LU decomposition) ...")
for x in range(max_iter):
# store last position (for delta computation below)
vlast = trianorm.v
# get current mass matrix and Mv
mass = Solver.fem_tria_mass(trianorm, lump)
mass_v = mass.dot(trianorm.v)
# solve (M + step*A) * v = Mv and update vertices
if use_cholmod:
factor = cholesky(mass + step * a_mat)
if sksparse is not None:
print("Solver: Cholesky decomposition from scikit-sparse cholmod ...")
factor = sksparse.cholmod.cholesky(mass + step * a_mat)
trianorm.v = factor(mass_v)
else:
# Note, it would be better to do sparse Cholesky (CHOLMOD)
# as it can be 5-6 times faster
from scipy.sparse.linalg import spsolve

print("Solver: spsolve (LU decomposition) ...")
trianorm.v = spsolve(mass + step * a_mat, mass_v)
# normalize updated mesh
trianorm.normalize_()
@@ -480,9 +468,7 @@ def get_flipped_area(triax):
# do a few mean curvature flow euler steps to make more convex
# three should be sufficient
if flow_iter > 0:
tflow = tria_mean_curvature_flow(
TriaMesh(vn, tria.t), max_iter=flow_iter
)
tflow = tria_mean_curvature_flow(TriaMesh(vn, tria.t), max_iter=flow_iter)
vn = tflow.v

# project to sphere and scaled to have the same scale/origin as FS:
@@ -564,15 +550,15 @@ def tet_compute_gradient(tet, vfunc):
voli = np.divide(1.0, vol)[:, np.newaxis]
# sum weighted edges
# c0 = vfunc[t[:,0],np.newaxis] * np.cross(,)
c1 = (
vfunc[tet.t[:, 1], np.newaxis] - vfunc[tet.t[:, 0], np.newaxis]
) * np.cross(e2, e5)
c2 = (
vfunc[tet.t[:, 2], np.newaxis] - vfunc[tet.t[:, 0], np.newaxis]
) * np.cross(e3, e4)
c3 = (
vfunc[tet.t[:, 3], np.newaxis] - vfunc[tet.t[:, 0], np.newaxis]
) * np.cross(-e2, e0)
c1 = (vfunc[tet.t[:, 1], np.newaxis] - vfunc[tet.t[:, 0], np.newaxis]) * np.cross(
e2, e5
)
c2 = (vfunc[tet.t[:, 2], np.newaxis] - vfunc[tet.t[:, 0], np.newaxis]) * np.cross(
e3, e4
)
c3 = (vfunc[tet.t[:, 3], np.newaxis] - vfunc[tet.t[:, 0], np.newaxis]) * np.cross(
-e2, e0
)
# divided by parallelepiped vol
tfunc = voli * (c1 + c2 + c3)
return tfunc
@@ -613,9 +599,9 @@ def tet_compute_divergence(tet, tfunc):
x1 = (n1 * tfunc).sum(1)
x2 = (n2 * tfunc).sum(1)
x3 = (n3 * tfunc).sum(1)
i = np.column_stack(
(tet.t[:, 0], tet.t[:, 1], tet.t[:, 2], tet.t[:, 3])
).reshape(-1)
i = np.column_stack((tet.t[:, 0], tet.t[:, 1], tet.t[:, 2], tet.t[:, 3])).reshape(
-1
)
j = np.zeros((4 * len(tet.t), 1), dtype=int).reshape(-1)
dat = np.column_stack((x0, x1, x2, x3)).reshape(-1)
vfunc = -np.squeeze(
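The tria_mean_curvature_flow loop above performs implicit Euler steps: it solves (M + step * A) v_new = M v_old with the current mass matrix M and the fixed stiffness matrix A, then re-normalizes the mesh. A rough sketch of a single step using SciPy only (the helper name is illustrative; lapy builds M and A through its Solver class):

from scipy.sparse.linalg import splu


def mean_curvature_flow_step(v, mass, stiffness, step=1e-3):
    # v: (n, 3) vertex array; mass, stiffness: sparse (n, n) FEM matrices.
    # Implicit Euler step: solve (M + step * A) v_new = M v_old,
    # one right-hand side per coordinate column.
    lu = splu((mass + step * stiffness).tocsc())
    return lu.solve(mass.dot(v))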
32 changes: 9 additions & 23 deletions lapy/FuncIO.py
@@ -134,18 +134,14 @@ def import_ev(infile):
i = i + 1
elif ll[i].lstrip().startswith("Eigenvalues"):
i = i + 1
while (
ll[i].find("{") < 0
): # possibly introduce termination criterion
while ll[i].find("{") < 0: # possibly introduce termination criterion
i = i + 1
if ll[i].find("}") >= 0: # '{' and '}' on the same line
evals = ll[i].strip().replace("{", "").replace("}", "")
else:
evals = str()
while ll[i].find("}") < 0:
evals = evals + ll[i].strip().replace("{", "").replace(
"}", ""
)
evals = evals + ll[i].strip().replace("{", "").replace("}", "")
i = i + 1
evals = evals + ll[i].strip().replace("{", "").replace("}", "")
evals = np.array(evals.split(";")).astype(np.float)
@@ -156,16 +152,10 @@
while not (ll[i].strip().startswith("sizes")):
i = i + 1
d.update(
{
"EigenvectorsSize": np.array(
ll[i].strip().split()[1:]
).astype(np.int)
}
{"EigenvectorsSize": np.array(ll[i].strip().split()[1:]).astype(np.int)}
)
i = i + 1
while (
ll[i].find("{") < 0
): # possibly introduce termination criterion
while ll[i].find("{") < 0: # possibly introduce termination criterion
i = i + 1
if ll[i].find("}") >= 0: # '{' and '}' on the same line
evecs = ll[i].strip().replace("{", "").replace("}", "")
@@ -176,18 +166,14 @@
"}", ""
).replace("(", "").replace(")", "")
i = i + 1
evecs = evecs + ll[i].strip().replace("{", "").replace(
"}", ""
).replace("(", "").replace(")", "")
evecs = evecs + ll[i].strip().replace("{", "").replace("}", "").replace(
"(", ""
).replace(")", "")
evecs = np.array(
evecs.replace(";", " ").replace(",", " ").strip().split()
).astype(np.float)
if len(evecs) == (
d["EigenvectorsSize"][0] * d["EigenvectorsSize"][1]
):
evecs = np.transpose(
np.reshape(evecs, d["EigenvectorsSize"][1::-1])
)
if len(evecs) == (d["EigenvectorsSize"][0] * d["EigenvectorsSize"][1]):
evecs = np.transpose(np.reshape(evecs, d["EigenvectorsSize"][1::-1]))
d.update({"Eigenvectors": evecs})
else:
print(
24 changes: 9 additions & 15 deletions lapy/Heat.py
@@ -1,5 +1,7 @@
import numpy as np

from .utils._imports import import_optional_dependency


def diagonal(t, x, evecs, evals, n):
"""
@@ -17,9 +19,7 @@ def diagonal(t, x, evecs, evals, n):
"""
# maybe add code to check dimensions of input and flip axis if necessary
h = np.matmul(
evecs[x, 0:n] * evecs[x, 0:n], np.exp(-np.matmul(evals[0:n], t))
)
h = np.matmul(evecs[x, 0:n] * evecs[x, 0:n], np.exp(-np.matmul(evals[0:n], t)))
return h


@@ -43,9 +43,7 @@ def kernel(t, vfix, evecs, evals, n):
"""
# h = evecs * ( exp(-evals * t) .* repmat(evecs(vfix,:)',1,length(t)) )
h = np.matmul(
evecs[:, 0:n], (np.exp(np.matmul(-evals[0:n], t)) * evecs[vfix, 0:n])
)
h = np.matmul(evecs[:, 0:n], (np.exp(np.matmul(-evals[0:n], t)) * evecs[vfix, 0:n]))
return h


@@ -67,13 +65,7 @@ def diffusion(geometry, vids, m=1.0, aniso=None, use_cholmod=True):
:return:
vfunc heat diffusion at vertices
"""
if use_cholmod:
try:
from sksparse.cholmod import cholesky
except ImportError:
use_cholmod = False
if not use_cholmod:
from scipy.sparse.linalg import splu
sksparse = import_optional_dependency("sksparse", raise_error=use_cholmod)
from .Solver import Solver

nv = len(geometry.v)
@@ -87,11 +79,13 @@ def diffusion(geometry, vids, m=1.0, aniso=None, use_cholmod=True):
b0[np.array(vids)] = 1.0
# solve H x = b0
print("Matrix Format now: " + hmat.getformat())
if use_cholmod:
if sksparse is not None:
print("Solver: Cholesky decomposition from scikit-sparse cholmod ...")
chol = cholesky(hmat)
chol = sksparse.cholmod.cholesky(hmat)
vfunc = chol(b0)
else:
from scipy.sparse.linalg import splu

print("Solver: spsolve (LU decomposition) ...")
lu = splu(hmat)
vfunc = lu.solve(np.float32(b0))
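The diagonal and kernel helpers above evaluate the heat kernel from a truncated eigen-decomposition, K(x, x, t) = sum_i phi_i(x)^2 * exp(-lambda_i * t). A small sketch with synthetic eigenpairs (the column/row shapes of evals and t are inferred from the matmul calls in the diff, not from lapy's documented API):

import numpy as np

nv, n = 100, 10                       # vertices, number of eigenpairs kept
rng = np.random.default_rng(0)
evecs = rng.standard_normal((nv, n))  # eigenvectors as columns, phi_i(x) = evecs[x, i]
evals = np.linspace(0.0, 5.0, n).reshape(-1, 1)  # eigenvalues as a column vector
t = np.array([[0.5, 1.0, 2.0]])       # diffusion times as a row vector

# heat-kernel diagonal at vertex x, one value per time in t
x = 3
h = np.matmul(evecs[x, 0:n] * evecs[x, 0:n], np.exp(-np.matmul(evals[0:n], t)))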
12 changes: 3 additions & 9 deletions lapy/Plot.py
@@ -370,9 +370,7 @@ def plot_tria_mesh(
# max_fcol = 1
# colormap = cm.RdBu
colormap = _get_colorscale(min_fcol, max_fcol)
facecolor = [
_map_z2color(zz, colormap, min_fcol, max_fcol) for zz in tfunc
]
facecolor = [_map_z2color(zz, colormap, min_fcol, max_fcol) for zz in tfunc]
# for tria colors overwrite flatshading to be true:
triangles = go.Mesh3d(
x=x,
@@ -420,9 +418,7 @@ def plot_tria_mesh(
mode="lines",
line=dict(color=tic_color, width=2),
)
triangles = go.Mesh3d(
x=x, y=y, z=z, i=i, j=j, k=k, flatshading=flatshading
)
triangles = go.Mesh3d(x=x, y=y, z=z, i=i, j=j, k=k, flatshading=flatshading)
else:
raise ValueError(
"tfunc should be scalar (face color) or 3d for each triangle"
@@ -473,9 +469,7 @@ def plot_tria_mesh(
vlines = go.Scatter3d(
x=xv, y=yv, z=zv, mode="lines", line=dict(color=tic_color, width=2)
)
triangles = go.Mesh3d(
x=x, y=y, z=z, i=i, j=j, k=k, flatshading=flatshading
)
triangles = go.Mesh3d(x=x, y=y, z=z, i=i, j=j, k=k, flatshading=flatshading)
else:
raise ValueError("vfunc should be scalar or 3d for each vertex")

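The plot_tria_mesh changes above just reflow calls to plotly's go.Mesh3d, which takes vertex coordinates x, y, z and per-triangle vertex indices i, j, k. A minimal standalone example of that call (a single tetrahedron; the values are made up for illustration):

import plotly.graph_objects as go

# four vertices and the four triangular faces of a tetrahedron
x, y, z = [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]
i, j, k = [0, 0, 0, 1], [1, 2, 3, 2], [2, 3, 1, 3]

triangles = go.Mesh3d(x=x, y=y, z=z, i=i, j=j, k=k, flatshading=True)
go.Figure(data=[triangles]).show()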

0 comments on commit 5316fab
