Add bivector exponential #21

Draft
wants to merge 7 commits into base: master
2 changes: 1 addition & 1 deletion tfga/layers.py
@@ -691,7 +691,7 @@ def compute_output_shape(self, input_shape):

def call(self, inputs):
return self.algebra.exp(
inputs, square_scalar_tolerance=self.square_scalar_tolerance
inputs
)

def get_config(self):
139 changes: 134 additions & 5 deletions tfga/tfga.py
@@ -5,7 +5,7 @@
It exposes methods for operating on `tf.Tensor` instances where their last
axis is interpreted as blades of the algebra.
"""
from typing import List, Any, Union, Optional
from typing import List, Union, Optional
import numbers
import tensorflow as tf
import numpy as np
@@ -547,7 +547,7 @@ def approx_exp(self, a: tf.Tensor, order: int = 50) -> tf.Tensor:
result += v / i_factorial
return result

def exp(self, a: tf.Tensor, square_scalar_tolerance: Union[float, None] = 1e-4) -> tf.Tensor:
def simple_exp(self, a: tf.Tensor, square_scalar_tolerance: Union[float, None] = 1e-4) -> tf.Tensor:
"""Returns the exponential of the passed geometric algebra tensor.
Only works for multivectors that square to scalars.

@@ -589,6 +589,137 @@ def exp(self, a: tf.Tensor, square_scalar_tolerance: Union[float, None] = 1e-4)

return tf.where(scalar_self_sq == 0, self.from_scalar(1.0) + a, non_zero_result)
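For reference, a minimal usage sketch of the renamed `simple_exp` (illustrative, not part of this diff; it assumes the `GeometricAlgebra` constructor and the `e("01")` blade accessor used elsewhere in tfga). A simple bivector squares to a scalar, so the closed-form branch applies:

from tfga import GeometricAlgebra

ga = GeometricAlgebra(metric=[1, 1, 1])
b = 0.5 * ga.e("01")        # simple bivector, squares to the scalar -0.25
rotor = ga.simple_exp(b)    # equals cos(0.5) + sin(0.5) * e01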

def invariant_decomposition_from_w(self, w: tf.Tensor, B: Optional[tf.Tensor] = None) -> tf.Tensor:
"""Returns the simple elements for given W values.

Uses the invariant decomposition introduced in Graded Symmetry Groups: Plane and Simple,
see https://arxiv.org/abs/2107.03771 section 6.

Args:
w: W values (see paper) to return simple elements for

Returns:
Simple elements for `w` of shape [#dims//2, ...B, #dims].
Entries can be zero if there are fewer than #dims//2 simple elements.
"""
k = len(self.metric) // 2
r = k // 2

# Get lambda polynomial coefficients
pols = []
highest_pol = self.geom_prod(w[0], w[0])[..., 0] * (-1)**k
for i in range(k, 0, -1):
pols.append(
(self.geom_prod(w[i], w[i])[..., 0] * (-1)**(k - i)) / highest_pol)
pols = tf.convert_to_tensor(pols)

# Build companion matrix from polynomial coefficients
comp = tf.concat([
tf.eye(k, batch_shape=w.shape[1:-1])[..., 1:, :], # [...B, k, k]
# [k, ...B] -> [...B, 1, k]
tf.expand_dims(
tf.transpose(-pols, list(range(1, len(pols.shape))) + [0]),
axis=-2
),
], axis=-2)

# Get eigenvalues of the companion matrix, which will be the roots of the polynomial
# [...B, k]
comp_eig_vals, _ = tf.eig(comp)
comp_eig_vals = tf.cast(comp_eig_vals, tf.float32)

if B is not None:
# Sort eigenvalues by absolute value so that any zero lands at the first index.
# That first eigenvalue is discarded; the final simple element is recovered
# as B - sum(b) instead.
sorted_eig_val_indices = tf.argsort(tf.abs(comp_eig_vals))
comp_eig_vals = tf.gather(
comp_eig_vals, sorted_eig_val_indices, batch_dims=-1
)[..., 1:]

# Calculate the simple elements using the roots
b = []
even_k = k % 2
for lambda_index in range(k if B is None else k - 1):
num = 0
den = 0
for i in range(k + 1):
if even_k:
exponent = r - (i + 1) // 2
else:
exponent = r - i // 2

eig_val = comp_eig_vals[..., lambda_index:lambda_index+1]
term = eig_val ** exponent * w[i]

if (even_k and i % 2 == 1) or (not even_k and i % 2 == 0):
den += term
else:
num += term
new_b = self.geom_prod(num, self.inverse(den))
b.append(tf.where(tf.reduce_any(
den != 0, axis=-1, keepdims=True),
new_b,
tf.zeros_like(new_b)
))

if B is not None:
b.append(B - sum(b))

b = tf.convert_to_tensor(b, dtype=tf.float32)
# [k, ...B, #dims]

return b
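The companion-matrix step above turns root finding for the lambda polynomial into an eigenvalue problem: the eigenvalues of a polynomial's companion matrix are exactly its roots. A standalone sketch of that trick on a hand-picked monic polynomial x^2 - 3x + 2 (roots 1 and 2), using only calls already used in this file:

import tensorflow as tf

coeffs = tf.constant([2.0, -3.0])  # trailing coefficients [c0, c1] of x**2 + c1*x + c0
comp = tf.concat([tf.eye(2)[1:, :], -coeffs[tf.newaxis, :]], axis=0)  # [[0, 1], [-c0, -c1]]
roots, _ = tf.eig(comp)
print(tf.sort(tf.math.real(roots)))  # ~ [1.0, 2.0]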

def invariant_decomposition(self, a: tf.Tensor) -> tf.Tensor:
"""Returns the simple elements for a geometric algebra tensor.
The simple elements square to scalars, mutually commute under the
geometric product and sum up to the original multivector.

Uses the invariant decomposition introduced in Graded Symmetry Groups: Plane and Simple,
see https://arxiv.org/abs/2107.03771 section 6.

Args:
a: Geometric algebra tensor to return simple elements for

Returns:
Simple elements that add up to `a` of shape [#dims//2, ...B, #dims].
Entries can be zero if there are fewer than #dims//2 simple elements.
"""
k = len(self.metric) // 2

# Calculate w
w = [tf.ones_like(a) * self.e("")]
for m in range(1, k + 1):
w.append(self.ext_prod(a, w[-1]))
for m in range(1, k + 1):
w[m] /= np.math.factorial(m)

w = tf.convert_to_tensor(w, dtype=tf.float32)

return self.invariant_decomposition_from_w(w, a)
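A usage sketch of the decomposition (illustrative; it assumes the constructor and `e()` accessor outside this diff): in a 4D algebra a generic bivector is not simple, but it splits into two commuting simple bivectors that sum back to the input.

import tensorflow as tf
from tfga import GeometricAlgebra

ga = GeometricAlgebra(metric=[1, 1, 1, 1])
a = ga.e("01") + 2.0 * ga.e("23")   # not simple: a*a = -5 + 4*e0123
b = ga.invariant_decomposition(a)   # two simple elements, shape [2, 16]
print(tf.reduce_max(tf.abs(b[0] + b[1] - a)))  # ~ 0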

def exp(self, a: tf.Tensor) -> tf.Tensor:
"""Returns the exponential for a geometric algebra tensor.
Uses the invariant decomposition (see `GeometricAlgebra.invariant_decomposition()`)
and thus only works if one exists.

Args:
a: Geometric algebra tensor to return exponential for

Returns:
`exp(a)`
"""
bs = self.invariant_decomposition(a)

# Exponentiate the simple elements and multiply them.
# exp(a) = exp(bs[0]) * exp(bs[1]) * ...
result = None
exp_bs = self.simple_exp(bs)
for exp_b in exp_bs:
result = exp_b if result is None else self.geom_prod(result, exp_b)
return result
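A quick way to sanity-check the new `exp` against the existing Taylor-series `approx_exp` on a non-simple bivector (illustrative, same assumptions as above):

import tensorflow as tf
from tfga import GeometricAlgebra

ga = GeometricAlgebra(metric=[1, 1, 1, 1])
a = ga.e("01") + 2.0 * ga.e("23")
print(tf.reduce_max(tf.abs(ga.exp(a) - ga.approx_exp(a))))  # should be close to 0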

def approx_log(self, a: tf.Tensor, order: int = 50) -> tf.Tensor:
"""Returns an approximation of the natural logarithm using a centered
taylor series. Only converges for multivectors where `||mv - 1|| < 1`.
@@ -753,9 +884,7 @@ def inverse(self, a: tf.Tensor) -> tf.Tensor:
u_minus_c = u - c
u = self.geom_prod(a, u_minus_c)

if not self.is_pure_kind(u, BladeKind.SCALAR):
raise Exception(
"Can't invert multi-vector (det U not scalar: %s)." % u)
# TODO: Check if u is approximately scalar

# adj / det
return u_minus_c / u[..., :1]
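One possible shape for the TODO above (a hypothetical helper, not part of this PR): compare the norm of the non-scalar blades of `u` against its scalar part before dividing.

import tensorflow as tf

def is_approx_scalar(u: tf.Tensor, tolerance: float = 1e-4) -> tf.Tensor:
    # True where the non-scalar part of `u` is negligible relative to its scalar part.
    return tf.norm(u[..., 1:], axis=-1) <= tolerance * tf.abs(u[..., 0])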