This repository has been archived by the owner on Nov 7, 2024. It is now read-only.

Cholesky Decomposition added to decompositions.py and decompositions_test.py of both the numpy and tensorflow modules. #937

Open · wants to merge 7 commits into base: master
36 changes: 33 additions & 3 deletions tensornetwork/backends/numpy/decompositions.py
@@ -15,6 +15,7 @@

from typing import Optional, Any, Tuple
import numpy

Tensor = Any


@@ -26,7 +27,7 @@ def svd(
max_truncation_error: Optional[float] = None,
relative: Optional[bool] = False) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""Computes the singular value decomposition (SVD) of a tensor.

Contributor: There are a lot of places where you add whitespace like this. Could you remove this from this PR? Thanks

See tensornetwork.backends.tensorflow.decompositions for details.
"""
left_dims = tensor.shape[:pivot_axis]
@@ -81,7 +82,7 @@ def qr(
non_negative_diagonal: bool
) -> Tuple[Tensor, Tensor]:
"""Computes the QR decomposition of a tensor.

See tensornetwork.backends.tensorflow.decompositions for details.
"""
left_dims = tensor.shape[:pivot_axis]
@@ -105,7 +106,7 @@ def rq(
non_negative_diagonal: bool
) -> Tuple[Tensor, Tensor]:
"""Computes the RQ (reversed QR) decomposition of a tensor.

See tensornetwork.backends.tensorflow.decompositions for details.
"""
left_dims = tensor.shape[:pivot_axis]
@@ -122,3 +123,32 @@ def rq(
r = np.reshape(r, list(left_dims) + [center_dim])
q = np.reshape(q, [center_dim] + list(right_dims))
return r, q


def cholesky(
    np,  # TODO: Typing
    tensor: Tensor,
    pivot_axis: int,
) -> Tuple[Tensor, Tensor]:
  """Computes the Cholesky decomposition of a tensor.

  See tensornetwork.backends.tensorflow.decompositions for details.
  """
  left_dims = tensor.shape[:pivot_axis]
  right_dims = tensor.shape[pivot_axis:]
  tensor = np.reshape(tensor, [numpy.prod(left_dims), numpy.prod(right_dims)])
  n = tensor.shape[0]
  m = tensor.shape[1]
  if n != m:
Contributor: Generally it's always a good idea to add these types of checks! In this case, however, I think we can remove them because numpy already performs these kinds of checks for us. Side note: in most cases, instead of printing a message it's better to raise an exception.

print("The input must be a square matrix")
elif (np.allclose(tensor, np.matrix.getH(tensor)) == False):
print("The input must be a Hermitian Matrix")
elif (np.all(np.linalg.eigvals(tensor) > 0) == False):
print("The input must be a Positive Definite Matrix")
else:
L = np.linalg.cholesky(tensor)
L_trans = np.matrix.getH(L)
center_dim = L.shape[1]
L = np.reshape(L, list(left_dims) + [center_dim])
L_trans = np.reshape(L_trans, [center_dim] + list(right_dims))
return L, L_trans
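Taking the two review points together (drop the redundant checks, raise instead of print), the function could be reworked along the following lines. This is an editor's sketch rather than code from the PR, reusing the module's `Tensor` alias and `numpy` import: `np.linalg.cholesky` already raises `numpy.linalg.LinAlgError` for non-square or non-positive-definite input, so the only check kept is Hermitian symmetry, which numpy never verifies because it reads just the lower triangle.

```python
# Sketch only (not part of this PR): the review feedback applied to cholesky().
def cholesky(
    np,  # TODO: Typing
    tensor: Tensor,
    pivot_axis: int,
) -> Tuple[Tensor, Tensor]:
  """Computes the Cholesky decomposition of a tensor."""
  left_dims = tensor.shape[:pivot_axis]
  right_dims = tensor.shape[pivot_axis:]
  matrix = np.reshape(tensor, [numpy.prod(left_dims), numpy.prod(right_dims)])
  # numpy reads only the lower triangle, so Hermitian symmetry is the one
  # property worth checking here; raise instead of printing.
  if not np.allclose(matrix, matrix.conj().T):
    raise ValueError("Input must be a Hermitian matrix.")
  # Raises numpy.linalg.LinAlgError if not square or not positive definite.
  L = np.linalg.cholesky(matrix)
  L_herm = L.conj().T
  center_dim = L.shape[1]
  L = np.reshape(L, list(left_dims) + [center_dim])
  L_herm = np.reshape(L_herm, [center_dim] + list(right_dims))
  return L, L_herm
```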
10 changes: 10 additions & 0 deletions tensornetwork/backends/numpy/decompositions_test.py
@@ -104,6 +104,16 @@ def test_max_truncation_error_relative(self):
np.testing.assert_almost_equal(trunc_sv_absolute, [0.1])
np.testing.assert_almost_equal(trunc_sv_relative, [0.2, 0.1])

  def test_expected_shapes_cholesky(self):
    val = np.array([[[25, 15, -5]], [[15, 18, 0]], [[-5, 0, 11]]])
    L, L_trans = decompositions.cholesky(np, val, -1)
    self.assertEqual(L.shape, (3, 1, 3))

  def test_cholesky(self):
    random_matrix = np.array([[[25, 15, -5]], [[15, 18, 0]], [[-5, 0, 11]]])
Contributor: not sure if this is random :)

    L, L_trans = decompositions.cholesky(np, random_matrix, -1)
    self.assertAllClose(L.dot(L_trans), random_matrix)
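Picking up the randomness comment above: a genuinely random positive-definite input can be built by construction, since `A @ A.T + c * I` is symmetric positive definite for any real `A` and `c > 0` (the `A @ A.T` part is positive semi-definite, and adding `c * I` shifts every eigenvalue up by `c`). An editor's sketch with a hypothetical test name, not part of the PR:

```python
  def test_cholesky_random_spd(self):
    np.random.seed(10)
    A = np.random.randn(4, 4)
    # A @ A.T is positive semi-definite; adding 4*I makes it positive definite.
    spd = A @ A.T + 4 * np.eye(4)
    L, L_trans = decompositions.cholesky(np, spd, 1)
    self.assertAllClose(L.dot(L_trans), spd)
```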


if __name__ == '__main__':
tf.test.main()
102 changes: 80 additions & 22 deletions tensornetwork/backends/tensorflow/decompositions.py
@@ -26,37 +26,37 @@ def svd(
max_truncation_error: Optional[float] = None,
relative: Optional[bool] = False) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
"""Computes the singular value decomposition (SVD) of a tensor.

The SVD is performed by treating the tensor as a matrix, with an effective
left (row) index resulting from combining the axes `tensor.shape[:pivot_axis]`
and an effective right (column) index resulting from combining the axes
`tensor.shape[pivot_axis:]`.

For example, if `tensor` had a shape (2, 3, 4, 5) and `pivot_axis` was 2, then
`u` would have shape (2, 3, 6), `s` would have shape (6), and `vh` would
have shape (6, 4, 5).

If `max_singular_values` is set to an integer, the SVD is truncated to keep
at most this many singular values.

If `max_truncation_error > 0`, as many singular values will be truncated as
possible, so that the truncation error (the norm of discarded singular
values) is at most `max_truncation_error`.
If `relative` is set to `True` then `max_truncation_error` is understood
relative to the largest singular value.

If both `max_singular_values` and `max_truncation_error` are specified, the
number of retained singular values will be
`min(max_singular_values, nsv_auto_trunc)`, where `nsv_auto_trunc` is the
number of singular values that must be kept to maintain a truncation error
smaller than `max_truncation_error`.
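As a concrete illustration of how the two limits combine (an editor's example with assumed values, not text from the PR): for singular values [2.0, 1.0, 0.1] and `max_truncation_error=0.5`, only 0.1 can be discarded (discarding [1.0, 0.1] would cost sqrt(1.01) ≈ 1.005 > 0.5), so `nsv_auto_trunc` is 2 and `max_singular_values=2` keeps min(2, 2) = 2 values. Per the truncation tests in this PR, the fourth output collects the discarded singular values.

```python
import numpy as np
import tensorflow as tf
from tensornetwork.backends.tensorflow import decompositions

matrix = tf.constant(np.diag([2.0, 1.0, 0.1]))  # singular values 2.0, 1.0, 0.1
u, s, vh, s_rest = decompositions.svd(
    tf, matrix, pivot_axis=1,
    max_singular_values=2,
    max_truncation_error=0.5)
# s ~= [2.0, 1.0]; s_rest ~= [0.1] (the discarded singular values).
```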

The output consists of three tensors `u, s, vh` such that:
```python
u[i1,...,iN, j] * s[j] * vh[j, k1,...,kM] == tensor[i1,...,iN, k1,...,kM]
```
Note that the output ordering matches numpy.linalg.svd rather than tf.svd.

Args:
tf: The tensorflow module.
tensor: A tensor to be decomposed.
@@ -67,7 +67,7 @@ def svd(
max_truncation_error: The maximum allowed truncation error or `None` to not
do any truncation.
relative: Multiply `max_truncation_error` with the largest singular value.

Returns:
u: Left tensor factor.
s: Vector of ordered singular values from largest to smallest.
@@ -127,34 +127,34 @@ def svd(


def qr(
tf: Any,
tensor: Tensor,
pivot_axis: int,
non_negative_diagonal: bool
) -> Tuple[Tensor, Tensor]:
"""Computes the QR decomposition of a tensor.

The QR decomposition is performed by treating the tensor as a matrix,
with an effective left (row) index resulting from combining the
axes `tensor.shape[:pivot_axis]` and an effective right (column)
index resulting from combining the axes `tensor.shape[pivot_axis:]`.

For example, if `tensor` had a shape (2, 3, 4, 5) and `pivot_axis` was 2,
then `q` would have shape (2, 3, 6), and `r` would
have shape (6, 4, 5).

The output consists of two tensors `Q, R` such that:
```python
Q[i1,...,iN, j] * R[j, k1,...,kM] == tensor[i1,...,iN, k1,...,kM]
```
Note that the output ordering matches numpy.linalg.svd rather than tf.svd.

Args:
tf: The tensorflow module.
tensor: A tensor to be decomposed.
pivot_axis: Where to split the tensor's axes before flattening into a
matrix.

Returns:
Q: Left tensor factor.
R: Right tensor factor.
@@ -177,34 +177,34 @@ def qr(


def rq(
tf: Any,
tensor: Tensor,
pivot_axis: int,
non_negative_diagonal: bool
) -> Tuple[Tensor, Tensor]:
"""Computes the RQ decomposition of a tensor.

The QR decomposition is performed by treating the tensor as a matrix,
with an effective left (row) index resulting from combining the axes
`tensor.shape[:pivot_axis]` and an effective right (column) index
resulting from combining the axes `tensor.shape[pivot_axis:]`.

For example, if `tensor` had a shape (2, 3, 4, 5) and `pivot_axis` was 2,
then `r` would have shape (2, 3, 6), and `q` would
have shape (6, 4, 5).

The output consists of two tensors `Q, R` such that:
```python
Q[i1,...,iN, j] * R[j, k1,...,kM] == tensor[i1,...,iN, k1,...,kM]
```
Note that the output ordering matches numpy.linalg.svd rather than tf.svd.

Args:
tf: The tensorflow module.
tensor: A tensor to be decomposed.
pivot_axis: Where to split the tensor's axes before flattening into a
matrix.

Returns:
Q: Left tensor factor.
R: Right tensor factor.
@@ -226,3 +226,61 @@ def rq(
r = tf.reshape(r, tf.concat([left_dims, [center_dim]], axis=-1))
q = tf.reshape(q, tf.concat([[center_dim], right_dims], axis=-1))
return r, q


def cholesky(
    tf: Any,
    tensor: Tensor,
    pivot_axis: int,
) -> Tuple[Tensor, Tensor]:
  """Computes the Cholesky decomposition of a tensor.

  The Cholesky decomposition is performed by treating the tensor as a matrix,
  with an effective left (row) index resulting from combining the axes
  `tensor.shape[:pivot_axis]` and an effective right (column) index
  resulting from combining the axes `tensor.shape[pivot_axis:]`.

  For example, if `tensor` had a shape (2, 3, 4, 5) and `pivot_axis` was 2,
  then `r` would have shape (2, 3, 6), and `q` would
Contributor: replace q and r with L and L_herm

  have shape (6, 4, 5).

  The output consists of two tensors `L, L_trans` such that:
  ```python
  L[i1,...,iN, j] * L_trans[j, k1,...,kM] == tensor[i1,...,iN, k1,...,kM]
  ```
  Note that the output ordering matches numpy.linalg.svd rather than tf.svd.
Contributor: this line can go

  Args:
    tf: The tensorflow module.
    tensor: A tensor to be decomposed.
    pivot_axis: Where to split the tensor's axes before flattening into a
      matrix.

  Returns:
    L: Lower Triangular Matrix.
    L_trans: Conjugate Transpose of L.
  """
  left_dims = tf.shape(tensor)[:pivot_axis]
  right_dims = tf.shape(tensor)[pivot_axis:]
  tensor = tf.reshape(tensor,
                      [tf.reduce_prod(left_dims),
                       tf.reduce_prod(right_dims)])
  n = tensor.shape[0]
  m = tensor.shape[1]
  tensor = tf.cast(tensor, dtype=tf.complex128, name=None)
Contributor: Please remove the cast (is there a reason for this type cast?)

  if n != m:
Contributor: remove checks (tf also performs checks)

print("The input must be a square matrix")
elif (tf.experimental.numpy.allclose(tensor,
tf.linalg.adjoint(tensor)) == False):
print("The input must be a Hermitian Matrix")
elif (tf.experimental.numpy.all(
tf.math.real(tf.linalg.eigvals(tensor)) > 0) == False):
print("The input must be a Positive Definite Matrix")
else:
L = tf.linalg.cholesky(tensor, name=None)
L_trans = tf.transpose(L, perm=None, conjugate=True)
center_dim = tf.shape(L)[1]
L = tf.reshape(L, tf.concat([left_dims, [center_dim]], axis=-1))
L_trans = tf.reshape(L_trans, tf.concat([[center_dim], right_dims],
axis=-1))
return L, L_trans
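Folding in the three comments above (use `L`/`L_herm` naming, drop the cast, drop the manual checks), a leaner version might look as follows. An editor's sketch, not code from the PR: in eager mode `tf.linalg.cholesky` itself rejects non-square or non-positive-definite input with an `InvalidArgumentError`, and, like numpy, it reads only the lower triangle, so Hermitian symmetry still goes unchecked.

```python
# Sketch only (not part of this PR): the review feedback applied to cholesky().
def cholesky(
    tf: Any,
    tensor: Tensor,
    pivot_axis: int,
) -> Tuple[Tensor, Tensor]:
  """Computes the Cholesky decomposition of a tensor."""
  left_dims = tf.shape(tensor)[:pivot_axis]
  right_dims = tf.shape(tensor)[pivot_axis:]
  matrix = tf.reshape(tensor,
                      [tf.reduce_prod(left_dims), tf.reduce_prod(right_dims)])
  # No dtype cast: the output keeps the input dtype. In eager mode this op
  # raises InvalidArgumentError for non-square or non-positive-definite input.
  L = tf.linalg.cholesky(matrix)
  L_herm = tf.linalg.adjoint(L)  # conjugate transpose
  center_dim = tf.shape(L)[1]
  L = tf.reshape(L, tf.concat([left_dims, [center_dim]], axis=-1))
  L_herm = tf.reshape(L_herm, tf.concat([[center_dim], right_dims], axis=-1))
  return L, L_herm
```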
10 changes: 10 additions & 0 deletions tensornetwork/backends/tensorflow/decompositions_test.py
@@ -122,6 +122,16 @@ def test_max_truncation_error_relative(self):
np.testing.assert_almost_equal(trunc_sv_absolute, [0.1])
np.testing.assert_almost_equal(trunc_sv_relative, [0.2, 0.1])

  def test_expected_shapes_cholesky(self):
    val = tf.constant([[[25, 15, -5]], [[15, 18, 0]], [[-5, 0, 11]]])
    L, L_trans = decompositions.cholesky(tf, val, -1)
    self.assertEqual(L.shape, (3, 1, 3))

  def test_cholesky(self):
    random_matrix = tf.constant([[[25, 15, -5]], [[15, 18, 0]], [[-5, 0, 11]]])
    L, L_trans = decompositions.cholesky(tf, random_matrix, -1)
    self.assertAllClose(tf.tensordot(L, L_trans, ([2], [0])), random_matrix)
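The fixed matrix here is as non-random as in the numpy test. A tf analogue of a random-input test, sketched under the assumption that the dtype-preserving variant above is used (the PR's complex128 cast would otherwise make the output dtype differ from the float64 input); the test name is hypothetical:

```python
  def test_cholesky_random_spd(self):
    tf.random.set_seed(10)
    A = tf.random.normal((4, 4), dtype=tf.float64)
    # A @ A^T + 4*I is symmetric positive definite by construction.
    spd = tf.matmul(A, A, transpose_b=True) + 4.0 * tf.eye(4, dtype=tf.float64)
    L, L_herm = decompositions.cholesky(tf, spd, 1)
    self.assertAllClose(tf.matmul(L, L_herm), spd)
```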


if __name__ == '__main__':
tf.test.main()