allow fitting for hyper TNs: add make_overlap and related methods
jcmgray committed Dec 18, 2024
1 parent c6d0bac commit ce9902d
Showing 7 changed files with 355 additions and 64 deletions.
2 changes: 2 additions & 0 deletions quimb/tensor/__init__.py
@@ -110,6 +110,7 @@
HTN_dual_from_edges_and_fill_fn,
HTN_from_clauses,
HTN_from_cnf,
HTN_rand,
HTN_random_ksat,
MPO_ham_heis,
MPO_ham_ising,
@@ -281,6 +282,7 @@
"HTN_dual_from_edges_and_fill_fn",
"HTN_from_clauses",
"HTN_from_cnf",
"HTN_rand",
"HTN_random_ksat",
"HTN2D_classical_ising_partition_function",
"HTN3D_classical_ising_partition_function",
8 changes: 8 additions & 0 deletions quimb/tensor/array_ops.py
@@ -234,6 +234,14 @@ def norm_fro(x):
norm_fro.register("numpy", norm_fro_dense)


@norm_fro.register("autograd")
def norm_fro_autoray(x):
# seems to be bug with autograd's linalg.norm and complex numbers
# https://github.com/HIPS/autograd/issues/666
# so implement manually
return do("sum", do("abs", x) ** 2) ** 0.5

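A minimal sanity check of the manual formulation (plain NumPy, not part of the diff): the sum-of-absolute-squares form agrees with the usual Frobenius norm for complex arrays.

import numpy as np

x = np.random.rand(3, 4) + 1j * np.random.rand(3, 4)
# sqrt of the sum of |x_ij|^2, as implemented above
manual = np.sum(np.abs(x) ** 2) ** 0.5
assert np.allclose(manual, np.linalg.norm(x))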

def sensibly_scale(x):
"""Take an array and scale it *very* roughly such that random tensor
networks consisting of such arrays do not have gigantic norms.
93 changes: 56 additions & 37 deletions quimb/tensor/fitting.py
@@ -14,6 +14,7 @@ def tensor_network_distance(
xBB=None,
method="auto",
normalized=False,
output_inds=None,
**contract_opts,
):
r"""Compute the Frobenius norm distance between two tensor networks:
Expand Down Expand Up @@ -57,6 +58,9 @@ def tensor_network_distance(
If ``'infidelity'``, compute the normalized infidelity
``1 - |<A|B>|^2 / (|A| |B|)``, which can be faster to optimize e.g.,
but does not take into account normalization.
output_inds : sequence of str, optional
Specify the output indices of `tnA` and `tnB` to contract over. This
can be necessary if either network has hyper indices.
contract_opts
Supplied to :meth:`~quimb.tensor.tensor_core.TensorNetwork.contract`.
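A usage sketch of the new keyword (illustrative only, not part of the diff; assumes the `distance` method forwards keyword arguments to `tensor_network_distance`, and uses `HTN_rand` added in this commit):

import quimb.tensor as qtn

# random hyper TN with 2 normal + 1 hyper output index: k0, k1, k2
tnA = qtn.HTN_rand(8, 3, n_out=2, n_hyper_out=1, seed=0)
tnB = tnA.copy()
# hyper output indices can't be inferred from the geometry alone,
# so supply them explicitly
d = tnA.distance(tnB, output_inds=["k0", "k1", "k2"])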
@@ -69,12 +73,15 @@
tnA = tnA.as_network()
tnB = tnB.as_network()

oix = tnA.outer_inds()
if set(oix) != set(tnB.outer_inds()):
raise ValueError(
"Can only compute distance between tensor "
"networks with matching outer indices."
)
if output_inds is None:
oix = tnA.outer_inds()
if set(oix) != set(tnB.outer_inds()):
raise ValueError(
"Can only compute distance between tensor "
"networks with matching outer indices."
)
else:
oix = output_inds

if method == "auto":
d = tnA.inds_size(oix)
@@ -90,33 +97,40 @@

# overlap method
if xAA is None:
xAA = (tnA | tnA.H).contract(**contract_opts)
# <A|A>
xAA = tnA.norm(squared=True, output_inds=oix, **contract_opts)
if xAB is None:
xAB = (tnA | tnB.H).contract(**contract_opts)
# <B|A>
xAB = tnA.overlap(tnB, output_inds=oix, **contract_opts)
if xBB is None:
xBB = (tnB | tnB.H).contract(**contract_opts)
# <B|B>
xBB = tnB.norm(squared=True, output_inds=oix, **contract_opts)

xAA = do("abs", xAA)
xBB = do("abs", xBB)
xAB = do("real", xAB)

if normalized == "infidelity":
# compute normalized infidelity
return 1 - do("abs", xAB**2 / (xAA * xBB))
return 1 - xAB**2 / (xAA * xBB)

if normalized == "infidelity_sqrt":
# compute normalized sqrt infidelity
return 1 - do("abs", xAB / (xAA * xBB) ** 0.5)
return 1 - do("abs", xAB) / (xAA * xBB) ** 0.5

if normalized == "squared":
return (
do("abs", xAA + xBB - 2 * do("real", xAB))
do("abs", xAA + xBB - 2 * xAB)
# divide by average norm-squared of A and B
* 2
/ (do("abs", xAA) + do("abs", xBB))
/ (xAA + xBB)
) ** 0.5

dAB = do("abs", xAA + xBB - 2 * do("real", xAB)) ** 0.5
dAB = do("abs", xAA + xBB - 2 * xAB) ** 0.5

if normalized:
# divide by average norm of A and B
dAB = dAB * 2 / (do("abs", xAA) ** 0.5 + do("abs", xBB) ** 0.5)
dAB = dAB * 2 / (xAA**0.5 + xBB**0.5)

return dAB

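For reference, the normalized form computed here is d(A, B) = 2 * |A - B| / (|A| + |B|), which evaluates to 0 for identical networks and at most 2 (when A = -B).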
@@ -130,6 +144,7 @@ def tensor_network_fit_autodiff(
contract_optimize="auto-hq",
distance_method="auto",
normalized="squared",
output_inds=None,
xBB=None,
inplace=False,
progbar=False,
@@ -164,6 +179,9 @@
If ``'infidelity'``, compute the normalized infidelity
``1 - |<A|B>|^2 / (|A| |B|)``, which can be faster to optimize e.g.,
but does not take into account normalization.
output_inds : sequence of str, optional
Specify the output indices of `tnA` and `tnB` to contract over. This
can be necessary if either network has hyper indices.
xBB : float, optional
If you already know, have computed ``tn_target.H @ tn_target``, or
don't care about the overall scale of the norm distance, you can supply
@@ -183,9 +201,8 @@
from .tensor_core import tensor_network_distance

if xBB is None:
xBB = (tn_target | tn_target.H).contract(
output_inds=(),
optimize=contract_optimize,
xBB = tn_target.norm(
squared=True, output_inds=output_inds, optimize=contract_optimize
)

tnopt = TNOptimizer(
Expand All @@ -196,6 +213,7 @@ def tensor_network_fit_autodiff(
"method": distance_method,
"optimize": contract_optimize,
"normalized": normalized,
"output_inds": output_inds,
},
autodiff_backend=autodiff_backend,
progbar=progbar,
@@ -218,7 +236,7 @@ def vdot_broadcast(x, y):
return do("sum", x * do("conj", y), axis=0)


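Illustrative only (plain NumPy, not part of the diff): `vdot_broadcast` computes one inner product per column, conjugating its second argument.

import numpy as np

x = np.random.rand(4, 3) + 1j * np.random.rand(4, 3)
y = np.random.rand(4, 3) + 1j * np.random.rand(4, 3)
# equivalent to [np.vdot(y[:, j], x[:, j]) for j in range(3)]
out = np.sum(x * np.conj(y), axis=0)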
def conjugate_gradient(A, b, x0=None, tol=1e-10, maxiter=1000):
def conjugate_gradient(A, b, x0=None, tol=1e-5, maxiter=1000):
"""
Conjugate Gradient solver for complex matrices/linear operators.
@@ -273,7 +291,7 @@ def _tn_fit_als_core(
pos_smudge=1e-15,
progbar=False,
):
from .tensor_core import group_inds, TNLinearOperator
from .tensor_core import TNLinearOperator, group_inds

if solver == "auto":
# XXX: maybe make this depend on local tensor as well?
@@ -300,7 +318,7 @@
# should we form the dense operator explicitly?
if dense_solve == "auto":
# only for small enough tensors
dense_i = tk.size <= 2**10
dense_i = tk.size <= 512
else:
dense_i = dense_solve

@@ -367,12 +385,9 @@ def _tn_fit_als_core(
b = b_tn.to_dense(rix, bix)

if solver_i is None:
x0 = tk.to_dense(lix, bix)
x = conjugate_gradient(
A,
b,
x0=tk.to_dense(lix, bix),
tol=tol,
maxiter=solver_maxiter,
A, b, x0=x0, tol=tol, maxiter=solver_maxiter
)
elif enforce_pos or solver_i == "eigh":
el, V = do("linalg.eigh", A)
@@ -429,6 +444,7 @@ def tensor_network_fit_als(
tnAA=None,
tnAB=None,
xBB=None,
output_inds=None,
contract_optimize="auto-hq",
inplace=False,
progbar=False,
@@ -517,19 +533,25 @@

# form the norm of the varying TN (A) and its overlap with the target (B)
if tnAA is None or tnAB is None:
tn_fit_conj = tn_fit.conj().retag_({"__KET__": "__BRA__"})
tn_fit_conj = tn_fit.conj(mangle_inner=True, output_inds=output_inds)
tn_fit_conj.retag_({"__KET__": "__BRA__"})
else:
tn_fit_conj = None

if tnAA is None:
tnAA = tn_fit | tn_fit_conj
# <A|A>
tnAA = tn_fit.combine(
tn_fit_conj, virtual=True, check_collisions=False
)
if tnAB is None:
tnAB = tn_target | tn_fit_conj

# <A|B>
tnAB = tn_target.combine(
tn_fit_conj, virtual=True, check_collisions=False
)
if (tol != 0.0 or progbar) and (xBB is None):
# <B|B>
xBB = (tn_target | tn_target.H).contract(
optimize=contract_optimize,
output_inds=(),
xBB = tn_target.norm(
squared=True, output_inds=output_inds, optimize=contract_optimize
)

if pos_smudge is None:
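A usage sketch of fitting against a hyper target (illustrative, not part of the diff; assumes the `fit` method forwards `output_inds` and other options through to `tensor_network_fit_als`):

import quimb.tensor as qtn

target = qtn.HTN_rand(10, 3, n_out=2, n_hyper_out=1, seed=1)
# start from a randomized copy so all index sizes match the target
guess = target.copy()
guess.randomize_()
fitted = guess.fit(target, method="als", output_inds=["k0", "k1", "k2"])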
Expand Down Expand Up @@ -619,10 +641,7 @@ def tensor_network_fit_tree(
TensorNetwork
"""
if xBB is None:
xBB = (tn_target | tn_target.H).contract(
optimize=contract_optimize,
output_inds=(),
)
xBB = tn_target.norm(squared=True, optimize=contract_optimize)

tn_fit = tn.conj(inplace=inplace)
tnAB = tn_fit | tn_target
2 changes: 1 addition & 1 deletion quimb/tensor/optimize.py
@@ -462,7 +462,7 @@ class AutoGradHandler:
def __init__(self, device="cpu"):
if device != "cpu":
raise ValueError(
"`autograd` currently is only " "backed by cpu, numpy arrays."
"`autograd` currently is only backed by cpu, numpy arrays."
)

def to_variable(self, x):
75 changes: 74 additions & 1 deletion quimb/tensor/tensor_builder.py
@@ -18,10 +18,10 @@
)
from ..gen.rand import (
choice,
get_rand_fill_fn,
rand_phase,
randn,
random_seed_fn,
get_rand_fill_fn,
)
from ..utils import concat, deprecated, unique
from .array_ops import asarray, do, reshape, sensibly_scale
@@ -831,6 +831,79 @@ def TN_from_strings(
return tn


def HTN_rand(
n,
reg,
n_out=0,
n_hyper_in=0,
n_hyper_out=0,
d_min=2,
d_max=3,
seed=None,
dtype="float64",
dist="normal",
scale=1.0,
loc=0.0,
site_ind_id="k{}",
):
"""Create a random, possibly hyper, tensor network, with a mix of normal
and hyper inner and outer indices. Useful for testing edge cases.

Parameters
----------
n : int
The number of tensors.
reg : int
The average degree (number of dimensions per tensor) of the tensor
network (prior to placing extra indices).
n_out : int, optional
The number of normal outer indices to add (i.e. appear exactly once).
n_hyper_in : int, optional
The number of hyper inner indices to add (i.e. appear on three or more
tensors).
n_hyper_out : int, optional
The number of hyper outer indices to add (i.e. appear in output and
two or more tensors).
d_min : int, optional
The minimum size of any dimension.
d_max : int, optional
The maximum size of any dimension.
seed : int, optional
A random seed.

Returns
-------
TensorNetwork
"""
import cotengra as ctg

fill_fn = get_rand_fill_fn(
dtype=dtype, dist=dist, scale=scale, loc=loc, seed=seed
)

inputs, output, shapes, _ = ctg.utils.rand_equation(
n=n,
reg=reg,
n_out=n_out,
n_hyper_in=n_hyper_in,
n_hyper_out=n_hyper_out,
d_min=d_min,
d_max=d_max,
seed=seed,
)

# map output symbols to explicitly enumerated indices
outmap = {ix: site_ind_id.format(i) for i, ix in enumerate(output)}

return TensorNetwork(
Tensor(
data=fill_fn(shape),
inds=[outmap.get(ix, ix) for ix in term],
)
for term, shape in zip(inputs, shapes)
)

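An example of the new builder (illustrative, not part of the diff; requires `cotengra` to be installed):

import quimb.tensor as qtn

# 10 tensors of average degree 3, plus 2 normal outputs, 1 hyper inner
# and 1 hyper outer index -> output indices k0, k1, k2
htn = qtn.HTN_rand(10, 3, n_out=2, n_hyper_in=1, n_hyper_out=1, seed=42)
htn.draw()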

def HTN_CP_from_inds_and_fill_fn(
fill_fn,
inds,
Expand Down