From 11d1c3a14f63a0ca135092f1fe3c6a862137528b Mon Sep 17 00:00:00 2001 From: Johnnie Gray Date: Thu, 19 Oct 2023 17:46:44 -0700 Subject: [PATCH] add lazy 2-norm BP, various tree funcs --- .../belief_propagation/bp_common.py | 17 +- quimb/experimental/belief_propagation/d2bp.py | 4 +- quimb/experimental/belief_propagation/l2bp.py | 453 ++++++++++++++++++ quimb/tensor/__init__.py | 2 + quimb/tensor/geometry.py | 236 +++++---- quimb/tensor/tensor_arbgeom.py | 360 ++++++++------ quimb/tensor/tensor_builder.py | 103 ++-- .../test_belief_propagation/test_d2bp.py | 30 +- .../test_belief_propagation/test_l2bp.py | 81 ++++ 9 files changed, 971 insertions(+), 315 deletions(-) create mode 100644 quimb/experimental/belief_propagation/l2bp.py create mode 100644 tests/test_tensor/test_belief_propagation/test_l2bp.py diff --git a/quimb/experimental/belief_propagation/bp_common.py b/quimb/experimental/belief_propagation/bp_common.py index 661df33a..d9ab19b9 100644 --- a/quimb/experimental/belief_propagation/bp_common.py +++ b/quimb/experimental/belief_propagation/bp_common.py @@ -6,6 +6,11 @@ import quimb.tensor as qtn +def prod(xs): + """Product of all elements in ``xs``.""" + return functools.reduce(operator.mul, xs) + + class RollingDiffMean: """Tracker for the absolute rolling mean of diffs between values, to assess effective convergence of BP above actual message tolerance. @@ -59,11 +64,14 @@ def run(self, max_iterations=1000, tol=5e-6, progbar=False): rdm = RollingDiffMean() self.converged = False while not self.converged and it < max_iterations: - # can only converge if tol > 0.0 + # perform a single iteration of BP + # we supply tol here for use with local convergence nconv, ncheck, max_mdiff = self.iterate(tol=tol) + it += 1 + + # check rolling mean convergence rdm.update(max_mdiff) self.converged = (max_mdiff < tol) or (rdm.absmeandiff() < tol) - it += 1 if pbar is not None: pbar.set_description( @@ -85,11 +93,6 @@ def run(self, max_iterations=1000, tol=5e-6, progbar=False): ) -def prod(xs): - """Product of all elements in ``xs``.""" - return functools.reduce(operator.mul, xs) - - def initialize_hyper_messages(tn, fill_fn=None, smudge_factor=1e-12): """Initialize messages for belief propagation, this is equivalent to doing a single round of belief propagation with uniform messages. diff --git a/quimb/experimental/belief_propagation/d2bp.py b/quimb/experimental/belief_propagation/d2bp.py index 681d1870..d66a67b6 100644 --- a/quimb/experimental/belief_propagation/d2bp.py +++ b/quimb/experimental/belief_propagation/d2bp.py @@ -449,11 +449,11 @@ def compress_d2bp( Computed automatically if not specified. optimize : str or PathOptimizer, optional The path optimizer to use when contracting the messages. + damping : float, optional + The damping parameter to use, defaults to no damping. local_convergence : bool, optional Whether to allow messages to locally converge - i.e. if all their input messages have converged then stop updating them. - damping : float, optional - The damping parameter to use, defaults to no damping. inplace : bool, optional Whether to perform the compression inplace. 
     progbar : bool, optional
diff --git a/quimb/experimental/belief_propagation/l2bp.py b/quimb/experimental/belief_propagation/l2bp.py
new file mode 100644
index 00000000..201a3fc4
--- /dev/null
+++ b/quimb/experimental/belief_propagation/l2bp.py
@@ -0,0 +1,453 @@
+import math
+
+import autoray as ar
+
+import quimb.tensor as qtn
+from .bp_common import (
+    BeliefPropagationCommon,
+    create_lazy_community_edge_map,
+    combine_local_contractions,
+)
+
+
+class L2BP(BeliefPropagationCommon):
+    """A simple class to hold all the data for an L2BP run."""
+
+    def __init__(
+        self,
+        tn,
+        site_tags=None,
+        damping=0.0,
+        local_convergence=True,
+        optimize="auto-hq",
+        **contract_opts,
+    ):
+        self.backend = next(t.backend for t in tn)
+        self.damping = damping
+        self.local_convergence = local_convergence
+        self.optimize = optimize
+        self.contract_opts = contract_opts
+
+        if site_tags is None:
+            self.site_tags = tuple(tn.site_tags)
+        else:
+            self.site_tags = tuple(site_tags)
+
+        (
+            self.edges,
+            self.neighbors,
+            self.local_tns,
+            self.touch_map,
+        ) = create_lazy_community_edge_map(tn, site_tags)
+        self.touched = set()
+
+        _abs = ar.get_lib_fn(self.backend, "abs")
+        _sum = ar.get_lib_fn(self.backend, "sum")
+        _transpose = ar.get_lib_fn(self.backend, "transpose")
+        _conj = ar.get_lib_fn(self.backend, "conj")
+
+        def _normalize(x):
+            return x / _sum(x)
+
+        def _symmetrize(x):
+            N = ar.ndim(x)
+            perm = (*range(N // 2, N), *range(0, N // 2))
+            return x + _conj(_transpose(x, perm))
+
+        def _distance(x, y):
+            return _sum(_abs(x - y))
+
+        self._normalize = _normalize
+        self._symmetrize = _symmetrize
+        self._distance = _distance
+
+        # initialize messages
+        self.messages = {}
+
+        for pair, bix in self.edges.items():
+            cix = tuple(ix + "*" for ix in bix)
+            remapper = dict(zip(bix, cix))
+            output_inds = cix + bix
+
+            # compute leftwards and rightwards messages
+            for i, j in (sorted(pair), sorted(pair, reverse=True)):
+                tn_i = self.local_tns[i]
+                tn_i2 = tn_i & tn_i.conj().reindex_(remapper)
+                tm = tn_i2.contract(
+                    output_inds=output_inds,
+                    optimize=self.optimize,
+                    drop_tags=True,
+                    **self.contract_opts,
+                )
+                tm.modify(apply=self._symmetrize)
+                tm.modify(apply=self._normalize)
+                self.messages[i, j] = tm
+
+        # initialize contractions
+        self.contraction_tns = {}
+        for pair, bix in self.edges.items():
+            for i, j in (sorted(pair), sorted(pair, reverse=True)):
+                # form the ket side and messages
+                tn_i_left = self.local_tns[i]
+                # get other incident nodes which aren't j
+                ks = [k for k in self.neighbors[i] if k != j]
+                tks = [self.messages[k, i] for k in ks]
+
+                # form the 'bra' side
+                tn_i_right = tn_i_left.conj()
+                # get the bonds that attach the bra to messages
+                outer_bix = {
+                    ix for k in ks for ix in self.edges[tuple(sorted((k, i)))]
+                }
+                # need to reindex to join message bonds, and create bra outputs
+                remapper = {}
+                for ix in tn_i_right.ind_map:
+                    if ix in bix:
+                        # bra outputs
+                        remapper[ix] = ix + "**"
+                    elif ix in outer_bix:
+                        # messages connected
+                        remapper[ix] = ix + "*"
+                    # remaining indices are either internal and will be mangled
+                    # or global outer indices and will be contracted directly
+
+                tn_i_right.reindex_(remapper)
+
+                self.contraction_tns[i, j] = qtn.TensorNetwork(
+                    (tn_i_left, *tks, tn_i_right), virtual=True
+                )
+
+    def iterate(self, tol=5e-6):
+        if (not self.local_convergence) or (not self.touched):
+            # assume if asked to iterate that we want to check all messages
+            self.touched.update(
+                pair for edge in self.edges for pair in (edge, edge[::-1])
+            )
+
+        ncheck = len(self.touched)
+
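+        # recompute each touched message from its cached contraction
+        # network, which holds all incoming messages except the one
+        # flowing back towards the target region (the usual BP exclusion
+        # rule, here applied to the doubled 2-norm network)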
+        new_data = {}
+        while self.touched:
+            i, j = self.touched.pop()
+            bix = self.edges[(i, j) if i < j else (j, i)]
+            cix = tuple(ix + "**" for ix in bix)
+            output_inds = cix + bix
+
+            tn_i_to_j = self.contraction_tns[i, j]
+
+            tm_new = tn_i_to_j.contract(
+                output_inds=output_inds,
+                drop_tags=True,
+                optimize=self.optimize,
+                **self.contract_opts,
+            )
+            tm_new.modify(apply=self._symmetrize)
+            tm_new.modify(apply=self._normalize)
+            # defer setting the data to do a parallel update
+            new_data[i, j] = tm_new.data
+
+        nconv = 0
+        max_mdiff = -1.0
+        for key, data in new_data.items():
+            tm = self.messages[key]
+
+            if self.damping > 0.0:
+                data = (1 - self.damping) * data + self.damping * tm.data
+
+            mdiff = float(self._distance(tm.data, data))
+
+            if mdiff > tol:
+                # mark touching messages for update
+                self.touched.update(self.touch_map[key])
+            else:
+                nconv += 1
+
+            max_mdiff = max(max_mdiff, mdiff)
+            tm.modify(data=data)
+
+        return nconv, ncheck, max_mdiff
+
+    def contract(self, strip_exponent=False):
+        """Estimate the contraction of the norm squared using the current
+        messages.
+        """
+        tvals = []
+        for i, ket in self.local_tns.items():
+            # we allow missing keys here for tensors which are just
+            # disconnected but still appear in local_tns
+            ks = self.neighbors.get(i, ())
+            bix = [ix for k in ks for ix in self.edges[tuple(sorted((k, i)))]]
+            bra = ket.H.reindex_({ix: ix + "*" for ix in bix})
+            tni = qtn.TensorNetwork(
+                (
+                    ket,
+                    *(self.messages[k, i] for k in ks),
+                    bra,
+                )
+            )
+            tvals.append(
+                tni.contract(optimize=self.optimize, **self.contract_opts)
+            )
+
+        mvals = []
+        for i, j in self.edges:
+            mvals.append(
+                (self.messages[i, j] & self.messages[j, i]).contract(
+                    optimize=self.optimize,
+                    **self.contract_opts,
+                )
+            )
+
+        return combine_local_contractions(
+            tvals, mvals, self.backend, strip_exponent=strip_exponent
+        )
+
+    def partial_trace(
+        self,
+        site,
+        normalized=True,
+        optimize="auto-hq",
+    ):
+        example_tn = next(tn for tn in self.local_tns.values())
+
+        site_tag = example_tn.site_tag(site)
+        ket_site_ind = example_tn.site_ind(site)
+
+        ks = self.neighbors[site_tag]
+        tn_rho_i = self.local_tns[site_tag].copy()
+        tn_bra_i = tn_rho_i.H
+
+        for k in ks:
+            tn_rho_i &= self.messages[k, site_tag]
+
+        outer_bix = {
+            ix for k in ks for ix in self.edges[tuple(sorted((k, site_tag)))]
+        }
+
+        ind_changes = {}
+        for ix in tn_bra_i.ind_map:
+            if ix == ket_site_ind:
+                # open up the site index
+                bra_site_ind = ix + "**"
+                ind_changes[ix] = bra_site_ind
+            if ix in outer_bix:
+                # attach bra message indices
+                ind_changes[ix] = ix + "*"
+        tn_bra_i.reindex_(ind_changes)
+
+        tn_rho_i &= tn_bra_i
+
+        rho_i = tn_rho_i.to_dense(
+            [ket_site_ind],
+            [bra_site_ind],
+            optimize=optimize,
+            **self.contract_opts,
+        )
+        if normalized:
+            rho_i = rho_i / ar.do("trace", rho_i)
+
+        return rho_i
+
+    def compress(
+        self,
+        tn,
+        max_bond=None,
+        cutoff=5e-6,
+        cutoff_mode='rsum2',
+        renorm=0,
+        lazy=False,
+    ):
+        """Compress the state ``tn``, assumed to match this L2BP instance,
+        using the messages stored.
+ """ + for (i, j), bix in self.edges.items(): + tml = self.messages[i, j] + tmr = self.messages[j, i] + + bix_sizes = [tml.ind_size(ix) for ix in bix] + dm = math.prod(bix_sizes) + + ml = ar.reshape(tml.data, (dm, dm)) + dl = self.local_tns[i].outer_size() // dm + Rl = qtn.decomp.squared_op_to_reduced_factor( + ml, dl, dm, right=True + ) + + mr = ar.reshape(tmr.data, (dm, dm)).T + dr = self.local_tns[j].outer_size() // dm + Rr = qtn.decomp.squared_op_to_reduced_factor( + mr, dm, dr, right=False + ) + + Pl, Pr = qtn.decomp.compute_oblique_projectors( + Rl, + Rr, + cutoff_mode=cutoff_mode, + renorm=renorm, + max_bond=max_bond, + cutoff=cutoff, + ) + + Pl = ar.do("reshape", Pl, (*bix_sizes, -1)) + Pr = ar.do("reshape", Pr, (-1, *bix_sizes)) + + ltn = tn.select(i) + rtn = tn.select(j) + + new_lix = [qtn.rand_uuid() for _ in bix] + new_rix = [qtn.rand_uuid() for _ in bix] + new_bix = [qtn.rand_uuid()] + ltn.reindex_(dict(zip(bix, new_lix))) + rtn.reindex_(dict(zip(bix, new_rix))) + + # ... and insert the new projectors in place + tn |= qtn.Tensor(Pl, inds=new_lix + new_bix, tags=(i,)) + tn |= qtn.Tensor(Pr, inds=new_bix + new_rix, tags=(j,)) + + if not lazy: + for st in self.site_tags: + try: + tn.contract_tags_( + st, optimize=self.optimize, **self.contract_opts + ) + except KeyError: + pass + + return tn + + +def contract_l2bp( + tn, + site_tags=None, + damping=0.0, + local_convergence=True, + optimize="auto-hq", + max_iterations=1000, + tol=5e-6, + strip_exponent=False, + progbar=False, + **contract_opts, +): + """Estimate the norm squared of ``tn`` using lazy belief propagation. + + Parameters + ---------- + tn : TensorNetwork + The tensor network to estimate the norm squared of. + site_tags : sequence of str, optional + The tags identifying the sites in ``tn``, each tag forms a region. + damping : float, optional + The damping parameter to use, defaults to no damping. + local_convergence : bool, optional + Whether to allow messages to locally converge - i.e. if all their + input messages have converged then stop updating them. + optimize : str or PathOptimizer, optional + The contraction strategy to use. + max_iterations : int, optional + The maximum number of iterations to perform. + tol : float, optional + The convergence tolerance for messages. + strip_exponent : bool, optional + Whether to strip the exponent from the final result. If ``True`` + then the returned result is ``(mantissa, exponent)``. + progbar : bool, optional + Whether to show a progress bar. + contract_opts + Other options supplied to ``cotengra.array_contract``. + """ + bp = L2BP( + tn, + site_tags=site_tags, + damping=damping, + local_convergence=local_convergence, + optimize=optimize, + **contract_opts, + ) + bp.run( + max_iterations=max_iterations, + tol=tol, + progbar=progbar, + ) + return bp.contract(strip_exponent=strip_exponent) + + +def compress_l2bp( + tn, + max_bond, + cutoff=0.0, + cutoff_mode="rsum2", + max_iterations=1000, + tol=5e-6, + site_tags=None, + damping=0.0, + local_convergence=True, + optimize="auto-hq", + lazy=False, + inplace=False, + progbar=False, + **contract_opts, +): + """Compress ``tn`` using lazy belief propagation, producing a tensor + network with a single tensor per site. + + Parameters + ---------- + tn : TensorNetwork + The tensor network to form the 2-norm of, run BP on and then compress. + max_bond : int + The maximum bond dimension to compress to. + cutoff : float, optional + The cutoff to use when compressing. 
+ cutoff_mode : int, optional + The cutoff mode to use when compressing. + max_iterations : int, optional + The maximum number of iterations to perform. + tol : float, optional + The convergence tolerance for messages. + site_tags : sequence of str, optional + The tags identifying the sites in ``tn``, each tag forms a region. If + the tensor network is structured, then these are inferred + automatically. + damping : float, optional + The damping parameter to use, defaults to no damping. + local_convergence : bool, optional + Whether to allow messages to locally converge - i.e. if all their + input messages have converged then stop updating them. + optimize : str or PathOptimizer, optional + The path optimizer to use when contracting the messages. + lazy : bool, optional + Whether to perform the compression lazily, i.e. to leave the computed + compression projectors uncontracted. + inplace : bool, optional + Whether to perform the compression inplace. + progbar : bool, optional + Whether to show a progress bar. + contract_opts + Other options supplied to ``cotengra.array_contract``. + + Returns + ------- + TensorNetwork + """ + tnc = tn if inplace else tn.copy() + + bp = L2BP( + tnc, + site_tags=site_tags, + damping=damping, + local_convergence=local_convergence, + optimize=optimize, + **contract_opts, + ) + bp.run( + max_iterations=max_iterations, + tol=tol, + progbar=progbar, + ) + return bp.compress( + tnc, + max_bond=max_bond, + cutoff=cutoff, + cutoff_mode=cutoff_mode, + lazy=lazy, + ) diff --git a/quimb/tensor/__init__.py b/quimb/tensor/__init__.py index 6b14b820..041badf0 100644 --- a/quimb/tensor/__init__.py +++ b/quimb/tensor/__init__.py @@ -203,6 +203,7 @@ edges_3d_diamond_cubic, edges_3d_diamond, edges_3d_pyrochlore, + edges_tree_rand, ) @@ -238,6 +239,7 @@ "edges_3d_diamond_cubic", "edges_3d_diamond", "edges_3d_pyrochlore", + "edges_tree_rand", "expec_TN_1D", "FullUpdate", "gate_TN_1D", diff --git a/quimb/tensor/geometry.py b/quimb/tensor/geometry.py index 2f778c4e..97222b5a 100644 --- a/quimb/tensor/geometry.py +++ b/quimb/tensor/geometry.py @@ -2,23 +2,23 @@ """ import itertools +import random def sort_unique(edges): """Make sure there are no duplicate edges and that for each ``coo_a < coo_b``. """ - return tuple(sorted( - tuple(sorted(edge)) - for edge in set(map(frozenset, edges)) - )) + return tuple( + sorted(tuple(sorted(edge)) for edge in set(map(frozenset, edges))) + ) # ----------------------------------- 2D ------------------------------------ # + def check_2d(coo, Lx, Ly, cyclic): - """Check ``coo`` in inbounds for a maybe cyclic 2D lattice. 
- """ + """Check ``coo`` in inbounds for a maybe cyclic 2D lattice.""" x, y = coo if (not cyclic) and not ((0 <= x < Lx) and (0 <= y < Ly)): return @@ -85,22 +85,22 @@ def edges_2d_hexagonal(Lx, Ly, cyclic=False, cells=None): edges = [] for i, j in cells: for *coob, lbl in [ - (i, j, 'B'), - (i, j - 1, 'B'), - (i - 1, j, 'B'), + (i, j, "B"), + (i, j - 1, "B"), + (i - 1, j, "B"), ]: coob = check_2d(coob, Lx, Ly, cyclic) if coob: - edges.append(((i, j, 'A'), (*coob, lbl))) + edges.append(((i, j, "A"), (*coob, lbl))) for *coob, lbl in [ - (i, j, 'A'), - (i, j + 1, 'A'), - (i + 1, j, 'A'), + (i, j, "A"), + (i, j + 1, "A"), + (i + 1, j, "A"), ]: coob = check_2d(coob, Lx, Ly, cyclic) if coob: - edges.append(((i, j, 'B'), (*coob, lbl))) + edges.append(((i, j, "B"), (*coob, lbl))) return sort_unique(edges) @@ -168,22 +168,22 @@ def edges_2d_triangular_rectangular(Lx, Ly, cyclic=False, cells=None): edges = [] for i, j in cells: for *coob, lbl in [ - (i, j, 'B'), - (i, j - 1, 'B'), - (i, j + 1, 'A'), + (i, j, "B"), + (i, j - 1, "B"), + (i, j + 1, "A"), ]: coob = check_2d(coob, Lx, Ly, cyclic) if coob: - edges.append(((i, j, 'A'), (*coob, lbl))) + edges.append(((i, j, "A"), (*coob, lbl))) for *coob, lbl in [ - (i + 1, j, 'A'), - (i, j + 1, 'B'), - (i + 1, j + 1, 'A'), + (i + 1, j, "A"), + (i, j + 1, "B"), + (i + 1, j + 1, "A"), ]: coob = check_2d(coob, Lx, Ly, cyclic) if coob: - edges.append(((i, j, 'B'), (*coob, lbl))) + edges.append(((i, j, "B"), (*coob, lbl))) return sort_unique(edges) @@ -215,43 +215,43 @@ def edges_2d_kagome(Lx, Ly, cyclic=False, cells=None): edges = [] for i, j in cells: for *coob, lbl in [ - (i, j, 'B'), - (i, j - 1, 'B'), - (i, j, 'C'), - (i - 1, j, 'C') + (i, j, "B"), + (i, j - 1, "B"), + (i, j, "C"), + (i - 1, j, "C"), ]: coob = check_2d(coob, Lx, Ly, cyclic) if coob: - edges.append(((i, j, 'A'), (*coob, lbl))) + edges.append(((i, j, "A"), (*coob, lbl))) for *coob, lbl in [ - (i, j, 'C'), - (i - 1, j + 1, 'C'), - (i, j, 'A'), - (i, j + 1, 'A') + (i, j, "C"), + (i - 1, j + 1, "C"), + (i, j, "A"), + (i, j + 1, "A"), ]: coob = check_2d(coob, Lx, Ly, cyclic) if coob: - edges.append(((i, j, 'B'), (*coob, lbl))) + edges.append(((i, j, "B"), (*coob, lbl))) for *coob, lbl in [ - (i, j, 'A'), - (i + 1, j, 'A'), - (i, j, 'B'), - (i + 1, j - 1, 'B') + (i, j, "A"), + (i + 1, j, "A"), + (i, j, "B"), + (i + 1, j - 1, "B"), ]: coob = check_2d(coob, Lx, Ly, cyclic) if coob: - edges.append(((i, j, 'C'), (*coob, lbl))) + edges.append(((i, j, "C"), (*coob, lbl))) return sort_unique(edges) # ----------------------------------- 3D ------------------------------------ # + def check_3d(coo, Lx, Ly, Lz, cyclic): - """Check ``coo`` in inbounds for a maybe cyclic 3D lattice. 
- """ + """Check ``coo`` in inbounds for a maybe cyclic 3D lattice.""" x, y, z = coo OBC = not cyclic inbounds = (0 <= x < Lx) and (0 <= y < Ly) and (0 <= z < Lz) @@ -324,52 +324,52 @@ def edges_3d_pyrochlore(Lx, Ly, Lz, cyclic=False, cells=None): edges = [] for i, j, k in cells: for *coob, lbl in [ - (i, j, k, 'B'), - (i, j - 1, k, 'B'), - (i, j, k, 'C'), - (i - 1, j, k, 'C'), - (i, j, k, 'D'), - (i, j, k - 1, 'D'), + (i, j, k, "B"), + (i, j - 1, k, "B"), + (i, j, k, "C"), + (i - 1, j, k, "C"), + (i, j, k, "D"), + (i, j, k - 1, "D"), ]: coob = check_3d(coob, Lx, Ly, Lz, cyclic) if coob: - edges.append(((i, j, k, 'A'), (*coob, lbl))) + edges.append(((i, j, k, "A"), (*coob, lbl))) for *coob, lbl in [ - (i, j, k, 'C'), - (i - 1, j + 1, k, 'C'), - (i, j, k, 'D'), - (i, j + 1, k - 1, 'D'), - (i, j, k, 'A'), - (i, j + 1, k, 'A'), + (i, j, k, "C"), + (i - 1, j + 1, k, "C"), + (i, j, k, "D"), + (i, j + 1, k - 1, "D"), + (i, j, k, "A"), + (i, j + 1, k, "A"), ]: coob = check_3d(coob, Lx, Ly, Lz, cyclic) if coob: - edges.append(((i, j, k, 'B'), (*coob, lbl))) + edges.append(((i, j, k, "B"), (*coob, lbl))) for *coob, lbl in [ - (i, j, k, 'D'), - (i + 1, j, k - 1, 'D'), - (i, j, k, 'A'), - (i + 1, j, k, 'A'), - (i, j, k, 'B'), - (i + 1, j - 1, k, 'B'), + (i, j, k, "D"), + (i + 1, j, k - 1, "D"), + (i, j, k, "A"), + (i + 1, j, k, "A"), + (i, j, k, "B"), + (i + 1, j - 1, k, "B"), ]: coob = check_3d(coob, Lx, Ly, Lz, cyclic) if coob: - edges.append(((i, j, k, 'C'), (*coob, lbl))) + edges.append(((i, j, k, "C"), (*coob, lbl))) for *coob, lbl in [ - (i, j, k, 'A'), - (i, j, k + 1, 'A'), - (i, j, k, 'B'), - (i, j - 1, k + 1, 'B'), - (i, j, k, 'C'), - (i - 1, j, k + 1, 'C'), + (i, j, k, "A"), + (i, j, k + 1, "A"), + (i, j, k, "B"), + (i, j - 1, k + 1, "B"), + (i, j, k, "C"), + (i - 1, j, k + 1, "C"), ]: coob = check_3d(coob, Lx, Ly, Lz, cyclic) if coob: - edges.append(((i, j, k, 'D'), (*coob, lbl))) + edges.append(((i, j, k, "D"), (*coob, lbl))) return sort_unique(edges) @@ -403,20 +403,20 @@ def edges_3d_diamond(Lx, Ly, Lz, cyclic=False, cells=None): edges = [] for i, j, k in cells: for *coob, lbl in [ - (i, j, k, 'B'), + (i, j, k, "B"), ]: coob = check_3d(coob, Lx, Ly, Lz, cyclic) if coob: - edges.append(((i, j, k, 'A'), (*coob, lbl))) + edges.append(((i, j, k, "A"), (*coob, lbl))) for *coob, lbl in [ - (i, j, k + 1, 'A'), - (i, j + 1, k, 'A'), - (i + 1, j, k, 'A'), + (i, j, k + 1, "A"), + (i, j + 1, k, "A"), + (i + 1, j, k, "A"), ]: coob = check_3d(coob, Lx, Ly, Lz, cyclic) if coob: - edges.append(((i, j, k, 'B'), (*coob, lbl))) + edges.append(((i, j, k, "B"), (*coob, lbl))) return sort_unique(edges) @@ -451,65 +451,109 @@ def edges_3d_diamond_cubic(Lx, Ly, Lz, cyclic=False, cells=None): edges = [] for i, j, k in cells: for *coob, lbl in [ - (i, j, k, 'E'), + (i, j, k, "E"), ]: coob = check_3d(coob, Lx, Ly, Lz, cyclic) if coob: - edges.append(((i, j, k, 'A'), (*coob, lbl))) + edges.append(((i, j, k, "A"), (*coob, lbl))) for *coob, lbl in [ - (i, j, k, 'E'), - (i, j, k, 'F'), + (i, j, k, "E"), + (i, j, k, "F"), ]: coob = check_3d(coob, Lx, Ly, Lz, cyclic) if coob: - edges.append(((i, j, k, 'B'), (*coob, lbl))) + edges.append(((i, j, k, "B"), (*coob, lbl))) for *coob, lbl in [ - (i, j, k, 'E'), - (i, j, k, 'G'), + (i, j, k, "E"), + (i, j, k, "G"), ]: coob = check_3d(coob, Lx, Ly, Lz, cyclic) if coob: - edges.append(((i, j, k, 'C'), (*coob, lbl))) + edges.append(((i, j, k, "C"), (*coob, lbl))) for *coob, lbl in [ - (i, j, k, 'E'), - (i, j, k, 'H'), + (i, j, k, "E"), + (i, j, k, "H"), ]: coob = check_3d(coob, 
Lx, Ly, Lz, cyclic)
             if coob:
-                edges.append(((i, j, k, 'D'), (*coob, lbl)))
+                edges.append(((i, j, k, "D"), (*coob, lbl)))
 
         for *coob, lbl in []:
             coob = check_3d(coob, Lx, Ly, Lz, cyclic)
             if coob:
-                edges.append(((i, j, k, 'E'), (*coob, lbl)))
+                edges.append(((i, j, k, "E"), (*coob, lbl)))
 
         for *coob, lbl in [
-            (i, j + 1, k, 'C'),
-            (i + 1, j, k, 'D'),
+            (i, j + 1, k, "C"),
+            (i + 1, j, k, "D"),
         ]:
             coob = check_3d(coob, Lx, Ly, Lz, cyclic)
             if coob:
-                edges.append(((i, j, k, 'F'), (*coob, lbl)))
+                edges.append(((i, j, k, "F"), (*coob, lbl)))
 
         for *coob, lbl in [
-            (i + 1, j, k + 1, 'A'),
-            (i, j, k + 1, 'B'),
-            (i + 1, j, k, 'D'),
+            (i + 1, j, k + 1, "A"),
+            (i, j, k + 1, "B"),
+            (i + 1, j, k, "D"),
        ]:
             coob = check_3d(coob, Lx, Ly, Lz, cyclic)
             if coob:
-                edges.append(((i, j, k, 'G'), (*coob, lbl)))
+                edges.append(((i, j, k, "G"), (*coob, lbl)))
 
         for *coob, lbl in [
-            (i, j + 1, k + 1, 'A'),
-            (i, j, k + 1, 'B'),
-            (i, j + 1, k, 'C'),
+            (i, j + 1, k + 1, "A"),
+            (i, j, k + 1, "B"),
+            (i, j + 1, k, "C"),
         ]:
             coob = check_3d(coob, Lx, Ly, Lz, cyclic)
             if coob:
-                edges.append(((i, j, k, 'H'), (*coob, lbl)))
+                edges.append(((i, j, k, "H"), (*coob, lbl)))
 
     return sort_unique(edges)
+
+
+def edges_tree_rand(n, max_degree=None, seed=None):
+    """Return a random tree with ``n`` nodes. This is a convenience function
+    for testing purposes and the trees generated are not guaranteed to be
+    uniformly random (for that see ``networkx.random_tree``).
+
+    Parameters
+    ----------
+    n : int
+        The number of nodes.
+    max_degree : int, optional
+        The maximum degree of the nodes. For example ``max_degree=3`` means
+        generate a binary tree.
+    seed : int, optional
+        The random seed.
+
+    Returns
+    -------
+    edges : list[(int, int)]
+    """
+    rng = random.Random(seed)
+    edges = []
+
+    if max_degree is None:
+        nodes = [0]
+        for i in range(1, n):
+            ib = rng.choice(nodes)
+            nodes.append(i)
+            edges.append((ib, i))
+    else:
+        degrees = {0: 0}
+        for i in range(1, n):
+            ib = rng.choice(list(degrees))
+            edges.append((ib, i))
+
+            degrees[i] = 1
+            if degrees[ib] + 1 == max_degree:
+                # node is finished
+                degrees.pop(ib)
+            else:
+                degrees[ib] += 1
+
+    return edges
diff --git a/quimb/tensor/tensor_arbgeom.py b/quimb/tensor/tensor_arbgeom.py
index 81963834..43c53a60 100644
--- a/quimb/tensor/tensor_arbgeom.py
+++ b/quimb/tensor/tensor_arbgeom.py
@@ -2,19 +2,22 @@
 """
 import functools
+import contextlib
 from operator import add
 
-from autoray import do, dag
+from autoray import do, dag, size
 
 from ..utils import check_opt, deprecated, ensure_dict
 from ..utils import progbar as Progbar
 from .tensor_core import (
-    TensorNetwork,
     get_symbol,
-    rand_uuid,
+    group_inds,
     oset,
+    rand_uuid,
     tags_to_oset,
+    TensorNetwork,
 )
+from . import decomp
 
 
 def get_coordinate_formatter(ndims):
@@ -76,8 +79,10 @@ def tensor_network_align(*tns, ind_ids=None, trace=False, inplace=False):
         elif i == n - 1:
             tn.site_ind_id = ind_ids[i - 1]
         else:
-            raise ValueError("An TN 'vector' can only be aligned as the "
-                             "first or last TN in a sequence.")
+            raise ValueError(
+                "A TN 'vector' can only be aligned as the "
+                "first or last TN in a sequence."
+ ) elif hasattr(tn, "upper_ind_id") and hasattr(tn, "lower_ind_id"): if i != 0: @@ -95,13 +100,10 @@ def tensor_network_align(*tns, ind_ids=None, trace=False, inplace=False): def tensor_network_apply_op_vec( - tn_op, - tn_vec, - compress=False, - **compress_opts + tn_op, tn_vec, contract=True, compress=False, **compress_opts ): """Apply a general a general tensor network representing an operator (has - ``up_ind_id`` and ``lower_ind_id``) to a tensor network representing a + ``upper_ind_id`` and ``lower_ind_id``) to a tensor network representing a vector (has ``site_ind_id``), by contracting each pair of tensors at each site then compressing the resulting tensor network. How the compression takes place is determined by the type of tensor network passed in. The @@ -114,8 +116,12 @@ def tensor_network_apply_op_vec( The tensor network representing the operator. tn_vec : TensorNetwork The tensor network representing the vector. + contract : bool + Whether to contract the tensors at each site after applying the + operator, yielding a single tensor at each site. compress : bool - Whether to compress the resulting tensor network. + Whether to compress the resulting tensor network, if ``contract`` is + ``True``. compress_opts Options to pass to ``tn_vec.compress``. @@ -133,16 +139,18 @@ def tensor_network_apply_op_vec( # form total network and contract each site x |= A - for site in x.gen_sites_present(): - x ^= site - x.fuse_multibonds_() - # optionally compress - if compress: - x.compress(**compress_opts) + if contract: + for site in x.gen_sites_present(): + x ^= site - return x + x.fuse_multibonds_() + + # optionally compress + if compress: + x.compress(**compress_opts) + return x class TensorNetworkGen(TensorNetwork): @@ -196,27 +204,23 @@ def combine(self, other, *, virtual=False, check_collisions=True): @property def nsites(self): - """The total number of sites. - """ + """The total number of sites.""" return len(self._sites) def gen_site_coos(self): - """Generate the coordinates of all sites, same as ``self.sites``. - """ + """Generate the coordinates of all sites, same as ``self.sites``.""" return self._sites @property def sites(self): - """Tuple of the possible sites in this tensor network. - """ + """Tuple of the possible sites in this tensor network.""" sites = getattr(self, "_sites", None) if sites is None: sites = tuple(self.gen_site_coos()) return sites def _get_site_set(self): - """The set of all sites. - """ + """The set of all sites.""" if getattr(self, "_site_set", None) is None: self._site_set = set(self.sites) return self._site_set @@ -235,19 +239,18 @@ def gen_sites_present(self): """ return ( - site for site in self.gen_site_coos() + site + for site in self.gen_site_coos() if self.site_tag(site) in self.tag_map ) @property def site_tag_id(self): - """The string specifier for tagging each site of this tensor network. - """ + """The string specifier for tagging each site of this tensor network.""" return self._site_tag_id def site_tag(self, site): - """The name of the tag specifiying the tensor at ``site``. - """ + """The name of the tag specifiying the tensor at ``site``.""" return self.site_tag_id.format(site) def retag_sites(self, new_id, where=None, inplace=False): @@ -274,16 +277,14 @@ def retag_sites(self, new_id, where=None, inplace=False): @property def site_tags(self): - """All of the site tags. 
- """ + """All of the site tags.""" if getattr(self, "_site_tags", None) is None: self._site_tags = tuple(map(self.site_tag, self.gen_site_coos())) return self._site_tags @property def site_tags_present(self): - """All of the site tags still present in this tensor network. - """ + """All of the site tags still present in this tensor network.""" return tuple(map(self.site_tag, self.gen_sites_present())) @site_tag_id.setter @@ -294,8 +295,7 @@ def site_tag_id(self, new_id): self._site_tags = None def retag_all(self, new_id, inplace=False): - """Retag all sites and change the ``site_tag_id``. - """ + """Retag all sites and change the ``site_tag_id``.""" tn = self if inplace else self.copy() tn.site_tag_id = new_id return tn @@ -303,15 +303,13 @@ def retag_all(self, new_id, inplace=False): retag_all_ = functools.partialmethod(retag_all, inplace=True) def _get_site_tag_set(self): - """The oset of all site tags. - """ + """The oset of all site tags.""" if getattr(self, "_site_tag_set", None) is None: self._site_tag_set = set(self.site_tags) return self._site_tag_set def filter_valid_site_tags(self, tags): - """Get the valid site tags from ``tags``. - """ + """Get the valid site tags from ``tags``.""" return oset(sorted(self._get_site_tag_set().intersection(tags))) def maybe_convert_coo(self, x): @@ -326,8 +324,7 @@ def maybe_convert_coo(self, x): return x def gen_tags_from_coos(self, coos): - """Generate the site tags corresponding to the given coordinates. - """ + """Generate the site tags corresponding to the given coordinates.""" return map(self.site_tag, coos) def _get_tids_from_tags(self, tags, which="all"): @@ -420,8 +417,8 @@ def gauge_product_boundary_vector( (ix,) = [i for i in t.inds if i in region_inds] _, s, VH = do("linalg.svd", t.data) s = s + smudge - G = do("reshape", s ** 0.5, (-1, 1)) * VH - Ginv = dag(VH) * do("reshape", s ** -0.5, (1, -1)) + G = do("reshape", s**0.5, (-1, 1)) * VH + Ginv = dag(VH) * do("reshape", s**-0.5, (1, -1)) tid_l, tid_r = sorted(tn.ind_map[ix], key=lambda tid: tid in tids) tn.tensor_map[tid_l].gate_(Ginv.T, ix) @@ -430,12 +427,12 @@ def gauge_product_boundary_vector( return tn -_VALID_GATE_PROPAGATE = {'sites', 'register', False, True} +_VALID_GATE_PROPAGATE = {"sites", "register", False, True} _LAZY_GATE_CONTRACT = { False, - 'split-gate', - 'swap-split-gate', - 'auto-split-gate', + "split-gate", + "swap-split-gate", + "auto-split-gate", } @@ -453,8 +450,7 @@ class TensorNetworkGenVector(TensorNetworkGen): @property def site_ind_id(self): - """The string specifier for the physical indices. - """ + """The string specifier for the physical indices.""" return self._site_ind_id def site_ind(self, site): @@ -462,16 +458,14 @@ def site_ind(self, site): @property def site_inds(self): - """Return a tuple of all site indices. - """ + """Return a tuple of all site indices.""" if getattr(self, "_site_inds", None) is None: self._site_inds = tuple(map(self.site_ind, self.gen_site_coos())) return self._site_inds @property def site_inds_present(self): - """All of the site inds still present in this tensor network. 
- """ + """All of the site inds still present in this tensor network.""" return tuple(map(self.site_ind, self.gen_sites_present())) def reset_cached_properties(self): @@ -503,6 +497,8 @@ def reindex_sites(self, new_id, where=None, inplace=False): inplace=inplace, ) + reindex_sites_ = functools.partialmethod(reindex_sites, inplace=True) + @site_ind_id.setter def site_ind_id(self, new_id): if self._site_ind_id != new_id: @@ -511,8 +507,7 @@ def site_ind_id(self, new_id): self._site_inds = None def reindex_all(self, new_id, inplace=False): - """Reindex all physical sites and change the ``site_ind_id``. - """ + """Reindex all physical sites and change the ``site_ind_id``.""" tn = self if inplace else self.copy() tn.site_ind_id = new_id return tn @@ -520,8 +515,7 @@ def reindex_all(self, new_id, inplace=False): reindex_all_ = functools.partialmethod(reindex_all, inplace=True) def gen_inds_from_coos(self, coos): - """Generate the site inds corresponding to the given coordinates. - """ + """Generate the site inds corresponding to the given coordinates.""" return map(self.site_ind, coos) def phys_dim(self, site=None): @@ -533,11 +527,7 @@ def phys_dim(self, site=None): return self.ind_size(self.site_ind(site)) def to_dense( - self, - *inds_seq, - to_qarray=False, - to_ket=None, - **contract_opts + self, *inds_seq, to_qarray=False, to_ket=None, **contract_opts ): """Contract this tensor network 'vector' into a dense array. By default, turn into a 'ket' ``qarray``, i.e. column vector of shape @@ -570,9 +560,7 @@ def to_dense( to_ket = True x = TensorNetwork.to_dense( - self, *inds_seq, - to_qarray=to_qarray, - **contract_opts + self, *inds_seq, to_qarray=to_qarray, **contract_opts ) if to_ket: @@ -583,7 +571,9 @@ def to_dense( to_qarray = functools.partialmethod(to_dense, to_qarray=True) def gate( - self, G, where, + self, + G, + where, contract=False, tags=None, propagate_tags=False, @@ -642,7 +632,7 @@ def gate( -------- TensorNetwork.gate_inds """ - check_opt('propagate_tags', propagate_tags, _VALID_GATE_PROPAGATE) + check_opt("propagate_tags", propagate_tags, _VALID_GATE_PROPAGATE) tn = self if inplace else self.copy() @@ -652,14 +642,11 @@ def gate( # potentially add tags from current tensors to the new ones, # only do this if we are lazily adding the gate tensor(s) - if ( - (contract in _LAZY_GATE_CONTRACT) and - (propagate_tags in (True, 'sites')) + if (contract in _LAZY_GATE_CONTRACT) and ( + propagate_tags in (True, "sites") ): - old_tags = oset.union( - *(t.tags for t in tn._inds_get(*inds)) - ) - if propagate_tags == 'sites': + old_tags = oset.union(*(t.tags for t in tn._inds_get(*inds))) + if propagate_tags == "sites": old_tags = tn.filter_valid_site_tags(old_tags) tags = tags_to_oset(tags) @@ -671,9 +658,9 @@ def gate( ) # possibly add tags based on where the gate was applied - if propagate_tags == 'register': + if propagate_tags == "register": for ix, site in zip(inds, where): - t, = tn._inds_get(ix) + (t,) = tn._inds_get(ix) t.add_tag(tn.site_tag(site)) return tn @@ -700,6 +687,13 @@ def gate_simple_(self, G, where, gauges, renorm=True, **gate_opts): Whether to renormalise the singular after the gate is applied, before reinserting them into ``gauges``. 
""" + if isinstance(where, int): + where = (where,) + + if len(where) == 1: + # single site gate + return self.gate_(G, where, contract=True) + gate_opts.setdefault("absorb", None) gate_opts.setdefault("contract", "reduce-split") @@ -729,7 +723,7 @@ def gate_fit_local_( ): # select a local neighborhood of tensors tids = self._get_tids_from_tags( - tuple(map(self.site_tag, where)), 'any' + tuple(map(self.site_tag, where)), "any" ) if len(tids) == 2: tids = self._get_string_between_tids(*tids) @@ -832,7 +826,7 @@ def local_expectation_cluster( """ # select a local neighborhood of tensors tids = self._get_tids_from_tags( - tuple(map(self.site_tag, where)), 'any' + tuple(map(self.site_tag, where)), "any" ) if len(tids) == 2: @@ -857,7 +851,7 @@ def local_expectation_cluster( optimize=optimize, normalized=normalized, rehearse=rehearse, - **contract_opts + **contract_opts, ) return k.local_expectation_exact( @@ -866,13 +860,13 @@ def local_expectation_cluster( optimize=optimize, normalized=normalized, rehearse=rehearse, - **contract_opts + **contract_opts, ) local_expectation_simple = deprecated( local_expectation_cluster, - 'local_expectation_simple', - 'local_expectation_cluster', + "local_expectation_simple", + "local_expectation_cluster", ) def compute_local_expectation_cluster( @@ -988,8 +982,8 @@ def compute_local_expectation_cluster( compute_local_expectation_simple = deprecated( compute_local_expectation_cluster, - 'compute_local_expectation_simple', - 'compute_local_expectation_cluster', + "compute_local_expectation_simple", + "compute_local_expectation_cluster", ) def local_expectation_exact( @@ -1010,14 +1004,16 @@ def local_expectation_exact( tn = (b | self) if rehearse: - if rehearse == 'tn': + if rehearse == "tn": return tn - if rehearse == 'tree': + if rehearse == "tree": return tn.contraction_tree( - optimize, output_inds=k_inds + b_inds) + optimize, output_inds=k_inds + b_inds + ) if rehearse: return tn.contraction_info( - optimize, output_inds=k_inds + b_inds) + optimize, output_inds=k_inds + b_inds + ) rho = tn.to_dense(k_inds, b_inds, optimize=optimize, **contract_opts) expec = do("tensordot", rho, G, axes=((0, 1), (1, 0))) @@ -1100,9 +1096,9 @@ def partial_trace( flatten=True, reduce=False, normalized=True, - symmetrized='auto', + symmetrized="auto", rehearse=False, - method='contract_compressed', + method="contract_compressed", **contract_compressed_opts, ): """Partially trace this tensor network state, keeping only the sites in @@ -1149,7 +1145,7 @@ def partial_trace( rho : array_like The reduce density matrix of sites in ``keep``. """ - if symmetrized == 'auto': + if symmetrized == "auto": symmetrized = not flatten # form the partial trace @@ -1157,7 +1153,7 @@ def partial_trace( k = self.copy() if reduce: - k.reduce_inds_onto_bond(*k_inds, tags='__BOND__', drop_tags=True) + k.reduce_inds_onto_bond(*k_inds, tags="__BOND__", drop_tags=True) b_inds = tuple(map("_bra{}".format, keep)) b = k.conj().reindex_(dict(zip(k_inds, b_inds))) @@ -1167,7 +1163,7 @@ def partial_trace( if flatten: for site in self.gen_site_coos(): - if (site not in keep) or (flatten == 'all'): + if (site not in keep) or (flatten == "all"): # check if site exists still to permit e.g. 
local methods # to use this same logic tag = tn.site_tag(site) @@ -1176,21 +1172,22 @@ def partial_trace( tn.fuse_multibonds_() - if method == 'contract_compressed': - + if method == "contract_compressed": if reduce: output_inds = None - tn, tn_reduced = tn.partition('__BOND__', inplace=True) + tn, tn_reduced = tn.partition("__BOND__", inplace=True) if rehearse: - if rehearse == 'tn': + if rehearse == "tn": return tn - if rehearse == 'tree': + if rehearse == "tree": return tn.contraction_tree( - optimize, output_inds=output_inds) + optimize, output_inds=output_inds + ) if rehearse: return tn.contraction_info( - optimize, output_inds=output_inds) + optimize, output_inds=output_inds + ) t_rho = tn.contract_compressed( optimize, @@ -1204,25 +1201,30 @@ def partial_trace( rho = t_rho.to_dense(k_inds, b_inds) - elif method == 'contract_around': + elif method == "contract_around": tn.contract_around_( - tuple(map(self.site_tag, keep)), 'any', + tuple(map(self.site_tag, keep)), + "any", max_bond=max_bond, - **contract_compressed_opts + **contract_compressed_opts, ) if rehearse: - if rehearse == 'tn': + if rehearse == "tn": return tn - if rehearse == 'tree': + if rehearse == "tree": return tn.contraction_tree( - optimize, output_inds=output_inds) + optimize, output_inds=output_inds + ) if rehearse: return tn.contraction_info( - optimize, output_inds=output_inds) + optimize, output_inds=output_inds + ) rho = tn.to_dense( - k_inds, b_inds, optimize=optimize, + k_inds, + b_inds, + optimize=optimize, ) else: @@ -1244,7 +1246,7 @@ def local_expectation( optimize, flatten=True, normalized=True, - symmetrized='auto', + symmetrized="auto", reduce=False, rehearse=False, **contract_compressed_opts, @@ -1319,7 +1321,7 @@ def compute_local_expectation( *, flatten=True, normalized=True, - symmetrized='auto', + symmetrized="auto", reduce=False, return_all=False, rehearse=False, @@ -1406,10 +1408,12 @@ def compute_local_expectation( ) compute_local_expectation_rehearse = functools.partialmethod( - compute_local_expectation, rehearse=True) + compute_local_expectation, rehearse=True + ) compute_local_expectation_tn = functools.partialmethod( - compute_local_expectation, rehearse='tn') + compute_local_expectation, rehearse="tn" + ) class TensorNetworkGenOperator(TensorNetworkGen): @@ -1427,30 +1431,55 @@ class TensorNetworkGenOperator(TensorNetworkGen): @property def upper_ind_id(self): - """The string specifier for the upper phyiscal indices. - """ + """The string specifier for the upper phyiscal indices.""" return self._upper_ind_id + def upper_ind(self, site): + """Get the upper physical index name of ``site``.""" + return self.upper_ind_id.format(site) + + def reindex_upper_sites(self, new_id, where=None, inplace=False): + """Modify the upper site indices for all or some tensors in this + operator tensor network (without changing the ``upper_ind_id``). + + Parameters + ---------- + new_id : str + A string with a format placeholder to accept a site, e.g. "up{}". + where : None or sequence + Which sites to update the index labels on. If ``None`` (default) + all sites. + inplace : bool + Whether to reindex in place. 
+ """ + if where is None: + where = self.gen_sites_present() + + return self.reindex( + {self.upper_ind(x): new_id.format(x) for x in where}, + inplace=inplace, + ) + + reindex_upper_sites_ = functools.partialmethod( + reindex_upper_sites, inplace=True + ) + @upper_ind_id.setter def upper_ind_id(self, new_id): if new_id == self._lower_ind_id: - raise ValueError("Setting the same upper and upper index ids will" - " make the two ambiguous.") + raise ValueError( + "Setting the same upper and upper index ids will" + " make the two ambiguous." + ) if self._upper_ind_id != new_id: self.reindex_upper_sites_(new_id) self._upper_ind_id = new_id self._upper_inds = None - def upper_ind(self, site): - """Get the upper physical index name of ``site``. - """ - return self.upper_ind_id.format(site) - @property def upper_inds(self): - """Return a tuple of all upper indices. - """ + """Return a tuple of all upper indices.""" if getattr(self, "_upper_inds", None) is None: self._upper_inds = tuple(map(self.upper_ind, self.gen_site_coos())) return self._upper_inds @@ -1464,30 +1493,55 @@ def upper_inds_present(self): @property def lower_ind_id(self): - """The string specifier for the lower phyiscal indices. - """ + """The string specifier for the lower phyiscal indices.""" return self._lower_ind_id + def lower_ind(self, site): + """Get the lower physical index name of ``site``.""" + return self.lower_ind_id.format(site) + + def reindex_lower_sites(self, new_id, where=None, inplace=False): + """Modify the lower site indices for all or some tensors in this + operator tensor network (without changing the ``lower_ind_id``). + + Parameters + ---------- + new_id : str + A string with a format placeholder to accept a site, e.g. "up{}". + where : None or sequence + Which sites to update the index labels on. If ``None`` (default) + all sites. + inplace : bool + Whether to reindex in place. + """ + if where is None: + where = self.gen_sites_present() + + return self.reindex( + {self.lower_ind(x): new_id.format(x) for x in where}, + inplace=inplace, + ) + + reindex_lower_sites_ = functools.partialmethod( + reindex_lower_sites, inplace=True + ) + @lower_ind_id.setter def lower_ind_id(self, new_id): if new_id == self._upper_ind_id: - raise ValueError("Setting the same upper and lower index ids will" - " make the two ambiguous.") + raise ValueError( + "Setting the same upper and lower index ids will" + " make the two ambiguous." + ) if self._lower_ind_id != new_id: self.reindex_lower_sites_(new_id) self._lower_ind_id = new_id self._lower_inds = None - def lower_ind(self, site): - """Get the lower physical index name of ``site``. - """ - return self.lower_ind_id.format(site) - @property def lower_inds(self): - """Return a tuple of all lower indices. - """ + """Return a tuple of all lower indices.""" if getattr(self, "_lower_inds", None) is None: self._lower_inds = tuple(map(self.lower_ind, self.gen_site_coos())) return self._lower_inds @@ -1499,12 +1553,7 @@ def lower_inds_present(self): """ return tuple(map(self.lower_ind, self.gen_sites_present())) - def to_dense( - self, - *inds_seq, - to_qarray=False, - **contract_opts - ): + def to_dense(self, *inds_seq, to_qarray=False, **contract_opts): """Contract this tensor network 'operator' into a dense array. 
Parameters @@ -1528,23 +1577,20 @@ def to_dense( inds_seq = (self.upper_inds_present, self.lower_inds_present) return TensorNetwork.to_dense( - self, *inds_seq, - to_qarray=to_qarray, - **contract_opts + self, *inds_seq, to_qarray=to_qarray, **contract_opts ) to_qarray = functools.partialmethod(to_dense, to_qarray=True) - def phys_dim(self, site=None, which='upper'): - """Get the physical dimension of ``site``. - """ + def phys_dim(self, site=None, which="upper"): + """Get the physical dimension of ``site``.""" if site is None: site = next(iter(self.gen_sites_present())) - if which == 'upper': + if which == "upper": return self[site].ind_size(self.upper_ind(site)) - if which == 'lower': + if which == "lower": return self[site].ind_size(self.lower_ind(site)) @@ -1566,7 +1612,7 @@ def _compute_expecs_maybe_in_parallel( if executor is None: results = (fn(tn, G, where, **kwargs) for where, G in terms.items()) else: - if hasattr(executor, 'scatter'): + if hasattr(executor, "scatter"): tn = executor.scatter(tn) futures = [ @@ -1580,26 +1626,22 @@ def _compute_expecs_maybe_in_parallel( expecs = dict(zip(terms.keys(), results)) - if return_all or kwargs.get('rehearse', False): + if return_all or kwargs.get("rehearse", False): return expecs return functools.reduce(add, expecs.values()) def _tn_local_expectation(tn, *args, **kwargs): - """Define as function for pickleability. - """ + """Define as function for pickleability.""" return tn.local_expectation(*args, **kwargs) def _tn_local_expectation_cluster(tn, *args, **kwargs): - """Define as function for pickleability. - """ + """Define as function for pickleability.""" return tn.local_expectation_cluster(*args, **kwargs) def _tn_local_expectation_exact(tn, *args, **kwargs): - """Define as function for pickleability. - """ + """Define as function for pickleability.""" return tn.local_expectation_exact(*args, **kwargs) - diff --git a/quimb/tensor/tensor_builder.py b/quimb/tensor/tensor_builder.py index c31c09cb..0c3e3ed9 100644 --- a/quimb/tensor/tensor_builder.py +++ b/quimb/tensor/tensor_builder.py @@ -29,7 +29,11 @@ from .array_ops import asarray, sensibly_scale, reshape, do from .contraction import array_contract from .decomp import eigh -from .tensor_arbgeom import TensorNetworkGen, TensorNetworkGenVector +from .tensor_arbgeom import ( + TensorNetworkGen, + TensorNetworkGenVector, + TensorNetworkGenOperator, +) from .tensor_1d import MatrixProductState, MatrixProductOperator from .tensor_2d import gen_2d_bonds, gen_2d_plaquettes, TensorNetwork2D from .tensor_3d import gen_3d_bonds, gen_3d_plaquettes, TensorNetwork3D @@ -64,7 +68,9 @@ def fill_fn(shape): @random_seed_fn -def rand_tensor(shape, inds, tags=None, dtype="float64", left_inds=None): +def rand_tensor( + shape, inds, tags=None, dtype="float64", left_inds=None, **randn_opts +): """Generate a random tensor with specified shape and inds. Parameters @@ -87,7 +93,7 @@ def rand_tensor(shape, inds, tags=None, dtype="float64", left_inds=None): ------- Tensor """ - data = randn(shape, dtype=dtype) + data = randn(shape, dtype=dtype, **randn_opts) return Tensor(data=data, inds=inds, tags=tags, left_inds=left_inds) @@ -245,12 +251,16 @@ def TN_from_edges_and_fill_fn( at each node. site_tag_id : str, optional String with formatter to tag sites. - site_ind_id : str, optional - String with formatter to tag indices (if ``phys_dim`` specified). + site_ind_id : str or (str, str), optional + String with formatter to tag indices (if ``phys_dim`` specified). 
If a + single str is supplied, the tensor network will have a single index at + each site, representing a vector. If a pair of strings is supplied, the + tensor network will have two indices at each site, representing an + operator with upper and lower indices. Returns ------- - TensorNetworkGen or TensorNetworkGenVector + TensorNetworkGen, TensorNetworkGenVector or TensorNetworkGenOperator """ terms = collections.defaultdict(list) bonds = collections.defaultdict(rand_uuid) @@ -261,27 +271,52 @@ def TN_from_edges_and_fill_fn( terms[node_a].insert(0, bond) terms[node_b].insert(0, bond) + if phys_dim is not None: + if isinstance(site_ind_id, str): + qtype = "vector" + else: + qtype = "operator" + upper_ind_id, lower_ind_id = site_ind_id + else: + qtype = "scalar" + ts = [] sites = [] for node, inds in sorted(terms.items(), key=lambda x: x[0]): sites.append(node) shape = [D] * len(inds) - if phys_dim is not None: + + # check if not scalar tensor + if qtype == "vector": inds.append(site_ind_id.format(node)) shape.append(phys_dim) + elif qtype == "operator": + inds.append(upper_ind_id.format(node)) + inds.append(lower_ind_id.format(node)) + shape.append(phys_dim) + shape.append(phys_dim) + data = fill_fn(shape) tags = site_tag_id.format(node) ts.append(Tensor(data=data, inds=inds, tags=tags)) tn = TensorNetwork(ts) - if phys_dim is not None: + if qtype == "vector": tn.view_as_( TensorNetworkGenVector, sites=sites, site_tag_id=site_tag_id, site_ind_id=site_ind_id, ) + elif qtype == "operator": + tn.view_as_( + TensorNetworkGenOperator, + sites=sites, + site_tag_id=site_tag_id, + upper_ind_id=upper_ind_id, + lower_ind_id=lower_ind_id, + ) else: tn.view_as_(TensorNetworkGen, sites=sites, site_tag_id=site_tag_id) @@ -319,7 +354,7 @@ def TN_from_edges_empty( Returns ------- - TensorNetworkGen or TensorNetworkGenVector + TensorNetworkGen, TensorNetworkGenVector or TensorNetworkGenOperator """ def fill_fn(shape): @@ -370,7 +405,7 @@ def TN_from_edges_with_value( Returns ------- - TensorNetworkGen or TensorNetworkGenVector + TensorNetworkGen, TensorNetworkGenVector or TensorNetworkGenOperator """ element = np.array(value, dtype=dtype) @@ -395,6 +430,7 @@ def TN_from_edges_rand( dtype="float64", site_tag_id="I{}", site_ind_id="k{}", + **randn_opts, ): """Create a random tensor network with geometry defined from a sequence of edges defining a graph. @@ -411,42 +447,33 @@ def TN_from_edges_rand( to mimic a wavefunction of ``len(G)`` sites. seed : int, optional A random seed. + dtype : str, optional + The data type of the tensors. site_tag_id : str, optional String with formatter to tag sites. site_ind_id : str, optional String with formatter to tag indices (if ``phys_dim`` specified). + randn_opts + Supplied to :func:`~quimb.gen.rand.randn`. 
Returns ------- - TensorNetworkGen or TensorNetworkGenVector + TensorNetworkGen, TensorNetworkGenVector or TensorNetworkGenOperator """ - ts = {} - - sites = tuple(sorted(set(concat(edges)))) - - for node in sites: - t = Tensor(tags=site_tag_id.format(node)) - if phys_dim is not None: - t.new_ind(site_ind_id.format(node), size=phys_dim) - ts[node] = t - - for node_a, node_b in gen_unique_edges(edges): - new_bond(ts[node_a], ts[node_b], size=D) - - tn = TensorNetwork(ts.values()) - tn.randomize_(seed=seed, dtype=dtype) + if seed is not None: + seed_rand(seed) - if phys_dim is not None: - tn.view_as_( - TensorNetworkGenVector, - sites=sites, - site_tag_id=site_tag_id, - site_ind_id=site_ind_id, - ) - else: - tn.view_as_(TensorNetworkGen, sites=sites, site_tag_id=site_tag_id) + def fill_fn(shape): + return randn(shape, dtype=dtype, **randn_opts) - return tn + return TN_from_edges_and_fill_fn( + fill_fn=fill_fn, + edges=edges, + D=D, + phys_dim=phys_dim, + site_tag_id=site_tag_id, + site_ind_id=site_ind_id, + ) TN_rand_from_edges = deprecated( @@ -487,7 +514,7 @@ def TN_rand_reg( Returns ------- - TensorNetworkGen or TensorNetworkGenVector + TensorNetworkGen, TensorNetworkGenVector or TensorNetworkGenOperator """ import networkx as nx @@ -532,7 +559,7 @@ def TN_rand_tree( Returns ------- - TensorNetworkGen or TensorNetworkGenVector + TensorNetworkGen, TensorNetworkGenVector or TensorNetworkGenOperator """ import networkx as nx diff --git a/tests/test_tensor/test_belief_propagation/test_d2bp.py b/tests/test_tensor/test_belief_propagation/test_d2bp.py index 45ff748f..8983324d 100644 --- a/tests/test_tensor/test_belief_propagation/test_d2bp.py +++ b/tests/test_tensor/test_belief_propagation/test_d2bp.py @@ -8,36 +8,40 @@ ) -@pytest.mark.parametrize("damping", [0.0, 0.1, 0.5]) -def test_contract(damping): - peps = qtn.PEPS.rand(3, 4, 3, seed=42) +@pytest.mark.parametrize("damping", [0.0, 0.1]) +@pytest.mark.parametrize("dtype", ["float32", "complex64"]) +def test_contract(damping, dtype): + peps = qtn.PEPS.rand(3, 4, 3, seed=42, dtype=dtype) # normalize exactly peps /= (peps.H @ peps) ** 0.5 N_ap = contract_d2bp(peps, damping=damping) assert N_ap == pytest.approx(1.0, rel=0.3) -def test_tree_exact(): - psi = qtn.TN_rand_tree(20, 3, 2) +@pytest.mark.parametrize("dtype", ["float32", "complex64"]) +def test_tree_exact(dtype): + psi = qtn.TN_rand_tree(20, 3, 2, dtype=dtype, seed=42) norm2 = psi.H @ psi norm2_bp = contract_d2bp(psi) - assert norm2_bp == pytest.approx(norm2, rel=1e-6) + assert norm2_bp == pytest.approx(norm2, rel=5e-6) -@pytest.mark.parametrize("damping", [0.0, 0.1, 0.5]) -def test_compress(damping): - peps = qtn.PEPS.rand(3, 4, 3, seed=42) +@pytest.mark.parametrize("damping", [0.0, 0.1]) +@pytest.mark.parametrize("dtype", ["float32", "complex64"]) +def test_compress(damping, dtype): + peps = qtn.PEPS.rand(3, 4, 3, seed=42, dtype=dtype) # test that using the BP compression gives better fidelity than purely # local, naive compression scheme peps_c1 = peps.compress_all(max_bond=2) peps_c2 = compress_d2bp(peps, max_bond=2, damping=damping) fid1 = peps_c1.H @ peps_c2 fid2 = peps_c2.H @ peps_c2 - assert fid2 > fid1 + assert abs(fid2) > abs(fid1) -def test_sample(): - peps = qtn.PEPS.rand(3, 4, 3, seed=42) +@pytest.mark.parametrize("dtype", ["float32", "complex64"]) +def test_sample(dtype): + peps = qtn.PEPS.rand(3, 4, 3, seed=42, dtype=dtype) # normalize exactly peps /= (peps.H @ peps) ** 0.5 config, peps_config, omega = sample_d2bp(peps, seed=42) @@ -49,7 +53,7 @@ def test_sample(): nrepeat 
= 4
     for _ in range(nrepeat):
         _, peps_config, _ = sample_d2bp(peps, seed=42)
-        ptotal += peps_config.contract() ** 2
+        ptotal += abs(peps_config.contract()) ** 2
 
     # check we are doing better than random guessing
     assert ptotal > nrepeat * 2**-peps.nsites
diff --git a/tests/test_tensor/test_belief_propagation/test_l2bp.py b/tests/test_tensor/test_belief_propagation/test_l2bp.py
new file mode 100644
index 00000000..a095be3e
--- /dev/null
+++ b/tests/test_tensor/test_belief_propagation/test_l2bp.py
@@ -0,0 +1,81 @@
+import pytest
+
+import quimb.tensor as qtn
+from quimb.experimental.belief_propagation.l2bp import (
+    contract_l2bp,
+    compress_l2bp,
+)
+
+
+@pytest.mark.parametrize("dtype", ["float32", "complex64"])
+def test_contract_tree_exact(dtype):
+    psi = qtn.TN_rand_tree(20, 3, 2, dtype=dtype)
+    norm2 = psi.H @ psi
+    norm2_bp = contract_l2bp(psi)
+    assert norm2_bp == pytest.approx(norm2, rel=1e-6)
+
+
+@pytest.mark.parametrize("dtype", ["float32", "complex64"])
+def test_contract_loopy_approx(dtype):
+    peps = qtn.PEPS.rand(3, 4, 3, dtype=dtype, seed=42)
+    norm_ex = peps.H @ peps
+    norm_bp = contract_l2bp(peps, damping=0.1)
+    assert norm_bp == pytest.approx(norm_ex, rel=0.2)
+
+
+@pytest.mark.parametrize("damping", [0.0, 0.1])
+@pytest.mark.parametrize("dtype", ["float32", "complex64"])
+def test_compress_loopy(damping, dtype):
+    peps = qtn.PEPS.rand(3, 4, 3, seed=42, dtype=dtype)
+    # test that using the BP compression gives better fidelity than purely
+    # local, naive compression scheme
+    peps_c1 = peps.compress_all(max_bond=2)
+    peps_c2 = compress_l2bp(peps, max_bond=2, damping=damping)
+    fid1 = peps_c1.H @ peps_c2
+    fid2 = peps_c2.H @ peps_c2
+    assert abs(fid2) > abs(fid1)
+
+
+@pytest.mark.parametrize("dtype", ["float32", "complex64"])
+def test_contract_double_layer_tree_exact(dtype):
+    # generate a random binary tree
+    edges = qtn.edges_tree_rand(10, max_degree=3, seed=42)
+    # generate a random tree product state and operator on this tree
+    tps = qtn.TN_from_edges_rand(
+        edges, 3, phys_dim=2, site_ind_id="k{}", dtype=dtype
+    )
+    tpo = qtn.TN_from_edges_rand(
+        edges, 3, phys_dim=2, site_ind_id=("k{}", "b{}"), dtype=dtype
+    )
+    # join into double layer tree
+    tn = qtn.tensor_network_apply_op_vec(tpo, tps, contract=False)
+    assert tn.num_tensors == 20
+
+    norm_ex = tn.H @ tn
+    norm_bp = contract_l2bp(tn)
+
+    assert norm_bp == pytest.approx(norm_ex, rel=1e-6)
+
+
+@pytest.mark.parametrize("dtype", ["float32", "complex64"])
+@pytest.mark.parametrize("damping", [0.0, 0.1])
+def test_compress_double_layer_loopy(dtype, damping):
+    peps = qtn.PEPS.rand(3, 4, bond_dim=3, seed=42, dtype=dtype)
+    pepo = qtn.PEPO.rand(3, 4, bond_dim=2, seed=42, dtype=dtype)
+
+    tn_lazy = qtn.tensor_network_apply_op_vec(pepo, peps, contract=False)
+    assert tn_lazy.num_tensors == 24
+
+    # compress using basic local compression
+    tn_eager = qtn.tensor_network_apply_op_vec(pepo, peps, contract=True)
+    assert tn_eager.num_tensors == 12
+    tn_eager.compress_all_(max_bond=3)
+    fid_basic = abs(tn_eager.H @ tn_lazy)
+
+    # compress using BP
+    tn_bp = compress_l2bp(tn_lazy, max_bond=3, damping=damping)
+    assert tn_bp.num_tensors == 12
+
+    # assert we did better than basic local compression
+    fid_bp = abs(tn_bp.H @ tn_lazy)
+    assert fid_bp > fid_basic
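
For reference, a few usage sketches (not part of the commit itself) exercising the main additions above. First, the new lazy 2-norm BP contraction, along the lines of test_contract_tree_exact; on a tree the BP estimate should match the exact norm squared to floating point accuracy:

    import quimb.tensor as qtn
    from quimb.experimental.belief_propagation.l2bp import contract_l2bp

    # build a random tree tensor network 'state': BP is exact on trees
    edges = qtn.edges_tree_rand(20, max_degree=3, seed=42)
    psi = qtn.TN_from_edges_rand(edges, 3, phys_dim=2, seed=42)

    norm2_exact = psi.H @ psi
    norm2_bp = contract_l2bp(psi)
    assert abs(norm2_bp - norm2_exact) / abs(norm2_exact) < 1e-6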
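The builder change means a pair of index ids now requests an operator-valued network; a sketch combining that with the new contract=False mode of tensor_network_apply_op_vec:

    import quimb.tensor as qtn

    edges = qtn.edges_tree_rand(10, max_degree=3, seed=0)

    # one physical index per site -> TensorNetworkGenVector
    tps = qtn.TN_from_edges_rand(edges, 3, phys_dim=2, site_ind_id="k{}")
    # (upper, lower) index pair per site -> TensorNetworkGenOperator
    tpo = qtn.TN_from_edges_rand(
        edges, 3, phys_dim=2, site_ind_id=("k{}", "b{}")
    )

    # contract=False keeps both layers: two tensors per site
    tn = qtn.tensor_network_apply_op_vec(tpo, tps, contract=False)
    assert tn.num_tensors == 20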
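A sketch of the corresponding compression workflow, applying a PEPO lazily and letting BP recompress back to one tensor per site, with the messages supplying the environments for the oblique projectors:

    import quimb.tensor as qtn
    from quimb.experimental.belief_propagation.l2bp import compress_l2bp

    peps = qtn.PEPS.rand(3, 4, bond_dim=3, seed=42)
    pepo = qtn.PEPO.rand(3, 4, bond_dim=2, seed=42)

    # leave the operator and state layers uncontracted...
    target = qtn.tensor_network_apply_op_vec(pepo, peps, contract=False)

    # ...then compress to max_bond=3 using the BP message environments
    approx = compress_l2bp(target, max_bond=3, damping=0.1)
    fid = abs(approx.H @ target)  # (unnormalized) overlap with the target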
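The L2BP class can also be driven directly; note that the (mantissa, exponent) form below is an assumption based on the strip_exponent convention used elsewhere in quimb (value = mantissa * 10**exponent), so treat this sketch accordingly:

    import quimb.tensor as qtn
    from quimb.experimental.belief_propagation.l2bp import L2BP

    peps = qtn.PEPS.rand(4, 4, bond_dim=3, seed=7)
    bp = L2BP(peps, damping=0.1)  # messages are initialized in the constructor
    bp.run(max_iterations=1000, tol=5e-6)

    # for large networks the raw norm squared can over/underflow, so
    # optionally return it split as mantissa and (assumed base-10) exponent
    mantissa, exponent = bp.contract(strip_exponent=True)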
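Finally, a small check illustrating the max_degree semantics of edges_tree_rand: it bounds the total degree of each node, which is why max_degree=3 corresponds to a binary tree (up to two children plus one parent):

    import collections
    import quimb.tensor as qtn

    edges = qtn.edges_tree_rand(50, max_degree=3, seed=1)
    assert len(edges) == 49  # a tree on n nodes always has n - 1 edges

    degree = collections.Counter()
    for a, b in edges:
        degree[a] += 1
        degree[b] += 1
    assert max(degree.values()) <= 3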