Skip to content
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
90 changes: 52 additions & 38 deletions src/schemes/symmetric_looptnr.jl
Original file line number Diff line number Diff line change
Expand Up @@ -23,15 +23,16 @@ $(TYPEDFIELDS)
"""
mutable struct SLoopTNR <: TNRScheme
    # Current coarse-grained tensor of the network.
    T::TensorMap
    # Most recent optimized S tensor (stored by `step!` for inspection/finalizers).
    # NOTE(review): S is seeded with the four-leg T in the constructor; if S is
    # meant to be a three-leg tensor, confirm this placeholder is intended.
    S::TensorMap

    # Gradient-descent algorithm used for the loop optimization.
    gradalg::OptimKit.LBFGS
    # Callback applied after every step; its return value is logged by `run!`.
    finalize!::Function

    function SLoopTNR(
            T::TensorMap;
            gradalg = LBFGS(10; verbosity = 0, gradtol = 6.0e-7, maxiter = 40000),
            finalize = (finalize!),
        )
        # S has no meaningful value before the first step, so reuse T as a placeholder.
        return new(T, T, gradalg, finalize)
    end
end

Expand All @@ -42,8 +43,8 @@ function classical_ising_inv(β)

S = ℤ₂Space(0 => 1, 1 => 1)
T = zeros(Float64, S ⊗ S ← S' ⊗ S')
block(T, Irrep[ℤ₂](0)) .= [2x^2 2x * y; 2x * y 2y^2]
block(T, Irrep[ℤ₂](1)) .= [2x * y 2x * y; 2x * y 2x * y]
block(T, Irrep[ℤ₂](0)) .= [2x^2 2x*y; 2x*y 2y^2]
block(T, Irrep[ℤ₂](1)) .= [2x*y 2x*y; 2x*y 2x*y]

return permute(T, (1, 2, 3, 4))
end
Expand Down Expand Up @@ -77,14 +78,17 @@ function TTtoNorm(TT)
return tr(T8 * b)
end

"""
    to_const_TT(T)

Compute the S-independent part of the LoopTNR cost function: the norm of the
double tensor `TT` built from `T` contracted with its own conjugate. This value
is constant during the optimization over `S`, so it is computed once and passed
into `cost_looptnr` instead of being recomputed on every gradient evaluation.
"""
function to_const_TT(T)
    # Double tensor: contract T with conj(T) over the two shared legs.
    @tensor TT[-1 -2; -3 -4] := T[1 -1 2 -3] * conj(T[1 -2 2 -4])
    return TTtoNorm(TT)
end

"""
    cost_looptnr(S, T, const_TT)

Normalized LoopTNR cost ``(‖T‖² + ‖SS‖² - 2 Re⟨T, SS⟩) / ‖T‖²`` measuring how
well the double tensor built from `S` approximates the one built from `T`.
`const_TT` is the S-independent term `to_const_TT(T)`, precomputed by the
caller so it is not re-evaluated on every gradient step.
"""
function cost_looptnr(S, T, const_TT)
    @assert eltype(S) == Float64 "Modification is needed for complex numbers!"
    SS = StoSS(S)
    # Cross term between T and the S double tensor, and the pure-S term.
    @tensor TSS[-1 -2; -3 -4] := T[1 -1 2 -3] * conj(SS[1 -2 2 -4])
    @tensor S4[-1 -2; -3 -4] := SS[1 -1 2 -3] * conj(SS[1 -2 2 -4])
    # Divide by const_TT so the cost is scale-invariant in T.
    return (const_TT + TTtoNorm(S4) - 2 * TTtoNorm(TSS)) / const_TT
end

########## Gradient Optimization ##########
Expand All @@ -95,12 +99,10 @@ function fg(f, A)
end

"""
    optimize_S(scheme, S)

Minimize `cost_looptnr` over `S` with the scheme's gradient algorithm
(`scheme.gradalg`), starting from the initial guess `S`. Returns the optimized
tensor.
"""
function optimize_S(scheme, S)
    # The T-only term of the cost is constant during the optimization — hoist it.
    const_TT = to_const_TT(scheme.T)
    opt_fun(x) = cost_looptnr(x, scheme.T, const_TT)
    opt_fg(x) = fg(opt_fun, x)
    # Only the optimizer (first return value) is needed; fx/gx/stats are discarded.
    Sopt, fx, gx, numfg, normgradhistory = optimize(opt_fg, S, scheme.gradalg)
    return Sopt
end

Expand All @@ -127,30 +129,35 @@ function Ψ_corner(T)
return psi
end

"""
    entanglement_filtering(T; ef_trunc = truncbelow(1.0e-14))

Remove short-range (CDL-type) entanglement from the four-leg tensor `T` by
computing projectors from the center and corner loop wavefunctions and applying
one projector to each leg. `ef_trunc` controls the truncation used when
building the projectors. Returns the filtered tensor.
"""
function entanglement_filtering(T; ef_trunc = truncbelow(1.0e-14))
    entanglement_function(steps, data) = abs(data[end])
    entanglement_criterion = maxiter(100) & convcrit(1.0e-12, entanglement_function)

    psi_center = Ψ_center(T)
    psi_corner = Ψ_corner(T)

    # Projectors from the center loop give the bottom/right legs.
    PR_list, PL_list = find_projectors(
        psi_center,
        [1, 1, 1, 1],
        [3, 3, 3, 3],
        entanglement_criterion,
        ef_trunc,
    )
    # NOTE(review): both legs take PL_list[1] — confirm this is intended.
    P_bottom = PL_list[1]
    P_right = PL_list[1]

    # Projectors from the corner loop give the top/left legs.
    PR_list, PL_list = find_projectors(
        psi_corner,
        [1, 1, 1, 1],
        [3, 3, 3, 3],
        entanglement_criterion,
        ef_trunc,
    )
    P_top = PL_list[3]
    P_left = PL_list[3]

    # Apply one projector per leg of T.
    @tensor T_new[-1 -2 -3 -4] :=
        T[1 2 3 4] * P_left[-1; 1] * P_bottom[-2; 2] * P_top[-3; 3] * P_right[-4; 4]
    return T_new
end

Expand All @@ -164,7 +171,7 @@ function ef_oneloop(T, trunc::TensorKit.TruncationScheme)
ΨA = Ψ_center(T)
ΨB = []

for i in 1:4
for i = 1:4
s1, s2 = SVD12(ΨA[i], truncdim(trunc.dim * 2))
push!(ΨB, s1)
push!(ΨB, s2)
Expand All @@ -173,14 +180,16 @@ function ef_oneloop(T, trunc::TensorKit.TruncationScheme)
ΨB_function(steps, data) = abs(data[end])
criterion = maxiter(100) & convcrit(1.0e-12, ΨB_function)
PR_list, _ = find_projectors(
ΨB, [1, 1, 1, 1, 1, 1, 1, 1], [2, 2, 2, 2, 2, 2, 2, 2],
criterion, trunc
ΨB,
[1, 1, 1, 1, 1, 1, 1, 1],
[2, 2, 2, 2, 2, 2, 2, 2],
criterion,
trunc,
)

ΨB_disentangled = []
for i in 1:1
@tensor B1[-2 -1; -3] := ΨB[i][-1; -2 2] *
PR_list[mod(i, 8) + 1][2; -3]
for i = 1:1
@tensor B1[-2 -1; -3] := ΨB[i][-1; -2 2] * PR_list[mod(i, 8)+1][2; -3]
push!(ΨB_disentangled, B1)
end
S = ΨB_disentangled[1]
Expand All @@ -195,23 +204,28 @@ function combine_4S(S)
end

########## Main functions ##########
"""
    step!(scheme, trunc, oneloop; ef_trunc = truncbelow(1.0e-14))

Perform one SLoopTNR coarse-graining step in place: entanglement-filter
`scheme.T`, build an initial `S` (via the one-loop entanglement filtering when
`oneloop` is true, otherwise via a plain decomposition), optimize `S`, and
recombine four copies of it into the new `scheme.T`. The optimized `S` is
stored in `scheme.S`. Returns the mutated `scheme`.
"""
function step!(scheme, trunc, oneloop; ef_trunc = truncbelow(1.0e-14))
    scheme.T = entanglement_filtering(scheme.T; ef_trunc)
    if oneloop == true
        S = ef_oneloop(scheme.T, trunc)
    else
        S = decompose_T(scheme.T, trunc)
    end
    S = optimize_S(scheme, S)
    scheme.T = combine_4S(S)
    # Keep the optimized S around for finalizers / diagnostics.
    scheme.S = S
    return scheme
end

function run!(
scheme::SLoopTNR, trscheme::TensorKit.TruncationScheme,
criterion::TNRKit.stopcrit; finalize_beginning = true, oneloop = true,
verbosity = 1
)
scheme::SLoopTNR,
trscheme::TensorKit.TruncationScheme,
criterion::TNRKit.stopcrit;
finalize_beginning = true,
oneloop = true,
verbosity = 1,
ef_trunc = truncbelow(1.0e-14),
)
data = []

LoggingExtras.withlevel(; verbosity) do
Expand All @@ -225,7 +239,7 @@ function run!(

t = @elapsed while crit
@infov 2 "Step $(steps + 1), data[end]: $(!isempty(data) ? data[end] : "empty")"
step!(scheme, trscheme, oneloop)
step!(scheme, trscheme, oneloop; ef_trunc)
push!(data, scheme.finalize!(scheme))
steps += 1
crit = criterion(steps, data)
Expand Down
Loading