commit 54336ce70aa58c1c4f6e1f1a0c71a048f8f64265
parent 26bcda26277a4ca3877e9915a319663c9a80a49e
Author: miksa234 <milutin@popovic.xyz>
Date: Thu, 16 Dec 2021 08:34:57 +0100
hw05 done
Diffstat:
14 files changed, 469 insertions(+), 201 deletions(-)
diff --git a/sesh4/src/main.jl b/sesh4/src/main.jl
@@ -16,203 +16,203 @@ include("functions.jl")
function main()
# Exercise 2
- #d = 4;
- #n = [20+k for k=1:d];
- #r = [2*k for k=1:d];
- #V = [rand(Uniform(-1, 1), n[k], r[k]) for k=1:d];
- #S = rand(Uniform(-1, 1), r...);
- #C = tucker_eval(S, V);
- #Vs, S0, vals, errors = my_hosvd(C, r);
+ d = 4;
+ n = [20+k for k=1:d];
+ r = [2*k for k=1:d];
+ V = [rand(Uniform(-1, 1), n[k], r[k]) for k=1:d];
+ S = rand(Uniform(-1, 1), r...);
+ C = tucker_eval(S, V);
+ Vs, S0, vals, errors = my_hosvd(C, r);
- #p = plot(errors,
- # ylabel="error",
- # label=L"$\frac{\|\hat{A}_k - A\|_F}{\|A\|_F}$",
- # xlabel=L"k",
- # lw = 1,
- # xticks=collect(1:d),
- # markershape=:circle,
- # legend=:topleft,
- # margin=5mm,
- # dpi=300,
- # title="HOSVD of Uniform Tensor")
- #savefig(p, "./plots/hosvd-uniform-error.png")
+ p = plot(errors,
+ ylabel="error",
+ label=L"$\frac{\|\hat{A}_k - A\|_F}{\|A\|_F}$",
+ xlabel=L"k",
+ lw = 1,
+ xticks=collect(1:d),
+ markershape=:circle,
+ legend=:topleft,
+ margin=5mm,
+ dpi=300,
+ title="HOSVD of Uniform Tensor")
+ savefig(p, "./plots/hosvd-uniform-error.png")
- ## Exercise 3
- #d = 4; n = 51;
- #A = [f([t1, t2, t3, t4]) for t1 in t(n), t2 in t(n), t3 in t(n), t4 in t(n)];
- #B = [g([t1, t2, t3, t4]) for t1 in t(n), t2 in t(n), t3 in t(n), t4 in t(n)];
+ # Exercise 3
+ d = 4; n = 51;
+ A = [f([t1, t2, t3, t4]) for t1 in t(n), t2 in t(n), t3 in t(n), t4 in t(n)];
+ B = [g([t1, t2, t3, t4]) for t1 in t(n), t2 in t(n), t3 in t(n), t4 in t(n)];
- ## Exercise 3a
- #σ_unf_a = σ_unfold(A, d);
- #σ_unf_b = σ_unfold(B, d);
- #p1 = plot(margin=5mm)
- #p2 = plot(margin=5mm)
- #for k=1:d
- # plot!(p1,
- # σ_unf_a[k],
- # ylabel=L"$\log(\Sigma_{A_k})$",
- # label=L"$\Sigma_{A_%$k}$",
- # xlabel=L"n",
- # lw = 1,
- # yaxis=:log,
- # markershape=:circle,
- # dpi=300,
- # title="Unfolding of A")
- # plot!(p2,
- # σ_unf_b[k],
- # ylabel=L"$\log(\Sigma_{B_k})$",
- # label=L"$\Sigma_{_B%$k}$",
- # xlabel=L"n",
- # lw = 1,
- # dpi=300,
- # markershape=:circle,
- # yaxis=:log,
- # title="Unfolding of B")
- #end
- #savefig(p1, "./plots/singular-dec-A.png")
- #savefig(p2, "./plots/singular-dec-B.png")
+ # Exercise 3a
+ σ_unf_a = σ_unfold(A, d);
+ σ_unf_b = σ_unfold(B, d);
+ p1 = plot(margin=5mm)
+ p2 = plot(margin=5mm)
+ for k=1:d
+ plot!(p1,
+ σ_unf_a[k],
+ ylabel=L"$\log(\Sigma_{A_k})$",
+ label=L"$\Sigma_{A_%$k}$",
+ xlabel=L"n",
+ lw = 1,
+ yaxis=:log,
+ markershape=:circle,
+ dpi=300,
+ title="Unfolding of A")
+ plot!(p2,
+ σ_unf_b[k],
+ ylabel=L"$\log(\Sigma_{B_k})$",
+ label=L"$\Sigma_{_B%$k}$",
+ xlabel=L"n",
+ lw = 1,
+ dpi=300,
+ markershape=:circle,
+ yaxis=:log,
+ title="Unfolding of B")
+ end
+ savefig(p1, "./plots/singular-dec-A.png")
+ savefig(p2, "./plots/singular-dec-B.png")
- ϵ_s = [1/(10^(j*2)) for j=1:5] # computer not goode enough for j = 6
+ ϵ_s = [1/(10^(j*2)) for j=1:5] # computer not good enough for j = 6
- #r_jk_a, ϵ_jk_a, σ_jk_a = rank_approx(A, tucker_unfold);
- #r_jk_b, ϵ_jk_b, σ_jk_b = rank_approx(B, tucker_unfold);
- #p1 = plot(size=[700, 350], margin=5mm, dpi=300)
- #p2 = plot(size=[700, 350], margin=5mm, dpi=300)
- #for k=1:d
- # plot!(p1,
- # 1 ./ ϵ_s,
- # r_jk_a[:, k],
- # ylabel=L"$r_{jk}$",
- # label=L"A-$\varepsilon_{j%$k}$",
- # xlabel=L"\varepsilon_{j}^{-1}",
- # lw = 2,
- # markershape=:circle,
- # xaxis=:log10,
- # legend=:topleft,
- # title=L"Tensor $f(x_1, x_2, x_3, x_4)$")
- # plot!(p2,
- # 1 ./ ϵ_s,
- # r_jk_b[:, k],
- # ylabel=L"$r_{jk}$",
- # label=L"B-$\varepsilon_{j%$k}$",
- # xlabel=L"\varepsilon_{j}^{-1}",
- # lw = 2,
- # markershape=:circle,
- # xaxis=:log10,
- # legend=:topleft,
- # title=L"Tensor $g(x_1, x_2, x_3, x_4)$")
- #end
- #savefig(p1, "./plots/hosvd-error-A.png")
- #savefig(p2, "./plots/hosvd-error-B.png")
+ r_jk_a, ϵ_jk_a, σ_jk_a = rank_approx(A, tucker_unfold);
+ r_jk_b, ϵ_jk_b, σ_jk_b = rank_approx(B, tucker_unfold);
+ p1 = plot(size=[700, 350], margin=5mm, dpi=300)
+ p2 = plot(size=[700, 350], margin=5mm, dpi=300)
+ for k=1:d
+ plot!(p1,
+ 1 ./ ϵ_s,
+ r_jk_a[:, k],
+ ylabel=L"$r_{jk}$",
+ label=L"A-$\varepsilon_{j%$k}$",
+ xlabel=L"\varepsilon_{j}^{-1}",
+ lw = 2,
+ markershape=:circle,
+ xaxis=:log10,
+ legend=:topleft,
+ title=L"Tensor $f(x_1, x_2, x_3, x_4)$")
+ plot!(p2,
+ 1 ./ ϵ_s,
+ r_jk_b[:, k],
+ ylabel=L"$r_{jk}$",
+ label=L"B-$\varepsilon_{j%$k}$",
+ xlabel=L"\varepsilon_{j}^{-1}",
+ lw = 2,
+ markershape=:circle,
+ xaxis=:log10,
+ legend=:topleft,
+ title=L"Tensor $g(x_1, x_2, x_3, x_4)$")
+ end
+ savefig(p1, "./plots/hosvd-error-A.png")
+ savefig(p2, "./plots/hosvd-error-B.png")
- ## Exercise 3b
- #Nj_hosvd_a = []; ϵ_hosvd_a = []; σ_hosvd_a = [];
- #Nj_hosvd_b = []; ϵ_hosvd_b = []; σ_hosvd_b = [];
+ # Exercise 3b
+ Nj_hosvd_a = []; ϵ_hosvd_a = []; σ_hosvd_a = [];
+ Nj_hosvd_b = []; ϵ_hosvd_b = []; σ_hosvd_b = [];
- #println("Checking validity of error analysis for HOSVD")
- #for j=1:length(ϵ_s)
- # S0_a, Vs_a, vals_a, errors_a = my_hosvd(A, r_jk_a[j, :])
- # N_vals = 0
- # for V in Vs_a
- # N_vals += length(V)
- # end
- # append!(Nj_hosvd_a, N_vals + length(S0_a))
- # append!(ϵ_hosvd_a, errors_a[end])
- # append!(σ_hosvd_a, [vals_a])
- # println("ϵ_hosvd_a[j] <= |ϵ_jk_a|_f : $(ϵ_hosvd_a[j] <= norm(ϵ_jk_a)) ϵ_hosvd_a[j] = $(ϵ_hosvd_a[j])")
+ println("Checking validity of error analysis for HOSVD")
+ for j=1:length(ϵ_s)
+ S0_a, Vs_a, vals_a, errors_a = my_hosvd(A, r_jk_a[j, :])
+ N_vals = 0
+ for V in Vs_a
+ N_vals += length(V)
+ end
+ append!(Nj_hosvd_a, N_vals + length(S0_a))
+ append!(ϵ_hosvd_a, errors_a[end])
+ append!(σ_hosvd_a, [vals_a])
+ println("ϵ_hosvd_a[j] <= |ϵ_jk_a|_f : $(ϵ_hosvd_a[j] <= norm(ϵ_jk_a)) ϵ_hosvd_a[j] = $(ϵ_hosvd_a[j])")
- # S0_b, Vs_b, vals_b, errors_b = my_hosvd(B, r_jk_b[j, :])
- # N_vals = 0
- # for V in Vs_b
- # N_vals += length(V)
- # end
- # append!(Nj_hosvd_b, N_vals + length(S0_b))
- # append!(ϵ_hosvd_b, errors_b[end])
- # append!(σ_hosvd_b, [vals_b])
- # println("ϵ_hosvd_b[j] <= |ϵ_jk_b|_f : $(ϵ_hosvd_b[j] <= norm(ϵ_jk_b)) ϵ_hosvd_b[j] = $(ϵ_hosvd_b[j])")
- #end
+ S0_b, Vs_b, vals_b, errors_b = my_hosvd(B, r_jk_b[j, :])
+ N_vals = 0
+ for V in Vs_b
+ N_vals += length(V)
+ end
+ append!(Nj_hosvd_b, N_vals + length(S0_b))
+ append!(ϵ_hosvd_b, errors_b[end])
+ append!(σ_hosvd_b, [vals_b])
+ println("ϵ_hosvd_b[j] <= |ϵ_jk_b|_f : $(ϵ_hosvd_b[j] <= norm(ϵ_jk_b)) ϵ_hosvd_b[j] = $(ϵ_hosvd_b[j])")
+ end
- ## Exercise 3b
- #j = 5
- #p1 = plot(dpi=300, size=[800, 350], margin=5mm)
- #p2 = plot(dpi=300, size=[800, 350], margin=5mm)
- #for k=1:d
- # plot!(p1, σ_unf_a[k][1:r_jk_a[j, k]] ./ σ_hosvd_a[j][k],
- # ylabel=L"\frac{\sigma^{A_k}_\alpha}{\sigma^{hosvd}_{k\alpha}}",
- # label=L"k=%$k",
- # xlabel=L"j",
- # xticks=collect(1:length(r_jk_a)),
- # yticks=[1, 1.001],
- # ylim=[1, 1.001],
- # lw = 2,
- # markershape=:circle,
- # legend=:topleft,
- # title=L"$f(x_1, x_2, x_3, x_4)$")
- # plot!(p2, σ_unf_b[k][1:r_jk_b[j, k]] ./ σ_hosvd_b[j][k],
- # ylabel=L"\frac{\sigma^{B_k}_\alpha}{\sigma^{hosvd}_{k\alpha}}",
- # label=L"k=%$k",
- # xlabel=L"j",
- # yticks=[1, 1.001],
- # ylim=[1, 1.001],
- # lw = 2,
- # markershape=:circle,
- # legend=:topleft,
- # title=L"$g(x_1, x_2, x_3, x_4)$")
- #end
- #savefig(p1, "./plots/hosvd-sigmaratio-a.png")
- #savefig(p2, "./plots/hosvd-sigmaratio-b.png")
+ # Exercise 3b
+ j = 5
+ p1 = plot(dpi=300, size=[800, 350], margin=5mm)
+ p2 = plot(dpi=300, size=[800, 350], margin=5mm)
+ for k=1:d
+ plot!(p1, σ_unf_a[k][1:r_jk_a[j, k]] ./ σ_hosvd_a[j][k],
+ ylabel=L"\frac{\sigma^{A_k}_\alpha}{\sigma^{hosvd}_{k\alpha}}",
+ label=L"k=%$k",
+ xlabel=L"j",
+ xticks=collect(1:length(r_jk_a)),
+ yticks=[1, 1.001],
+ ylim=[1, 1.001],
+ lw = 2,
+ markershape=:circle,
+ legend=:topleft,
+ title=L"$f(x_1, x_2, x_3, x_4)$")
+ plot!(p2, σ_unf_b[k][1:r_jk_b[j, k]] ./ σ_hosvd_b[j][k],
+ ylabel=L"\frac{\sigma^{B_k}_\alpha}{\sigma^{hosvd}_{k\alpha}}",
+ label=L"k=%$k",
+ xlabel=L"j",
+ yticks=[1, 1.001],
+ ylim=[1, 1.001],
+ lw = 2,
+ markershape=:circle,
+ legend=:topleft,
+ title=L"$g(x_1, x_2, x_3, x_4)$")
+ end
+ savefig(p1, "./plots/hosvd-sigmaratio-a.png")
+ savefig(p2, "./plots/hosvd-sigmaratio-b.png")
- ## Exercise 3d
+ # Exercise 3d
- #p1 = plot(size=[700, 350], margin=5mm, dpi=300)
- #p2 = plot(size=[700, 350], margin=5mm, dpi=300)
- #plot!(p1,
- # 1 ./ ϵ_s,
- # Nj_hosvd_a,
- # ylabel=L"$N_{j}$",
- # label=L"A-$N_{j}$",
- # xlabel=L"\varepsilon_{j}^{-1}",
- # lw = 2,
- # markershape=:circle,
- # xaxis=:log10,
- # legend=:topleft,
- # title=L"Tucker of $f(x_1, x_2, x_3, x_4)$")
- #plot!(p2,
- # 1 ./ ϵ_s,
- # Nj_hosvd_b,
- # ylabel=L"$N_{j}$",
- # label=L"B-$N_{j}$",
- # xlabel=L"\varepsilon_{j}^{-1}",
- # lw = 2,
- # markershape=:circle,
- # xaxis=:log10,
- # legend=:topleft,
- # title=L"Tucker of $g(x_1, x_2, x_3, x_4)$")
- #savefig(p1, "./plots/hosvd-Nj-a.png")
- #savefig(p2, "./plots/hosvd-Nj-b.png")
+ p1 = plot(size=[700, 350], margin=5mm, dpi=300)
+ p2 = plot(size=[700, 350], margin=5mm, dpi=300)
+ plot!(p1,
+ 1 ./ ϵ_s,
+ Nj_hosvd_a,
+ ylabel=L"$N_{j}$",
+ label=L"A-$N_{j}$",
+ xlabel=L"\varepsilon_{j}^{-1}",
+ lw = 2,
+ markershape=:circle,
+ xaxis=:log10,
+ legend=:topleft,
+ title=L"Tucker of $f(x_1, x_2, x_3, x_4)$")
+ plot!(p2,
+ 1 ./ ϵ_s,
+ Nj_hosvd_b,
+ ylabel=L"$N_{j}$",
+ label=L"B-$N_{j}$",
+ xlabel=L"\varepsilon_{j}^{-1}",
+ lw = 2,
+ markershape=:circle,
+ xaxis=:log10,
+ legend=:topleft,
+ title=L"Tucker of $g(x_1, x_2, x_3, x_4)$")
+ savefig(p1, "./plots/hosvd-Nj-a.png")
+ savefig(p2, "./plots/hosvd-Nj-b.png")
- ## exercise 6
- #d = 4;
- #n = [20+k for k=1:d];
- #r = [[2*k for k=1:(d-1)]..., 1];
- #r_new = [1, r...]
- #V = [rand(Uniform(-1, 1), (r_new[i], n[i], r_new[i+1])) for i=1:d];
- #D = ttmps_eval(V, n)
- #C, singular_val, errors= tt_svd(D, n, r, d);
- #p = plot(errors,
- # ylabel="error",
- # label=L"$\frac{\|\hat{A}_k - A\|_f}{\|A\|_f}$",
- # xlabel=L"k",
- # lw = 1,
- # xticks=collect(1:d),
- # markershape=:circle,
- # legend=:topleft,
- # margin=5mm,
- # dpi=300,
- # title="TT-SVD of uniform tensor")
- #savefig(p, "./plots/ttsvd-uniform-error.png")
+ # exercise 6
+ d = 4;
+ n = [20+k for k=1:d];
+ r = [[2*k for k=1:(d-1)]..., 1];
+ r_new = [1, r...]
+ V = [rand(Uniform(-1, 1), (r_new[i], n[i], r_new[i+1])) for i=1:d];
+ D = ttmps_eval(V, n)
+ C, singular_val, errors= tt_svd(D, n, r, d);
+ p = plot(errors,
+ ylabel="error",
+ label=L"$\frac{\|\hat{A}_k - A\|_f}{\|A\|_f}$",
+ xlabel=L"k",
+ lw = 1,
+ xticks=collect(1:d),
+ markershape=:circle,
+ legend=:topleft,
+ margin=5mm,
+ dpi=300,
+ title="TT-SVD of uniform tensor")
+ savefig(p, "./plots/ttsvd-uniform-error.png")
- ## exercise 7
+ # exercise 7
d = 4; n = 51;
ndim = [n for i=1:d]
diff --git a/sesh5/src/.ipynb_checkpoints/Untitled-checkpoint.ipynb b/sesh5/src/.ipynb_checkpoints/Untitled-checkpoint.ipynb
@@ -0,0 +1,6 @@
+{
+ "cells": [],
+ "metadata": {},
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/sesh5/src/arithmetic.jl b/sesh5/src/arithmetic.jl
@@ -73,18 +73,18 @@ end
with different TT-MPS decompositions U with ranks $p_1, … , p_{d-1}$
and V with ranks $q_1, … ,q_{d-1}$ respectively.
- Output is the TT-MPS representation of $Au$ with ranks $(q_1 ⋅ p_1), ⋯ , (q_{d-1} ⋅ p_{d-1})$.
+ Output is the TT-MPS representation of $A ⋅ u$ with ranks $(q_1 ⋅ p_1), ⋯ , (q_{d-1} ⋅ p_{d-1})$.
"""
function tt_matvec(A, U)
W = []
d = length(U)
for k ∈ 1:d
- α_k, m_k, n_k, α_k1 = size(A[k])
+ α_k, n_k, m_k, α_k1 = size(A[k])
β_k, m_k, β_k1 = size(U[k])
W_k = zeros(α_k * β_k, n_k, α_k1 * β_k1)
- for i_k ∈ 1:m_k
- for j_k ∈ 1:n_k
- W_k[:, i_k, :] += A[k][:, i_k, j_k, :] * U[k][:, j_k, :]
+ for i_k ∈ 1:n_k
+ for j_k ∈ 1:m_k
+ W_k[:, i_k, :] += kron(A[k][:, i_k, j_k, :], U[k][:, j_k, :])
end
end
append!(W, [W_k])
diff --git a/sesh5/src/main.jl b/sesh5/src/main.jl
@@ -18,6 +18,7 @@ function main()
d = 4
p_Σ = []
p_R = []
+ global R_x = []; global R_y = []; global R_s = []; global R_z = []
for (p, q) ∈ ([3, 4], [5, 7])
X = [che_pol(sum([t_1/1, t_2/2, t_3/3, t_4/4]), p) for t_1 ∈ t(n), t_2 ∈ t(n), t_3 ∈ t(n), t_4 ∈ t(n)];
Y = [che_pol(sum([t_1/1, t_2/2, t_3/3, t_4/4]), q) for t_1 ∈ t(n), t_2 ∈ t(n), t_3 ∈ t(n), t_4 ∈ t(n)];
@@ -38,24 +39,28 @@ function main()
append!(Y_Σ, [σ_y])
append!(S_Σ, [σ_s])
append!(Z_Σ, [σ_z])
- append!(r_x, [rank(X_k)])
- append!(r_y, [rank(Y_k)])
- append!(r_s, [rank(S_k)])
- append!(r_z, [rank(Z_k)])
+ append!(r_x, rank(X_k))
+ append!(r_y, rank(Y_k))
+ append!(r_s, rank(S_k))
+ append!(r_z, rank(Z_k))
end
+ append!(R_x, [r_x])
+ append!(R_y, [r_y])
+ append!(R_s, [r_s])
+ append!(R_z, [r_z])
+
p_σ = plot(layout = 4, margin=5mm, size=(800, 800), title="p=$p, q=$q", dpi=500)
plot!(p_σ[1], X_Σ, markershape=:circle, ylabel=L"\Sigma_{X_k}", label=[L"X_1" L"X_2" L"X_3"])
plot!(p_σ[2], Y_Σ, markershape=:circle, ylabel=L"\Sigma_{Y_k}", label=[L"Y_1" L"Y_2" L"Y_3"])
plot!(p_σ[3], S_Σ, markershape=:circle, ylabel=L"\Sigma_{S_k}", label=[L"S_1" L"S_2" L"S_3"])
plot!(p_σ[4], Z_Σ, markershape=:circle, ylabel=L"\Sigma_{Z_k}", label=[L"Z_1" L"Z_2" L"Z_3"])
append!(p_Σ, [p_σ])
- p_r = plot(margin=5mm, size=(700, 350), title="p=$p, q=$q", ylabel=L"r_k", dpi=300)
+ p_r = plot(margin=5mm, size=(500, 350), title="p=$p, q=$q", ylabel=L"r_k", dpi=300)
plot!(p_r, r_x, markershape=:circle, label=L"r_X")
plot!(p_r, r_y, markershape=:circle, label=L"r_Y")
plot!(p_r, r_s, markershape=:circle, label=L"r_S", alpha=0.5)
plot!(p_r, r_z, markershape=:circle, label=L"r_Z")
append!(p_R, [p_r])
-
end
savefig(p_Σ[1], "./plots/sigma_34.png")
savefig(p_Σ[2], "./plots/sigma_57.png")
@@ -72,13 +77,17 @@ function main()
S = X .+ Y
Z = X .* Y
- C_x, r_x, σ_x, ϵ_x = tt_svd_ϵ(X, 1e-12);
- C_y, r_y, σ_y, ϵ_y = tt_svd_ϵ(Y, 1e-12);
+ #C_x, r_x, σ_x, ϵ_x = tt_svd_ϵ(X, 1e-12);
+ #C_y, r_y, σ_y, ϵ_y = tt_svd_ϵ(Y, 1e-12);
+ C_x, σ_x, ϵ_x = tt_svd(X, R_x[2]);
+ C_y, σ_y, ϵ_y = tt_svd(Y, R_y[2]);
C_s = tt_add(C_x, C_y, 1, 1);
C_z = tt_mult(C_x, C_y);
- Q_s, r_s = t_mpstt_ϵ(C_s, 1e-12);
- Q_z, r_z = t_mpstt_ϵ(C_z, 1e-7);
+ ##Q_s, r_s = t_mpstt_ϵ(C_s, 1e-12);
+ ##Q_z, r_z = t_mpstt_ϵ(C_z, 1e-7);
+ Q_s = t_mpstt(C_s, R_s[2]);
+ Q_z = t_mpstt(C_z, R_z[2]);
println("Exercise 3b")
println("|ttmps_eval(C_s) - S||_F/||S||_F = $(norm(S - ttmps_eval(C_s, dims))/norm(S))")
diff --git a/sesh5/src/plots/rank_34.png b/sesh5/src/plots/rank_34.png
Binary files differ.
diff --git a/sesh5/src/plots/rank_57.png b/sesh5/src/plots/rank_57.png
Binary files differ.
diff --git a/sesh5/src/ttmps.jl b/sesh5/src/ttmps.jl
@@ -96,9 +96,8 @@ end
function tt_svd(A, r)
n = size(A)
d = length(n)
- r_new = [1, r...]
+ r_new = [1, r..., 1]
S_0_hat = copy(A)
- S0s = []
C = []; σ = []; ϵ = []
for k=2:d
B_k = reshape(S_0_hat, (r_new[k-1] * n[k-1], prod([n[i] for i=k:d])))
diff --git a/sesh5/tex/main.pdf b/sesh5/tex/main.pdf
Binary files differ.
diff --git a/sesh5/tex/main.tex b/sesh5/tex/main.tex
@@ -0,0 +1,248 @@
+\documentclass[a4paper]{article}
+
+
+\usepackage[T1]{fontenc}
+\usepackage[utf8]{inputenc}
+\usepackage{mathptmx}
+
+%\usepackage{ngerman} % Sprachanpassung Deutsch
+
+\usepackage{graphicx}
+\usepackage{geometry}
+\geometry{a4paper, top=15mm}
+
+\usepackage{subcaption}
+\usepackage[shortlabels]{enumitem}
+\usepackage{amssymb}
+\usepackage{amsthm}
+\usepackage{mathtools}
+\usepackage{braket}
+\usepackage{bbm}
+\usepackage{graphicx}
+\usepackage{float}
+\usepackage{yhmath}
+\usepackage{tikz}
+\usepackage{algorithm}
+\usepackage{algpseudocode}
+\usetikzlibrary{patterns,decorations.pathmorphing,positioning}
+\usetikzlibrary{calc,decorations.markings}
+
+\newcommand{\eps}{\varepsilon}
+
+\usepackage[backend=biber, sorting=none]{biblatex}
+\addbibresource{uni.bib}
+
+\usepackage[framemethod=TikZ]{mdframed}
+
+\tikzstyle{titlered} =
+ [draw=black, thick, fill=white,%
+ text=black, rectangle,
+ right, minimum height=.7cm]
+
+
+\usepackage[colorlinks=true,naturalnames=true,plainpages=false,pdfpagelabels=true]{hyperref}
+\usepackage[parfill]{parskip}
+\usepackage{lipsum}
+
+
+\usepackage{tcolorbox}
+\tcbuselibrary{skins,breakable}
+
+\pagestyle{myheadings}
+
+\markright{Popović\hfill Tensor Methods \hfill}
+
+
+\title{University of Vienna\\ Faculty of Mathematics\\
+\vspace{1cm}TENSOR METHODS FOR DATA SCIENCE AND SCIENTIFIC COMPUTING
+}
+\author{Milutin Popovic}
+
+\begin{document}
+\maketitle
+\tableofcontents
+\section{Assignment 5}
+\subsection{Truncated TT-MPS}
+Given an MPS-TT factorization $U_1, \dots, U_d$ of $A \in
+\mathbb{R}^{n_1\times \cdots \times n_d}$, with ranks $p_1, \dots, p_{d-1}
+\in \mathbb{N}$, we will produce an MPS-TT approximation of ranks not
+exceeding $r_1, \dots, r_{d-1}$, with factors $V_1, \dots, V_d$, of
+quasi-optimal accuracy in the Frobenius norm.
+
+
+\begin{algorithm}[H]
+ \caption{rank truncation MPS-TT}\label{alg: hosvd}
+\begin{algorithmic}
+ \State $\tilde{U_1} \gets U_1$
+ \For{$k = 1, \dots, d-1$}
+ \State $\tilde{U}_k \gets \text{reshape}\left(\tilde{U}_k, (p_{k-1} \cdot n_k, p_k)\right)$
+
+ \State $\bar{U}_k \gets \hat{P}_k \hat{\Sigma}_k \hat{W}^*_k$
+ \Comment{rank- $r_k$ T-SVD of $\tilde{U}_k$}
+
+ \State $\hat{Q}_k \gets \text{reshape}\left(\hat{P}_k, (p_{k-1}, n_k,
+ r_k) \right)$
+
+ \State $\hat{Z}_k \gets \text{reshape}\left(\hat{\Sigma}_k \hat{W}^*_k,
+ (r_{k}, 1, p_k) \right)$
+
+ \State $(\tilde{U}_{k+1})_{\beta_k,i_{k+1},\alpha_{k+1}} \gets
+ \sum_{\alpha_k = 1}^{r_k} (\hat{Z}_k)_{\beta_k, 1, \alpha_k}
+ (U_{k+1})_{\alpha_k, i_{k+1}, \alpha_{k+1}}$
+ \State $\text{save}(\hat{Q_k})$
+ \EndFor
+ \State $\text{save}\left(\tilde{U}_d\right)$
+\end{algorithmic}
+\end{algorithm}
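+
+For concreteness, the loop can also be written down in Julia. The following
+is only a minimal sketch, assuming the cores are stored as three-dimensional
+arrays of size $p_{k-1} \times n_k \times p_k$; the function name is
+illustrative and not part of the accompanying code.
+\begin{verbatim}
+using LinearAlgebra
+
+# Truncate a TT-MPS factorization U (vector of 3D cores) to ranks <= r.
+function tt_truncate_sketch(U, r)
+    d = length(U)
+    V = Vector{Array{Float64,3}}(undef, d)
+    Ut = copy(U[1])
+    for k in 1:d-1
+        a, n, b = size(Ut)                       # current core: a x n x b
+        F = svd(reshape(Ut, a * n, b))           # T-SVD of the unfolding
+        rk = min(r[k], length(F.S))              # truncation rank r_k
+        V[k] = reshape(F.U[:, 1:rk], a, n, rk)   # new core Q_k
+        Z = Diagonal(F.S[1:rk]) * F.Vt[1:rk, :]  # remainder Sigma_k * W_k^*
+        c, m, e = size(U[k+1])
+        Ut = reshape(Z * reshape(U[k+1], c, m * e), rk, m, e)
+    end
+    V[d] = Ut
+    return V
+end
+\end{verbatim}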
+\subsection{TT-MPS arithmetic}
+\subsubsection{Addition}
+Consider two tensors $U$ and $V$ of size $n_1 \times \cdots \times n_d$ with
+$n_k, d \in \mathbb{N}$, with TT-MPS factorizations $U_k$ of ranks
+$p_k$ and $V_k$ of ranks $q_k$, respectively (for $k \in \{1, \dots, d-1\}$).
+Then a TT-MPS factorization of $W = U + V$ can be calculated without
+evaluating $U$ or $V$, using only $U_1, \dots, U_{d}$ and $V_1, \dots,
+V_d$, as follows:
+\begin{align}
+ W &= U_1 \bowtie \cdots \bowtie U_d + V_1 \bowtie \cdots \bowtie V_d
+ =\nonumber\\
+ &= \begin{bmatrix}U_1 & V_1\end{bmatrix} \bowtie
+ \begin{bmatrix} U_2 \bowtie \cdots \bowtie U_d \\ V_2 \bowtie \cdots
+ \bowtie V_d \end{bmatrix} =\nonumber\\
+ &= \cdots =\nonumber\\
+ &=\begin{bmatrix}U_1 & V_1\end{bmatrix} \bowtie \begin{bmatrix} U_2 &
+ 0\\ 0& V_2 \end{bmatrix} \bowtie \cdots \bowtie \begin{bmatrix} U_{d-1} &
+ 0\\ 0& V_{d-1} \end{bmatrix} \bowtie \begin{bmatrix} U_d \\ V_d
+\end{bmatrix} =\nonumber\\
+ &= W_1 \bowtie \cdots \bowtie W_d,
+\end{align}
+where $W$ has a decomposition of ranks $(p_1 + q_1), \dots, (p_{d-1} +
+q_{d-1})$. The construction of these block matrices can be carried out by
+initializing each $W_k$ and assigning the $U_k$ and $V_k$ slices along the
+second dimension, i.e.\ along $n_k$. For the $k$-th step ($2\leq k \leq d-1$) this gives
+\begin{align}
+ (W_k)_{:, i_k, :} = \begin{bmatrix} (U_k)_{:, i_k, :} & 0 \\
+ 0 & (V_k)_{:, i_k, :}
+ \end{bmatrix} \;\;\;\; \forall i_k \in \{1, \dots,
+ n_k\}.
+\end{align}
+For the first step we have $p_0 = q_0 = 1$ and thereby
+\begin{align}
+ (W_1)_{:, i_1, :} = \begin{bmatrix} (U_1)_{1, i_1,:} & (V_1)_{1, i_1, :}
+ \end{bmatrix} \;\;\;\;\; \forall i_1 \in \{1, \dots, n_1 \}.
+\end{align}
+For the last step $k=d$ we have $p_d = q_d = 1$ and thereby
+\begin{align}
+ (W_d)_{:, i_d, :} = \begin{bmatrix} (U_d)_{:, i_d,1} \\ (V_d)_{:, i_d, 1}
+ \end{bmatrix} \;\;\;\;\; \forall i_d \in \{1, \dots, n_d \}.
+\end{align}
+This concludes the construction of the addition algorithm.
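+
+A minimal Julia sketch of this slice-wise construction, assuming cores of
+size $p_{k-1} \times n_k \times p_k$ and $q_{k-1} \times n_k \times q_k$
+(the function name is illustrative, not the repository's own
+\texttt{tt\_add}), reads:
+\begin{verbatim}
+function tt_add_sketch(U, V)
+    d = length(U)
+    W = Vector{Array{Float64,3}}(undef, d)
+    for k in 1:d
+        p0, n, p1 = size(U[k]); q0, _, q1 = size(V[k])
+        r0 = k == 1 ? 1 : p0 + q0              # boundary ranks stay 1
+        r1 = k == d ? 1 : p1 + q1
+        Wk = zeros(r0, n, r1)
+        for i in 1:n
+            if k == 1                          # row block [U_1 V_1]
+                Wk[1, i, :] = vcat(U[k][1, i, :], V[k][1, i, :])
+            elseif k == d                      # column block [U_d; V_d]
+                Wk[:, i, 1] = vcat(U[k][:, i, 1], V[k][:, i, 1])
+            else                               # block-diagonal middle cores
+                Wk[1:p0, i, 1:p1] = U[k][:, i, :]
+                Wk[p0+1:end, i, p1+1:end] = V[k][:, i, :]
+            end
+        end
+        W[k] = Wk
+    end
+    return W
+end
+\end{verbatim}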
+\subsubsection{Hadamard Product}
+Now consider again two tensors $U$ and $V$ of size $n_1 \times \cdots \times
+n_d$, with TT-MPS factorizations $U_k$ of ranks $p_k$ and $V_k$ of ranks
+$q_k$, respectively (for $k \in \{1, \dots, d-1\}$).
+Then a TT-MPS factorization of the Hadamard product $W = U \otimes V$ can be
+calculated without evaluating $U$ or $V$, using only $U_1, \dots, U_{d}$ and
+$V_1, \dots, V_d$, as follows:
+\begin{align}
+    W_{i_1, \dots, i_d} &= \sum_{\alpha_1 = 1}^{p_1} \cdots \sum_{\alpha_{d-1}
+    = 1}^{p_{d-1}} \prod_{k=1}^d U_k(\alpha_{k-1}, i_k, \alpha_k) \cdot
+    \sum_{\beta_1 = 1}^{q_1} \cdots \sum_{\beta_{d-1}
+    = 1}^{q_{d-1}} \prod_{k=1}^d V_k(\beta_{k-1}, i_k, \beta_k)
+    \nonumber\\
+    &=
+    \sum_{\gamma_1 = 1}^{r_1} \cdots \sum_{\gamma_{d-1}
+    = 1}^{r_{d-1}} \prod_{k=1}^d W_k(\gamma_{k-1}, i_k, \gamma_k).
+\end{align}
+Thereby the factors $W_k$ can be calculated with the factor-wise product
+\begin{align}
+    W_k(\alpha_{k-1}\beta_{k-1}, i_k, \alpha_k\beta_k) = U_k(\alpha_{k-1},
+    i_k, \alpha_k) \cdot V_k(\beta_{k-1}, i_k, \beta_k),
+\end{align}
+for all $\alpha_{k} \in \{1, \dots, p_{k}\}$ and $\beta_k \in \{1, \dots,
+q_k\}$, with $p_0 = p_d = q_0 = q_d = 1$. The TT-MPS factorization produced
+by this construction has ranks $(p_1 q_1), \dots, (p_{d-1} q_{d-1})$.
+
+In terms of slices and Kronecker products of core slices, this reads
+\begin{align}
+ (W_k)_{:, i_k, :} = (U_k)_{:, i_k, :} \otimes (V_k)_{:, i_k, :}
+ \;\;\;\;\;\; \forall i_k \in \{1, \dots, n_k\}.
+\end{align}
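+
+A minimal Julia sketch of this Kronecker-product construction (assuming the
+same core layout as above; the name is illustrative, not the repository's
+own \texttt{tt\_mult}) reads:
+\begin{verbatim}
+using LinearAlgebra
+
+function tt_hadamard_sketch(U, V)
+    d = length(U)
+    W = Vector{Array{Float64,3}}(undef, d)
+    for k in 1:d
+        p0, n, p1 = size(U[k]); q0, _, q1 = size(V[k])
+        Wk = zeros(p0 * q0, n, p1 * q1)
+        for i in 1:n
+            # Kronecker product of the i-th slices of the two cores
+            Wk[:, i, :] = kron(U[k][:, i, :], V[k][:, i, :])
+        end
+        W[k] = Wk
+    end
+    return W
+end
+\end{verbatim}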
+\subsubsection{Matrix Vector product}
+Now consider $A\in \mathbb{R}^{n_1\cdots n_d \times m_1 \cdots m_d}$ and
+$u \in \mathbb{R}^{m_1 \cdots m_d}$. The TT-MPS factorization of $A$ is $A_k
+\in \mathbb{R}^{p_{k-1} \times n_k \times m_k \times p_k}$ and the TT-MPS
+factorization of $u$ is $U_k \in \mathbb{R}^{q_{k-1} \times m_k \times q_k}$.
+The TT-MPS factors $W_k$ of $w = A \cdot u$ can be explicitly calculated with
+\begin{align}
+ W_{i_1,\dots, i_d} &= \sum_{j_1,\dots,j_d} A_{i_1\cdots i_d, j_1\cdots
+ j_d} U_{j_1\cdots j_d}\nonumber \\
+ &= \sum_{j_1, \dots, j_d}
+ \sum_{\alpha_1=1}^{p_1} \cdots \sum_{\alpha_{d-1}=1}^{p_{d-1}}
+ \prod_{k=1}^d A_k(\alpha_{k-1}, i_k, j_k, \alpha_k) \cdot
+ \sum_{\beta_1=1}^{q_1} \cdots \sum_{\beta_{d-1}=1}^{q_{d-1}}
+    \prod_{k=1}^d U_k(\beta_{k-1}, j_k, \beta_k) \nonumber\\
+    &= \sum_{\alpha_1\beta_1} \cdots \sum_{\alpha_{d-1}\beta_{d-1}}
+    \prod_{k=1}^{d} \left(
+    \sum_{j_k=1}^{m_k} A_k(\alpha_{k-1}, i_k, j_k, \alpha_k)
+    U_k(\beta_{k-1}, j_k, \beta_k)
+ \right) \nonumber \\
+ &= \sum_{\gamma_1}^{r_1} \cdots \sum_{\gamma_{d-1}}^{r_{d-1}}
+ \prod_{k=1}^{d} W_k(\gamma_{k-1}, i_k, \gamma_k),
+\end{align}
+for $r_k = q_k \cdot p_k$.
+In terms of slices this reads
+\begin{align}
+    (W_k)_{:, i_k, :} = \sum_{j_k=1}^{m_k} (A_k)_{:, i_k, j_k, :} \otimes
+    (U_k)_{:, j_k, :} \;\;\;\;\; \forall i_k \in \{1, \dots, n_k\}
+\end{align}
+for all $k = 1,\dots, d$.
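+
+This mirrors the loop implemented in \texttt{tt\_matvec} in
+\texttt{sesh5/src/arithmetic.jl}; a condensed Julia sketch (assuming $A$
+cores of size $p_{k-1} \times n_k \times m_k \times p_k$ and $u$ cores of
+size $q_{k-1} \times m_k \times q_k$; the name is illustrative) reads:
+\begin{verbatim}
+using LinearAlgebra
+
+function tt_matvec_sketch(A, U)
+    d = length(U)
+    W = Vector{Array{Float64,3}}(undef, d)
+    for k in 1:d
+        p0, n, m, p1 = size(A[k])
+        q0, _, q1 = size(U[k])
+        Wk = zeros(p0 * q0, n, p1 * q1)
+        for i in 1:n, j in 1:m
+            # sum of Kronecker products over the contracted index j_k
+            Wk[:, i, :] += kron(A[k][:, i, j, :], U[k][:, j, :])
+        end
+        W[k] = Wk
+    end
+    return W
+end
+\end{verbatim}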
+\subsection{Testing}
+Consider the grid for $n=51$
+\begin{align}
+ t_i = 2\frac{i-1}{n-1} - 1 \;\;\;\;\; i = 1, \dots ,n
+\end{align}
+for the tensors $X, Y \in \mathbb{R}^{n\times n\times n\times n}$ given by
+\begin{align}
+ x_{i_1,\dots, i_4} &= T_p\left(\sum_{k=1}^4 \frac{t_{i_k}}{4}\right) \\
+    y_{i_1,\dots, i_4} &= T_q\left(\sum_{k=1}^4 \frac{t_{i_k}}{4}\right)
+\end{align}
+for $p, q \in \mathbb{N}_0$, where $T_r$ denotes the Chebyshev polynomial of
+degree $r \in \mathbb{N}_0$. The Chebyshev polynomials are defined by
+\begin{align}
+ T_r(x) = \begin{cases}
+ \cos(r\cdot\text{arccos}(x)) & \;\;\;\;\; |x| \leq 1\\
+ \cosh(r\cdot\text{arccosh}(x))&\;\;\;\;\; x \geq 1\\
+ (-1)^r\cosh(r\cdot\text{arccosh}(-x))&\;\;\;\;\; x \leq -1
+ \end{cases}.
+\end{align}
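+
+A plausible Julia sketch of the grid and of the Chebyshev evaluation used to
+build $X$ and $Y$ follows. The helper names \texttt{t} and \texttt{che\_pol}
+match the calls in \texttt{sesh5/src/main.jl}, but their actual definitions
+live in the repository's helper files, so this is only an assumption about
+their form.
+\begin{verbatim}
+# Uniform grid on [-1, 1] with n points: t_i = 2(i-1)/(n-1) - 1
+t(n) = [2 * (i - 1) / (n - 1) - 1 for i in 1:n]
+
+# Chebyshev polynomial T_r evaluated at x, via the three-case definition
+function che_pol(x, r)
+    if abs(x) <= 1
+        return cos(r * acos(x))
+    elseif x > 1
+        return cosh(r * acosh(x))
+    else
+        return (-1)^r * cosh(r * acosh(-x))
+    end
+end
+\end{verbatim}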
+Additionally we define
+\begin{align}
+ S := X + Y\\
+ Z := X \otimes Y.
+\end{align}
+The following figures show, for $(p, q) = (3,4)$ and $(p, q) = (5,7)$, the
+singular values of the TT-MPS unfolding matrices of the tensors $X, Y, S, Z$.
+\begin{figure}[H]
+ \centering
+ \includegraphics[width=\textwidth]{./plots/sigma_34.png}
+ \caption{Singular values of the TT-MPS unfolding matrices of $X$, $Y$, $S$ and $Z$ for $(p, q) = (3, 4)$.}
+\end{figure}
+
+\begin{figure}[H]
+ \centering
+ \includegraphics[width=\textwidth]{./plots/sigma_57.png}
+ \caption{Singular values of the TT-MPS unfolding matrices of $X$, $Y$, $S$ and $Z$ for $(p, q) = (5, 7)$.}
+\end{figure}
+
+The following figures show the ranks of the TT-MPS unfolding matrices of $X, Y, S,
+Z$ for $(p, q) = (3,4)$ and $(p, q) = (5,7)$, respectively.
+
+\begin{figure}[H]
+ \centering
+ \includegraphics[width=0.48\textwidth]{./plots/rank_34.png}
+ \includegraphics[width=0.48\textwidth]{./plots/rank_57.png}
+ \caption{Ranks of the TT-MPS unfolding matrices of $X$, $Y$, $S$ and $Z$ for $(p, q) = (3, 4)$ (left) and $(p, q) = (5, 7)$ (right).}
+\end{figure}
+\nocite{code}
+\printbibliography
+\end{document}
diff --git a/sesh5/tex/plots/rank_34.png b/sesh5/tex/plots/rank_34.png
Binary files differ.
diff --git a/sesh5/tex/plots/rank_57.png b/sesh5/tex/plots/rank_57.png
Binary files differ.
diff --git a/sesh5/tex/plots/sigma_34.png b/sesh5/tex/plots/sigma_34.png
Binary files differ.
diff --git a/sesh5/tex/plots/sigma_57.png b/sesh5/tex/plots/sigma_57.png
Binary files differ.
diff --git a/sesh5/tex/uni.bib b/sesh5/tex/uni.bib
@@ -0,0 +1,6 @@
+@online{code,
+ author = {Popovic Milutin},
+ title = {Git Instance, Tensor Methods for Data Science and Scientific Computing},
+ urldate = {2021-10-24},
+ url = {git://popovic.xyz/tensor_methods.git},
+}