\include{preamble.tex}

\usepackage[final]{pdfpages}

\begin{document}
\maketitle
\tableofcontents
\section{Sheet 2}
\subsection{Problem 1}
\subsubsection{}
We let $\rho(A)$ be the spectral radius of a matrix $A \in
\mathbb{R}^{n\times n}$. A matrix norm $\|\cdot\|_{M}$ is consistent with a
vector norm $\|\cdot\|_{v}$ if
\begin{align}
    \|Ax\|_{v} \le \|A\|_M\|x\|_v
\end{align}
for all $x \in \mathbb{R}^n$ and all $A \in \mathbb{R}^{n\times n}$. Indeed
every submultiplicative matrix norm is consistent with some vector norm. To
show this let $\|\cdot\|_M$ be a matrix norm on $\mathbb{R}^{n \times n}$ and
define the vector norm $\|\cdot\|_v$ on $\mathbb{R}^{n}$ by
\begin{align}
    \|x\|_v = \|xv^T\|_M
\end{align}
for all $x \in \mathbb{R}^n$ and some fixed $v \neq 0$. Then we have
\begin{align}
    \|Ax\|_v = \|Axv^T\|_M \le \|A\|_M \|xv^T\|_M = \|A\|_M \|x\|_v \qed
\end{align}
Note that for $\mathbb{C}^{n \times n}$ and $\mathbb{C}^{n}$ one uses the
conjugate transpose $v^* \neq 0$ in place of $v^T$.
\subsubsection{}
Now we consider a splitting $A = D - (L + U)$, where $D, L$ and $U$ are
defined as
\begin{align}
    D &= \text{diag}\left(a_{11},\ldots,a_{nn}\right), \\
    \left(L \right)_{ij} &=
    \begin{cases}
        -\left( A \right)_{ij}\quad i > j\\
        0 \quad i\leq j
    \end{cases},
    \qquad
    \left(U \right)_{ij} =
    \begin{cases}
        -\left( A \right)_{ij}\quad i < j\\
        0 \quad i \ge j
    \end{cases}.
\end{align}
Then the iteration matrix of the Jacobi method is
\begin{align}
    B_J = D^{-1}(L+U).
\end{align}
We can show that if $A$ is strictly diagonally dominant then
\begin{align}
    \rho(B_J) \le \|B_J\|_\infty < 1.
\end{align}
If $A$ is strictly diagonally dominant, this means that
\begin{align}
    &\mid A_{ii}\mid > \sum_{j\neq i}^{n}  \mid A_{ij} \mid \qquad \forall\
     i\in\{1, \ldots , n\} \\
    \Leftrightarrow &\sum_{j\neq i} \frac{ \mid A_{ij} \mid}{ \mid A_{ii}  \mid}
     < 1.
\end{align}
Now let $(\lambda, v)$ be an eigen-pair of $B_J$, then
\begin{align}
    B_J v &= D^{-1}(L+U)v = \lambda v\\
    (L+U)v &= \lambda D v.
\end{align}
For a chosen $i$ this means
\begin{align}
    \left| \lambda \right| \left| A_{ii} \right| \left| v_i \right|
    &=
    \left|
    -\sum_{j>i} A_{ij}v_j - \sum_{j<i} A_{ij}v_j
    \right|\\
    &\le \sum_{j>i}
    \left|A_{ij} \right| \left| v_j \right| + \sum_{j<i}
    \left|A_{ij} \right| \left| v_j \right|\\
    &= \sum_{j\neq i} \left| A_{ij} \right| \left|v_j\right|.
\end{align}
We can choose an $i$ such that $\left| v_i \right| = \|v\|_\infty$, then
\begin{align}
    \left| \lambda \right| \left| A_{ii} \right| \left| v_i \right|
    &\le \sum_{j\neq i} \left| A_{ij} \right| \|v\|_\infty\\
    \Rightarrow
    \left| \lambda \right| \left| A_{ii} \right|
    &\le \sum_{j\neq i} \left| A_{ij} \right|\\
    \Rightarrow
    \left| \lambda \right|
    &\le \sum_{j\neq i}\frac{\left| A_{ij} \right|}{\left| A_{ii} \right|} <
    1. \qed
\end{align}
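As a quick numerical sanity check of this bound (not part of the sheet), the
following NumPy sketch builds a random strictly diagonally dominant matrix
and compares $\rho(B_J)$, $\|B_J\|_\infty$ and $1$:
\begin{verbatim}
import numpy as np

# Hypothetical check: random matrix made strictly diagonally dominant
# by overwriting the diagonal with the full absolute row sums plus 1.
rng = np.random.default_rng(0)
n = 6
A = rng.standard_normal((n, n))
np.fill_diagonal(A, np.abs(A).sum(axis=1) + 1.0)

D = np.diag(np.diag(A))
B_J = np.linalg.solve(D, D - A)   # B_J = D^{-1}(L+U) = I - D^{-1}A

rho = max(abs(np.linalg.eigvals(B_J)))
print(rho, "<=", np.linalg.norm(B_J, np.inf), "< 1")
\end{verbatim}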
\subsubsection{}
Next we show that the Jacobi method converges for every initial guess
$x^{(0)}$ to the solution of the equation $Ax = b$, given that $A$ is
strictly diagonally dominant. With any initial guess $x^{(0)}$, at the $k$-th
iteration we have
\begin{align}
    Dx^{(k)} &= (L+U)x^{(k-1)} + b\\
    \Leftrightarrow x^{(k)} &= D^{-1}(L+U)x^{(k-1)}+D^{-1}b \\
                            &= B_J x^{(k-1)} + D^{-1}b.
\end{align}
Now let $x$ be the exact solution, then the error at the $k$-th iteration is
\begin{align}
    e^{(k)} &= x - x^{(k)} = B_J\left( x - x^{(k-1)} \right) = \ldots\\
            &= B_J^k e^{(0)}.
\end{align}
Assume that $e^{(0)} \neq 0$, then we need $\lim_{k \to \infty}B_J^k = 0$. If
$A$ is \textbf{strictly diagonally dominant} we have $\rho(B_J) < 1$; this
means for an eigen-pair $\left(\lambda, v \right) $ of $B_J$ we have
\begin{align}
    \lim_{k \to \infty}B_J^k v = \lim_{k \to \infty}\lambda^k v \\
    \Rightarrow \lim_{k \to \infty}\lambda^k = 0,
\end{align}
for all $\lambda$ because $\rho(B_J) < 1$. (If $B_J$ is diagonalizable this
already gives $B_J^k \to 0$; in general one uses the standard fact that
$\rho(B) < 1$ implies $B^k \to 0$ for any square matrix $B$.) \qed
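The iteration above translates directly into code. A minimal sketch of the
Jacobi method (NumPy assumed; the tolerance and iteration cap are
illustrative parameters, not from the sheet):
\begin{verbatim}
import numpy as np

def jacobi(A, b, tol=1e-10, max_iter=10_000):
    """Solve Ax = b with x^(k) = B_J x^(k-1) + D^{-1} b."""
    d = np.diag(A)              # diagonal of A, i.e. D
    R = A - np.diag(d)          # off-diagonal part, equals -(L+U)
    x = np.zeros_like(b, dtype=float)
    for _ in range(max_iter):
        x_new = (b - R @ x) / d  # D^{-1}((L+U) x + b), componentwise
        if np.linalg.norm(x_new - x, np.inf) < tol:
            return x_new
        x = x_new
    return x

# Example with a strictly diagonally dominant matrix:
A = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [0.0, 1.0, 3.0]])
b = np.array([1.0, 2.0, 3.0])
print(np.allclose(jacobi(A, b), np.linalg.solve(A, b)))  # True
\end{verbatim}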
\subsection{Problem 2}
Now consider $A \in \mathbb{R}^{2 \times 2}$,
\begin{align}
    A = \begin{pmatrix} a_{11} & a_{12} \\ a_{21} & a_{22} \end{pmatrix}.
\end{align}
Let $A = D - (L + U)$ like in the above problem.
The Gauss-Seidel method has the iteration matrix $B_G = (D - L)^{-1} U$ and
the Jacobi method has the iteration matrix $B_J = D^{-1}(L + U)$.
\subsubsection{}
We show that the spectral radii of $B_J$ and $B_G$ satisfy
\begin{align}
    \rho (B_J) = \sqrt{\rho(B_G)},
\end{align}
by directly calculating the eigenvalues of $B_J$ and $B_G$ respectively.
We start with $B_J$,
\begin{align}
    \det(B_J - \lambda I)
    &= \det
    \begin{pmatrix} -\lambda &
    -\frac{a_{12}}{a_{11}} \\ -\frac{a_{21}}{a_{22}} & -\lambda
    \end{pmatrix}\\
    &= \lambda^2 - \frac{a_{12}a_{21}}{a_{11}a_{22}} = 0\\
        \Rightarrow \lambda^2 &=  \frac{a_{12}a_{21}}{a_{11}a_{22}}.
\end{align}
For $B_G$ we have
\begin{align}
    \det(B_G - \lambda I)
    &= \det
    \begin{pmatrix}
        -\lambda & -\frac{a_{12}}{a_{11}} \\ 0 &
        \frac{a_{21}a_{12}}{a_{11}a_{22}} - \lambda\\
    \end{pmatrix}\\
    &= -\lambda \left(\frac{a_{21}a_{12}}{a_{11}a_{22}} - \lambda\right) - 0
    \\
    \Rightarrow \lambda_1 &= 0, \qquad \lambda_2 =
    \frac{a_{21}a_{12}}{a_{11}a_{22}}.
\end{align}
This satisfies the claimed relation; recall that
\begin{align}
    \rho(A) = \max\{ \left| \lambda \right| : \text{$\lambda$ is an eigenvalue
    of $A$}\},
\end{align}
and note especially the absolute value: $\frac{a_{21}a_{12}}{a_{11}a_{22}}$
may be negative, in which case the eigenvalues of $B_J$ are purely imaginary.

\qed
\subsubsection{}
Indeed the error of the Gauss-Seidel iteration converges to $0$ if $\rho(B_G)
< 1$, which is the case exactly when $\rho(B_G) = \rho(B_J)^2 < 1$. From this
we can also conclude that the Gauss-Seidel method converges twice as fast as
the Jacobi method for $2 \times 2$ matrices: one Gauss-Seidel step reduces
the error by the same asymptotic factor as two Jacobi steps.
\subsubsection{}
Now let $r \in \mathbb{R}$ and
\begin{align}
    A_r =
    \begin{pmatrix}
        1 & r & r\\
        r & 1 & r\\
        r & r & 1\\
    \end{pmatrix}.
\end{align}
We can show that the Gauss-Seidel method for $A_r$ converges provided that
$r \in (-\frac{1}{2}, 1)$, while the Jacobi method for $A_r$ does not converge
for $r \in (\frac{1}{2}, 1)$. We start by calculating the iteration
matrices and their eigenvalues; the admissible range of $r$ is then determined
by requiring the spectral radius of the iteration matrix to be less than $1$.
For the Jacobi method we have
\begin{align}
    B_J = D^{-1}(L+U) =
    \begin{pmatrix}
        0 & -r & -r\\
        -r & 0 & -r\\
        -r & -r & 0\\
    \end{pmatrix}.
\end{align}
Then the eigenvalues of the matrix are
\begin{align}
    \det(B_J - \lambda I) &= -\lambda^3 + 3r^2\lambda - 2r^3 = 0\\
    \Rightarrow \lambda_1 &= r \text{ (double)}, \qquad \lambda_2 = -2r.
\end{align}
Then $\rho(B_J) < 1$ determines the range of $r$:
\begin{align}
    |\lambda_{\max}| = 2|r| < 1 \Rightarrow |r| < \frac{1}{2}.
\end{align}
We conclude that the Jacobi method does not converge for $r\in (\frac{1}{2}, 1)$
(indeed for any $\left| r \right| \ge \frac{1}{2}$).
Now for the Gauss-Seidel method,
\begin{align}
    B_G = (D-L)^{-1}U =
    \begin{pmatrix}
        0 & -r & -r\\
        0 & r^2 & r^2-r\\
        0 & -r^3+r^2 & -r^3+2r^2\\
    \end{pmatrix},
\end{align}
then
\begin{align}
    \det\left(B_G - \lambda I \right)  &= -\lambda^3 - r^3\lambda^2 +
    3r^2\lambda^2 - r^3\lambda = 0\\
    \Rightarrow \lambda_1 &= 0, \qquad \lambda_{2 / 3}=
    \frac{1}{2}\left(\pm\sqrt{r-4}(r-1)r^{\frac{3}{2}}-r^{3}+3r^{2}\right),
\end{align}
and $\left| \lambda \right| < 1$ holds exactly for $r \in \left( -\frac{1}{2},
1\right)$, so the Gauss-Seidel method converges on this interval.
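These spectral radii are easy to verify numerically. A small sketch (NumPy
assumed; the sample values of $r$ are arbitrary) that tabulates $\rho(B_J)$
and $\rho(B_G)$:
\begin{verbatim}
import numpy as np

def spectral_radius(B):
    return max(abs(np.linalg.eigvals(B)))

def iteration_matrices(r):
    """B_J and B_G for A_r with the splitting A = D - (L + U)."""
    A = np.array([[1.0, r, r], [r, 1.0, r], [r, r, 1.0]])
    D = np.diag(np.diag(A))
    L = -np.tril(A, -1)   # sign convention from Problem 1
    U = -np.triu(A, 1)
    return np.linalg.solve(D, L + U), np.linalg.solve(D - L, U)

for r in (-0.6, -0.4, 0.3, 0.6, 0.9):
    B_J, B_G = iteration_matrices(r)
    print(f"r={r:+.1f}  rho(B_J)={spectral_radius(B_J):.3f}"
          f"  rho(B_G)={spectral_radius(B_G):.3f}")
\end{verbatim}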
\subsection{Problem 3}
Let $Q \in \mathbb{R}^{n \times n}$ be the one-dimensional Poisson matrix,
i.e.\ the matrix of the finite difference discretization of the Laplacian on
a grid with $n$ interior points,
\begin{align}
    Q =
    \begin{pmatrix}
        2 & -1 &   &  &   \\
        -1& 2  & -1&  &   \\
          & \ddots  & \ddots & \ddots &   \\
          &   & -1 &  2 & -1\\
          &   &  &  -1 & 2
    \end{pmatrix}.
\end{align}
\subsubsection{}
(This can also be done with Gershgorin disks.)
\newline

The eigenvalues of $Q$ lie in the interval $[0 ,4]$. To show this let
$(\lambda , v)$ be an eigen-pair of $Q$; they satisfy the equation
\begin{align}
    Qv = \lambda v.
\end{align}
In the $k$-th component ($k \in \{1,\ldots,n\}$), with the conventions
$v^{(0)} = 0$, $v^{(n+1)} = 0$ and the normalization $v^{(1)} = 1$, this reads
\begin{align}
    -v^{(k-1)} + 2v^{(k)} - v^{(k+1)} &= \lambda v^{(k)}\\
    \Rightarrow v^{(k+1)} &= (2-\lambda) v^{(k)} - v^{(k-1)},
\end{align}
which is the recurrence of the Chebyshev polynomials of the second kind. The
boundary condition $v^{(n+1)} = 0$ forces $(2-\lambda_k)$ to satisfy
\begin{align}
    (2-\lambda_k) &= 2\cdot \cos\left(\frac{k\pi}{n+1}\right)\\
    \Rightarrow \lambda_k &= 2\left( 1 - \cos\left( \frac{k\pi}{n+1} \right)
    \right) \\
          &=4\cdot\sin^2\left( \frac{k\pi}{2(n+1)} \right),
\end{align}
thereby we can conclude that $\lambda_k \in [0, 4]$ for all $k \in \{1,
\ldots, n\}$. \qed
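A quick numerical cross-check of this eigenvalue formula (NumPy assumed, $n$
chosen arbitrarily):
\begin{verbatim}
import numpy as np

n = 20
# Assemble the 1D Poisson matrix Q = tridiag(-1, 2, -1).
Q = 2 * np.eye(n) - np.eye(n, k=1) - np.eye(n, k=-1)

computed = np.sort(np.linalg.eigvalsh(Q))
k = np.arange(1, n + 1)
analytic = np.sort(4 * np.sin(k * np.pi / (2 * (n + 1))) ** 2)
print(np.allclose(computed, analytic))  # True
\end{verbatim}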
\subsubsection{}
\subsubsection{}
In this section we write a Python script that returns the matrix $Q$ for a
given $n$, and for $b = (1, \ldots ,1)^T$ and $n=20$ implements the
Gauss-Seidel method to solve $Qx = b$ for $x$.
\begin{figure}[htpb]
    \centering
    \includegraphics[width=0.8\textwidth, clip, trim=0cm 5cm 0cm 10cm]{./prog/prb2_p.pdf}
\end{figure}
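The actual script is included above as a PDF; a minimal sketch of such a
Gauss-Seidel solver (NumPy assumed; tolerance and iteration cap are
illustrative) could look like this:
\begin{verbatim}
import numpy as np

def poisson_matrix(n):
    """The n x n 1D Poisson matrix tridiag(-1, 2, -1)."""
    return 2 * np.eye(n) - np.eye(n, k=1) - np.eye(n, k=-1)

def gauss_seidel(A, b, tol=1e-10, max_iter=100_000):
    """Solve Ax = b via (D - L) x^(k) = U x^(k-1) + b, componentwise."""
    n = len(b)
    x = np.zeros(n)
    for _ in range(max_iter):
        x_old = x.copy()
        for i in range(n):
            # Forward sweep: x[:i] already holds updated components.
            s = A[i, :i] @ x[:i] + A[i, i + 1:] @ x_old[i + 1:]
            x[i] = (b[i] - s) / A[i, i]
        if np.linalg.norm(x - x_old, np.inf) < tol:
            break
    return x

n = 20
Q = poisson_matrix(n)
b = np.ones(n)
x = gauss_seidel(Q, b)
print(np.linalg.norm(Q @ x - b))  # residual, small after convergence
\end{verbatim}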
\subsection{Problem 4}
Now let $P_n \in \mathbb{R}^{n\times n}$ be the matrix
\begin{align}
    P_n =
    \begin{pmatrix}
        2 & -1 &   &  & -1  \\
        -1& 2  & -1&  &   \\
          & \ddots  & \ddots & \ddots &   \\
          &   & -1 &  2 & -1\\
        -1 &   &  &  -1 & 2
    \end{pmatrix}.
\end{align}
\subsubsection{}
(This can also be done with Gershgorin disks.)
\newline

We can show that all the eigenvalues of $P_n$ are in $[0, 4]$ because $P_n$
is the finite difference/Laplacian matrix for periodic boundary conditions
and can be diagonalized by a DFT. So let $\left( \lambda, v \right)$ be an
eigen-pair of $P_n$, which satisfies the equation
\begin{align}
    P_n v = \lambda v.
\end{align}
This is the standard Poisson matrix with periodic boundary conditions, with
eigenvectors $v^k$ given componentwise by $v^k_j = \omega^{jk} = e^{2\pi i
\frac{jk}{n}}$ for $j \in \{1,\ldots,n\}$, one for each $k \in
\{1,\ldots,n\}$.
\begin{align}
    (P_n v^k)_j
    &= 2\omega^{jk}- \omega^{(j-1)k} - \omega^{(j+1)k}\\
    &= \omega^{jk}(2 - \omega^{-k} - \omega^{k})\\
    &= \omega^{jk}\left(2 - 2\cos\left( \frac{2\pi k}{n} \right)\right)\\
    &= 4\sin^2\left( \frac{\pi k}{n} \right) \omega^{jk} = \lambda_k v^k_j,
\end{align}
thereby we can conclude that $\lambda_k \in [0, 4]$ for all $k$.
\subsubsection{}
Because $P_n$ is a real, symmetric, circulant matrix, the real and imaginary
parts of its complex eigenvectors are themselves eigenvectors, i.e.
$\text{Re}\left(v \right)$ and $\text{Im}\left(v \right)$ are eigenvectors.
We may conclude this by direct calculation. In the $j$-th component we have,
for each $k$,
\begin{align}
    \left(P_n\text{Re}\left( v^k \right)\right) _j
    &= 2\text{Re}\left( \omega^{jk} \right) -\text{Re}\left( \omega^{(j-1)k}
    \right) -\text{Re}\left( \omega^{(j+1)k} \right)\\
    &= \omega^{jk} +\omega^{-jk} - \frac{1}{2}\omega^{(j-1)k}
    -\frac{1}{2}\omega^{-(j-1)k}
    -\frac{1}{2}\omega^{(j+1)k} -\frac{1}{2}\omega^{-(j+1)k}\\
    &= \left( \omega^{jk} + \omega^{-jk} \right)
    -\frac{1}{2}\left( \omega^{jk}\left(\omega^{k} + \omega^{-k} \right)
    +\omega^{-jk}\left(\omega^{k} + \omega^{-k} \right)  \right)\\
    &= \frac{1}{2}\left( \omega^{jk} + \omega^{-jk} \right)\left(
    2-\left(\omega^{k} + \omega^{-k}\right)\right) \\
    &= \text{Re}\left(\omega^{jk}  \right) \left(2-2\cos\left( \frac{2\pi
    k}{n}\right)\right)\\
    &= 4\sin^2\left( \frac{\pi k}{n} \right) \text{Re}\left( \omega^{jk}
    \right)  = \lambda_k\text{Re}(v^k_j).
\end{align}
\subsubsection{}
We define the quantity $m(n) = \min\{|\lambda|:\lambda\; \text{nonzero
eigenvalue of $P_n$}\}$ (the eigenvalue $\lambda_n = 0$ is excluded, since
otherwise $m(n)$ would trivially be $0$). We can show that this quantity
converges to $0$ as $n$ goes to infinity by noting that the minimum over the
nonzero eigenvalues is attained at $k = 1$ (equivalently $k = n-1$):
\begin{align}
    \lim_{n \to \infty}m(n)
    &= \lim_{n \to \infty}\min_{k\in\{1,\ldots,n-1\}}\{|\lambda_k(n)|\} = \lim_{n
    \to \infty}4\sin^2\left(\frac{\pi}{n}\right)\\
    &= \lim_{n \to \infty}4\cdot\left(\frac{x^2}{n^{2}} - \frac{x^{4}}{3n^{4}}
    + O\left(\frac{1}{n^{6}}\right)\right) = 0,
\end{align}
where $x = \pi$.
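A short numerical check of this decay (NumPy assumed; the grid sizes are
arbitrary), comparing the smallest nonzero eigenvalue against
$4\sin^2(\pi/n)$:
\begin{verbatim}
import numpy as np

def P(n):
    """Periodic 1D Poisson matrix: tridiag(-1, 2, -1) plus corners."""
    M = 2 * np.eye(n) - np.eye(n, k=1) - np.eye(n, k=-1)
    M[0, -1] = M[-1, 0] = -1
    return M

for n in (8, 16, 32, 64):
    lam = np.abs(np.linalg.eigvalsh(P(n)))
    m = lam[lam > 1e-12].min()   # smallest nonzero |eigenvalue|
    print(n, m, 4 * np.sin(np.pi / n) ** 2)
\end{verbatim}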
\subsection{Problem 5}
Let $Q$ be as in Problem 3 and split $Q$ as
\begin{align}
    Q = D-N,
\end{align}
where $D$ is the diagonal part of $Q$. For $p \in \mathbb{N}$ let $C_p$ be the
Neumann polynomial preconditioner, defined as
\begin{align}
    C_p = D^{-1}\sum_{k=0}^{p} \left( ND^{-1} \right)^{k}.
\end{align}
\subsubsection{}
\subsubsection{}
The following is a Python script that takes $n, p$ as input, returns
$C_p$ and furthermore calculates the spectral condition number of the matrix
$C_pQ$.
\begin{figure}[htpb]
    \centering
    \includegraphics[page=2 ,width=\textwidth, clip=true, trim=0cm 7cm 0cm
    3cm]{./prog/prb2_p.pdf}
    \includegraphics[page=3 ,width=0.8\textwidth, clip=true, trim=0cm 17cm 0cm
    2.5cm]{./prog/prb2_p.pdf}
\end{figure}
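Since the script itself lives in the included PDF, here is a minimal sketch
of the construction (NumPy assumed; the spectral condition number is computed
as the ratio of extreme singular values via \texttt{np.linalg.cond}):
\begin{verbatim}
import numpy as np

def poisson_matrix(n):
    """The n x n 1D Poisson matrix tridiag(-1, 2, -1)."""
    return 2 * np.eye(n) - np.eye(n, k=1) - np.eye(n, k=-1)

def neumann_preconditioner(n, p):
    """C_p = D^{-1} sum_{k=0}^{p} (N D^{-1})^k for Q = D - N."""
    Q = poisson_matrix(n)
    D_inv = np.diag(1.0 / np.diag(Q))
    N = np.diag(np.diag(Q)) - Q
    ND = N @ D_inv
    S = np.eye(n)                # k = 0 term
    term = np.eye(n)
    for _ in range(p):
        term = term @ ND         # builds (N D^{-1})^k
        S += term
    return D_inv @ S

n, p = 20, 3
C_p = neumann_preconditioner(n, p)
Q = poisson_matrix(n)
print(np.linalg.cond(C_p @ Q, 2))  # spectral condition number of C_p Q
\end{verbatim}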
%\printbibliography
\end{document}