%!TEX root = ./main.tex
\section{Operators on Inner Product Spaces}
\subsection{Self-Adjoint and Normal Operators}
\begin{definition}
Suppose $T \in \c L(V,W)$. We define $T^*$ by the formula
\[ \la T\vec v, \vec w \ra_W = \la \vec v, T^* \vec w \ra_V\]
\end{definition}
Think of $\vec w$ as fixed; then $\vec v \mapsto \la T\vec v, \vec w \ra$ is a linear functional on $V$, hence it has a representer by Riesz, which we are entitled to call $T^*\vec w$, so that
\[ \la T\vec v, \vec w \ra = \la \vec v, T^* \vec w \ra \]
Hence we have a map $T^*: W \to V$. We verify that $T^*$ is linear, so that $T^* \in \c L(W,V)$. Consider $T^*(\vec w_1 + \lambda \vec w_2)$. We compute
\begin{align*}
\la \vec v, T^*(\vec w_1 + \lambda \vec w_2) \ra &= \la T\vec v, \vec w_1 + \lambda \vec w_2\ra \\
&= \la T\vec v, \vec w_1 \ra + \bar \lambda \la T\vec v, \vec w_2 \ra \\
&= \la \vec v, T^*\vec w_1 \ra + \bar \lambda \la \vec v, T^* \vec w_2 \ra \\
&= \la \vec v, T^*\vec w_1 \ra + \la \vec v, \lambda T^* \vec w_2 \ra \\
&= \la \vec v, T^*\vec w_1 + \lambda T^* \vec w_2 \ra \qquad \forall \vec v \in V,\ \vec w_1, \vec w_2 \in W,\ \lambda \in \b F
\end{align*}
So $T^*(\vec w_1 + \lambda \vec w_2) = T^* \vec w_1 + \lambda T^* \vec w_2$.
\begin{theorem}[Properties of the adjoint]
Let $S, T \in \c L(V,W)$ and $\lambda \in \b F$. We have
\begin{enumerate}
\item $(S + T)^* = S^* + T^*$
\item $(\lambda T)^* = \bar \lambda T^*$
\item $(ST)^* = T^*S^*$ (for composable $S$ and $T$)
\item $(T^*)^* = T$
\item $\b I^* = \b I$
\end{enumerate}
\end{theorem}
\begin{proof} Refer to the book, page 206.
\end{proof}
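As an illustration, property (c) follows directly from the defining identity: for all $\vec v, \vec w$,
\[ \la (ST)\vec v, \vec w \ra = \la T\vec v, S^*\vec w \ra = \la \vec v, T^*S^*\vec w \ra \]
so $(ST)^* = T^*S^*$ by the uniqueness of the adjoint.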
\begin{theorem}[Null space and range of adjoint]
Let $T \in \c L(V,W)$, then
\begin{enumerate}
\item $\nul T^* = (\range T)^\perp$
\item $\range T^* = (\nul T)^\perp$
\item $\nul T = (\range T^*)^\perp$
\item $\range T = (\nul T^*)^\perp$
\end{enumerate}
\end{theorem}
\begin{proof}
Let $\vec w \in W$. Then
\begin{align*}
\vec w \in \nul T^* &\iff T^* \vec w = 0 \\
&\iff \la \vec v, T^* \vec w \ra = 0 \ \forall \vec v \in V \\
&\iff \la T\vec v, \vec w \ra = 0 \ \forall \vec v \in V \\
&\iff \vec w \in (\range T)^\perp
\end{align*}
(b), (c), and (d) follow by similar logic and are left as exercises.
\end{proof}
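For instance, (d) follows from (a) by taking orthogonal complements:
\[ (\nul T^*)^\perp = \left((\range T)^\perp\right)^\perp = \range T \]
using the fact that $(U^\perp)^\perp = U$ for a subspace $U$ of a finite-dimensional inner product space.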
\begin{definition}
Let $T \in \c L(V)$. $T$ is called self-adjoint if $T^* = T$.
\end{definition}
\begin{definition}
Let $T \in \c L(V)$. $T$ is called normal if $TT^* = T^*T$.
\end{definition}
%\begin{example}
% Let $V$ be $\b R^3$ with the standard inner product, let $T \in \c L(V)$ such that $T: (x_1,x_2,x_3) \mapsto (x_1,2x_2,3x_3)$.
%\end{example}
\begin{example}
Define $T \in \c L(V)$ by $T: \vec v \mapsto \la \vec v, \vec x \ra \vec y$ for some fixed $\vec x, \vec y \in V$. Compute $T^*$. \\
We compute
\begin{align*}
\langle \vec v, T^* \vec w \rangle &= \langle T \vec v, \vec w \rangle \\
&= \langle \langle \vec v, \vec x \rangle \vec y, \vec w \rangle \\
&= \langle \vec v, \vec x \rangle \langle \vec y, \vec w \rangle \\
&= \langle \vec v, \overline{\langle \vec y, \vec w \rangle} \vec x \rangle \\
&= \langle \vec v, \langle \vec w, \vec y \rangle \vec x \rangle
\end{align*}
hence we can conclude $T^* \vec w = \la \vec w, \vec y \ra \vec x$ for all $\vec w \in V$.
\end{example}
\newpage
\subsubsection{Matrix representation}
Suppose $T \in \c L(V,W)$, where $V,W$ are finite-dimensional inner product spaces. Let $\li{\vec e}n$ be an orthonormal basis for $V$ and $\li{\vec f}m$ be an orthonormal basis for $W$. We can see that $\c M(T)$ is obtained through
\[ T \vec e_j = \la T\vec e_j, \vec f_1 \ra \vec f_1 + \la T\vec e_j, \vec f_2 \ra \vec f_2 + \cdots + \la T\vec e_j, \vec f_m \ra \vec f_m\]
\[ T^* \vec f_k = \la T^* \vec f_k, \vec e_1 \ra \vec e_1 + \la T^* \vec f_k, \vec e_2 \ra \vec e_2 + \cdots + \la T^* \vec f_k, \vec e_n \ra \vec e_n\]
Then
\[ \c M(T)(k,j) = \la T\vec e_j, \vec f_k \ra \qquad \c M(T^*)(j,k) = \la T^* \vec f_k, \vec e_j \ra = \la \vec f_k, T\vec e_j \ra = \overline{\la T\vec e_j, \vec f_k \ra} \]
Therefore we have $\c M(T^*) = \overline{\c M(T)}^T$, the conjugate transpose of $\c M(T)$.
\begin{remark}
The above statement only holds if the bases for $V$ and $W$ are orthonormal.
\end{remark}
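For a concrete instance, if $\c M(T) = \begin{bmatrix} 1 & i \\ 0 & 2 \end{bmatrix}$ with respect to an orthonormal basis of $\b C^2$, then
\[ \c M(T^*) = \begin{bmatrix} 1 & 0 \\ -i & 2 \end{bmatrix} \]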
\begin{remark}
If an operator $T$ is self-adjoint, then $T$ is normal, but the converse does not hold; see the example below.
\end{remark}
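For example, the operator on $\b R^2$ with matrix
\[ \c M(T) = \begin{bmatrix} 1 & -1 \\ 1 & 1 \end{bmatrix} \]
satisfies $TT^* = T^*T = 2\b I$, so $T$ is normal, yet $T \neq T^*$.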
\begin{proposition}
Every eigenvalue of a self-adjoint operator is real.
\end{proposition}
\begin{proof}
Suppose $T \in \c L(V)$ is self-adjoint, $\lambda$ is an eigenvalue of $T$, and $\vec v \neq \vec 0$ is an eigenvector corresponding to $\lambda$. We compute
\[
\lambda \la \vec v, \vec v \ra = \la \lambda \vec v, \vec v \ra
= \la T\vec v, \vec v \ra
= \la \vec v, T\vec v \ra
= \la \vec v, \lambda \vec v \ra
= \bar \lambda \la \vec v, \vec v \ra
\] Since $\la \vec v, \vec v \ra \neq 0$, we get $\lambda = \bar \lambda$, hence $\lambda \in \b R$.
\end{proof}
\begin{question}
Suppose $\la T\vec v,\vec v \ra = 0 \ \forall \vec v \in V$. Does this imply that $T$ is the zero map?
\end{question}
\begin{answer}[Surprisingly]
Yes over $\b C$ and no over $\b R$.
\end{answer}
\begin{proof}
Suppose $\b F = \b C$. The following hold:
\[ \la T(\vec v + \vec w), \vec v + \vec w \ra = \la T\vec v, \vec v \ra + \la T\vec v, \vec w \ra + \la T\vec w,\vec v \ra + \la T\vec w, \vec w \ra \]
\[ \la T(\vec v - \vec w), \vec v - \vec w \ra = \la T\vec v, \vec v \ra - \la T\vec v, \vec w \ra - \la T\vec w,\vec v \ra + \la T\vec w, \vec w \ra \]
Subtracting the second equation from the first, we have
\[ \la T(\vec v + \vec w), (\vec v + \vec w) \ra - \la T(\vec v - \vec w), (\vec v - \vec w) \ra =\boxed{ 2\la T\vec v, \vec w\ra + 2\la T\vec w, \vec v \ra} \]
We also compute
\[ \la T(\vec v + i\vec w) , (\vec v + i\vec w)\ra - \la T(\vec v - i\vec w) , (\vec v - i\vec w)\ra = \boxed{2i\left(\la T\vec w, \vec v\ra - \la T\vec v, \vec w \ra\right)}\]
By hypothesis every term of the form $\la T\vec u, \vec u \ra$ vanishes, so both boxed quantities equal $0$. Dividing the second boxed equation by $i$ and subtracting the result from the first gives
\[ 4\la T \vec v, \vec w \ra = 0 \qquad \forall \vec v, \vec w \in V\]
Taking $\vec w = T\vec v$ yields $T\vec v = \vec 0$ for every $\vec v$, hence $T = 0$.
Now suppose $\b F = \b R$ and consider $\b R^2$. Let $T$ be rotation by $\pi / 2$, i.e. $T(x_1, x_2) := (-x_2 , x_1)$. Then $\la T\vec v, \vec v \ra = 0 \ \forall \vec v$, but $T \neq 0$. However, if $T$ is self-adjoint, then $T = 0$ (see the remark below).
\end{proof}
\begin{remark}
Suppose $\b F = \b R$, $T = T^*$, and $\la T\vec v, \vec v \ra = 0 \ \forall \vec v \in V$. Since the inner product is symmetric and $T$ is self-adjoint, $\la T\vec w, \vec v \ra = \la \vec w, T\vec v \ra = \la T\vec v, \vec w \ra$, so the first boxed quantity becomes $4\la T\vec v, \vec w \ra$. We have
\[ 4\la T\vec v, \vec w \ra = \la T(\vec v + \vec w), (\vec v + \vec w) \ra - \la T(\vec v - \vec w), (\vec v - \vec w )\ra = 0\]
Hence $T = 0$.
\end{remark}
\begin{corollary}
On a complex inner product space, $\la T \vec v, \vec v \ra \in \b R$ for all $\vec v$ is equivalent to $T$ being self-adjoint.
\end{corollary}
\begin{proof}
\[ \la T \vec v, \vec v \ra \in \b R \iff \la T\vec v, \vec v\ra = \overline{\la T\vec v, \vec v \ra} = \la \vec v, T\vec v \ra = \la T^*\vec v, \vec v \ra \iff \la (T - T^*)\vec v, \vec v\ra = 0 \ \forall \vec v\]
By the previous question (over $\b C$), the last condition holds if and only if $T - T^* = 0$, i.e. $T = T^*$. Hence $T$ is self-adjoint.
\end{proof}
\begin{theorem}
$T$ is normal if and only if $||T\vec v|| = ||T^* \vec v|| \ \forall \vec v \in V$.
\end{theorem}
\begin{proof}
\[ ||T\vec v|| = ||T^* \vec v|| \ \forall \vec v \iff \la T\vec v, T\vec v\ra = \la T^* \vec v, T^* \vec v \ra \ \forall \vec v \iff \la (T^*T - TT^*) \vec v, \vec v \ra = 0 \ \forall \vec v\]
Since $T^*T - TT^*$ is self-adjoint, the last condition holds if and only if $T^*T - TT^* = 0$, i.e. $TT^* = T^*T$, so $T$ is normal.
\end{proof}
\begin{theorem}
Say $(\lambda, \vec v)$ is an eigenpair of a normal operator $T$. Then
\[ ||(T - \lambda \b I) \vec v ||=||(T^* - \bar \lambda \b I)\vec v||\]
\end{theorem}
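\begin{proof}
Since $T$ is normal, so is $T - \lambda \b I$, and its adjoint is $T^* - \bar \lambda \b I$. Applying the previous theorem to $T - \lambda \b I$ gives
\[ ||(T - \lambda \b I) \vec v|| = ||(T - \lambda \b I)^* \vec v|| = ||(T^* - \bar \lambda \b I)\vec v||\]
In particular, $T\vec v = \lambda \vec v$ implies $T^* \vec v = \bar \lambda \vec v$.
\end{proof}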
\subsection{Spectral Theorem}
\subsubsection*{Over Complex Vector Space}
\begin{theorem}[Spectral Theorem over Complex Vector Space]
Suppose $T \in \c L(V)$, where $V$ is a finite-dimensional vector space, $\b F = \b C$, and $T$ is normal. Then $V$ has an orthonormal basis of eigenvectors of $T$; and conversely, if $T$ has a diagonal matrix representation with respect to some orthonormal basis, then $T$ is normal.
\end{theorem}
\begin{proof}
Suppose $T$ has a diagonal matrix representation with respect to some orthonormal basis, i.e.
\[ \c M(T) = \begin{bmatrix}
\lambda_1 & & & 0 \\
& \lambda_2 \\
& & \ddots \\
0& & & \lambda_n
\end{bmatrix}\qquad \c M(T^*) = \begin{bmatrix}
\bar \lambda_1 & & & 0 \\
& \bar \lambda_2 \\
& & \ddots \\
0& & & \bar \lambda_n
\end{bmatrix}\]
Since any two diagonal matrices commute, we can see that
\[ \c M(T) \c M(T^*) = \c M(T^*) \c M(T) = \begin{bmatrix}
|\lambda_1|^2 & & & 0 \\
& |\lambda_2|^2 \\
& & \ddots \\
0& & & |\lambda_n|^2
\end{bmatrix}\]
We have $TT^* = T^*T$, hence $T$ is normal.
\noindent Conversely, suppose $T$ is normal. By Schur's Theorem, there exists an orthonormal basis with respect to which
\[ \c M(T) = \begin{bmatrix}
a_{11} & a_{12} & \cdots & a_{1n} \\
0 & a_{22} & \cdots & a_{2n} \\
\vdots & \vdots & \ddots & \vdots\\
0 & 0 & \cdots & a_{nn}
\end{bmatrix} \implies \c M(T^*) = \begin{bmatrix}
\bar a_{11} & \bar a_{12} & \cdots & \bar a_{1n} \\
0 & \bar a_{22} & \cdots & \bar a_{2n} \\
\vdots & \vdots & \ddots & \vdots\\
0 & 0 & \cdots & \bar a_{nn}
\end{bmatrix} \]
Recall that $||T\vec v|| = ||T^* \vec v|| \ \forall \vec v \in V$ since $T$ is normal. Call this orthonormal basis $\li{\vec e}n$. We have $T\vec e_1 = a_{11}\vec e_1$, so $||T\vec e_1|| = |a_{11}|$. We then compute
\begin{align*}
T^* \vec e_1 &= \bar a_{11} \vec e_1 + \bar a_{12} \vec e_2 + \cdots + \bar a_{1n} \vec e_n \\
||T^*\vec e_1 || &= \sqrt{|a_{11}|^2 + |a_{12}|^2 + \cdots + |a_{1n}|^2}
\end{align*}
Since $||T\vec e_1|| = ||T^* \vec e_1||$, we get $|a_{12}| = |a_{13}| = \cdots = |a_{1n}| = 0$.
\noindent
By similar logic, $||T\vec e_j|| = ||T^*\vec e_j||$ implies $|a_{j,j+1}| = |a_{j,j+2}| = \cdots = |a_{j,n}| = 0$ for each $j$. Hence $\c M(T)$ is diagonal.
\end{proof}
\begin{remark}
So the Schur form of a normal operator is necessarily diagonal.
\end{remark}
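As a concrete illustration, the rotation $T(x_1, x_2) = (-x_2, x_1)$ from earlier, viewed as a normal operator on $\b C^2$, satisfies
\[ T\begin{bmatrix} 1 \\ -i \end{bmatrix} = i \begin{bmatrix} 1 \\ -i \end{bmatrix} \qquad T\begin{bmatrix} 1 \\ i \end{bmatrix} = -i \begin{bmatrix} 1 \\ i \end{bmatrix}\]
so $\frac{1}{\sqrt 2}(1, -i), \frac{1}{\sqrt 2}(1, i)$ form an orthonormal basis of eigenvectors, as the spectral theorem guarantees.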
\newpage
\subsubsection*{Over Real Vector Space}
\begin{lemma}
Suppose $T \in \c L(V)$ is self-adjoint and $\beta, \gamma \in \b R$ are such that $\beta^2 - 4 \gamma < 0$. Then
\[ T^2 + \beta T + \gamma I\] is invertible.
\end{lemma}
\begin{proof}
Consider nonzero $\vec v \in V$. We estimate
\begin{align*}
\la (T^2 + \beta T + \gamma I)\vec v, \vec v \ra &= \la T^2 \vec v, \vec v \ra + \la \beta T \vec v, \vec v \ra + \gamma \la \vec v, \vec v\ra \\
&= \la T\vec v, T\vec v \ra + \beta \la T\vec v, \vec v\ra + \gamma||\vec v||^2 \\
&\geq ||T\vec v||^2 - |\beta| \cdot ||T\vec v|| \cdot ||\vec v|| + \gamma||\vec v||^2 \\
&= \left( ||T\vec v|| - \frac{|\beta| \cdot ||\vec v||}{2} \right)^2 + \left( \gamma - \frac{\beta^2}{4} \right)||\vec v||^2 \\
&> 0
\end{align*}
hence we can see that $\nul (T^2 + \beta T + \gamma I) = \lb \vec 0 \rb$, so the operator is injective. Since $T^2 + \beta T + \gamma I$ is an operator on the finite-dimensional space $V$, injectivity implies invertibility.
\end{proof}
\begin{theorem}
If $T \in \c L(V)$ is self-adjoint, then $T$ has an eigenvalue, regardless of whether $\b F = \b R$ or $\b C$.
\end{theorem}
\begin{proof}
Assume $\dim V = n$. Consider any nonzero $\vec v \in V$. The $n+1$ vectors $\vec v, T\vec v, T^2 \vec v, \ldots, T^n \vec v$ are linearly dependent, i.e. there exist $a_0, a_1, \ldots, a_n \in \b R$, not all zero, such that
\[a_0 \vec v + a_1 T \vec v + \cdots + a_n T^n \vec v = 0\]
Consider $f(x) = a_0 + a_1x + \cdots + a_nx^n$. We know from Chapter 4 that we can factor $f(x)$ as
\[ f(x) = c(x^2 + \beta_1 x + \gamma_1) \cdots (x^2 + \beta_M x + \gamma_M) (x - \lambda_1) \cdots (x - \lambda_m)\]
where all coefficients are real, $c \neq 0$, and $\beta_i^2 - 4 \gamma_i < 0$ for each $i$. By the lemma, each quadratic factor $T^2 + \beta_i T + \gamma_i I$ is invertible, so we can cancel them. Therefore we have
\[ 0 = (T - \lambda_1 I) \cdots (T - \lambda_m I)\vec v\]
Note $m \geq 1$, since otherwise this would read $\vec v = \vec 0$, a contradiction. Hence one of the $(T - \lambda_j I)$ is not injective, so $T$ has an eigenvalue.
\end{proof}
\begin{theorem}
Suppose $T \in \c L(V)$, where $V$ is a finite-dimensional vector space, $\b F = \b R$, and $T$ is self-adjoint. Then $T$ has a diagonal matrix representation with respect to some orthonormal basis for $V$. Conversely, if $T$ has a diagonal matrix representation with respect to some orthonormal basis, then $T = T^*$.
\end{theorem}
\begin{proof}
Suppose $T$ has a diagonal matrix representation with respect to some orthonormal basis, i.e.
\[ \c M(T) = \begin{bmatrix}
\lambda_1 & & & 0 \\
& \lambda_2 \\
& & \ddots \\
0& & & \lambda_n
\end{bmatrix}\qquad \c M(T^*) = \begin{bmatrix}
\bar \lambda_1 & & & 0 \\
& \bar \lambda_2 \\
& & \ddots \\
0& & & \bar \lambda_n
\end{bmatrix}\]
We know $\c M(T^*) = \c M(T)$ since $\bar \lambda_j = \lambda_j$ for real $\lambda_j$. Hence $T$ is self-adjoint.
\noindent
Conversely, suppose $T = T^*$. By the previous theorem, $T$ has at least one eigenpair; say $T\vec u = \lambda \vec u$, where without loss of generality $||\vec u|| = 1$. If $\vec w \perp \vec u$, then $\la \vec u, T\vec w \ra = \la T\vec u, \vec w \ra = \lambda \la \vec u, \vec w \ra = 0$, so $T\vec w \perp \vec u$; that is, $\spa(\vec u)^\perp$ is invariant under $T$. Notice that $T\vert_{\spa(\vec u)^\perp}$ is still self-adjoint:
\[ \la T\vert_{\spa(\vec u)^\perp} \vec w_1, \vec w_2 \ra = \la T \vec w_1, \vec w_2 \ra = \la \vec w_1, T\vec w_2\ra = \la \vec w_1, T\vert_{\spa(\vec u)^\perp} \vec w_2 \ra \qquad \forall \vec w_1, \vec w_2 \in \spa(\vec u)^\perp\]
Hence $\c M(T) = \begin{bmatrix}
\lambda & 0 & \cdots & 0 \\
0 & \ast & \cdots & \ast \\
0 & \vdots & \ddots & \vdots \\
0 & \ast & \cdots & \ast
\end{bmatrix}$ with respect to an orthonormal basis whose first vector is $\vec u$. Now the problem is reduced to that for $T\vert_{\spa(\vec u)^\perp}$, which acts on a space of dimension $\dim V - 1$. By induction we can build an orthonormal basis of $V$ which consists of eigenvectors.
\end{proof}
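As a worked instance, take the self-adjoint operator on $\b R^2$ with
\[ \c M(T) = \begin{bmatrix} 2 & 1 \\ 1 & 2 \end{bmatrix}\]
Its eigenvalues are $3$ and $1$, with orthonormal eigenvectors $\frac{1}{\sqrt 2}(1,1)$ and $\frac{1}{\sqrt 2}(1,-1)$, so $T$ is diagonal with respect to this orthonormal basis, as the theorem guarantees.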
\subsection{Positive Operators and Isometries}
\begin{definition}
Suppose $T \in \c L(V)$ is self-adjoint and satisfies
\[ \la T\vec v, \vec v \ra \geq 0 \qquad \forall \vec v \in V\]
Then $T$ is called \textbf{nonnegative}.
\noindent If instead $\la T\vec v, \vec v\ra > 0$ for all nonzero $\vec v \in V$, then $T$ is called \textbf{positive}.
\end{definition}
\begin{theorem}[Characterization Theorem]
For $T \in \c L(V)$, the following are equivalent:
\begin{enumerate}
\item $T$ is nonnegative
\item $T = T^*$ and all its eigenvalues are nonnegative.
\item $T$ has a nonnegative square root, i.e. there exists a nonnegative $R \in \c L(V)$ such that $R^2 = T$.
\item $T$ has a self-adjoint square root. i.e. $\exists S = S^*$ such that $S^2 = T$.
\item There exists $Q \in \c L(V)$ such that $Q^*Q = T$.
\end{enumerate}
\end{theorem}
\newpage
\begin{proof}
(e) $\implies$ (a). Suppose $T = Q^*Q$. Then $T^* = (Q^*Q)^* = Q^*Q = T$, so $T$ is self-adjoint, and
\[ \la T\vec v, \vec v \ra = \la Q^*Q\vec v, \vec v \ra = \la Q\vec v, Q\vec v \ra = ||Q\vec v||^2 \geq 0\]
(a) $\implies$ (b). Suppose $T$ is nonnegative. Self-adjointness is part of the definition. If $T\vec v = \lambda \vec v$ with $\vec v \neq \vec 0$, then $0 \leq \la T\vec v, \vec v \ra = \lambda ||\vec v||^2$, so $\lambda \geq 0$. For the remaining implications, refer to the book.
\end{proof}
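For instance, a nonnegative square root can be computed through the spectral decomposition. Taking the self-adjoint operator from the previous example,
\[ \c M(T) = \begin{bmatrix} 2 & 1 \\ 1 & 2 \end{bmatrix} \implies \c M\left(\sqrt T\right) = \frac{1}{2}\begin{bmatrix} \sqrt 3 + 1 & \sqrt 3 - 1 \\ \sqrt 3 - 1 & \sqrt 3 + 1 \end{bmatrix}\]
obtained by replacing the eigenvalues $3, 1$ with $\sqrt 3, 1$ on the same orthonormal eigenbasis; squaring $\c M(\sqrt T)$ recovers $\c M(T)$.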
\begin{definition}
Suppose $S \in \c L(V)$. $S$ is called an \textbf{isometry} if
\[ ||S\vec v|| = ||\vec v|| \qquad \forall \vec v \in V\]
\end{definition}
\begin{remark}
Observe that an isometry necessarily preserves all inner products:
\[ \la S\vec u, S \vec v \ra = \la \vec u, \vec v \ra \qquad \forall \vec u, \vec v \in V\]
This follows from the polarization identity (7.11); for $\b F = \b R$ we have
\[ 4\la T\vec u, \vec v \ra = \la T(\vec u + \vec v), (\vec u + \vec v) \ra - \la T(\vec u - \vec v), (\vec u - \vec v )\ra\]
\end{remark}
\begin{corollary}
An isometry maps an orthonormal basis to another orthonormal basis.
\end{corollary}
\begin{proof}
If $\la \vec e_i, \vec e_j \ra = \delta_{ij}$, then $\la S\vec e_i, S\vec e_j \ra = \la \vec e_i, \vec e_j \ra = \delta_{ij}$, where $\delta_{ij} = \left\{ \begin{array}{cc}
1 & \text{ if } i = j \\ 0 & \text{ if } i \neq j \end{array} \right.$. \\
So if $(\li{\vec e}n)$ is an orthonormal basis then $(\li{S\vec e}n)$ is an orthonormal basis.
\end{proof}
\newpage
\subsection{Polar Decomposition and Singular Value Decomposition}
\begin{theorem}[Polar Decomposition]
Take $T \in \c L(V)$. There exists an isometry $S \in \c L(V)$ such that
\[ T = S \sqrt{T^*T}\]
where $\sqrt{T^*T}$ is the nonnegative square root of $T^*T$.
\end{theorem}
\begin{proof}
Observe that $||T\vec v || = \left|\left|\sqrt{T^*T} \vec v\right|\right| \qquad \forall \vec v \in V$. Indeed
\begin{align*}
||T\vec v||^2 &= \la T\vec v, T \vec v \ra \\
&= \la T^*T \vec v, \vec v \ra \\
&= \la \sqrt{T^*T} \cdot \sqrt{T^*T} \vec v, \vec v \ra \\
&= \la \sqrt{T^*T} \vec v, \sqrt{T^*T} \vec v \ra \\
&= \left|\left| \sqrt{T^*T} \vec v \right|\right|^2
\end{align*}
From this equality of norms, an isometry $S$ with $T = S\sqrt{T^*T}$ can be constructed; see the remark below.
\end{proof}
\begin{remark}[Construction of $S$]
For any $\vec v \in V$, define
\[ S_1\left( \sqrt{T^*T} \vec v\right) := T \vec v\]
We first need to check that this is well-defined; that is,
\[ \sqrt{T^*T} \vec v_1 = \sqrt{T^*T} \vec v_2 \implies T\vec v_1 = T\vec v_2\]
This is true because
\[ \sqrt{T^*T}(\vec v_1 - \vec v_2) = \vec 0 \implies 0 = \left|\left| \sqrt{T^*T} (\vec v_1 - \vec v_2)\right|\right| = ||T(\vec v_1 - \vec v_2)||\]
hence $T\vec v_1 = T\vec v_2$. \\
So $S_1$ is now defined as an element of $\c L\left(\range \sqrt{T^*T}, \range T\right)$, and $S_1$ is in fact invertible and an isometry. \\
So $\dim \range \sqrt{T^*T} = \dim \range T$. Now we need to extend $S_1$ to an operator on $V$. \\
Take $\left(\range \sqrt{T^*T}\right)^\perp$ and $(\range T)^\perp$. Send any orthonormal basis of $\left(\range \sqrt{T^*T}\right)^\perp$ to any orthonormal basis of $(\range T)^\perp$. This defines another isometry $S_2$.
Finally define
\[ S\vec v = S_1 \vec u + S_2 \vec w\] where $\vec v = \vec u + \vec w$ with $\vec u \in \range \sqrt{T^*T}$ and $\vec w \in \left(\range \sqrt{T^*T}\right)^\perp$. This makes $S$ an isometry on all of $V$, and $T = S \sqrt{T^*T}$.
\end{remark}
\begin{theorem}
Let $T \in \c L(V)$, for finite-dimensional $V$. Then there exist orthonormal bases $\li {\vec e}n$ and $\li {\vec f}n$ and nonnegative numbers $\li sn$ such that
\[ T\vec v = s_1 \la \vec v, \vec e_1 \ra \vec f_1 + s_2 \la \vec v, \vec e_2 \ra \vec f_2 + \cdots + s_n \la \vec v, \vec e_n \ra \vec f_n\]
The $s_j$ are called singular values of $T$.
\end{theorem}
\begin{proof}[Proof (derivation via the polar decomposition)]
Say $T = S\sqrt{T^*T}$. Since $\sqrt{T^*T}$ is nonnegative, by the spectral theorem we know that $V$ has an orthonormal basis $\li{\vec e}n$ consisting of eigenvectors of $\sqrt{T^*T}$ corresponding to (nonnegative) eigenvalues $\li sn$. Writing $\vec f_j := S\vec e_j$, we compute
\begin{align*}
\vec v &= \la \vec v, \vec e_1 \ra \vec e_1 + \la \vec v, \vec e_2 \ra \vec e_2 + \cdots + \la \vec v, \vec e_n \ra \vec e_n \\
\sqrt{T^*T} \vec v &= s_1\la \vec v, \vec e_1 \ra \vec e_1 + s_2\la \vec v, \vec e_2 \ra \vec e_2 + \cdots + s_n\la \vec v, \vec e_n \ra \vec e_n \\
S\sqrt{T^*T}\vec v &= s_1\la \vec v, \vec e_1 \ra\vec f_1 + s_2\la \vec v, \vec e_2 \ra\vec f_2 + \cdots + s_n\la \vec v, \vec e_n \ra\vec f_n \\
T\vec v &= s_1\la \vec v, \vec e_1 \ra\vec f_1 + s_2\la \vec v, \vec e_2 \ra\vec f_2 + \cdots + s_n\la \vec v, \vec e_n \ra\vec f_n
\end{align*}
and $\li{\vec f}n$ is also orthonormal, since $S$ is an isometry.
\end{proof}
\begin{example}
Take $T(x_1, x_2) = (2x_1 + x_2, -x_1 + 2x_2)$.
Find its polar decomposition.
\end{example}
\begin{answer}
\[\c M(T) = \bml 2 & 1 \\ -1 & 2 \bmr \qquad \c M(T^*) = \bml 2 & -1 \\ 1 & 2 \bmr \implies \c M(T^*T) = \bml 5 & 0 \\ 0 & 5 \bmr\]
Therefore we have $s_1 = s_2 = \sqrt 5$, and with $\vec e_1, \vec e_2$ the standard basis, $\vec f_1 = \left(2 / \sqrt 5, -1 / \sqrt 5\right), \vec f_2 = \left(1 / \sqrt 5, 2 / \sqrt 5\right)$.
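Since $T^*T = 5\,\b I$, the nonnegative square root is $\sqrt{T^*T} = \sqrt 5\, \b I$, and the isometry in the polar decomposition is $S = T / \sqrt 5$, giving
\[ T = S \sqrt{T^*T} = \frac{1}{\sqrt 5}\bml 2 & 1 \\ -1 & 2 \bmr \cdot \sqrt 5\, \b I\]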
\end{answer}