diff --git a/1-manifolds.tex b/1-manifolds.tex
index 6f4f566..f241170 100644
--- a/1-manifolds.tex
+++ b/1-manifolds.tex
@@ -140,7 +140,7 @@ \section{Topological manifolds}\label{sec:top_manifolds}
   More generally, any $n$-dimensional vector space\footnote{In fact, any open subset of an $n$-dimensional vector space.} is a topological $n$-manifold.
 \end{example}
 
-\begin{exercise}[The line with two origins]
+\begin{exercise}[The line with two origins]\label{exe:line-two-origins}
   Even though $\R^n$ with the euclidean topology is Hausdorff, being Hausdorff does not follow from being locally euclidean. A famous counterexample is the following\sidenote[][-2em]{See also \cite[Problem 1-1]{book:lee} and \cite[Problem 5.1]{book:tu}.}.
   \begin{marginfigure}
     \includegraphics{1_ex_1_0_11.pdf}
diff --git a/5-tensors.tex b/5-tensors.tex
index a4b77ff..fb243a2 100644
--- a/5-tensors.tex
+++ b/5-tensors.tex
@@ -92,7 +92,7 @@ \section{Tensors}
 \marginnote{A more general approach to this proposition is by proving the universal property of tensor spaces. See for instance~\cite[Propositions 12.5, 12.7 and 12.8]{book:lee}.}
 \begin{proposition}\label{prop:tensorbasis}
   Let $V$ be an $n$-dimensional vector space.
-  Let $\{e_j\}$ and $\{\varepsilon^i\}$ respectively denote the bases of $V=T_0^1(V)$ and $V^*=T_1^0(V)$ respectively.
+  Let $\{e_j\}$ and $\{\varepsilon^i\}$ denote dual bases of $V=T_0^1(V)$ and $V^*=T_1^0(V)$ respectively, that is, $(\varepsilon^i \mid e_j) = \delta^i_j$.
   Then, every $\tau\in T_s^r(V)$ can be uniquely written as the linear combination\marginnote{\textit{Exercise}: expand Einstein's notation to write the full sum on the left with the relevant indices. In the expression, all indices $j_1,\ldots,j_r$, $i_1,\ldots,i_s$ run from $1$ to $n$.}%
   \begin{equation}\label{eq:tensor:decomposition}
     \tau = \tau^{j_1\cdots j_r}_{i_1\cdots i_s} \, e_{j_1}\otimes\cdots\otimes e_{j_r}\otimes \varepsilon^{i_1}\otimes \cdots\otimes \varepsilon^{i_s},
@@ -107,16 +107,18 @@ \section{Tensors}
 \end{proposition}
 
 \begin{proof}
-  Let $\{\beta^j\}$ and $\{b_i\}$ denote the bases of $V^*$ and $V$ that are dual to $\{e_j\}$ and $\{\varepsilon^i\}$, that is,
+  % Let $\{\beta^j\}$ and $\{b_i\}$ denote the bases of $V^*$ and $V$ that are dual to $\{e_j\}$ and $\{\varepsilon^i\}$, that is,
   \marginnote{A linear map is uniquely specified by its action on a basis, which in particular means that these dual bases are unique.}
-  \begin{equation}
-    (\beta^j\mid e_i) = \delta^j_i = (\varepsilon^j \mid b_i).
-  \end{equation}
+  %\begin{equation}
+  %  (\beta^j\mid e_i) = \delta^j_i = (\varepsilon^j \mid b_i).
+  %\end{equation}
   Define
   \begin{equation}
-    \tau^{j_1\cdots j_r}_{i_1\cdots i_s} := \tau(\beta^{j_1}, \ldots, \beta^{j_r}, b_{i_1}, \ldots, b_{i_s}).
+    %\tau^{j_1\cdots j_r}_{i_1\cdots i_s} := \tau(\beta^{j_1}, \ldots, \beta^{j_r}, b_{i_1}, \ldots, b_{i_s}).
+    \tau^{j_1\cdots j_r}_{i_1\cdots i_s} := \tau(\varepsilon^{j_1}, \ldots, \varepsilon^{j_r}, e_{i_1}, \ldots, e_{i_s}).
   \end{equation}
-  Then, on any element of the form $(\beta^{j_1}, \ldots, \beta^{j_r}, b_{i_1}, \ldots, b_{i_s})$, we trivially have the decomposition~\eqref{eq:tensor:decomposition}. By multilinearity of all the terms involved,~\eqref{eq:tensor:decomposition} holds for any element $(\omega^1, \ldots, \omega^r, v_1, \ldots, v_s)$ after decomposing it on the basis.
+  Then, on any element of the form $(\varepsilon^{j_1}, \ldots, \varepsilon^{j_r}, e_{i_1}, \ldots, e_{i_s})$, we trivially have the decomposition~\eqref{eq:tensor:decomposition}.
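+  Indeed, when evaluating the right hand side of~\eqref{eq:tensor:decomposition} on such an element, duality leaves a single surviving term: every summand reduces to $\tau^{k_1\cdots k_r}_{l_1\cdots l_s}\, \delta^{j_1}_{k_1}\cdots\delta^{j_r}_{k_r}\, \delta^{l_1}_{i_1}\cdots\delta^{l_s}_{i_s}$, so the sum collapses to $\tau^{j_1\cdots j_r}_{i_1\cdots i_s} = \tau(\varepsilon^{j_1}, \ldots, \varepsilon^{j_r}, e_{i_1}, \ldots, e_{i_s})$.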
+  By multilinearity of all the terms involved,~\eqref{eq:tensor:decomposition} holds for any element $(\omega^1, \ldots, \omega^r, v_1, \ldots, v_s)$ after decomposing it on the basis.
 
   Uniqueness follows from the linear independence of the tensor products $e_{j_1}\otimes\cdots\otimes e_{j_r}\otimes \varepsilon^{i_1}\otimes \cdots\otimes \varepsilon^{i_s}$, proceeding by contradiction.
 \end{proof}
 
@@ -130,13 +132,13 @@ \section{Tensors}
   \begin{equation}
     T^r_s(V) \simeq \LaTeXoverbrace{V\otimes \cdots \otimes V}^{r\mbox{ times}} \otimes \LaTeXunderbrace{V^*\otimes \cdots \otimes V^*}_{s\mbox{ times}}.
   \end{equation}
-  This allows us to choose whichever interpretation is more convenient for the problem at hand: being it a multilinear map on a cross product of spaces or an element of the tensor product of spaces.
+  This allows us to choose whichever interpretation\footnote{If you are not familiar with tensor products of vector spaces and want to know more, you can start from this \href{https://web.archive.org/web/20231106094511/https://math.stackexchange.com/questions/2138459/understanding-the-definition-of-tensors-as-multilinear-maps/2141663\#2141663}{detailed Stack Exchange answer}. If you want to go deeper, I warmly recommend \href{https://web.archive.org/web/20230922070935/https://www.dpmms.cam.ac.uk/~wtg10/tensors3.html}{Tim Gowers' blog post \emph{How to lose your fear of tensor products}}.} is more convenient for the problem at hand: be it a multilinear map on a cartesian product of spaces or an element of the tensor product of spaces.
 \end{remark}
 
-Let's go back for a moment to the example of inner products.
 \begin{definition}\label{def:metric}
+  \marginnote[1em]{In Definition~\ref{def:metric} and Example~\ref{ex:musicaliso} we are slightly abusing notation: as we will see, the ``true'' pseudo-metric, metric and symplectic tensors will be sections of certain tensor bundles over manifolds which pointwise share the properties presented here. Nevertheless, this definition provides a good intuition for what is coming.}
   We call \emph{pseudo-metric tensor} any tensor $g\in T_2^0(V)$ that is
   \begin{enumerate}
     \item symmetric, i.e. $g(v,w) = g(w,v)$ for all $v,w\in T_0^1(V)$;
@@ -148,8 +150,8 @@ \section{Tensors}
     g(v,w) = 0 \quad\forall w\in V \qquad\Longrightarrow\qquad v=0.
   \end{equation}
 
-  A \emph{metric tensor} or \emph{scalar product} is a non-degenerate pseudo-metric tensor, that is a symmetric, positive definite $(0,2)$-tensor.
-  We will briefly see later that a Riemannian metric provides a metric tensor on the tangent spaces of a manifold.
+  A \emph{metric tensor} or \emph{inner product} is a non-degenerate pseudo-metric tensor, that is, a symmetric, positive definite $(0,2)$-tensor.
+  We will briefly see later that a Riemannian metric is one such object and it provides an inner product on the tangent spaces of a manifold.
 
   An example of non-degenerate tensor which is not a metric is the so-called \emph{symplectic tensor}: a skew-symmetric non-degenerate $(0,2)$-tensor related to the symplectic form, a fundamental object in topology, classical mechanics and the study of Hamiltonian systems.
 \end{definition}
 
@@ -157,7 +159,7 @@ \section{Tensors}
 \begin{example}\label{ex:musicaliso}
   Let $V$ be an $n$-dimensional real vector space with an inner product $g(\cdot, \cdot)$.
   %
-  Denote $\{e_1, \ldots, e_n\}$ the basis for $V$ and $\{e^1, \ldots, e^n\}$ the basis for its dual $V^*$.
+  Denote $\{e_1, \ldots, e_n\}$ a basis for $V$ and $\{e^1, \ldots, e^n\}$ its dual basis\footnote{This is what we called $\{\varepsilon^1, \ldots,\varepsilon^n\}$ before. In this example we follow a common practice in Riemannian geometry: the same letter is used for basis elements of both spaces and the position of the indices then tells where the elements belong.} for $V^*$.
 
   As a bilinear map on $V\times V$, the inner product is uniquely associated to a matrix $[g_{ij}]$ by $g_{ij} := g(e_i, e_j)$.
   We already mentioned that in this case we can canonically identify $V$ with $V^*$.
 
@@ -250,9 +252,7 @@ \section{Tensors}
     \cI = \cI_g : T_s^r (V) \to T_r^s(V) \\
     \cI : \tau \mapsto \tau \circ (\LaTeXunderbrace{{\cdot}^\flat, \ldots, {\cdot}^\flat}_{r\mbox{ times}}, \LaTeXoverbrace{{\cdot}^\sharp, \ldots, {\cdot}^\sharp}^{s\mbox{ times}}).
   \end{align}
-  % TODO: improve remark with coordinate representation
-  % In coordinates the one maping (r,s)-tensors to (r+s, 0)-tensors is $I_g(\tau) = \tau_{i_1\ldots i_r}^{j_1\ldots j_s} g_{j_1 j_{r+1}} \ldots g_{j_s j_{r+s}}}$
-  In general, one can use the metric tensor to raise or lower arbitrary indices, changing the tensor type from $(r,s)$ to $(r+1, s-1)$ or $(r-1, s+1)$.
+  In general, one can use the metric tensor to raise or lower arbitrary indices, changing the tensor type from $(r,s)$ to $(r+1, s-1)$ or $(r-1, s+1)$.
 
   A neat application of this is showing that a non-degenerate bilinear map $g\in T_2^0(V)$ can be lifted to a non-degenerate bilinear map on arbitrary tensors, that is
   \begin{equation}
@@ -260,45 +260,58 @@ \section{Tensors}
     \quad G(\tau, \widetilde\tau) := (\cI_g(\tau), \widetilde\tau),
   \end{equation}
-  where the scalar product is defined via the requirement that tensor products of basis elements of $V$ are orthinormal and that $G$ is invariant under the musical isomorphism.
-  In particular, if $g$ is a metric tensor on $V$, then $G$ is a metric tensor on $T_s^r(V)$.
-
-% TODO: improve remark with coordinate representation
-% If \((g_{ij})\) and \((g^{ij})\) are the matrix element of the matrices representing metric tensor and its inverse resp, then \[ G(\sigma, \tau) = \langle\sigma, \tau\rangle_g := g^{k_1 l_1}\cdots g^{k_rl_r} g_{i_1j_1} \cdots g_{i_sj_s} \sigma_{k_1,\ldots,k_r}^{i_1,\ldots,i_s} \tau_{l_1,\ldots,l_r}^{j_1,\ldots,j_s} \]
+  where the scalar product is defined via the requirement that tensor products of basis elements of $V$ are orthonormal and that $G$ is invariant under the musical isomorphism.
+  In particular\footnote{Exercise: write down the detailed proof of this statement.}, if $g$ is a metric tensor on $V$, then $G$ is a metric tensor on $T_s^r(V)$.
 \end{remark}
 
 \begin{exercise}
-  What do the canonical identifications of $T_s^r(V)$ with $T_0^{r+s}$ and $T_{t+s}^0$ look like?
+  \begin{enumerate}
+    \item What do the canonical identifications of $T_s^r(V)$ with $T_0^{r+s}(V)$ and $T_{r+s}^0(V)$ look like?
+    \item Fix a basis for $V$. What does $\cI_g$ in the previous remark look like with respect to this basis?
+    \item Write down $G$ with respect to the basis from the previous point.
+  \end{enumerate}
+  %In coordinates the one mapping $(r,s)$-tensors to $(r+s, 0)$-tensors is $I_g(\tau) = \tau_{i_1\ldots i_r}^{j_1\ldots j_s} g_{j_1 j_{r+1}} \ldots g_{j_s j_{r+s}}}$
+  %If \((g_{ij})\) and \((g^{ij})\) are the matrix element of the matrices representing metric tensor and its inverse resp, then \[ G(\sigma, \tau) = \langle\sigma, \tau\rangle_g := g^{k_1 l_1}\cdots g^{k_rl_r} g_{i_1j_1} \cdots g_{i_sj_s} \sigma_{k_1,\ldots,k_r}^{i_1,\ldots,i_s} \tau_{l_1,\ldots,l_r}^{j_1,\ldots,j_s} \]
 \end{exercise}
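+
+\begin{example}
+  For a concrete instance of raising and lowering, take $\tau\in T_1^1(V)$ with components $\tau^i_j$ and denote $[g^{ij}]$ the inverse of the matrix $[g_{ij}]$.
+  Lowering the upper index with the metric yields the $(0,2)$-tensor with components
+  \begin{equation}
+    \tau_{ij} := g_{ik}\,\tau^k_j,
+  \end{equation}
+  and raising it again recovers the original components: $g^{ik}\,\tau_{kj} = g^{ik}\, g_{kl}\,\tau^l_j = \delta^i_l\,\tau^l_j = \tau^i_j$.
+\end{example}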
 
 \begin{remark}
-  Interestingly, even though each of the tensor spaces $T_s^r(V)$ is generally not an algebra, the map $\otimes$ transforms the collection of all tensor spaces
+  Interestingly, even though the individual tensor spaces $T_s^r(V)$ are in general not algebras, the map $\otimes$ makes the collection of all tensor spaces
   \marginnote{This is a so-called \emph{graded algebra} since $\otimes : T_s^r(V)\times T_{s'}^{r'}(V) \to T_{s+s'}^{r+r'}(V)$ in some sense moves along the structure of the indices.}
   \begin{equation}
     T(V) := \bigoplus_{r,s\geq 0} T_s^r(V), \qquad T_0^0(V):= \R,
   \end{equation}
-  to an algebra, called \emph{tensor algebra}.
+  an algebra, called \emph{tensor algebra}.
   Here, for $r=s=0$ we define the tensor multiplication with a scalar as the standard multiplication: $r\otimes v = r v$ for $r\in T_0^0(V)=\R$ and $v\in T^1_0(V)=V$.
 \end{remark}
 
-Before moving on, there is an important operation on tensors that will come back later on and is worth to introducte in its generality.
+Before moving on, there is an important operation on tensors that will come back later on and is worth introducing in its generality.
 
 \begin{definition}
   Let $V$ be a vector space and fix $r,s\geq0$.
   For $h\leq r$ and $k\leq s$, we define the \emph{$(h,k)$-contraction} of a tensor as the linear mapping $T_s^r(V)\to T_{s-1}^{r-1}(V)$ defined through
   \begin{align}
-    v_1 & \otimes\cdots\otimes v_r\otimes\omega^1\otimes\cdots\otimes\omega^s \\
-    & \mapsto \omega^k(v_h)\, v_1\otimes\cdots\otimes v_{h-1}\otimes v_{h+1}\cdots\otimes v_r\otimes\omega^1\otimes\cdots\otimes\omega^{k-1}\otimes\omega^{k+1}\cdots\otimes\omega^s
+    v_1 & \otimes\cdots\otimes v_r\otimes\omega^1\otimes\cdots\otimes\omega^s \\
+    & \mapsto \omega^k(v_h)\, v_1\otimes\cdots\otimes v_{h-1}\otimes v_{h+1}\otimes\cdots\otimes v_r\otimes\omega^1\otimes\cdots\otimes\omega^{k-1}\otimes\omega^{k+1}\otimes\cdots\otimes\omega^s,
   \end{align}
   and then extended by linearity, thus mapping $\tau \mapsto \widetilde\tau$ where
   \begin{align}
-    \widetilde\tau & (\nu^1,\ldots,\nu^{r-1}, v_1,\ldots,v_{s-1}) \\
+    \widetilde\tau & (\nu^1,\ldots,\nu^{r-1}, w_1,\ldots,w_{s-1}) \\
     & = \tau(\nu^1,\ldots,\LaTeXunderbrace{e^i}_{h\mbox{th index}},\ldots,\nu^{r-1},w_1,\ldots,\LaTeXunderbrace{e_i}_{k\mbox{th index}},\ldots,w_{s-1}).
   \end{align}
 \end{definition}
 
+\begin{notation}[Hat notation for erased elements]\label{notation:hat}
+  It is common to use a hat to denote elements that have been removed from the tensor product.
+  For instance, the contraction above would look like
+  \begin{align}
+    v_1 & \otimes\cdots\otimes v_r\otimes\omega^1\otimes\cdots\otimes\omega^s \\
+    & \mapsto \omega^k(v_h)\, v_1\otimes\cdots\otimes v_{h-1}\otimes v_{h+1}\otimes\cdots\otimes v_r\otimes\omega^1\otimes\cdots\otimes\omega^{k-1}\otimes\omega^{k+1}\otimes\cdots\otimes\omega^s \\
+    &\qquad =: \omega^k(v_h)\, v_1\otimes\cdots\otimes \widehat{v}_{h} \otimes \cdots\otimes v_r\otimes\omega^1\otimes\cdots\otimes\widehat{\omega}^{k}\otimes\cdots\otimes\omega^s.
+  \end{align}
+\end{notation}
+
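+\begin{example}
+  The simplest instance of a contraction is the trace: for $(r,s)=(1,1)$, the $(1,1)$-contraction maps $\tau\in T_1^1(V)$ to the scalar $\tau(e^i, e_i) = \tau^i_i \in T_0^0(V) = \R$. If $\tau$ is the tensor associated to a linear map $L:V\to V$, this is exactly the trace of any matrix representing $L$.
+\end{example}
+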
 \begin{example}
-  To understand why the two equations in the definition are equivalent it is worth looking at an example over a decomposable element.
+  To better understand the definition of the contraction, it is worth looking at an example over a decomposable element.
   For simplicity, assume $(r,s) = (2,3)$ and $\tau = v_1\otimes v_2\otimes\omega^1\otimes\omega^2\otimes\omega^3$.
   Then $\tau$ corresponds to a multilinear function
   \begin{equation}
@@ -306,8 +319,8 @@ \section{Tensors}
   \end{equation}
   By definition, the $(1,2)$-contraction is
   \begin{align}
-    \widetilde\tau(\nu^1,w_1,w_2) & = \tau(e^i,\nu^1,w_1,e_i,w_2) \\
-    & = e^i(v_1)\;\nu^1(v_2)\omega^1(w_1)\omega^2(e_i)\omega^3(w_2) \\
+    \widetilde\tau(\nu^1,w_1,w_2) & = \tau(e^i,\nu^1,w_1,e_i,w_2) \\
+    & = e^i(v_1)\;\nu^1(v_2)\omega^1(w_1)\omega^2(e_i)\omega^3(w_2) \\
     & = \LaTeXunderbrace{e^i(v_1)\omega^2(e_i)}_{=\omega^2_i e^i (v_1) =\omega^2(v_1)}\nu^1(v_2)\omega^1(w_1)\omega^3(w_2) \\
     & = \omega^2(v_1)\; v_2\otimes\omega^1\otimes\omega^3 (\nu^1,w_1,w_2).
   \end{align}
@@ -343,6 +356,11 @@ \section{Tensor bundles}
     & :=(\varphi(p), d\varphi_p e_{j_1}\otimes\cdots\otimes d\varphi_p e_{j_r}\otimes d(\varphi^{-1})^*\varepsilon^{i_1}\otimes \cdots\otimes d(\varphi^{-1})^*\varepsilon^{i_s}).
   \end{align}
 
+\begin{exercise}
+  Let $M$ be a smooth $m$-dimensional manifold.
+  Show that $T^r_sM$ is a vector bundle of rank $m^{r+s}$.
+\end{exercise}
+
 In analogy to the definition of vector fields, we can introduce tensor fields: these will just be local assignments of tensors to points.
 
 \begin{definition}
@@ -407,24 +425,24 @@ \section{Tensor bundles}
     \quad\mbox{and}\quad
     \frac{\partial}{\partial x^i} = (\varphi^{-1})_* e_i.
   \end{equation}
-  This immediately exposes the transformation laws for the change of coordinates: let $(U, \psi)$ be another chart on $U$ with local coordinates $(y^i)$, then $dy^i = \psi^* de^i$ and $\frac{\partial}{\partial y^i} = (\psi^{-1})_* e_i$. If we denote $\phi = \psi\circ\varphi^{-1}$ the transition map in $\R^n$, we get
+  This immediately exposes the transformation laws for the change of coordinates: let $(U, \psi)$ be another chart on $U$ with local coordinates $(y^i)$, then $dy^i = \psi^* de^i$ and $\frac{\partial}{\partial y^i} = (\psi^{-1})_* e_i$.
+  If we denote by $\sigma = \psi\circ\varphi^{-1}$ the transition map in $\R^n$, we get
   \begin{align}
     \frac{\partial}{\partial x^i} & = (\varphi^{-1})_* e_i \\
-    & = (\varphi^{-1})_* \LaTeXunderbrace{(\phi^{-1})_*\phi_*}_{\id} e_i \\
-    & = (\varphi^{-1}\circ \phi^{-1})_* (\phi_* e_i) \\
-    & = (\psi^{-1})_* ((D\phi)_i^j e_j) \\
-    & = (D\phi)_i^j \frac{\partial}{\partial y^j},
+    & = (\varphi^{-1})_* \LaTeXunderbrace{(\sigma^{-1})_*\sigma_*}_{\id} e_i \\
+    & = (\varphi^{-1}\circ \sigma^{-1})_* (\sigma_* e_i) \\
+    & = (\psi^{-1})_* ((D\sigma)_i^j e_j) \\
+    & = (D\sigma)_i^j \frac{\partial}{\partial y^j},
   \end{align}
   which may be easier to think about in terms of the following diagram
   \begin{equation}\nonumber
     \begin{tikzcd}[row sep=large, column sep=tiny]
-      & \frac{\partial}{\partial x^i} \in \cT_0^1(U) \ni (D\phi)_i^j \frac{\partial}{\partial y^j} \arrow[dl, "\varphi_*" description] \arrow[dr, "\psi*" description] & \\
-      e_i \in \cT_0^1(V) \arrow[rr, "\phi_*" description] & & \cT_0^1(W) \ni \LaTeXunderbrace{\phi_* e_i}_{= (D\phi)_i^j e_j}
+      & \frac{\partial}{\partial x^i} \in \cT_0^1(U) \ni (D\sigma)_i^j \frac{\partial}{\partial y^j} \arrow[dl, "\varphi_*" description] \arrow[dr, "\psi_*" description] & \\
+      e_i \in \cT_0^1(V) \arrow[rr, "\sigma_*" description] & & \cT_0^1(W) \ni \LaTeXunderbrace{\sigma_* e_i}_{= (D\sigma)_i^j e_j}
     \end{tikzcd}
   \end{equation}
   where $V = \varphi(U)$ and $W = \psi(U)$.
-  From this, we immediately get $dy^j = (D\phi)_i^j dx^i$ and, therefore, $dx^i = (D\phi^{-1})_j^i dy^j$.
+  From this, we immediately get $dy^j = (D\sigma)_i^j dx^i$ and, therefore, $dx^i = (D\sigma^{-1})_j^i dy^j$.
 \end{example}
 
 \begin{exercise}
@@ -549,14 +567,14 @@ \section{Tensor bundles}
   \end{equation}
   and
   \begin{equation}
-    \ell(\gamma) := \int_0^1 g(\gamma'(t), \gamma'(t))\, dt.
+    \ell(\gamma) := \int_0^1 \sqrt{g_{\gamma(t)}(\gamma'(t), \gamma'(t))}\, dt.
   \end{equation}
   This will also make $(M, d)$ a metric space whose metric topology is the same as the original manifold topology.
 
 We will not go further into proving these claims or discussing their many interesting consequences. If you are interested or curious you can look at any good book in Riemannian geometry; a good and concise one is \cite[Chapter 6]{book:lee:riemannian}.
 
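+\begin{example}
+  As a quick sanity check, take $M = \R^n$ with the Euclidean metric: any curve joining $p$ and $q$ has length at least $|p - q|$, with equality for the straight segment $\gamma(t) = (1-t)\,p + t\,q$, so the induced distance $d$ is the usual Euclidean one.
+\end{example}
+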
-The relation between manifolds and metric spaces does not end here. As it happens, all smooth manifolds are \emph{metrizable}\footnote{In fact, all the topological manifolds are metrizable. This more general statement is far harder to prove~\cite[Theorem 34.1 and Exercise 1 of Chapter 4.36]{book:munkres:topology} or \cite{nlab:urysohn_metrization_theorem}. Note that not all topological spaces are metrizable, for example a space with more than one point endowed with the discrete topology is not. And even if a topological space is metrizable, the metric will be far from unique: for example, proportional metrics generate the same collection of open sets.}: there exists some distance on the manifold that induces the given topology on it.
+The relation between manifolds and metric spaces does not end here. As it happens, all smooth manifolds are \emph{metrizable}\footnote{In fact, all topological manifolds are metrizable. This more general statement is far harder to prove~\cite[Theorem 34.1 and Exercise 1 of Chapter 4.36]{book:munkres:topology} or \cite{nlab:urysohn_metrization_theorem}. Note that not all topological spaces are metrizable: for example, a space with more than one point endowed with the trivial topology is not. The line with two origins from Exercise~\ref{exe:line-two-origins} is also not metrizable (but it is locally metrizable). You can find plenty more non-trivial examples on \href{https://topology.pi-base.org/spaces?q=\%7EMetrizable}{$\pi$-Base}. Even if a topological space is metrizable, the metric will be far from unique: for example, proportional metrics generate the same collection of open sets.}: there exists some distance on the manifold that induces the given topology on it.
 
 We will show here the proof in the case of smooth manifolds since it is a relatively simple consequence of the existence of partitions of unity.
 
@@ -565,7 +583,7 @@ \section{Tensor bundles}
 \end{theorem}
 \begin{proof}
   Let $M$ be a smooth $m$-dimensional manifold, let $\{(U_i, \varphi_i)\}_{i\in I}$ be a countable atlas for the manifold and let $\{\rho_i\}_{i\in I}$ be a partition of unity adapted to it. See Section~\ref{sec:partition_of_unity}.
-  Denote $g_{\R^m}$ the Euclidean metric on $\R^n$, that is, for any $x \in \R^m$ and any $v,w \in T_x\R^m\simeq\R^m$, $g_{\R^m}(v, w) = v \cdot w$.
+  Denote $g_{\R^m}$ the Euclidean metric on $\R^m$, that is, for any $x \in \R^m$ and any $v,w \in T_x\R^m\simeq\R^m$, $g_{\R^m}(v, w) = v \cdot w$.
 
   We define the metric\footnote{Exercise: check that it is actually a metric} $g$ on $M$ by setting
   \begin{align}
diff --git a/6-differentiaforms.tex b/6-differentiaforms.tex
index fad0a98..04cb9fb 100644
--- a/6-differentiaforms.tex
+++ b/6-differentiaforms.tex
@@ -2,7 +2,7 @@
 In the rest of the course we will focus on a particular class of tensors, which generalizes the differential one-forms that we studied on the cotangent bundle.
 It should not be surprising, then, that these will be called differential $k$-forms and that they will be alternating $(0,k)$-tensors, that is, skew-symmetric in all arguments.
-Geometrically, they are not dissimilar from the forms you may have seen in multivariable calculus: a $k$-form takes $k$ vectors as arguments and computes the $k$-dimensional volume spanned by these $k$-vectors.
+Geometrically, they are similar\footnote{In fact, they are the same, just more general.} to the forms you may have seen in multivariable calculus: a $k$-form takes $k$ vectors as arguments and computes the $k$-dimensional volume spanned by these $k$ vectors.
 In this sense, they will be the key elements to define integration over $k$-dimensional manifolds, in the same way as one-forms and line integrals.
 
 In addition to their role in integration, differential forms provide a framework for generalizing such diverse concepts from multivariable calculus as the cross product, curl, divergence, and Jacobian determinant.
@@ -37,9 +37,9 @@ \section{The exterior product}
 \marginnote{You can find an interesting explanation of the exterior product, based on Penrose's book ``The road to reality'', \href{https://twitter.com/LucaAmb/status/1289244374996406273?s=20}{on a thread by @LucaAmb on Twitter}.}
 If you remember, we said that the determinant was an example of a $T_n^0(\R^n)$ tensor: an antisymmetric tensor, no less.
-At the same time, the determinant of a $n\times n$ matrix, is the signed volume of the parallelotope spanned by the $n$ vectors composing the matrix.
+At the same time, the determinant of an $n\times n$ matrix is the signed volume of the parallelotope spanned by the $n$ vectors defining the matrix.
 We also saw that tensors can be multiplied with the tensor product, which gives rise to a graded algebra on the direct sum of tensor spaces.
 
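+For instance, in $\R^2$ the determinant $\det(u,v) = u^1 v^2 - u^2 v^1$ is bilinear, changes sign when $u$ and $v$ are exchanged, and computes the signed area of the parallelogram spanned by $u$ and $v$: it is the prototype of the alternating product we are about to define.
+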
-This leads naturally to the following definition.
+Since we are looking for an alternating product, the previous observation leads naturally to the following definition.
 
 \begin{definition}
   Let $V$ be a real $n$-dimensional vector space.
@@ -92,8 +92,9 @@ \section{The exterior product}
 \begin{proof}
   The last point of Exercise~\ref{ex:propAlt} implies that there are no non-zero alternating $k$-tensors on $V$ if $k >\dim V$, since in that case every $k$-tuple of vectors would be dependent.
   For $k\leq n$ we need to show that $E$ spans $\Lambda^k(V)$ and its vectors are linearly independent.
+  The second claim follows directly from \eqref{eq:detLeibniz}.
   For the rest of the proof we will focus on the first claim.
-  First of all, observe that by~\eqref{equiv:permut} all the wedge products ${e^{j_1}\wedge\ldots\wedge e^{j_k}\not\in E}$ either vanish\footnote{When two indices are repeated, i.e., a basis vector appears twice} or are linear multiples of an element\footnote{The exterior product with the indices in the same set but in increasing order} of $E$.
+  First of all, observe that by~\eqref{equiv:permut} all the wedge products ${e^{j_1}\wedge\ldots\wedge e^{j_k}\not\in E}$, where the indices $j_i$ are not necessarily in ascending order, either vanish\footnote{When two indices coincide, i.e., a basis vector appears twice.} or are linear multiples of an element\footnote{The exterior product with the same set of indices but in increasing order.} of $E$.
 
   Let now $\{e_i\}$ denote the basis for $V$ dual to $\{e^i\}$ and $\omega\in\Lambda^k$.
   By definition of alternating form, we have
@@ -117,7 +118,7 @@ \section{The exterior product}
     & = \frac1{k!} \omega\left(e_{j_1}, \ldots, e_{j_k}\right) \sum_{\sigma\in S_k} \sgn(\sigma) \;\left(e^{j_{\sigma^{-1}(1)}}\otimes \cdots \otimes e^{j_{\sigma^{-1}(k)}} \;\mid\; v_1, \ldots, v_k\right) \\
     \overset{\mbox{\small\eqref{eq:detLeibniz}}}{} & = \frac1{k!} \omega\left(e_{j_1}, \ldots, e_{j_k}\right) \left( e^{j_1}\wedge \cdots \wedge e^{j_k} \;\mid\; v_1, \ldots, v_k\right) \\
-    \overset{\mbox{\small dedup.}}{}
+    %\overset{\mbox{\tiny dedup.}}{}
     & = \sum_{j_1=1}^{n-k+1}\sum_{j_2=j_1+1}^{n-k+2}\cdots \sum_{j_k=j_{k-1}+1}^{n} \omega\left(e_{j_1}, \ldots, e_{j_k}\right) \left(e^{j_1}\wedge \cdots \wedge e^{j_k} \;\mid\; v_1, \ldots, v_k\right).
   \end{align}
   That is,
@@ -172,7 +173,7 @@ \section{The exterior product}
     & =(\Alt_k \tau)(v_1,\ldots, v_k),
   \end{align}
   where we used the fact that $\eta$ runs over all $S_k$, as $\sigma$ does.
-  Then the result follows from~\eqref{eq:detLeibniz}.
+  It remains to check that $\Alt_k(\tau)$ is alternating, but this follows directly from~\eqref{eq:detLeibniz}, completing the proof.
 \end{proof}
 
 As we were saying, now we can take the tensor product of two forms $\omega\otimes\nu$ and use the antisymmetrisation $\Alt_{k+h}$ to project it onto the antisymmetric subspace $\Lambda^{k+h}$ of $T_{k+h}^0(V)$.
@@ -219,7 +220,7 @@ \section{The exterior product}
 \begin{remark}
-  As for tensors, if we define the $2^n$-dimensional vector space
+  As for tensors, if we define the $2^n$-dimensional\footnote{Exercise: why is the dimension $2^n$?} vector space
   \begin{equation}
     \Lambda(V) = \bigoplus_{k=0}^n \Lambda^k(V),
   \end{equation}
@@ -232,7 +233,7 @@ \section{The interior product}
 \begin{definition}
   Let $V$ be a real $n$-dimensional vector space.
-  For each $v\in V$, the \emph{interior product by $v$} is a contraction of a $k$-form by $v$, that is, the linear map $\iota_v:\Lambda^{k}(V)\to \Lambda^{k-1}(V)$ defined\footnote{Another common notation for the same operation is $v \iprod \omega$.} by
+  For each $v\in V$, the \emph{interior product} with $v$ is a contraction of a $k$-form by $v$, that is, the linear map $\iota_v:\Lambda^{k}(V)\to \Lambda^{k-1}(V)$ defined\footnote{Another common notation for the same operation is $v \iprod \omega$.} by
   \begin{equation}
     \iota_v\omega(w_1,\ldots,w_{k-1}) = \omega(v,w_1,\ldots,w_{k-1})
     \quad \forall w_1,\ldots,w_{k-1} \in V.
@@ -261,7 +262,7 @@ \section{Differential forms on manifolds}
 It is time to turn our attention back to smooth manifolds.
 Let $M$ be an $n$-dimensional smooth manifold and recall that we had defined the tensor fields $\cT_s^r(M)$ as the sections of $(r,s)$-tensor bundles $T_s^rM$ over $M$.
-The subset of $T_k^0(M)$ consisting of alternating $k$-tensors is denoted by $\Lambda^kM:= \bigsqcup_{p\in M} \{p\}\times \Lambda^k(T_p M)$.
+The subset of $T_k^0(M)$ consisting of alternating $k$-tensors is denoted by $\Lambda^kM:= \bigsqcup_{p\in M} \{p\}\times \Lambda^k(T_p M)$. This is\footnote{Exercise: prove the claims.} again a vector bundle, which is a subbundle of the $T_k^0 M$ tensor bundle.
 
 \begin{definition}
   The sections of $\Lambda^kM$ are called \emph{differential $k$-forms}, or just $k$-forms: these are smooth tensor fields whose values at each point are alternating tensors.
   The integer $k$ is called the \emph{degree} of the $k$-form.
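+  For example, for $k=0$ and $k=1$ we recover familiar objects: $\Lambda^0 M = M\times\R$, whose sections are just the smooth functions on $M$, and $\Lambda^1 M = T^*M$, whose sections are the differential one-forms we already know.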