\documentclass{article}
	\usepackage{tensorstyles}
			\tensorsset{\tensor}[preset=math, preset*=einstein]


	\usepackage{keytheorems} % SETUP

		% Plain style: corollary, lemma, conjecture
		\newkeytheorem{corollary}[name=Corollary, style=plain]
		\newkeytheorem{lemma}[name=Lemma, style=plain]
		\newkeytheorem{conjecture}[name=Conjecture, style=plain]

		% Plain style: theorem, paradox
		\newkeytheorem{theorem}[name=Theorem, style=plain]
		\newkeytheorem{paradox}[name=Paradox, style=plain]
		%\newkeytheorem{proof}[name=Proof, style=break]

		% Definition style: property, proposition, contraposition, consequence, hypothesis
		\newkeytheorem{property}[name=Property, style=definition]
		\newkeytheorem{proposition}[name=Proposition, style=definition]
		\newkeytheorem{contraposition}[name=Contraposition, style=definition]
		\newkeytheorem{consequence}[name=Consequence, style=definition]
		\newkeytheorem{hypothesis}[name=Hypothesis, style=definition]

		% Definition style: definition, reminder
		\newkeytheorem{definition}[name=Definition, style=definition]
		\newkeytheorem{reminder}[name=Reminder, style=definition]

		% Remark style: remark, example, counterexample
		\newkeytheorem{remark}[name=Remark, style=remark]
		\newkeytheorem{example}[name=Example, style=remark]
		\newkeytheorem{counterexample}[name=Counterexample, style=remark]


\begin{document}

	% https://grinfeld.org/books/An-Introduction-To-Tensor-Calculus/Chapter7.html

	% https://www.underleaf.ai/learn/latex/tensors

	% https://rodolphe-vaillant.fr/images/tmp/cours_tenseurs_ups.pdf

	Tensors are mathematical objects that generalize scalars, vectors, and matrices to higher dimensions.
	\LaTeX{} provides excellent support for typesetting tensor notation, which is essential in physics, engineering, and advanced mathematics.
	This guide covers the essential \LaTeX{} commands for working with tensors.

\section{Basic notion}

\begin{definition}
	Let $V$ be a $\mathbb{K}$-vector space.
	A linear form (or linear functional) on $V$ is a linear map from $V$ to $\mathbb{K}$.
	For a vector space $V$, we denote by $V^{*}$ its dual space, which is the vector space of all linear forms on $V$.
\end{definition}

\begin{definition}
	A tensor of order $(p,q)$, denoted $T$, is a multi-linear map defined as
	\begin{equation}
		T: \underbrace {V^{*}\times \dots \times V^{*}} _{p{\text{ copies}}}\times \underbrace {V\times \dots \times V} _{q{\text{ copies}}}\rightarrow \mathbb{K}
	\end{equation}
\end{definition}

\begin{remark}
	By applying a multi-linear map $T$ of type $(p, q)$ to a basis $\{\mathbf{e}_{j}\}$ for $V$ and a canonical co-basis $\{\boldsymbol{\varepsilon}^{i}\}$ for $V^{*}$,
	\begin{equation}
		\tensor[preset=einstein, collapsed-indices=true]{xX}{T}[_{j_{1}\ldots j_{q}}^{i_{1}\ldots i_{p}}]
		\equiv
		T\left({\boldsymbol {\varepsilon }}^{i_{1}},\ldots ,{\boldsymbol {\varepsilon }}^{i_{p}},\mathbf{e}_{j_{1}},\ldots ,\mathbf{e}_{j_{q}}\right),
	\end{equation}
	a $(p + q)$-dimensional array of components can be obtained.
\end{remark}

% Basic Tensor Notation

% Tensors are typically represented using indices to denote their components:

%     Tensor Symbols

%     T, \mathbf{T}, \mathsf{T}, \mathcal{T}, \mathbb{T}
%     T,T,T,T,T
%     T,T,T,T,T

%     Different ways to represent tensor symbols in LaTeX.

%     Tensor Components with Indices

%     T^{i}_{j}, \quad T^{ij}_{k}, \quad T^{i_1 i_2 \ldots i_n}_{j_1 j_2 \ldots j_m}
%     Tji,Tkij,Tj1j2…jmi1i2…in
%     Tji​,Tkij​,Tj1​j2​…jm​i1​i2​…in​​

%     Tensor components with superscript (contravariant) and subscript (covariant) indices.

%     Tensor Rank

%     \text{Rank } (m,n) \text{ tensor: } T^{i_1 i_2 \ldots i_m}_{j_1 j_2 \ldots j_n}
%     Rank (m,n) tensor: Tj1j2…jni1i2…im
%     Rank (m,n) tensor: Tj1​j2​…jn​i1​i2​…im​​

%     A tensor of rank (m,n) has m contravariant indices and n covariant indices.

% Einstein Summation Convention

% The Einstein summation convention is commonly used with tensors, where repeated indices imply summation:

%     Implicit Summation

%     A^i B_i = \sum_{i=1}^n A^i B_i
%     AiBi=∑i=1nAiBi
%     AiBi​=i=1∑n​AiBi​

%     When an index appears once as a superscript and once as a subscript, summation is implied.

%     Matrix Multiplication as Tensor Contraction

%     C^i_j = A^i_k B^k_j = \sum_{k=1}^n A^i_k B^k_j
%     Cji=AkiBjk=∑k=1nAkiBjk
%     Cji​=Aki​Bjk​=k=1∑n​Aki​Bjk​

%     Matrix multiplication expressed as tensor contraction using Einstein notation.

%     Free Indices

%     D^{ij}_k = A^i_l B^j_m C^{lm}_k
%     Dkij=AliBmjCklm
%     Dkij​=Ali​Bmj​Cklm​

%     Indices that appear only once (i, j, k) are free indices and represent components of the resulting tensor.

% Common Tensors in Physics

%     Metric Tensor

%     g_{\mu\nu} = \begin{pmatrix} -1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 1 \end{pmatrix}
%     gμν=(−1000010000100001)
%     gμν​=
%     ​−1000​0100​0010​0001​
%     ​

%     The Minkowski metric tensor used in special relativity.

%     Kronecker Delta

%     \delta^i_j = \begin{cases} 1 & \text{if } i = j \\ 0 & \text{if } i \neq j \end{cases}
%     δji={1if i=j0if i≠j
%     δji​={10​if i=jif i=j​

%     The Kronecker delta is a rank (1,1) tensor that acts as an identity operator.

%     Levi-Civita Symbol

%     \varepsilon_{ijk} = \begin{cases} +1 & \text{if } (i,j,k) \text{ is an even permutation of } (1,2,3) \\ -1 & \text{if } (i,j,k) \text{ is an odd permutation of } (1,2,3) \\ 0 & \text{if any index is repeated} \end{cases}
%     εijk={+1if (i,j,k) is an even permutation of (1,2,3)−1if (i,j,k) is an odd permutation of (1,2,3)0if any index is repeated
%     εijk​=⎩
%     ⎨
%     ⎧​+1−10​if (i,j,k) is an even permutation of (1,2,3)if (i,j,k) is an odd permutation of (1,2,3)if any index is repeated​

%     The Levi-Civita symbol is used for cross products and determinants.

% Tensor Operations

%     Tensor Addition

%     C^{ij}_k = A^{ij}_k + B^{ij}_k
%     Ckij=Akij+Bkij
%     Ckij​=Akij​+Bkij​

%     Tensors of the same rank can be added component-wise.

%     Tensor Contraction

%     A^i_i = \sum_{i=1}^n A^i_i
%     Aii=∑i=1nAii
%     Aii​=i=1∑n​Aii​

%     Contraction of a tensor by setting a contravariant and covariant index equal.

%     Tensor Product

%     C^{ij}_{kl} = A^i_k \otimes B^j_l
%     Cklij=Aki⊗Blj
%     Cklij​=Aki​⊗Blj​

%     The tensor product combines two tensors into a higher-rank tensor.

% Raising and Lowering Indices

% The metric tensor can be used to raise or lower indices:

%     Raising an Index

%     A^{\mu} = g^{\mu\nu}A_{\nu}
%     Aμ=gμνAν
%     Aμ=gμνAν​

%     Using the inverse metric tensor to raise an index.

%     Lowering an Index

%     A_{\mu} = g_{\mu\nu}A^{\nu}
%     Aμ=gμνAν
%     Aμ​=gμν​Aν

%     Using the metric tensor to lower an index.

% Covariant Derivatives

%     Christoffel Symbols

%     \Gamma^{\lambda}_{\mu\nu} = \frac{1}{2}g^{\lambda\rho}\left(\frac{\partial g_{\rho\mu}}{\partial x^{\nu}} + \frac{\partial g_{\rho\nu}}{\partial x^{\mu}} - \frac{\partial g_{\mu\nu}}{\partial x^{\rho}}\right)
%     Γμνλ=12gλρ(∂gρμ∂xν+∂gρν∂xμ−∂gμν∂xρ)
%     Γμνλ​=21​gλρ(∂xν∂gρμ​​+∂xμ∂gρν​​−∂xρ∂gμν​​)

%     Covariant Derivative of a Vector

%     \nabla_{\mu}V^{\nu} = \partial_{\mu}V^{\nu} + \Gamma^{\nu}_{\mu\lambda}V^{\lambda}
%     ∇μVν=∂μVν+ΓμλνVλ
%     ∇μ​Vν=∂μ​Vν+Γμλν​Vλ

%     The covariant derivative generalizes the partial derivative to curved spaces.

%     Covariant Derivative of a Covector

%     \nabla_{\mu}V_{\nu} = \partial_{\mu}V_{\nu} - \Gamma^{\lambda}_{\mu\nu}V_{\lambda}
%     ∇μVν=∂μVν−ΓμνλVλ
%     ∇μ​Vν​=∂μ​Vν​−Γμνλ​Vλ​

% Advanced Tensor Notation

%     R^{\rho}_{\sigma\mu\nu} = \partial_{\mu}\Gamma^{\rho}_{\nu\sigma} - \partial_{\nu}\Gamma^{\rho}_{\mu\sigma} + \Gamma^{\rho}_{\mu\lambda}\Gamma^{\lambda}_{\nu\sigma} - \Gamma^{\rho}_{\nu\lambda}\Gamma^{\lambda}_{\mu\sigma}
%     Rσμνρ=∂μΓνσρ−∂νΓμσρ+ΓμλρΓνσλ−ΓνλρΓμσλ
%     Rσμνρ​=∂μ​Γνσρ​−∂ν​Γμσρ​+Γμλρ​Γνσλ​−Γνλρ​Γμσλ​

%     The Riemann curvature tensor.

%     R_{\mu\nu} = R^{\lambda}_{\mu\lambda\nu}
%     Rμν=Rμλνλ
%     Rμν​=Rμλνλ​

%     The Ricci tensor, a contraction of the Riemann tensor.

%     R = g^{\mu\nu}R_{\mu\nu}
%     R=gμνRμν
%     R=gμνRμν​

%     The Ricci scalar, a contraction of the Ricci tensor.

%     G_{\mu\nu} = R_{\mu\nu} - \frac{1}{2}g_{\mu\nu}R
%     Gμν=Rμν−12gμνR
%     Gμν​=Rμν​−21​gμν​R

%     The Einstein tensor, used in general relativity.



\end{document}