diff --git a/Reference Manual/biblio.bib b/Reference Manual/biblio.bib
new file mode 100644
index 0000000000000000000000000000000000000000..52ee7632b1b74864693ab021be7951c889c12437
--- /dev/null
+++ b/Reference Manual/biblio.bib	
@@ -0,0 +1,25 @@
+
+
+@inproceedings{DBLP:conf/tacas/GuilloudK22,
+  author    = {Simon Guilloud and
+               Viktor Kuncak},
+  editor    = {Dana Fisman and
+               Grigore Rosu},
+  title     = {Equivalence Checking for Orthocomplemented Bisemilattices in Log-Linear
+               Time},
+  booktitle = {Tools and Algorithms for the Construction and Analysis of Systems
+               - 28th International Conference, {TACAS} 2022, Held as Part of the
+               European Joint Conferences on Theory and Practice of Software, {ETAPS}
+               2022, Munich, Germany, April 2-7, 2022, Proceedings, Part {II}},
+  series    = {Lecture Notes in Computer Science},
+  volume    = {13244},
+  pages     = {196--214},
+  publisher = {Springer},
+  year      = {2022},
+  url       = {https://doi.org/10.1007/978-3-030-99527-0\_11},
+  doi       = {10.1007/978-3-030-99527-0\_11},
+  timestamp = {Fri, 29 Apr 2022 14:50:33 +0200},
+  biburl    = {https://dblp.org/rec/conf/tacas/GuilloudK22.bib},
+  bibsource = {dblp computer science bibliography, https://dblp.org}
+}
+
diff --git a/Reference Manual/lisa.pdf b/Reference Manual/lisa.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..7486581dd85b54c97ded342a5634862f51e17253
Binary files /dev/null and b/Reference Manual/lisa.pdf differ
diff --git a/Reference Manual/lisa.tex b/Reference Manual/lisa.tex
new file mode 100644
index 0000000000000000000000000000000000000000..9de1aacb68bfecfcc9240b14a74147d8e647b3e7
--- /dev/null
+++ b/Reference Manual/lisa.tex	
@@ -0,0 +1,68 @@
+\documentclass[11pt,a4paper]{book}
+\usepackage[utf8]{inputenc}
+\usepackage[english]{babel}
+\usepackage{amsmath}
+\usepackage{amsfonts}
+\usepackage{amssymb}
+\usepackage{listings}
+\usepackage{amsthm}
+\usepackage{csquotes}
+\usepackage[strings]{underscore}
+\usepackage{url}
+\usepackage{bussproofs}
+\usepackage{makecell}
+
+
+\newtheorem{thm}{Theorem}
+\newtheorem{lemma}{Lemma}
+\newtheorem{corollary}{Corollary}
+\theoremstyle{definition}
+\newtheorem{defin}{Definition}
+\newtheorem{ex}{Example}
+
+\newtheorem{axz}{}
+\renewcommand\theaxz{Z\arabic{axz}}
+\newtheorem{axzf}{}
+\renewcommand\theaxzf{ZF\arabic{axzf}}
+\newtheorem{axtg}{}
+\renewcommand\theaxtg{TG\arabic{axtg}}
+
+\lstset{
+language=Scala,
+escapeinside={(*@}{@*)}
+}
+
+%\lstdefinelanguage{scala}{
+%  alsoletter={@,=,>},
+%  morekeywords={abstract, case, class, def, do, Input, Output, then,
+%        else, extends, false, free, if, implicit, match,
+%        object, true, val, var, while, sealed, or, given,
+%        for, dependent, null, type, with, try, catch, finally,
+%        import, final, return, new, override, this, trait,
+%        private, public, protected, package, throw, using},
+%  sensitive=true,  
+%  morecomment=[l]{//},
+%  morecomment=[s]{/*}{*/},
+%  morestring=[b]",  
+%}
+%\lstset{mathescape,escapeinside={!*}{*!},columns=fullflexible,morekeywords={if,then,else,return,match,with,end,let,in, data, type, :=, def}}
+
+
+
+\title{LISA Reference Manual (WIP)}
+\author{Simon Guilloud\\Laboratory for Automated Reasoning and Analysis, EPFL}
+\date{}
+
+\begin{document}
+\maketitle
+\chapter*{Introduction}
+This document aims to give a complete documentation on LISA. Tentatively, every chapter and section will explain a part or concept of LISA, explaining both its implementation and its theoretical foundations.
+\cite{DBLP:conf/tacas/GuilloudK22}
+
+\input{part1.tex}
+
+\input{part2.tex}
+
+\bibliographystyle{plain}
+\bibliography{biblio}
+\end{document}
\ No newline at end of file
diff --git a/Reference Manual/part1.tex b/Reference Manual/part1.tex
new file mode 100644
index 0000000000000000000000000000000000000000..8d69c1b084489fcd03fe59b1e3fbb0c711b05428
--- /dev/null
+++ b/Reference Manual/part1.tex	
@@ -0,0 +1,937 @@
+\newcommand\Var{\operatorname{Var}}
+\newcommand\FormulaVar{\operatorname{FormulaVar}}
+\newcommand\List{\operatorname{List}}
+\newcommand\Lambdaa{\operatorname{Lambda}}
+\newcommand\LambdaTT{\operatorname{LambdaTermTerm}}
+\newcommand\LambdaTF{\operatorname{LambdaTermFormula}}
+\newcommand\LambdaFF{\operatorname{LambdaFormulaFormula}}
+
+
+
+
+
+
+
+\part{Reference Manual}
+\label{part:manual}
+\chapter{LISA's trusted code: The Kernel}
+\label{chapt:kernel}
+LISA's kernel is the starting point of LISA, formalising the foundations of the whole theorem prover. It is the only trusted code base, meaning that if it is bug-free then no further erroneous or malicious code can violate the soundness property and prove invalid statements. Hence, the two main goals of the kernel are to be efficient and trustworthy.
+
+LISA's foundations are based on very traditional (in the mathematical community) foundational theory of all mathematics: \textbf{First Order Logic}, expressed using \textbf{Sequent Calculus} (and augmented with schematic symbols), with axioms of \textbf{Set Theory}.
+Interestingly, while LISA is built with the goal of using Set Theory, the kernel is actually theory-agnostic and is sound to use with any other set of axioms. Hence, we defer Set Theory to chapter~\ref{chapt:settheory}.
+
+\section{First Order Logic}
+\label{sect:FOL}
+\subsection{Syntax}
+\begin{defin}[Terms]
+In LISA, the set of terms $\mathcal{T}$ is defined by the following grammar:
+\begin{equation}
+\begin{split}
+\mathcal{T} := & \mathcal{L}_{Term}(\List[\mathcal{T}])
+\end{split}
+\end{equation}\\
+Where $\mathcal{L}_{Term}$ is the set of \textit{term labels}:
+\begin{equation}
+\begin{split}
+\mathcal{L}_{Term} := & \operatorname{ConstantTermLabel}(\textnormal{Id}, \textnormal{Arity})\\
+              \mid & \operatorname{SchematicTermLabel}(\textnormal{Id}, \textnormal{Arity})
+\end{split}
+\end{equation}
+
+A label can be either \textit{constant} or \textit{schematic}, and is made of an identifier (a string) and the arity of the label (an integer). 
+A term is made of a term label and a list of children, whose length must be equal to the arity of the label.
+A constant label of arity $0$ is sometimes called just a constant, and a schematic label of arity $0$ a variable. We define the shortcut $$\Var(x) \equiv \operatorname{SchematicTermLabel}(x, 0)$$
+\end{defin}
+
+As the definition states, we have two kinds of function symbols: \textit{constant} ones and \textit{schematic} ones. Constant labels represent a fixed function symbol in some language, for example the addition "+" in Peano arithmetic.
+
+Schematic symbols on the other hand are uninterpreted: They can represent any possible term and hence can be substituted by any term. Their use will become clearer in the next section when we introduce the concept of deductions. Moreover, variables, which are schematic terms of arity 0, can be bound in formulas as we explain below.\footnote{In a very traditional presentation of first order logic, we would only have variables, i.e. schematic terms of arity 0, and schematic terms of higher arity would only appear in second order logic. We defer to Part~\ref{part:theory} Section~\ref{sect:theoryfol} the explanation of why our inclusion of schematic function symbols doesn't fundamentally move us out of First Order Logic.}
+
+\begin{defin}[Formulas]
+The set of Formulas $\mathcal{F}$ is defined similarly:
+\begin{equation}
+\begin{split}
+\mathcal{F} := & \mathcal{L}_{Predicate}(\List[\mathcal{T}]) \\
+              \mid & \mathcal{L}_{Connector}(\List[\mathcal{F}]) \\
+              \mid & \operatorname{Binder}(\textnormal{Id})(\Var(\textnormal{Id}), \mathcal{F})
+\end{split}
+\end{equation}
+Where $\mathcal{L}_{Predicate}$ is the set of \textit{predicate labels}:
+\begin{equation}
+\begin{split}
+\mathcal{L}_{Predicate} := & \operatorname{ConstantPredicateLabel}(\textnormal{Id}, \textnormal{Arity})\\
+              \mid & \operatorname{SchematicPredicateLabel}(\textnormal{Id}, \textnormal{Arity})
+\end{split}
+\end{equation}
+and $\mathcal{L}_{Connector}$ is the set of \textit{connector labels}:
+\begin{equation}
+\begin{split}
+\mathcal{L}_{Connector} := & \operatorname{ConstantConnectorLabel}(\textnormal{Id}, \textnormal{Arity})\\
+              \mid & \operatorname{SchematicConnectorLabel}(\textnormal{Id}, \textnormal{Arity})
+\end{split}
+\end{equation}
+A formula can be constructed from a list of terms with a predicate label:
+$${\leq}(x, 7)$$
+A formula can be constructed from a list of smaller formulas using a connector label: 
+$${\leq}(x, 7) \land {\geq}(x, 5)$$
+And finally a formula can be constructed from a variable and a smaller formula using a binder:
+$$\exists x. \left({\leq}(x, 7) \land {\geq}(x, 5)\right)$$
+Connectors and predicates, like terms, can exist in either constant or schematic form. Note that connectors and predicates vary only in the type of arguments they take, and hence connectors and predicates of arity 0 are the same thing. Hence, in LISA, we don't permit connectors of arity 0 and ask to use predicates instead.
+Contrary to schematic terms of arity 0, schematic predicates of arity 0 can't be bound, but they still play a special role sometimes, and hence we introduce a special notation for them:
+$$
+\FormulaVar(\textnormal{Id}) = \operatorname{SchematicPredicateLabel}(\textnormal{Id}, 0)
+$$
+Moreover, in LISA, contrary to constant predicate and term symbols, which can be freely created, there is only the following finite set of constant connector symbols:
+$$
+\operatorname{Neg}(\neg, 1)\mid \operatorname{Implies}(\rightarrow, 2)\mid \operatorname{Iff}(\leftrightarrow, 2)\mid \operatorname{And}(\land, -1)\mid \operatorname{Or}(\lor, -1)
+$$
+Moreover, connectors (And and Or) are allowed to have an unrestricted arity, represented by the value $-1$. This means that a conjunction or disjunction can have any finite number of children. 
+Similarly, there are only 3 binder labels.
+$$
+\operatorname{Forall}(\forall)\mid \operatorname{Exists}(\exists)\mid \operatorname{ExistsOne}(\exists !)
+$$
+We also introduce a special constant predicate symbol, equality:
+$$
+\operatorname{Equal}(=)
+$$
+\end{defin}
+
+In this document as well as in the code documentation, we often write terms and formulas in a more conventional way, generally hiding the arity of labels and representing the label with its identifier only, preceded by an interrogation mark $?$ if the symbol is schematic. When the arity is relevant, we write it with a superscript, for example:
+$$
+f^3(x,y,z) \equiv \operatorname{Fun}(f, 3)(\List(\Var(x), \Var(y), \Var(z)))
+$$
+and
+$$
+\forall x. \phi \equiv \operatorname{Binder}(\forall, \Var(x), \phi)
+$$
+We also use other usual representations such as symbols in infix position, omitting parenthesis according to usual precedence rules, etc.
+Finally, note that we use subscripts to emphasize that a variable is possibly free in a term or formula:
+$$
+t_{x,y,z}, \phi_{x,y,z}
+$$
+
+\paragraph{Convention} Throughout this book and in the code base we adopt the following conventions. We use $r$, $s$, $t$, $u$ to denote arbitrary terms, $a$, $b$, $c$ to denote constant term symbols of arity $0$ and $f$, $g$, $h$ to denote term symbols of arity non-$0$. We precede those with an interrogation mark, such as $?f$, to denote schematic symbols. Moreover, we also use $x$, $y$, $z$ to denote variables (schematic terms of arity $0$).
+
+For formulas, we use greek letters such as $\phi$, $\psi$, $\tau$ to denote arbitrary formulas, $\nu$, $\mu$ to denote formula variables. We use capital letters like $P$, $Q$, $R$ to denote predicate symbols, preceding them with an interrogation mark $?$ for schematic predicates. Schematic connectors are rarer, but when they appear, we precede them by two interrogation marks, for example $??c$. Sets or sequences of formulas are denoted with capital greek letters $\Pi$, $\Sigma$, $\Gamma$, $\Delta$.
+%\begin{itemize}
+%\item
+%\end{itemize}
+
+\subsection{Substitution}
+\label{subs:substitution}
+On top of the basic construction of terms and formulas, there is one important type of operation: substitution of schematic symbols, which has to be implemented in a capture-avoiding way. We start with the subcase of variable substitution.
+\begin{defin}[Capture-avoiding Substitution of variables]
+Given a base term $t$, a variable $x$ and another term $r$, the substitution of $x$ by $r$ inside $t$ is noted $ t[r/x] $ and simply consists in replacing all appearances of $x$ by $r$.
+
+Given a formula $\phi$, the substitution of $x$ by $r$ inside $\phi$ is defined recursively the obvious way for connectors and predicates, and for binders:
+$$
+(\forall y. \psi)[r/x] \equiv \forall y. (\psi[r/x])
+$$
+if $y$ does not appear in $r$ and
+$$
+(\forall y. \psi)[r/x] \equiv \forall z. (\psi[z/y][r/x])
+$$
+with any fresh variable $z$ (which is not free in $r$ and $\phi$) otherwise.
+\end{defin}
+This definition of substitution is justified by the notion of alpha equivalence: Two formulas which are identical up to renaming of bound variables are considered equivalent. In practice, this means that the free variables inside $r$ will never get caught when substituted.
+
+We can now define \enquote{lambda terms}
+\begin{defin}[Lambda Terms]
+A lambda term is a meta expression (meaning that it is not part of FOL itself) consisting of a term with "holes" that can be filled by other terms. This is represented with specified variables as arguments, similar to lambda calculus. For example, for a functional term with two arguments, we write
+$$
+L = \Lambdaa(\Var(x), \Var(y))(t_{x,y})
+$$
+It comes with an instantiation operation: given terms $r$, $s$,
+$$L(r, s) = t[r/x, s/y]$$
+\end{defin}
+Those expressions are a generalization of terms, and would be part of our logic if we used Higher Order Logic rather than First Order Logic. For conciseness and familiarity, in this document and in code documentation, we represent those expressions as lambda expressions:
+$$
+\lambda x.y. t
+$$
+
+They are useful because, just as variables can be substituted by terms, schematic term labels of arity greater than 0 can be substituted by such functional terms. As the definition of such substitution is rather convoluted to describe, we prefer to show examples and redirect the reader to the source code of LISA for a technical definition.\footnote{Note that in lambda calculus, this would simply be iterated beta-reduction.}
+\begin{ex}[Functional terms substitution in terms]
+\begin{center}
+\begin{tabular}{|c|r c l|c|}
+\hline
+Base term & \multicolumn{3}{c|}{Substitution} & Result \\
+\hline
+$?f(0, 3)$ & $?f$ & $\rightarrow$ & $\lambda x.y. x+y$ & $0+3$\\ 
+$?f(0, 3)$ & $?f$ & $\rightarrow$ & $\lambda y.x. x-y$ & $3-0$\\ 
+$?f(0, 3)$ & $?f$ & $\rightarrow$ & $\lambda x.y. y+y-10$ & $3+3-10$\\ 
+$10 \times {?g(x)}$ & $?g$ & $\rightarrow$ & $\lambda x. x^2$ & $10 \times x^2$\\
+$10 \times {?g(50)}$ & $?g$ & $\rightarrow$ & $\lambda x. ?f(x+2, z)$ & $10 \times {?f(50+2, z)}$\\
+$?f(x, x+y)$ & $?f$ & $\rightarrow$ & $\lambda x.y. \cos(x-y)*y$ & $\cos(x-(x+y))*(x+y)$\\
+\hline
+\end{tabular}
+\end{center}
+\end{ex}
+
+These definitions extend to substitution of schematic terms inside formulas, with capture-avoiding substitution for bound variables. For example:
+
+\begin{ex}[Functional terms substitution in formulas]
+\begin{center}
+\begin{tabular}{|c|r c l|c|}
+\hline
+Base formula & \multicolumn{3}{c|}{Substitution} & Result \\
+\hline
+$?f(0, 3) = ?f(x, x)$ & $?f$ & $\rightarrow$ & $\lambda x.y. x+y$ & $0+3 = x+x$\\ 
+$\forall x. ?f(0, 3) = ?f(x, x)$ & $?f$ & $\rightarrow$ & $\lambda x.y. x+y$ & $\forall x. 0+3 = x+x$\\ 
+
+$\exists y. ?f(y) \leq ?f(5)$ & $?f$ & $\rightarrow$ & $\lambda x. x+y$ & $\exists y_1. y_1+y \leq 5+y$\\
+
+\hline
+\end{tabular}
+\end{center}
+\end{ex}
+Note that if the lambda expression contains free variables (such as $y$ in the last example), then appropriate alpha-renaming of variables may be needed.
+
+We similarly define functional formulas, except that those can take either term arguments or formula arguments. Specifically, we use $\LambdaTT$, $\LambdaTF$, $\LambdaFF$ to indicate functional expressions that take terms or formulas as arguments and return a term or formula.
+
+\begin{ex}[Typical functional expressions]
+\begin{center}
+\begin{tabular}{|r c l|}
+\hline
+$\LambdaTT(\Var(x), \Var(y))(x+y)$ & $=$ & $\lambda x.y. x+y$ \\
+$\LambdaTF(\Var(x), \Var(y))(x=y)$ & $=$ & $\lambda x.y. x=y$ \\
+$\LambdaFF(\FormulaVar(\nu), \FormulaVar(\mu))(\nu \land \mu)$ & $=$ & $\lambda \nu.\mu. \nu \land \mu$ \\
+\hline
+\end{tabular}
+\end{center}
+
+\end{ex}
+Note that in the last case, we use $\FormulaVar$ to represent the arguments of the lambda formula. Substitution of functional formulas is completely analogous to (capture-avoiding!) substitution of functional terms.
+
+
+
+\subsection{The Equivalence Checker}
+\label{subs:equivalencechecker}
+While proving theorems, trivial syntactical transformations such as $p\land q \equiv q\land p$ significantly increase the length of proofs, which is desirable neither for the user nor the machine. Moreover, the proof checker will very often have to check whether two formulas that appear in different sequents are the same. Hence, instead of using pure syntactical equality, LISA implements a powerful equivalence checker able to detect a class of equivalence-preserving logical transformations. As an example, two formulas $p\land q$ and $q\land p$ would be naturally treated as equivalent.
+
+For soundness, the relation decided by the algorithm should be contained in the $\Longleftrightarrow$ ``if and only if'' relation of first order logic. It is well known that this relationship is in general undecidable however, and even the $\Longleftrightarrow$ relation for propositional logic is coNP-complete. For practicality, we need a relation that is efficiently computable.
+
+The decision procedure implemented in LISA takes time log-linear in the size of the formula, which means that it is only marginally slower than syntactic equality checking. It is based on an algorithm that decides the word problem for Orthocomplemented Bisemilattices \cite{DBLP:conf/tacas/GuilloudK22}. Informally, the theory of Orthocomplemented Bisemilattices is the same as that of Propositional Logic, but without the distributivity law. Table~\ref{tab:OCBSL} shows the axioms of this theory and the logical transformations LISA is able to automatically figure out.
+Moreover, the implementation in LISA also takes into account symmetry and reflexivity of equality as well as alpha-equivalence, by which we mean renaming of bound variables.
+
+\begin{table}[bth]
+    \centering
+    \begin{tabular}{r c | r c}
+         L1: & $x \lor y = y \lor x$  & L1': & $x \land y = y \land x$ \\
+         L2: & $x \lor ( y \lor z) = (x \lor y) \lor z$  & L2': & $x \land ( y \land z) = (x \land y) \land z$ \\
+         L3: & $x \lor x = x$  & L3': & $x \land x = x$ \\
+         L4: & $x \lor 1 = 1$  & L4': & $x \land 0 = 0$ \\
+         L5: & $x \lor 0 = x$  & L5': & $x \land 1 = x$ \\
+         L6: & $\neg \neg x = x$  & L6': & same as L6  \\
+         L7: & $x \lor \neg x = 1$  & L7': & $x \land \neg x = 0$ \\
+         L8: & $\neg (x \lor y) = \neg x \land \neg y$  & L8': &  $\neg (x \land y) = \neg x \lor \neg y$ \\
+         L9: & $x \implies y = \neg x \lor y$  &  \\
+         L10: & $x \leftrightarrow y = (\neg x \lor y) \land (\neg y \lor x)  $ & \\
+         L11: & $\exists ! x. P = \exists y. \forall x. (x=y) \leftrightarrow P$ & \\
+    \end{tabular}
+    \
+    \caption{Laws LISA's equivalence checker automatically accounts for.
+    LISA's equivalence-checking algorithm is complete (and log-linear time) with respect to laws L1-L11 and L1'-L8'.\label{tab:OCBSL}}
+\end{table}
+
+
+\section{Proofs in Sequent Calculus}
+\label{sect:proofs_lk}
+\subsection{Sequent Calculus}
+\label{subs:lk}
+The deductive system used by LISA is an extended version of Gentzen's Sequent Calculus.
+\begin{defin}
+A \textbf{sequent} is a pair of (possibly empty) sets of formulas, noted:
+$$\phi_1, \phi_2, ..., \phi_m \vdash \psi_1, \psi_2, ..., \psi_n$$
+The intended semantic of such a sequent is:
+\begin{equation}
+\label{eq:SequentSemantic}
+(\phi_1 \land \phi_2 \land ... \land \phi_m ) \implies (\psi_1 \lor \psi_2 \lor ... \lor \psi_n )
+\end{equation}
+
+\end{defin}
+A sequent $\phi \vdash \psi$ is logically but not conceptually equivalent to a sequent $\vdash \phi \rightarrow \psi$. The distinction is similar to the distinction between meta-implication and inner implication in Isabelle, for example. Typically, a theorem or a lemma should have its various assumptions on the left hand side of the sequent and its single conclusion on the right. During proofs however, there may be multiple elements on the right side.\footnote{In a strict description of Sequent Calculus, this is in particular needed to make double negation elimination.}
+
+A deduction rule, also called a proof step, has (in general) between zero and two prerequisite sequents (which we call \textit{premises} of the rule) and one conclusion sequent, and possibly takes some arguments that describe how the deduction rule is applied. The basic deduction rules used in LISA are shown in Figure~\ref{fig:deduct_rules_1}.
+
+Since we work on first order logic with equality and accept axioms, there are also rules for equality reasoning, which include reflexivity of equality. Moreover, we include substitution of equal-for-equal and equivalent-for-equivalent in Figure~\ref{fig:deduct_rules_2}. While those substitution rules are deduced steps, and hence could technically be omitted, simulating them can sometimes take a high number of steps, so they are included as base steps for efficiency.
+
+There are also some special proof steps used to organise proofs, shown in Figure~\ref{fig:deduct_rules_3}.
+
+%Start of first set of deduction rules
+\begin{figure}
+\begin{center}
+\begin{tabular}{l l}
+\multicolumn{2}{c}{
+\AxiomC{}
+\RightLabel{\texttt { Hypothesis}}
+\UnaryInfC{$\Gamma, \phi \vdash \phi, \Delta$}
+\DisplayProof
+}\\[5ex]
+\multicolumn{2}{c}{
+\AxiomC{$\Gamma \vdash \phi, \Delta$}
+\AxiomC{$\Sigma, \phi \vdash \Pi$}
+\RightLabel{\texttt{ Cut}}
+\BinaryInfC{$\Gamma, \Sigma \vdash \Delta, \Pi$}
+\DisplayProof
+}\\[5ex]
+\AxiomC{$\Gamma, \phi \vdash \Delta$}
+\RightLabel{\texttt { LeftAnd}}
+\UnaryInfC{$\Gamma, \phi \land \psi \vdash \Delta$}
+\DisplayProof &
+\AxiomC{$\Gamma \vdash \phi, \Delta$}
+\AxiomC{$\Sigma \vdash \psi, \Pi$}
+\RightLabel{\texttt{ RightAnd}}
+\BinaryInfC{$\Gamma, \Sigma \vdash \phi \land \psi,  \Delta, \Pi$}
+\DisplayProof
+\\[5ex]
+\AxiomC{$\Gamma, \phi \vdash \Delta$}
+\AxiomC{$\Sigma, \psi \vdash \Pi$}
+\RightLabel{\texttt{ LeftOr}}
+\BinaryInfC{$\Gamma, \Sigma, \phi\lor \psi \vdash \Delta, \Pi$}
+\DisplayProof &
+\AxiomC{$\Gamma \vdash \phi, \Delta$}
+\RightLabel{\texttt{ RightOr}}
+\UnaryInfC{$\Gamma \vdash \phi \lor \psi,  \Delta$}
+\DisplayProof
+\\[5ex]
+\AxiomC{$\Gamma \vdash \phi, \Delta$}
+\AxiomC{$\Sigma, \psi \vdash \Pi$}
+\RightLabel{\texttt{ LeftImplies}}
+\BinaryInfC{$\Gamma, \Sigma, \phi\rightarrow \psi \vdash \Delta, \Pi$}
+\DisplayProof &
+\AxiomC{$\Gamma, \phi \vdash \psi, \Delta$}
+\RightLabel{\texttt{ RightImplies}}
+\UnaryInfC{$\Gamma \vdash \phi \rightarrow \psi,  \Delta$}
+\DisplayProof
+\\[5ex]
+\AxiomC{$\Gamma, \phi \rightarrow \psi \vdash \Delta$}
+\RightLabel{\texttt { LeftIff}}
+\UnaryInfC{$\Gamma, \phi \leftrightarrow \psi \vdash \Delta$}
+\DisplayProof &
+\AxiomC{$\Gamma \vdash \phi \rightarrow \psi, \Delta$}
+\AxiomC{$\Sigma \vdash \psi \rightarrow \phi, \Pi$}
+\RightLabel{\texttt{ RightIff}}
+\BinaryInfC{$\Gamma, \Sigma \vdash \phi \leftrightarrow \psi,  \Delta, \Pi$}
+\DisplayProof
+\\[5ex]
+\AxiomC{$\Gamma \vdash \phi, \Delta$}
+\RightLabel{\texttt { LeftNot}}
+\UnaryInfC{$\Gamma, \neg \phi \vdash \Delta$}
+\DisplayProof &
+\AxiomC{$\Gamma, \phi \vdash \Delta$}
+\RightLabel{\texttt{ RightNot}}
+\UnaryInfC{$\Gamma \vdash \neg \phi ,  \Delta$}
+\DisplayProof
+\\[5ex]
+\AxiomC{$\Gamma, \phi[t/x] \vdash \Delta$}
+\RightLabel{\texttt { LeftForall}}
+\UnaryInfC{$\Gamma, \forall x. \phi  \vdash \Delta$}
+\DisplayProof &
+\AxiomC{$\Gamma \vdash \phi, \Delta$}
+\RightLabel{\texttt { RightForall}}
+\UnaryInfC{$\Gamma \vdash \forall x. \phi,  \Delta$}
+\DisplayProof
+\\[5ex]
+\AxiomC{$\Gamma, \phi \vdash \Delta$}
+\RightLabel{\texttt { LeftExists}}
+\UnaryInfC{$\Gamma, \exists x. \phi \vdash \Delta$}
+\DisplayProof &
+\AxiomC{$\Gamma \vdash \phi[t/x], \Delta$}
+\RightLabel{\texttt { RightExists}}
+\UnaryInfC{$\Gamma \vdash \exists x. \phi,  \Delta$}
+\DisplayProof
+\\[5ex]
+\AxiomC{$\Gamma, \exists y \forall x. (x=y) \leftrightarrow \phi \vdash \Delta$}
+\RightLabel{\texttt { LeftExistsOne}}
+\UnaryInfC{$\Gamma, \exists ! x. \phi \vdash \Delta$}
+\DisplayProof &
+\AxiomC{$\Gamma \vdash \exists y \forall x. (x=y) \leftrightarrow \phi , \Delta$}
+\RightLabel{\texttt { RightExistsOne}}
+\UnaryInfC{$\Gamma \vdash \exists ! x. \phi, \Delta$}
+\DisplayProof
+\\[5ex]
+\AxiomC{$\Gamma \vdash \Delta$}
+\RightLabel{\texttt { LeftWeakening}}
+\UnaryInfC{$\Gamma, \Sigma \vdash \Delta$}
+\DisplayProof &
+\AxiomC{$\Gamma \vdash \Delta$}
+\RightLabel{\texttt { RightWeakening}}
+\UnaryInfC{$\Gamma  \vdash \Pi, \Delta$}
+\DisplayProof
+\\[5ex]
+\AxiomC{$\Gamma, t = t \vdash \Delta$}
+\RightLabel{\texttt { LeftRefl}}
+\UnaryInfC{$\Gamma \vdash \Delta$}
+\DisplayProof &
+\AxiomC{}
+\RightLabel{\texttt{ RightRefl}}
+\UnaryInfC{$\vdash t=t$}
+\DisplayProof
+
+\end{tabular}
+\end{center}
+
+\caption{Strict set of deduction rules for sequent calculus.}
+\label{fig:deduct_rules_1}
+\end{figure}
+
+%Start of second set of deduction rules
+\begin{figure}
+\begin{center}
+\begin{tabular}{l l}
+
+\AxiomC{$\Gamma, \phi[s/?f] \vdash \Delta$}
+\RightLabel{\texttt{ LeftSubstEq}}
+\UnaryInfC{$\Gamma, s=t, \phi[t/?f] \vdash \Delta$}
+\DisplayProof &
+\AxiomC{$\Gamma \vdash \phi[s/?f], \Delta$}
+\RightLabel{\texttt{ RightSubstEq}}
+\UnaryInfC{$\Gamma, s=t \vdash \phi[t/?f], \Delta$}
+\DisplayProof
+\\[5ex]
+\AxiomC{$\Gamma, \phi[a/?p] \vdash \Delta$}
+\RightLabel{\texttt{ LeftSubstIff}}
+\UnaryInfC{$\Gamma, a \leftrightarrow b, \phi[b/?p] \vdash \Delta$}
+\DisplayProof &
+\AxiomC{$\Gamma \vdash \phi[a/?p], \Delta$}
+\RightLabel{\texttt{ RightSubstIff}}
+\UnaryInfC{$\Gamma, a \leftrightarrow b \vdash \phi[b/?p], \Delta$}
+\DisplayProof
+\\[5ex]
+\AxiomC{$\Gamma \vdash \Delta$}
+\RightLabel{\texttt{ InstPredSchema}}
+\UnaryInfC{$\Gamma[\psi(\vec{v})/?p] \vdash \Delta[\psi(\vec{v})/?p]$}
+\DisplayProof &
+\AxiomC{$\Gamma \vdash \Delta$}
+\RightLabel{\texttt{ InstFunSchema}}
+\UnaryInfC{$\Gamma[r(\vec{v})/?f]\vdash \Delta[r(\vec{v})/?f]$}
+\DisplayProof
+\\[5ex]
+
+\end{tabular}
+\end{center}
+\caption{Additional deduction rules for substitution and instantiation}
+\label{fig:deduct_rules_2}
+\end{figure}
+
+%Start of third set of deduction rules
+\begin{figure}
+\begin{center}
+\begin{tabular}{l l}
+
+\\[5ex]
+\AxiomC{$\Gamma \vdash \Delta$}
+\RightLabel{\texttt{ Rewrite}}
+\UnaryInfC{$\Gamma \vdash \Delta$}
+\DisplayProof &
+\AxiomC{}
+\RightLabel{\texttt{ RewriteTrue}}
+\UnaryInfC{$\Gamma \vdash \Gamma$}
+\DisplayProof \\
+\multicolumn{2}{c}{
+\AxiomC{...}
+\AxiomC{...}
+\AxiomC{...}
+\RightLabel{\texttt{ Subproof}}
+\TrinaryInfC{$\Gamma \vdash \Delta$}
+\DisplayProof
+}
+
+\end{tabular}
+\end{center}
+\caption{Bonus and structural proof steps. Rewrite allows one to deduce a sequent equivalent to a previous sequent by OCBSL laws and sequent interpretation. Subproof hides a part of a proof tree inside a single proof step.}
+\label{fig:deduct_rules_3}
+\end{figure}
+\newpage
+\subsection{Proofs}
+Proof steps can be composed into a Directed Acyclic Graph. The root of the proof shows the conclusive statement, and the leaves are assumptions or tautologies (instances of the \texttt{Hypothesis} rule). Figure~\ref{fig:exampleProof} shows an example of a proof tree for Peirce's Law in strict Sequent Calculus.
+
+\begin{figure}
+    \centering
+\AxiomC{}
+\RightLabel{\texttt { Hypothesis}}
+\UnaryInfC{$\phi \vdash \phi$}
+\RightLabel{\texttt { RightWeakening}}
+\UnaryInfC{$\phi \vdash \phi, \psi$}
+\RightLabel{\texttt { RightImplies}}
+\UnaryInfC{$\vdash \phi, (\phi \to \psi)$}
+\AxiomC{}
+\RightLabel{\texttt { Hypothesis}}
+\UnaryInfC{$\phi \vdash \phi$}
+\RightLabel{\texttt { LeftImplies}}
+\BinaryInfC{$(\phi \to \psi) \to \phi \vdash \phi$}
+\RightLabel{\texttt { RightImplies}}
+\UnaryInfC{$ \vdash ((\phi \to \psi) \to \phi) \to \phi$}
+\DisplayProof
+\caption{A proof of Peirce's law in sequent calculus. The bottom most sequent (root) is the conclusion.}
+\label{fig:exampleProof}
+\end{figure}
+
+In the Kernel, proof steps are organised linearly, in a list, to form actual proofs. Each proof step refers to its premises using numbers, which indicate the place of the premise in the proof.
+Moreover, proofs are conditional: They can carry an explicit set of assumed sequents, named ``\lstinline{imports}'', which give some starting points to the proof. Typically, these imports will contain previously proven theorems, definitions or axioms (more on that in Section~\ref{sect:TheoremsAndTheories}). For a proof step to refer to an imported sequent, one uses negative integers: $-1$ corresponds to the first sequent of the import list of the proof, $-2$ to the second, etc.
+
+Formally, a proof is a pair made of a list of proof steps and a list of sequents:
+$$
+\lstinline{Proof(steps:List[ProofStep], imports:List[Sequent])}
+$$
+We call the bottom sequent of the last proof step of the proof the "conclusion" of the proof.
+For the proof to be the linearization of a rooted directed acyclic graph, we require that proof steps must only refer to numbers smaller than their own number in the proof. Indeed, using topological sorting, it is always possible to order the nodes of a directed acyclic graph such that for any node, its predecessors appear earlier in the list. The linearization of our Peirce's law proof is shown in Figure~\ref{fig:exampleProofLinear}.
+
+\begin{figure}
+
+\begin{equation}
+\begin{split}
+0 &\texttt { Hypothesis} \quad \phi \vdash \phi\\
+1 &\texttt { RightWeakening}(0) \quad  \phi \vdash \phi, \psi\\
+2 &\texttt { RightImplies}(1) \quad  \vdash \phi, (\phi \to \psi)\\
+3 &\texttt { Hypothesis} \quad \phi \vdash \phi\\
+4 &\texttt { LeftImplies}(2,3) \quad (\phi \to \psi) \to \phi \vdash \phi\\
+5 &\texttt { RightImplies}(4) \quad \vdash ((\phi \to \psi) \to \phi) \to \phi
+\end{split}
+\end{equation}
+    \caption{Linearization of the proof of Peirce's Law as represented in LISA.}
+    \label{fig:exampleProofLinear}
+\end{figure}
+
+\subsection{Proof Checker}
+\label{subs:proofchecker}
+
+In LISA, a proof object has no guarantee to be correct. It is perfectly possible to write a wrong proof. LISA contains a \textit{proof checking} function, which given a proof will verify if it is correct. To be correct, a proof must satisfy the following conditions:
+\begin{enumerate}
+    \item No proof step must refer to itself or a posterior proof step as a premise.
+    \item Every proof step must be correctly constructed, with the bottom sequent correctly following from the premises by the type of the proof step and its arguments.
+\end{enumerate}
+
+Given some proof $p$, the proof checker will verify these points. For most proof steps, this typically involves verifying that the premises and the conclusion match according to a transformation specific to the deduction rule. Note that for most cases where there is an intuitive symmetry in arguments, such as \texttt{RightAnd} or \texttt{LeftSubstIff} for example, permutations of those arguments don't matter.
+
+Hence, most of the proof checker's work consists in verifying that some formulas, or subformulas thereof, are identical. This is where the equivalence checker comes into play. By checking equivalence rather than strict syntactic equality, a lot of steps become redundant and can be merged in a single step. That way, \texttt{LeftAnd}, \texttt{RightOr}, \texttt{LeftIff} become instances of the \texttt{Weakening} rule, and \texttt{RightIff} an instance of \texttt{RightAnd}.
+
+\texttt{LeftNot}, \texttt{RightNot}, \texttt{LeftImplies}, \texttt{RightImplies}, \texttt{LeftRefl}, \texttt{RightRefl}, \texttt{LeftExistsOne},  \texttt{RightExistsOne}  can be omitted altogether. This gives an intuition of how useful the equivalence checker is to cut proof length. It also combines very well with substitution steps.
+
+While most proof steps are oblivious to formula transformations allowed by the equivalence checker, they don't allow transformations of the whole sequent. To easily rearrange sequents according to the semantics of \eqref{eq:SequentSemantic}, one should use the \texttt{Rewrite} step.
+The proof checking function will output a \textit{judgement}:
+$$\lstinline{SCValidProof(proof: SCProof)}$$
+or
+$$\lstinline{SCInvalidProof(proof: SCProof, path: Seq[Int], message: String)}$$
+\lstinline{SCInvalidProof}{} indicates an erroneous proof. The second argument points to the faulty proof step (through subproofs), and the third argument is an error message hinting at the reason why the step is faulty.
+
+
+\section{Theorems and Theories}
+\label{sect:TheoremsAndTheories}
+In mathematics as a discipline, theorems don't exist in isolation. They depend on some agreed-upon set of axioms, definitions, and previously proven theorems. Formally, theorems are developed within theories. A theory is defined by a language, which contains the symbols allowed in the theory, and by a set of axioms, which are assumed to hold true.
+
+In LISA, a \lstinline{theory}{} is a mutable object that starts by being the pure theory of predicate logic: it has no known symbol and no axiom. Then we can introduce into it the symbols of set theory ($\in$, $\emptyset$, $\bigcup$) and the set theory axioms (see Chapter~\ref{chapt:settheory}), or those of any other theory.
+
+To conduct a proof inside a \lstinline{Theory}{}, using its axioms, the proof should be normally constructed and the needed axioms specified in the imports of the proof. Then, the proof can be given to the \lstinline{Theory}{} to check, along with \textit{justifications} for all imports of the proof. A justification is either an axiom, a previously proven theorem, or a definition. The \lstinline{Theory}{} object will check that every import of the proof is properly justified by an axiom introduced in the theory, i.e. that the proof is in fact not conditional in the theory. Then it will pass the proof to the proof checker. If the proof is correct, it will return a \lstinline{Theorem}{} encapsulating the sequent. This sequent will be allowed to be used in all further proofs exactly like an axiom.
+
+
+\subsection{Definitions}
+\label{subs:definitions}
+The user can also introduce definitions in the \lstinline{Theory}{}. 
+LISA's kernel allows defining two kinds of objects: function (or term) symbols and predicate symbols. It is important to remember that in the context of set theory, function symbols are not the usual mathematical functions and predicate symbols are not the usual mathematical relations. Indeed, on one hand a function symbol defines an operation on all possible sets, but on the other hand it is impossible to use the symbol alone, without applying it to arguments, or to quantify over function symbols.
+Actual mathematical functions, on the other hand, are proper sets which contain the graph of a function on some domain. Their domain must be restricted to a proper set, and it is possible to quantify over such set-like functions or to use them without application. These set-like functions are represented by constant symbols. For example, ``$f$ is derivable'' cannot be stated about a function symbol. We will come back to this in Chapter~\ref{chapt:settheory}, but for now let us remember that (non-constant) function symbols are suitable for the intersection $\bigcap$ between sets but not for, say, the Riemann $\zeta$ function.
+
+\begin{figure}
+A definition in LISA is one of those two kinds of objects:
+\begin{lstlisting}[frame=single]
+PredicateDefinition(
+		label: ConstantPredicateLabel,
+		expression: LambdaTermFormula
+)
+\end{lstlisting}
+$$
+\textnormal{Corresponding to ``let } p^n(\vec{x}) := \phi_{\vec{x}}\textnormal{''}
+$$
+\begin{lstlisting}[frame=single]
+FunctionDefinition(
+		label: ConstantFunctionLabel,
+		out: VariableLabel, 
+		expression: LambdaTermFormula
+)
+\end{lstlisting}
+$$
+\textnormal{Corresponding to ``let } f(\vec{x}) \textnormal{ be the unique element s.t. } \phi[f(\vec{x})/y]\textnormal{''}
+$$
+\caption{Definitions in LISA}
+\label{fig:definitions}
+\end{figure}
+Figure~\ref{fig:definitions} shows how to define and use new function and predicate symbols. To define a predicate on $n$ variables, we must provide any formula with $n$ distinguished free variables. Then, this predicate can be freely used and at any time substituted by its definition. Functions are slightly more complicated: To define a function $f$, one must first prove a statement of the form 
+$$\exists ! y. \phi_{y, x_1,...,x_k}$$
+Then we obtain for free the property that 
+$$\forall y. (f(x_1,...,x_k)=y) \leftrightarrow \phi_{y, x_1,...,x_k}$$
+from which we can deduce in particular $\phi[f(x_1,...,x_k)/y]$.
+The special case where $n=0$ defines constant symbols. The special case where $\phi$ is of the form $y=t$, with possibly the $x$'s free in $t$, lets us recover a simpler definition \textit{by alias}, i.e. where $f$ is simply a shortcut for a more complex term $t$.
+This mechanism is typically called \textit{extension by definition}, and allows extending the theory without changing what is or isn't provable. For a detailed explanation, see Part~\ref{part:theory}, Chapter~\ref{chapt:definitions}.
+
+The \lstinline{Theory}{} object is responsible for keeping track of all symbols which have been defined, so that it can detect and refuse conflicting definitions. As a general rule, definitions should have a unique identifier and can't contain free schematic symbols.
+
+Once a definition has been introduced, future theorems can refer to those definitional axioms by importing the corresponding sequents in their proofs and providing justifications for those imports when the proof is verified, just like with axioms.
+
+Figure~\ref{fig:justifications} shows the types of justifications in a theory (Theorem, Axiom, Definition). Figure~\ref{fig:theorysetters} shows how to introduce new such justifications as well as symbols in the theory. Figure~\ref{fig:theorygetters} shows how to obtain various pieces of information from the theory.
+
+
+
+{
+\def\arraystretch{4}
+\begin{figure}
+Justifications:
+\begin{center}
+\begin{tabular}{l|l}
+Explanation & Data Type
+\\ \hline
+
+A proven theorem & 
+\begin{lstlisting}
+Theorem(
+	name: String,
+	proposition: Sequent
+	)
+
+\end{lstlisting}
+\\ %\hline
+
+An axiom of the theory & 
+\begin{lstlisting}
+Axiom(
+	name: String,
+	ax: Formula
+	)
+
+\end{lstlisting}
+\\ %\hline
+
+A predicate definition & 
+\begin{lstlisting}
+PredicateDefinition(
+	label: ConstantPredicateLabel,
+	expression: LambdaTermFormula
+	)
+
+\end{lstlisting} 
+\\ %\hline
+
+A function definition & 
+\begin{lstlisting}
+FunctionDefinition(
+	label: ConstantFunctionLabel,
+	out: VariableLabel,
+	expression: LambdaTermFormula
+	)
+
+\end{lstlisting}
+\\ %\hline
+
+\end{tabular}
+\caption{The different type of justification in a \lstinline{Theory}{} object. \label{fig:justifications}}
+\end{center}
+\end{figure}
+
+\begin{figure}
+Setters/Constructors:
+\begin{center}
+\begin{tabular}{l|l}
+Explanation & Function
+\\ \hline
+
+\makecell[l]{Add a new theorem\\to the theory} & 
+\begin{lstlisting}
+makeTheorem(
+	name: String,
+	statement: Sequent,
+	proof: SCProof,
+	justs: Seq[Justification]
+	)
+\end{lstlisting}
+\\ %\hline
+
+\makecell[l]{Add a new axiom\\ to the theory} & 
+\begin{lstlisting}
+addAxiom(
+	name: String,
+	f: Formula
+	)
+\end{lstlisting}
+\\ %\hline
+
+\makecell[l]{Make a new\\predicate definition} & 
+\begin{lstlisting}
+makePredicateDefinition(
+	label: ConstantPredicateLabel,
+	expression: LambdaTermFormula
+	)
+\end{lstlisting} 
+\\ %\hline
+
+\makecell[l]{Make a new\\function definition} & 
+\begin{lstlisting}
+makeFunctionDefinition(
+	proof: SCProof,
+	justifications: Seq[Justification],
+	label: ConstantFunctionLabel,
+	out: VariableLabel,
+	expression: LambdaTermFormula
+	)
+\end{lstlisting}
+\\ %\hline
+
+\makecell[l]{Add a new symbol\\without definition} & 
+\begin{lstlisting}
+addSymbol(s: ConstantLabel)
+\end{lstlisting} 
+\\ %\hline
+
+\makecell[l]{Add all symbols of a\\formula without definition} & 
+\begin{lstlisting}
+makeFormulaBelongToTheory(phi: Formula)
+\end{lstlisting} 
+\\ %\hline
+
+\makecell[l]{Add all symbols of a\\sequent without definition} & 
+\begin{lstlisting}
+makeSequentBelongToTheory(s: Sequent)
+\end{lstlisting} 
+\\ %\hline
+
+\end{tabular}
+\caption{The mutable interface of a \lstinline{Theory}{} object. \label{fig:theorysetters}}
+\end{center}
+\end{figure}
+
+
+
+\begin{figure}
+Getters:
+\begin{center}
+\begin{tabular}{l|l}
+Explanation & Function
+\\ \hline
+
+\makecell[l]{Check if all symbols in a\\formula, term or sequent\\belong to the theory.} & 
+\begin{lstlisting}
+belongsToTheory(phi: Formula)
+belongsToTheory(t: Term)
+belongsToTheory(s: Sequent)
+\end{lstlisting}
+\\ %\hline
+
+\makecell[l]{Return the list of symbols\\and definitions in the theory} & 
+\begin{lstlisting}
+language()
+\end{lstlisting}
+\\ %\hline
+
+\makecell[l]{Check if a label is\\a symbol of the theory} &
+\begin{lstlisting}
+isSymbol(label: ConstantLabel)
+\end{lstlisting}
+\\ %\hline
+
+\makecell[l]{Check if a label is \textit{not}\\already a symbol of the theory} &
+\begin{lstlisting}
+isAvailable(label: ConstantLabel)
+\end{lstlisting}
+\\ %\hline
+
+\makecell[l]{Return the list of\\axioms in the theory} &
+\begin{lstlisting}
+axiomsList()
+\end{lstlisting}
+\\ %\hline
+
+\makecell[l]{Check if a formula is\\an axiom of the theory} &
+\begin{lstlisting}
+isAxiom(f: Formula)
+\end{lstlisting}
+\\ %\hline
+
+\makecell[l]{Return the Axiom matching\\ the given name or formula,\\ if it exists} &
+\begin{lstlisting}
+getAxiom(f: Formula)
+getAxiom(name: String)
+\end{lstlisting}
+\\ %\hline
+\makecell[l]{Return the Definition\\of a given Label, if defined} &
+\begin{lstlisting}
+getDefinition(label: ConstantLabel)
+\end{lstlisting}
+\\ %\hline
+
+\makecell[l]{Return the Theorem object with\\the given name, if it is one.} &
+\begin{lstlisting}
+getTheorem(name: String)
+\end{lstlisting}
+\\ %\hline
+
+\end{tabular}
+\caption{The static interface of a \lstinline{Theory}{} object. \label{fig:theorygetters}}
+\end{center}
+\end{figure}
+}
+\newpage
+\section{Kernel Supplements and Utilities}
+\label{sect:kernelsuppl}
+The Kernel itself is a logical core, whose main purpose is to attest the correctness of mathematical developments and proofs. In particular, it is not intended to be used directly to formalise a large library, but rather as either a foundation for LISA's user interface and automation, or as a tool to write and verify formal proofs produced by other programs.
+Nonetheless, LISA's kernel comes with a set of utilities and features that make the kernel more usable.
+LISA also provides a set of utilities and a DSL\footnote{Domain Specific Language} to ease and organise the writing of proofs. This is especially directed to people who want to build understanding and intuition regarding formal proofs in sequent calculus.
+
+\subsection{Printer and Parser}
+This feature is under active development.
+
+\subsection{Writing theory files}
+LISA provides a canonical way of writing and organizing Kernel proofs by means of a set of utilities and a DSL\footnote{Domain Specific Language} made possible by some Scala 3 features such as string interpolation, extensions and implicits. This is especially directed at people who want to build understanding and intuition regarding formal proofs in sequent calculus.
+The way to write a new theory file for a mathematical development is:
+\begin{lstlisting}[language=Scala, frame=single]
+object MyTheoryName extends lisa.Main {
+
+}
+\end{lstlisting}
+And that's it! To write a theorem, the recommended syntax is (for example):
+
+\begin{lstlisting}[language=Scala, frame=single]
+object MyTheoryName extends lisa.Main {
+
+  THEOREM("theoremName") of "desired conclusion" PROOF {
+    
+    ???: Proof
+    
+  } using (listOfJustifications)
+  show
+}
+\end{lstlisting}
+\lstinline{show}{} is optional and prints the last proven theorem. We can similarly make definitions:
+\begin{lstlisting}[language=Scala, frame=single]
+object MyTheoryName extends lisa.Main {
+
+  val myFunction = 
+    DEFINE("symbol", x, y) as definition(x,y)
+  show
+}
+\end{lstlisting}
+This works for definitions of function and predicate symbols with a direct definition. For indirect definitions (via $\exists !$), use the following:
+\begin{lstlisting}[language=Scala, frame=single]
+object MyTheoryName extends lisa.Main {
+
+  val testdef =
+    DEFINE("symbol", x, y) asThe z suchThat {
+       ???:Formula
+     } PROOF {
+       ???:Proof
+    } using (listOfJustifications)
+    show
+}
+\end{lstlisting}
+
+It is important to note that when multiple such files are developed, they all use the same underlying \lstinline{RunningTheory}{}. This makes it possible to use previously proved results by a simple \lstinline{import}{} statement, as one would import a regular object. Similarly, one should also import as usual the automation and tactics developed alongside. It is expected that in the medium term \lstinline{lisa.Main} will come with basic automation.
+
+To check the result of a developed file, and verify that the proofs contain no error, it is possible to run such a library object. All imported theory objects will be run through as well, but only the result of the selected one will be printed.
+
+It is possible to refer to a theorem or axiom that has been previously proven or added, using its name. The syntax is \lstinline{thm"theoremName"}{} or \lstinline{ax"axiomName"}{}. This makes it possible to write, for example, \lstinline{thm"theoremName".show}{} and \lstinline{... using (ax"comprehensionSchema")}{}. Figure~\ref{fig:kernellibrary} shows a typical example of set theory development.
+
+
+\begin{figure}
+\begin{lstlisting}[language=Scala, frame=single]
+object MyTheoryName extends lisa.Main {
+  THEOREM("russelParadox") of 
+    (*@ $\forall$ @*)x. (x(*@$\in$@*)?y)(*@$\leftrightarrow$@*) (*@$\neg$@*)(x(*@$\in$@*)x)(*@$\vdash$@*) PROOF {
+      val y = VariableLabel("y")
+      val x = VariableLabel("x")
+      val s0 = RewriteTrue(in(y, y) <=> !in(y, y) |-())
+      val s1 = LeftForall(
+        forall(x, in(x, y) <=> !in(x, x)) |- (),
+        0, in(x, y) <=> !in(x, x), x, y
+      )
+      Proof(s0, s1)
+    } using ()
+  thm"russelParadox".show
+  
+  
+  THEOREM("unorderedPair_symmetry") of
+    "(*@$\vdash$@*)(*@$\forall$@*)y, x. {x, y} = {y, x}" PROOF {
+      ...
+    } using (ax"extensionalityAxiom", ax"pairAxiom")
+  show
+  
+  
+  val oPair = 
+    DEFINE("", x, y) as pair(pair(x, y), pair(x, x))
+  
+}
+\end{lstlisting}
+\caption{Example of library development in LISA Kernel}
+\label{fig:kernellibrary}
+\end{figure}
+
+
+
+
+
+%Probably comes after discussing the front
+\chapter{Set Theory}
+\label{chapt:settheory}
+LISA is based on set theory. More specifically, it is based on ZF extended with either the axiom of choice, the axiom of global choice, or Tarski universes (the exact choice is not yet settled).
+
+ZF set theory stands for Zermelo-Fraenkel set theory. It contains a set of initial predicate symbols and function symbols, shown in Figure~\ref{fig:symbolszf}. It also contains the 7 axioms of Zermelo (Figure~\ref{fig:axiomsz}), which are technically sufficient to formalize a large initial portion of mathematics, plus the axiom of replacement of Fraenkel (Figure~\ref{fig:axiomszf}), which is needed to formalize more complex mathematical theories.
+In a more typical mathematical introduction to Set Theory, $ZF$ would naturally only contain the set membership symbol $\in$. Axioms defining the other symbols would then only express the existence of functions or predicates with those properties, from which we could get the same symbols using extensions by definitions.
+
+In a very traditional sense, an axiomatization is any possibly infinite semi-recursive set of axioms. Hence, in its full generality, an axiomatization should be any function producing possibly infinitely many formulas.
+This is however not a convenient definition. In practice, all infinite axiomatizations are schematic, meaning that they are expressible using schematic variables. Axioms \ref{axz:comprehension} (comprehension schema) and \ref{axzf:replacement} (replacement schema) are such examples of axiom schemas, and motivate the use of schematic variables in LISA.
+
+
+
+\begin{figure}
+\begin{center}
+\begin{tabular}{l|c|l}
+{} & Math symbol & LISA Kernel\\ \hline
+Set Membership predicate & $\in$ & \lstinline$in(s,t)$\\
+Subset predicate & $\subset$ & \lstinline$subset(s,t)$ \\
+Empty Set constant & $\emptyset$ & \lstinline$emptyset()$ \\
+Unordered Pair constant & $(\cdot, \cdot )$ & \lstinline$pair(s,t)$ \\
+Power Set function & $\mathcal P$ & \lstinline$powerSet(s)$ \\
+Set Union/Flatten function & $\bigcup$ & \lstinline$union(x)$ \\
+\end{tabular}
+\caption{The basic symbols of ZF}
+\label{fig:symbolszf}
+\end{center}
+\end{figure}
+
+\begin{figure}
+\begin{axz}[empty set]\label{axz:empty}
+$\forall x. x \notin \emptyset$
+\end{axz}
+\begin{axz}[extensionality]\label{axz:extensionality}
+$\forall x, y. (\forall z. z \in x \iff z \in y) \iff (x = y)$
+\end{axz}
+\begin{axz}[subset]\label{axz:subset}
+$\forall x, y. (x \subset y) \iff (\forall z. (z \in x) \implies (z \in y))$
+\end{axz}
+\begin{axz}[pair]\label{axz:pair}
+$\forall x, y, z. (z \in (x, y)) \iff ((z = x) \lor (z = y))$
+\end{axz}
+\begin{axz}[union]\label{axz:union}
+$\forall x, z. (x \in \operatorname{U}(z)) \iff (\exists y. (x \in y) \land (y \in z))$
+\end{axz}
+\begin{axz}[power]\label{axz:power}
+$\forall x, y. (x \in \operatorname{\mathcal{P}}(y)) \iff (x \subset y)$
+\end{axz}
+\begin{axz}[foundation]\label{axz:foundation}
+$\forall x. (x \neq \emptyset) \implies (\exists y. (y \in x) \land (\forall z. (z \in y) \implies (z \notin x)))$
+\end{axz}
+\begin{axz}[comprehension schema]\label{axz:comprehension}
+$\forall z, \vec{v}. \exists y. \forall x. (x \in y) \iff ((x \in z) \land \phi(x, z, \vec{v}))$
+\end{axz}
+\caption{Axioms for Zermelo set theory.}
+\label{fig:axiomsz}
+\end{figure}
+
+\begin{figure}
+\begin{axzf}[replacement schema]\label{axzf:replacement}
+$$\forall a. (\forall x. (x \in a) \implies \exists ! y. \phi(a, \vec{v}, x, y)) \implies $$
+$$(\exists b. \forall x. (x \in a) \implies (\exists y. (y \in b) \land \phi(a, \vec{v}, x, y)))$$
+\end{axzf}
+\caption{Axioms for Zermelo-Fraenkel set theory.}
+\label{fig:axiomszf}
+\end{figure}
+
+
+
diff --git a/Reference Manual/part2.tex b/Reference Manual/part2.tex
new file mode 100644
index 0000000000000000000000000000000000000000..2c32d0c6e1f9e2a86ed849216d4f75c11e5b75ab
--- /dev/null
+++ b/Reference Manual/part2.tex	
@@ -0,0 +1,85 @@
+\part{Selected Theoretical Topics}
+\label{part:theory}
+
+\chapter{Set Theory and Mathematical Logic}
+\section{First Order Logic with Schematic Variables}
+\label{sect:theoryfol}
+\section{Extension by Definition}
+An extension by definition is the formal way of introducing new symbols in a mathematical theory.
+Theories can be extended into new ones by adding new symbols and new axioms to it. We're interested in a special kind of extension, called \textit{conservative extension}.
+\begin{defin}[Conservative Extension]
+A theory $\mathcal{T}_2$ is a conservative extension over a theory $\mathcal{T}_1$ if:
+\begin{itemize}
+	\item $\mathcal{T}_1 \subset \mathcal{T}_2$
+	\item For any formula $\phi$ in the language of $\mathcal{T}_1$, if $\mathcal{T}_2 \vdash 		\phi$ then $\mathcal{T}_1 \vdash \phi$
+\end{itemize}
+\end{defin}
+
+An extension by definition is a special kind of extension obtained by adding a new symbol and an axiom defining that symbol to a theory. If done properly, it should be a conservative extension.
+
+\begin{defin}[Extension by Definition]
+A theory $\mathcal{T}_2$ is an extension by definition of a theory $\mathcal{T}_1$ if:
+\begin{itemize}
+\item $\mathcal{L}(\mathcal{T}_2) = \mathcal{L}(\mathcal{T}_1) \cup \lbrace S \rbrace$, where $S$ is a single new function or predicate symbol, and
+\item $\mathcal{T}_2$ contains all the axioms of $\mathcal{T}_1$, and one more of the following form:
+\begin{itemize}
+\item If $S$ is a predicate symbol, then the axiom is of the form $\phi_{x_1,...,x_k} \iff S(x_1,...,x_k)$, where $\phi$ is any formula with free variables among $x_1,...,x_k$.
+\item If $S$ is a function symbol, then the axiom is of the form $\phi_{y, x_1,...,x_k} \iff y = S(x_1,...,x_k)$, where $\phi$ is any formula with free variables among $y, x_1,...,x_k$.
+Moreover, in that case we require that 
+$$
+\exists ! y. \phi_{y, x_1,...,x_k}
+$$
+is a theorem of $\mathcal{T}_1$
+\end{itemize}
+\end{itemize}
+\end{defin}
+We also say that a theory $\mathcal{T}_k$ is an extension by definition of a theory $\mathcal{T}_1$ if there exists a chain $\mathcal{T}_1$, $\mathcal{T}_2$, ... , $\mathcal{T}_k$ of extensions by definitions.
+
+
+For function definitions, it is common in logic textbooks to only require the existence of $y$ and not its uniqueness. The axiom one would then obtain would only be $\phi[f(x_1,...,x_n)/y]$. This also leads to a conservative extension, but it turns out not to be enough in the presence of axiom schemas (axioms containing schematic symbols).
+\begin{lemma}
+In ZF, an extension by definition without uniqueness doesn't necessarily yield a conservative extension if the use of the new symbol is allowed in axiom schemas.
+\end{lemma}
+\begin{proof}
+In ZF, consider the formula $\phi_c := \forall x. \exists y. (x \neq \emptyset) \implies y \in x$ expressing that nonempty sets contain an element, which is provable in ZF.
+
+Use this formula to introduce a new unary function symbol $\operatorname{choice}$ such that $\operatorname{choice}(x) \in x$. Using it within the axiom schema of replacement we can obtain for any $A$
+$$
+\lbrace (x, \operatorname{choice}(x)) \mid x \in A \rbrace
+$$
+which is a choice function for any set $A$. Hence using the new symbol we can prove the axiom of choice, which is well known to be independent of ZF, so the extension is not conservative.
+\end{proof}
+Note that this example wouldn't work if the definition required uniqueness on top of existence.
+For the definition with uniqueness, there is a stronger result than only conservativity.
+\begin{defin}
+A theory $\mathcal{T}_2$ is a fully conservative extension over a theory $\mathcal{T}_1$ if:
+\begin{itemize}
+\item It is conservative
+\item For any formula $\phi_2$ with free variables $x_1, ..., x_k$ in the language of $\mathcal{T}_2$, there exists a formula $\phi_1$ in the language of $\mathcal{T}_1$ with free variables among $x_1, ..., x_k$ such that
+$$\mathcal{T}_2 \vdash \forall x_1...x_k. (\phi_1 \leftrightarrow \phi_2)$$
+\end{itemize}
+\end{defin}
+\begin{thm}
+An extension by definition with uniqueness is fully conservative.
+\end{thm}
+The proof is done by induction on the height of the formula; it isn't difficult, but it is fairly tedious.
+\begin{thm}
+If an extension $\mathcal{T}_2$ of a theory $\mathcal{T}_1$ with axiom schemas is fully conservative, then for any instance $\alpha$ of an axiom schema containing a new symbol, $\Gamma \vdash \alpha$, where $\Gamma$ contains no axiom schema instantiated with new symbols.
+
+\end{thm}
+\begin{proof}
+Suppose
+$$
+\alpha = \alpha_0[\phi / ?p]
+$$
+Where $\phi$ has free variables among $x_1,...,x_n$ and contains a defined function symbol $f$. By the previous theorem, there exists $\psi$ such that
+$$\vdash \forall A, w_1, ..., w_n, x. \phi \leftrightarrow \psi$$
+or equivalently, since a formula and its universal closure are deducible from each other,
+$$\vdash \phi \leftrightarrow \psi$$
+which reduces to
+$$
+\alpha_0[\psi / ?p] \vdash \alpha
+$$
+Since $\alpha_0[\psi / ?p]$ is an axiom of $\mathcal{T}_1$, we reach the conclusion.
+\end{proof}
+