\documentstyle[twoside]{article}
\pagestyle{myheadings}
\markboth{\hfil Implicit Quasilinear
Differential Systems\hfil EJDE--1999/10} {EJDE--1999/10\hfil
Miguel C. Mu\~noz-Lecanda \&  N. Rom\'an-Roy \hfil}
\begin{document}
\title{\vspace{-1in}\parbox{\linewidth}{\footnotesize\noindent
{\sc  Electronic Journal of Differential Equations},
Vol. {\bf 1999}(1999), No.~10, pp. 1--33. \newline
ISSN: 1072-6691. URL: http://ejde.math.swt.edu or http://ejde.math.unt.edu
\newline ftp  ejde.math.swt.edu \quad ftp ejde.math.unt.edu (login: ftp)}
 \vspace{\bigskipamount} \\
%
  Implicit quasilinear differential systems:\\ a geometrical approach
\thanks{ {\em 1991 Mathematics Subject Classifications:}
34C40, 57R25, 58A10, 58F99, 70Q05.
\hfil\break\indent
{\em Key words and phrases:} Implicit differential equations,
 constrained systems, \hfil\break\indent
 vector fields, differentiable manifolds.
\hfil\break\indent
\copyright 1999 Southwest Texas State University  and University of
North Texas. \hfil\break\indent
Submitted November 30, 1998. Published April 1, 1999.} }
\date{}
%
\author{Miguel C. Mu\~noz-Lecanda \&  N. Rom\'an-Roy}
\maketitle

 \begin{abstract}
This work is devoted to the study of systems of implicit quasilinear
differential equations.  In general, no set of initial conditions is
admissible for the system.  It is shown how to obtain a vector field 
whose integral curves are the solution of the system, thus reducing 
the system to one that is ordinary.  
Using geometrical techniques, we give an algorithmic procedure in
order to solve these problems for systems of the form
$A({\bf x})\dot {\bf x} =\alpha ({\bf x})$ with $A({\bf x})$
being a singular matrix.  As particular cases, we recover some results of
Hamiltonian and Lagrangian Mechanics.  In addition, a detailed study of the
symmetries of these systems is carried out.  This algorithm is applied to
several examples arising from technical applications related to control
theory.
\end{abstract}

\newtheorem{teor}{Theorem}
\newtheorem{prop}{Proposition}
\newtheorem{corol}{Corollary}
\newtheorem{lem}{Lemma}
\newtheorem{definition}{Definition}
\newtheorem{assum}{Assumption}

\font\tenmsb=msbm10   % A font for R in Reals.
\font\teneufm=eufm10  % Euler Fraktur  font.

% Notational macros used throughout the paper.
\def\feble#1{\mathrel{\mathop =\limits_{#1}}}  % "weak equality": = with the submanifold #1 set below it (holds only on #1)
\def\vf{\mbox{\teneufm X}}                     % Fraktur X: module of vector fields, as in \vf(M)
\def\Cinfty{{\rm C}^\infty}                    % upright C^infinity (smooth) symbol
\def\Tan{{\rm T}}                              % upright T: tangent bundle/space, as in \Tan_m M
\def\derpar#1#2{\frac{\partial{#1}}{\partial{#2}}}  % partial derivative: d#1/d#2
\def\coor#1#2#3{{#1}^{#2}, \ldots, {#1}^{#3}}  % coordinate list #1^{#2}, ..., #1^{#3}
\def\Lie{\mathop{\rm L}\nolimits}              % operator-style upright L (Lie derivative; not used in this excerpt)
\def\inn{\mathop{i}\nolimits}                  % operator-style i (interior contraction; not used in this excerpt)
\def\d{{\rm d}}                                % upright d for differentials, as in \d x^i (NB: overrides the kernel \d accent)

\section{Introduction}

Implicit systems of differential equations appear in many
theoretical developments in Physics (such as analytical mechanics,
relativistic models or gauge theories), as well as in technical
applications (for instance,  circuit theory, network analysis,
large-scale interconnected systems or social-economic systems).
Systems of this kind appear for two reasons:
\begin{itemize}
\item
There exist some constraints relating the variables and
their derivatives up to a certain order. Thus the number of degrees
of freedom of the system is often less than the number of variables.
\item
The system has some kind of physical symmetry:
there is a group of symmetries acting on the phase space
of the system and some functions (the constraints) are invariant.
\end{itemize}

The outstanding feature that distinguishes this kind of
differential system  is that the equations
are not written in normal form. That is to say, all the highest
order derivatives of the variables are not isolated as functions
of the lower ones. Hence the general form of such a system (of
order $r$) is ${\bf F}(t,{\bf x},{\bf x}',\ldots ,{\bf
x}^{(r)})=0$. In this paper we only deal with systems of the form
 $$
\sum_jA_{ij}({\bf x},{\bf x}',\ldots ,{\bf x}^{(r-1)}){x^j}^{(r)}=
 \alpha_i({\bf x},{\bf x}',\ldots ,{\bf x}^{(r-1)})
 $$
where $A_{ij}$ is a matrix. Taking into account that every system
of order $r$ can be transformed into another of first order, only
systems of the form
 \begin{equation}
\sum_jA_{ij}({\bf x})\dot x^j = \alpha_i({\bf x}) \quad{\rm or\
simply}\quad A({\bf x})\dot{\bf x} = \alpha ({\bf x}) \label{ecu1}
\end{equation}
 need to be considered. The question then is to find the functions
  $x^j(t)$ which give the evolution of the variables.

If the matrix $A$ is regular, the system is reduced to the normal
form by multiplying both members of the equation by the inverse
matrix $A^{-1}$. In this case, the system is given by a vector
field; that is, an ordinary system of differential equations in
the normal form, and the solutions are the integral curves of this
vector field.

In this paper, we assume that the matrix $A$ is singular and has constant
rank.
Therefore, there is no vector field representing the system, and it cannot
be
integrated analytically or numerically.  This fact creates a new set of
problems.  In fact, to obtain an appropriate geometric solution to the
problem,
it is stated in terms of a differentiable manifold $M$ acting as the phase
space
of the system.  Then the above equations represent the coordinate form of
the
general problem.

As a consequence of the singularity of the matrix $A$, the system
is possibly incompatible: not every set of values of the
variables is admissible as initial conditions for the system.
Therefore, in order to solve the system, the first step of the
procedure must be to identify the set of admissible initial
conditions; these are termed ``primary'' constraints. This is not the
end of the problem however, since another consistency condition is
required. In fact, the evolution of the variables is restricted
within the set of initial conditions. In other words, for every
value of the evolution parameter $t$, the functions ${\bf x}(t)$
have to take on values in that set. Hence, the problem is not solved
until a set of admissible initial conditions verifying this
consistency condition is found. We will call this the final
constraint submanifold. In order to transform the system into an
ordinary one, a vector field tangent to this submanifold must be
found such that its integral curves are the solutions of the
initial system. This is the geometrical interpretation of the consistency
condition. Furthermore, a problem of non-uniqueness of the
solution (which is closely related to the identification of the
true physical states of the system) can also appear.

In summary, there are two different problems. The first consists in
finding the final constraints submanifold, and the second in
transforming the system into a vector field tangent to this
submanifold.

Solutions to these problems have been found by various means. In
theoretical mechanics, these kinds of equations are called
``singular'', and the development of Analytical Mechanics provides
better means of obtaining equations of motion of mechanical
systems in which the true degrees of freedom are clearly
identified, and the corresponding system of differential equations
are non-singular (i.e., regular) (see, for instance,
\cite{Go-cm}). In theoretical physics, mechanical systems
described by means of singular equations are both usual and
important. Dirac and Bergmann \cite{Di-rmp}, \cite{BG-tps},
\cite{Di-lqm} were the first to make an analytical study of such
systems, although the greatest advances were made when the
techniques of Differential Geometry were applied for describing
mechanical systems \cite{AM-fm}. The works of Gotay {\it et al}
\cite{GNH-pca}, \cite{GN-pls1}, \cite{GN-pls2} and many others
(see, for instance, \cite{Ca-tsl}, \cite{GPR-hols}, \cite{MR-ltps}
and all the references quoted therein) have been devoted to
solving the problem of the compatibility and consistency of the
equations of motion for singular mechanical systems, both in the
Hamiltonian and the Lagrangian formulation, including systems of a
higher order.

Nevertheless, equations of motion for mechanical systems are
particular cases of differential equations, since they are
obtained from a variational principle and start from a selected
function (the {\sl Lagrangian} or the {\sl Hamiltonian}) which
contains all the dynamical information of the system. As a
consequence, the functions $\alpha_i$ and the matrix $A$ are
closely related to that dynamical function. In geometrical terms,
$A$ is the matrix representation of a closed $2$-form and
$\alpha_i$ are the component functions of a closed $1$-form.

The most general case consists in having any matrix of functions
and any $1$-form (not necessarily closed). This situation arises
in many technical applications, as mentioned above. Initial
attempts at solving the problems posed by these systems were aimed
at finding a method of ``inversion'' for the singular matrix of
the system using analytical and algebraic methods. Thus in
\cite{Ca-80} equations like (\ref{ecu1}) with $A$ and $\alpha$
constant matrices are studied, while in \cite{Ta-75} a
generalization of this case is considered, where these matrices
depend on a small parameter and are singular for some values of
this parameter. Questions on singular systems in control theory
are also analyzed in \cite{DD-dn},\cite{VLK-gss}, \cite{Co-84} and
\cite{TV-iss}. Numerical methods relating to the numerical
treatment of ordinary differential equations have likewise been
developed (see, for instance, \cite{HLR-85}).

The results thus obtained however are not completely satisfactory.
Many questions concerning the method and the solution remain
unclear. It is for this reason that some authors have investigated
the geometrical treatment of these problems. So, \cite{Ta-ce} is a
work where a general geometric framework for certain kinds of
implicit equations is given, and a more precise formulation is
developed in \cite{HB-84} for these systems, as well as a
reduction algorithm for these systems. Questions related to the
existence and uniqueness of solutions are treated in \cite{RR-94}
(which is a more geometric treatment of previous works of the
authors, such as \cite{Re-84} and \cite{RR-91}). The existence of
symmetries and constants of motion, as well as integrability
conditions, for these singular systems are analyzed in
\cite{MMT-92}, \cite{MMT-95} and \cite{MMT-97}. A different
geometric framework is given in \cite{CO-88} by studying normal
canonical forms for singular systems. In \cite{GP-ggf}, Gr\`acia
and Pons have pioneered the study of these systems, in the most
general situation, from a geometric point of view. Using fiber
bundles techniques and the concept of {\it sections along a map},
they have developed a geometrical framework which enables us to
describe these generalized singular systems, and to obtain
therefrom the theoretical mechanical models as a particular case
(even for higher order theories). Finally, \cite{GMR-95} is a
brief review of some of these problems and techniques.

In this paper, our goal is to use geometrical techniques to study
implicit systems of the kind mentioned above, in addition to some
of their applications. Within this context, our objectives could
be stated more precisely as:
\begin{enumerate}
\item
To give a simple geometrical description of these systems,
including the study of symmetries and the analysis of the
mechanical systems as a particular case (``simple'', in the sense
that the geometrical objects involved are as easy as possible).
Matrix $A$ is understood as the representation of a bilinear form.
\item
To clarify and solve the problems of the compatibility and
consistency of implicit differential equations dealt with.
\item
To develop a detailed algorithm which gives the solution to the
problem. This algorithm is the generalization of a previous one
used for Hamiltonian and Lagrangian systems.
\item
To apply the obtained results to systems arising from particular problems.
\end{enumerate}

In general, the algorithm we develop is easier to implement than
the analytical or algebraic methods, and our geometric treatment
is simpler than those referred to above.

The paper is organized as follows: In section $2$, we establish
some preliminary algebraic results which are then used in section
$3$, which in turn is devoted to the study of the differentiable
theory, and where we describe the algorithmic procedure which
solves the problems of compatibility and consistency, and which
constitutes the main part of the work. In section $4$, we study
symmetries for implicit differential equations. Section $5$ is
devoted to explaining specific cases of first and second-order
implicit differential equations recovering as particular cases,
some results of the mechanics of singular systems. The final
section contains a detailed analysis of some examples of {\sl
Control theory}.

All the manifolds and maps are assumed to be real and $\Cinfty$,
and the notation is the usual one (for example, see \cite{AM-fm}).
In particular, $\vf (M)$ and ${\mit\Omega}^p(M)$ are respectively
the $\Cinfty (M)$-modules of differentiable vector fields and
$p$-forms on $M$.
%Sum over repeated indices is understood.

\section{Algebraic theory}

\subsection*{The general case}

Let $E$ be a finite dimensional real vector space with $\dim\ E  =
n$ and $T$ a covariant $2$-tensor on $E$ (that is a bilinear
form). Contraction with this tensor defines the following maps,
which we call {\it left {\rm and} right contractions}
respectively: $$
\begin{array}{cccccc}
\Re_T & \colon & E & \to & E^* &
\\
& & v & \mapsto & \Re_T (v) & \colon u \mapsto T(v,u)
\\
\pounds _T & \colon & E & \to & E^* &
\\
& & v & \mapsto & \pounds _T (v) & \colon u \mapsto T(u,v)
\end{array}
$$ Both are ${\mbox{\tenmsb R}}$-linear. If $T$ is a symmetric (
or antisymmetric) tensor then
 $\Re_T = \pounds _T$ ( or $\Re_T = -\pounds _T$).
 We will write $\Re (v)T$ and $\pounds (v)T$ for the
images of $v$ by $\Re_T$ and $\pounds _T$ respectively.
 It is easy to prove that
 $\ker\, \Re_T = ({\rm Im}\, \pounds _T)^\vdash$
and $\ker\, \pounds _T = ({\rm Im}\, \Re_T)^\vdash$, then $\dim\
(\ker\, \Re_T) = \dim\ (\ker\, \pounds _T)$. (Let us recall that if
$S$ is a subspace of $E$, then $S^\vdash := \{ \alpha \in E^* \
\mid \ \alpha (v) = 0, \ \forall v  \in S \}$ is the incident
subspace of $S$. The same holds for a subspace $V$ of $E^*$). If
both $\Re_T$ and $\pounds _T$  are injective then we say that
$T$ has no radical.

Let $\{ e_1,\ldots, e_n \}$ be a basis of $E$ and $\{
\omega^1,\ldots, \omega^n \}$ its dual basis. If the expression of
$T$ is $T = \sum_{i,j} a_{ij} \omega^i \otimes \omega^j$ and $A$
is the matrix $(a_{ij})$, then the matrices of $\Re_T$ and
$\pounds _T$ in those bases are $^tA$ and $A$ respectively.

Let $\alpha \in E^*$ be a linear $1$-form.
We are interested in the study of the following equations:
\begin{eqnarray}
\pounds (v)T &=& \alpha
\label{ec1}
\\
\Re (v)T &=& \alpha \label{ec2}
\end{eqnarray}
That is: is there any vector
$v \in E$ such that $\Re_T(v) = \alpha$  (or  $\pounds _T  (v)  =
\alpha$ for  the second)?

Obviously the necessary and sufficient condition for these
equations to have any solution is that
 $\alpha \in {\rm Im}\,\pounds _T$ and $\alpha \in {\rm Im}\, \Re_T$,
 respectively.

The following proposition transforms this obvious condition
into a more suitable one:

\begin{prop}
\begin{enumerate}
\item
The necessary and sufficient condition for $\pounds (v) T = \alpha$ to
have any solution is that $\ker\, \pounds _T \subset \ker\,
\alpha$.
\item
The necessary and sufficient condition for $\Re (v) T = \alpha$ to
have any solution is that $\ker\, \Re_T \subset \ker\, \alpha$.
\end{enumerate}
\label{prop1}
\end{prop}

\paragraph{Proof.}
\begin{enumerate}
\item
Condition is necessary. In fact, suppose $u \in E$ verifies
$\Re_T(u) = \alpha$, then for any $e \in \ker\, \pounds _T$ we
have: $$ \alpha (e) = \Re_T (u)(e) = T(u,e) = 0
$$
Condition is sufficient. If $\ker\, \pounds _T \subset \ker\,
\alpha$ then $\alpha \in (\ker\, \pounds _T)^\vdash = {\rm Im} \
\Re_T$ and the equation has a solution.

\item The same as above. \hfill$\diamondsuit$
\end{enumerate}

\paragraph{Comments.}
\begin{itemize}
\item
In the given basis of $E$ and $E^*$, the matrix expressions of
(\ref{ec1}) and (\ref{ec2}) are $$ A \left( \matrix {\lambda^1 \cr
\vdots \cr \lambda^n \cr} \right) = \left( \matrix {\alpha_1 \cr
\vdots \cr \alpha_n \cr} \right) \quad ; \quad ^t A \left( \matrix
{\lambda^1 \cr \vdots \cr \lambda^n \cr} \right) = \left( \matrix
{\alpha_1 \cr \vdots \cr \alpha_n \cr} \right) $$ respectively,
where $v = \sum_i \lambda^i e_i$, $\alpha = \sum_i \alpha_i
\omega^i$ and $^tA$ is the transpose of $A$.
\item
If $\dim\ (\ker\, \Re_T)  =  \dim\  (\ker\,  \pounds _T)  = 0$,
then  the equations (\ref{ec1}) and (\ref{ec2}) have a single
solution for every $\alpha$.
\item
If $\dim\ (\ker\, \Re_T)  =  \dim\  (\ker\,  \pounds _T)  >  0$,
and  the equations (\ref{ec1}) and (\ref{ec2}) have a solution,
then it is not single. The difference of two solutions of
(\ref{ec1}) is an element of $\ker\, \Re_T$ and two solutions of
(\ref{ec2}) differ in an element of $\ker\, \pounds _T$.
\item
If $T$ is symmetric or antisymmetric then both equations are the
same (except for a sign in the second case).
\item
The tensor $T$ defines another $T'$ by means of the formula
$T'(u,v) = T(v,u)$.
If $A$ is the matrix of $T$ in a basis, then
$^tA$ is the matrix of $T'$ in the same basis.
Hence, the problem
$\Re (v)T=\alpha$ is the same as
$\pounds (v)T'=\alpha$.
 From now on we will consider the ``left'' problem.
\end{itemize}

\subsection*{Restriction to subspaces}

A problem we need to solve is the following: Let $H$ be a subspace
of $E$, is there any $v\in H$ such that
\begin{equation}
\pounds (v)T = \alpha
\label{ec3}
\end{equation}
where $\alpha \in E^*$?

In order to solve this we require the following

\begin{lem}
If $H$ is a subspace of $E$ then
\begin{enumerate}
\item
$\Re_T (H) = (H^\perp )^\vdash$
\item
$\pounds _T (H) = (^\perp H)^\vdash$
\end{enumerate}
where
$H^{\perp} := \{ u \in E \mid T (v,u) = 0 \ ,\ \forall v \in H \}$
is the {\it right orthogonal subspace} of $H$ and
$^{\perp}H := \{ u \in E \mid T (u,v) = 0 \ ,\ \forall v \in H \}$
is the {\it left orthogonal subspace} of $H$.
\end{lem}

\paragraph{Proof.} \begin{enumerate}
\item
We have that $u \in (\Re_T (H))^\vdash$ if and only if $T(v,u) =
0$ for every $v\in H$ and this is equivalent to saying that $u \in
H^\perp$. This shows that $(\Re_T (H))^\vdash = (H^\perp )$ and,
since we are working with  finite  dimensional  spaces,  this
proves the assertion.
\item
The proof is much the same as the last one. \hfill$\diamondsuit$
\end{enumerate}

Now, in a way similar to the above proposition, we can prove that:

\begin{prop}
The necessary and sufficient condition for the existence of any
$v \in H$ such that equation (\ref{ec3}) holds is that
$\alpha \in (H^\perp )^\vdash$.
\end{prop}

\section{Differentiable theory}

\subsection*{Statement of the problem and compatibility conditions}

Let $M$ be a $n$-dimensional differentiable manifold and
$T$ a $2$-covariant tensor field on $M$; that is, a section of  the
bundle $\Tan^* M \otimes \Tan^* M\to M$.

Contraction  with  this  tensor  defines  the  following
$\Cinfty (M)$-module homomorphisms:
$$
\begin{array}{cccccc}
\Re_T & \colon & \vf (M) & \to & {\mit\Omega}^1(M) &
\\
& & X & \mapsto & \Re_T (X) & \colon Y \mapsto T(X,Y)
\\
\pounds _T & \colon & \vf (M) & \to & {\mit\Omega}^1(M) &
\\
& & X & \mapsto & \pounds _T (X) & \colon Y \mapsto T(Y,X)
\end{array}
$$ We will write $\pounds (X)T$ and $\Re (X)T$ for the images of $X$
by $\Re_T$ and $\pounds _T$ respectively.

Let $(U,x^i)$ be a local system in $M$. Then, in the open set $U$
we have $$ T = \sum_{i,j} a_{ij}\d x^i \otimes \d x^j $$ where
$a_{ij} = T\left( \derpar{}{x^i},\derpar{}{x^j}\right)$. If we
denote by $A=(a_{ij})$ the matrix of the coordinates of $T$ in
this local system, then $$ \Re_T= \sum_{i,j} a_{ij} \d x^i \otimes
\d x^j \ ,\ \pounds _T = \sum_{ij} a_{ji} \d x^i \otimes \d x^j $$
Notice that $\ker\, \Re_T = ({\rm Im}\, \pounds _T )^\vdash$, and
$\ker\, \pounds _T = ({\rm Im}\, \Re_T)^\vdash$. Consequently, for
any $m \in M$, $\dim\ \ker\, (\Re_T)_m =  \dim\  \ker\,
(\pounds _T)_m$. If $\ker\, (\Re_T)_m = \ker\, (\pounds _T)_m = 0$ for
every $m \in M$, we say that $T$ has no radical.

$\dim\ \ker\, (\Re_T)_m$ is a locally constant function on $M$. We
suppose it is constant everywhere in $M$.

\begin{assum}
$\dim\ \ker\, (\Re_T)_m$ does not depend on the point $m \in M$.
\end{assum}

Given a $2$-covariant tensor field $T$ on $M$ and a differentiable
$1$-form $\alpha \in {\mit\Omega}^1(M)$, we can consider the
following equations for a vector field $X \in \vf (M)$:
$$
\pounds (X) T = \alpha \,, \quad \Re (X) T = \alpha
$$ If $T$ has no
radical, then both equations have a single solution, since
$(\Re_T)_m$ and $(\pounds _T)_m$ are isomorphisms, for any $m \in
M$. Observe that, as in the algebraic case, given $T$, we can
define another $T'$ by means of  $T'(X,Y)  :=  T(Y,X)$. Hence the
equation $\Re (X) T = \alpha$ is the same as $\pounds (X) T' =
\alpha$.

Following from the above, henceforth we only consider the equation
$\pounds (X) T= \alpha$, and we call it a {\sl linear singular
differential system}; that is, a LSDS.

Given the LSDS $\pounds (X)T = \alpha$ on the manifold $M$,
the problem we try to solve is the following:
to find a submanifold $P_f \subset M$ and a vector field
$X \in \vf (M)$ such that
\begin{enumerate}
\item
$\pounds (X) T \feble{P_f} \alpha$ (the symbol $\feble{N}$ means an
equality which holds on the points of $N$ for any closed
submanifold $N \subset M$).
\item
$X$ is tangent to $P_f$.
\item
$P_f$ is maximal with respect to conditions $(1)$ and $(2)$.
\end{enumerate}
Observe that if $(P_f,X)$ is the solution of the problem, then the
integral curves of $X$ are tangent to $P_f$, and the tangent
vectors to these curves verify the equation at any point of the
curves. Notice that when we obtain a solution to the problem, we
can calculate the integral curves of the vector field by
analytical or numerical methods. In general the vector field $X$
is not unique.

As we have seen in section $2$, the system does not have a solution
everywhere in $M$,  but only on the points $m\in M$ satisfying
$\alpha _m \in {\rm Im}\, (\Re_T)_m$. So we have:

\begin{definition}
The set $$ P_1 = \{ m\in M\ \mid \ \alpha_m \in {\rm Im}\,
(\Re_T)_m\} $$ is called the {\rm primary constraint submanifold}
associated  to the equation $\pounds (X) T= \alpha$.
\end{definition}

It is clear that $P_1$ is the maximal subset of $M$ in which  this
equation has solution.
In other words, only the points of $M$ belonging to $P_1$
are admissible as initial conditions of the equations.
For this reason, $P_1$ is sometimes called the
{\it submanifold of initial conditions}.

\begin{assum}
$P_1$ is a closed submanifold of $M$.
\end{assum}

(Let us recall that a {\sl closed submanifold} of $M$ is a closed
subset that is itself a manifold. For instance, if
 $\coor{\xi}{1}{h}$ are independent differentiable functions on
 $M$, then the set
 $\{ m\in M\ ;\  \xi_i(m)=0\ , \ i=1,\ldots ,h\}$
 is a closed submanifold of $M$).

\begin{prop}
$P_1 = \{m \in M \ \mid \ \ker\, (\pounds _T)_m \subset \ker\,
\alpha_m \}$
\end{prop}

\paragraph{Proof.}  ($\Longrightarrow$) \quad If $m \in P_1$, then
$\alpha_m \in {\rm Im}\, (\Re_T)_m$, then, according to
proposition \ref{prop1}, we have that $\ker\, (\pounds _T)_m
\subset \ker\, \alpha_m$.

\quad \quad ($\Longleftarrow$) If $m \in M$ and $\ker\,
(\pounds _T)_m \subset \ker\, \alpha_m$ then $\alpha_m \in (\ker\,
(\pounds _T)_m )^\vdash = {\rm Im}\, (\Re_T)_m$, and therefore the
equation has a solution. \hfill$\diamondsuit$

\paragraph{Comments.} \begin{itemize}
\item
If $(U,x^i)$ is a local system on $M$,
$T = \sum a_{ij}\d x^i \otimes \d x^j$
and $\alpha = \sum \alpha_i \d x^i$, then
the local expression of the  system
$\pounds (X) T = \alpha$ is
$$
A\left( \matrix {\lambda^1 \cr \vdots \cr \lambda^n \cr} \right)
= \left( \matrix {\alpha_1 \cr \vdots \cr \alpha_n \cr} \right)
$$
where $X=\sum \lambda^i \derpar{}{x^i}$
is the vector field we are looking for
and $A$ is the matrix $(a_{ij})$.
\item
If $T$ has no radical, that is $\dim\ \ker\, (\Re_T)_m = 0$ for
every $m \in M$, then the system is compatible everywhere in $M$,
and the solution is unique. This means that $P_1 = M$ and there is
only one vector field $X$ which is a solution at every point of
$M$.
\item
If $T$ has radical, that is $\dim\ \ker\, (\Re_T)_m > 0$ for every
$m \in M$, then the system has a solution only on the points of
$P_1$. Moreover, the solution is not unique and the difference
between two solutions is in $\ker\, \Re_T := \{ X \in \vf (M)
\ \mid \ T(X,Y) = 0 \ , \ \forall Y \in \vf (M) \}$. Hence, if
$X_0$ is a solution of the system, then $X_0 + \ker\, \Re_T$ is
the set of solutions at this level of the study.
\item
Sometimes we have an LSDS on a manifold $M$, but we are only
interested in its restriction to a submanifold $M_0 \subset M$
(for instance, when dealing with singular Hamiltonian systems). In
this case, the primary constraints submanifold is $P_1 \cap M_0$.
Then, we suppose that $M_0$ is the zero set of a finite family of
functions on $M$, and the study continues in the same way.
\item
Since we have assumed that $P_1$ is a closed submanifold of $M$,
we can suppose that the vector field solutions of the system
are defined everywhere in $M$, although
they are solutions only on the points of $P_1$.
\end{itemize}

It is worth pointing out that the submanifold $P_1$
can be defined as the zero set of a family of functions.
In fact, we have:

\begin{corol}
$P_1 = \{ m \in M \ \mid \ \alpha (Z)_m = 0 \ , \ \forall Z \in
\ker\, \pounds _T \}$.

If $Z \in \ker\, \pounds _T$, then the function $\xi^{(1)} :=
\alpha (Z)$ is the {\rm primary constraint} associated to $Z$.
\end{corol}

\subsection*{Stability conditions}

Suppose that $\dim\ P_1 \not= 0$ (if this is not the case, $P_1$
is a set of isolated points and the differential system has no
solution) and let $X_0 + \ker\, \Re_T$ be the set of solutions of
the system on the submanifold $P_1$.

The vector field solutions are solutions of the system only on the
points of $P_1$. Then the integral curves of these vector fields
must be contained in $P_1$. That is, these vector fields must be
tangent to $P_1$. As a consequence of this fact, new conditions on
the solutions arise. Hence we define:

\begin{definition}
The set $$ P_2 := \{ m\in P_1\ \mid \ \exists Y \in \ker\, \Re_T \
, \ (X_0+Y)(m) \in \Tan_m P_1 \} $$ is called the {\rm first
generation secondary constraints submanifold} associated  to the
equation $\pounds (X) T= \alpha$.
\end{definition}

But the submanifold $P_1$ is defined as the zero set of a family
of functions, the primary constraints. Hence, the condition for a
vector field to be tangent to $P_1$ is that its action on this
family of functions goes to zero on the points of $P_1$. That is:
$$ P_2 = \{ m \in P_1 \ \mid \ \exists Y \in \ker\, \Re_T \ , \
(X_0+Y)_m \alpha (Z) = 0 \ , \ \forall Z \in \ker\, \pounds _T \}
$$

Now, in order to give another characterization of $P_2$, we need
to introduce some notation. We denote by $\underline{\vf (N)}$
the subset of $\vf (M)$ made of the vector fields tangent to
$N$. Then $$ \vf (N)^\perp := \{X \in \vf (M) \ \mid \ T
(Y,X) \feble{N} 0 \ , \ \forall Y \in \underline{\vf (N)} \}
$$ is the {\rm right orthogonal submodule} of $\vf (N)$ with
respect to $T$. More precisely, for any $m \in N$ we write $$
\Tan_mN^\perp := \{ u \in \Tan_mM \ \mid \ T_m(v,u)=0 \ , \
\forall v \in \Tan_mN \} $$ then the vector fields of
$\vf (N)^\perp$ are the sections of the subbundle of $\Tan
M\mid_N$ whose fibers are the subspaces $\Tan_mN^\perp$. In the
same way, we can define $$ ^\perp{\cal X}(N) := \{X \in {\cal
X}(M) \ \mid \ T (X,Y) \feble{N} 0 \ , \ \forall Y \in
\underline{\vf (N)} \} $$ which is called the {\it left
orthogonal submodule} of $\vf (N)$ with respect to $T$.

The following theorem gives a characterization of $P_2$.

\begin{teor}
$$
P_2 = \{ m \in P_1 \ \mid \ \alpha (Z)_m = 0 \ , \
\forall Z \in \vf (P_1)^\perp \}
$$
\label{teor1}
\end{teor}

\paragraph{Proof.}
Let $C = \{ m \in P_1 \ \mid \ \alpha (Z)_m = 0 \ , \
\forall Z \in \vf (P_1)^\perp \}$.
We have
\begin{enumerate}
\item
$P_2 \subset C$.

If $m \in P_2$, there exists $Y \in \ker\, \Re_T$ such that
$(X_0+Y)_m \in \Tan_mP_1$. If $Z \in \vf (P_1)^\perp$, we
have: $$ \alpha (Z)_m = (\Re_T(X_0+Y)(Z))_m = (T(X_0+Y,Z))_m =0 $$
then $m \in C$.
\item
$C \subset P_2$.

If $m \in C$ and $Z \in \vf (P_1)^\perp$, then $\alpha
(Z)_m=0$, hence we have that $\alpha_m \in (\Tan_mP_1^\perp
)^\vdash = \Re_T(\Tan_mP_1)$. Hence, the system $\pounds (v)T_m
=\alpha_m$ has solution in $\Tan_mP_1$ and therefore $m \in P_2$.
\hfill$\diamondsuit$
\end{enumerate}

\begin{definition}
If $Z \in \vf (P_1)^\perp$,
the function $\xi^{(2)} := \alpha (Z)$
is the {\rm secondary constraint} associated to $Z$.
\end{definition}

At this point we find ourselves in the same situation as at the
end of the last subsection. Hence the stability condition must be
imposed again. The procedure is iterative and we are going to
analyze the general situation.

Consider the following sequence of subsets of $M$
defined inductively:
\begin{eqnarray*}
P_0 &:=& M
\\
P_1 &:=& \{ m \in M \ \mid \ \alpha (Z)_m = 0 \ , \ \forall Z \in
\ker\, \pounds _T \}
\\
P_{i+1} &:=& \{ m \in P_i \ \mid \ \alpha (Z)_m = 0 \ , \
\forall Z \in \vf (P_i)^\perp \}
\quad (i \geq 1)\end{eqnarray*}
and the following

\begin{assum}
The subsets $P_i$ (for all $i \geq 1$)
are closed submanifolds of~$M$.
\end{assum}

\begin{teor}
The equation $\pounds (X) T = \alpha$
has solution tangent to $P_k$ if and only if
$$
\langle \vf (P_k)^\perp ,\alpha \rangle_m = 0
\ , \ \forall m \in P_k
$$
\label{teor2}
\end{teor}

\paragraph{Proof.}
($\Longrightarrow$) \quad
Let $X \in \vf (P_k)$ be a solution of the equation.
If $Z \in \vf (P_k)^\perp$ and
$m \in P_k$, we have:
$$
\alpha (Z)_m = (\pounds (X)T)(Z)_m = (T(X,Z))_m = 0
$$

\quad \quad ($\Longleftarrow$) \quad If the condition holds, and
$m \in P_k$, then we have that $\alpha_m \in
(\Tan_mP_k^\perp)^\vdash = \Re_T(\Tan_mP_k)$. Hence the equation
has solution at $m$. \hfill$\diamondsuit$

\paragraph{Comments.}
\begin{itemize}
\item
The sequence $\{ P_i \}$ stabilizes necessarily.
That is, there exists $k \geq 0$ such that
$P_k=P_{k+1}$, because $\dim\ M$ is finite.
This condition implies that theorem \ref{teor2} holds for $P_k$.
\item
If $P_k = \hbox{\rm\O}$ or $\dim\ P_k = 0$,
then the system has no solution.
\item
If $\dim\ P_k > 0$, then the system is compatible and the equation
$\pounds (X)T = \alpha$ has solution tangent to $P_k$. In this case we
call $P_k$ the {\it final constraints submanifold} and denote it
by $P_f$. If $X_0$ is a solution, then the set of solutions is
$X_0+\ker\, \Re_{T_f}$, where $$ \ker\, \Re_{T_f} := \{ Z \in
\underline{\vf (P_f)} \ \mid \ \Re_T(Z) \feble{P_f} 0 \} $$

In some cases (classical analytical mechanics,
linear circuits,...), we have that
$\ker\, \Re_{T_f} = \{ 0 \}$. Then
the solution is unique.

In other cases (relativistic mechanics, electromagnetism,...),
the solution is not unique. The non uniqueness of the
solution is called (in Physics) {\it gauge freedom},
the elements of $\ker\, \Re_{T_f}$ are the {\it gauge fields}
and two different solutions are called
{\it gauge equivalent vector fields}
(see, for instance, \cite{BK-ymtcs} for more details).
\end{itemize}

Notice that the last theorem characterizes $P_f$,
but does not give the tangent vector field $X$.
In order to apply the algorithm for obtaining the couple
$(P_f,X)$ we will proceed as follows:
\begin{itemize}
\item
We calculate a local basis for $\ker\, \Re_T$. Let $\{ Z_1,\ldots
,Z_h \}$ be this basis. Then $P_1$ is defined by $\alpha (Z_i) =
0$, for $i=1,\ldots ,h$. These are the compatibility conditions.
\item
If $X_0$ is a solution of the system on $P_1$,
then the general solution on $P_1$ is
$X = X_0 + \sum_j f_jZ_j$, with $f_j$ arbitrary functions.
\item
In order to obtain $P_2$, we apply $X$ to the functions $\alpha
(Z_i)$ and we obtain the system $$ X_0(\alpha (Z_i)) + \sum_j
f_jZ_j(\alpha (Z_i)) = 0 \quad , \quad i=1,\ldots ,h $$ The
equations with $Z_j (\alpha (Z_i))=0$ for every $j$ leads to new
constraints given by $X_0 (\alpha (Z_i))=0$. They are the
stability conditions defining $P_2$. The other equations fix
conditions on the functions $\{ f_j \}$. That is, they define a
submodule of $\ker\, \Re_T$.
\item
Now, we proceed in the same way on $P_2$,
with the remaining functions $f_j$ in the general solution
and the new constraints.
The algorithm stops when we do not obtain new constraints.
\end{itemize}

This procedure is a generalization of the one developed in
references \cite{MR-ltps} and \cite{Mu-hsc}, where the particular
cases of Hamiltonian and Lagrangian mechanics are studied; that is,
when the tensor $T$ is antisymmetric and closed under the action
of the exterior differential operator (see also Sections \ref{pp}
and \ref{hp}). We call it the {\sl Stabilization General
Algorithm}, or, in abbreviation, SGA.

\paragraph{Remark.}
  Observe that in the first step of the algorithm
  (compatibility conditions), we transform the initial implicit
  differential equation $A\dot{\bf x}-\alpha=0$ into a
  differential algebraic equation (DAE), which is given
  by the vector field  $X_0 + \sum_{j=1}^hf_jZ_j$ and the constraints
  $\langle Z_j,\alpha\rangle=0$  defining $P_1$
  ($\coor{Z}{1}{h}$ is a basis of $\ker\,\Re_T$ and
  $\coor{f}{1}{h}$ are free).
  Then we apply the algorithm (stabilization condition)
  to this DAE to get $P_f$ and the reduced equation on it.

  The so-called {\sl index} of this DAE
  (see, for instance, \cite{Hairer} or \cite{GS-98},
  and references quoted therein) coincides with the number
  of steps needed to stabilize the system, and to obtain
  $P_f$ and the reduced equation, and hence it is equal to $f-1$.

\section{Symmetries}

\subsection*{Symmetries of a singular differential system}

Let $\pounds (X)T=\alpha$ be a singular differential system on $M$,
which is assumed to be compatible, $P_f$ its final constraint
submanifold, and ${\cal S} := \{ X_0+\ker\, {\rm r}_{T_f} \}$ the
set of solutions.

\begin{definition}
A {\rm symmetry} of the system is a diffeomorphism
$\varphi \colon M \to M$
such that:
\begin{enumerate}
\item
$\varphi (P_f) = P_f$.
\item
If $X \in {\cal S}$, then $\varphi_*X \in {\cal S}$.
\end{enumerate}
\label{ss}
\end{definition}

 (The symbols $\varphi_*$ and $\varphi^*$ denote the {\sl push-forward}
  and the {\sl pull-back} defined by the diffeomorphism $\varphi$
  \cite{AM-fm}).

In the classical study of symmetries of a regular Hamiltonian
system $(M,\Omega ,h)$ (where $\Omega$ is a symplectic form and
$h$ is a smooth function called the {\it Hamiltonian} of the
system), the symmetries are considered to be symplectomorphisms of
$(M,\Omega )$; that is, diffeomorphisms $\varphi \colon M \to M$
such that $\varphi^*\Omega =\Omega$. A distinction is then made
between {\it symmetries of the system}, which verify that
$\varphi_*X_h=X_h$, (where $X_h$ is the Hamiltonian vector field
associated to $h$), and {\it symmetries of the Hamiltonian},
satisfying $\varphi^*h=h$. It is clear that every symmetry of the
Hamiltonian is a symmetry of the system, but not the converse, and
a simple example is a symplectomorphism $\varphi$ such that
$\varphi^*h=h+c$ (with $c\in{\mbox{\tenmsb R}}$), which is not a
symmetry of the Hamiltonian but a symmetry of the system (see, for
instance, \cite{AM-fm} for more information on these topics).

In the present case, the situation is different because now $M$
has no prescribed geometric structure. Thus, we have to study
methods in order to find symmetries which preserve the elements
defining our system; that is, $T$ and $\alpha$. The results thus
obtained are:

\begin{teor}
Let $\varphi \colon M \to M$ be a diffeomorphism
such that $\varphi (P_f)=P_f$.
\begin{enumerate}
\item
If $\varphi^*T \mid_{P_f}=T \mid_{P_f}$ and
$\varphi^*\alpha\mid_{P_f} =\alpha\mid_{P_f}$,
then $\varphi$ is a symmetry of the system.
\item
If $\varphi$ is a symmetry of the system and
$\varphi^*T \mid_{P_f}=T \mid_{P_f}$, then
$\varphi^*\alpha\mid_{P_f} =\alpha\mid_{P_f}$
\item
If $\varphi$ is a symmetry of the system and
$\varphi^*\alpha\mid_{P_f} =\alpha\mid_{P_f}$
then $T$ and $\varphi^*T$ give equivalent systems
(in the sense that they have the same solutions).
\end{enumerate}
\end{teor}

In order to prove these results, we require the following:

\begin{lem}
If $X \in \vf (M)$ and $\varphi \colon M \to M$
is a diffeomorphism, then
$$
(\varphi^*\circ \pounds (\varphi_*X))T = (\pounds (X)\circ\varphi^*)T
$$
\label{lemref}
\end{lem}

\paragraph{Proof.}
Let $Y \in \vf (M)$, we have:
 \begin{eqnarray*}
 [(\varphi^*\circ \pounds (\varphi_*X))T]Y &=&
 (\varphi^*(\pounds (\varphi_*X)T))Y =
 (\pounds (\varphi_*X)T)\varphi_*Y
 \\ &=&
 T(\varphi_*X,\varphi_*Y) = (\varphi^*T)(X,Y )
 \\  &=&
 [\pounds (X)(\varphi^*T)]Y =
 [(\pounds (X)\circ\varphi^*)T]Y
 \end{eqnarray*}

\paragraph{Proof of the theorem.} \begin{enumerate}
\item
Consider $X \in {\cal S}$, then
$$
\pounds (\varphi_*X)T\mid_{P_f} =
(\varphi^{*-1}\circ \pounds (X)\circ \varphi^*)T \mid_{P_f} =
\varphi^{*-1}\alpha\mid_{P_f} = \alpha\mid_{P_f}
$$
\item
If $X \in {\cal S}$, then $\pounds (X)T\mid_{P_f} = \alpha\mid_{P_f}$,
therefrom $$ \varphi^*(\pounds (X)T\mid_{P_f}) =
\varphi^*\alpha\mid_{P_f} $$ and, furthermore
 $$ \varphi^*(\pounds (X)T\mid_{P_f}) = \pounds (\varphi_*^{-1}X)\circ
\varphi^*T\mid_{P_f}
= \pounds (\varphi_*^{-1}X)T\mid_{P_f} = \alpha\mid_{P_f} $$
\item
Let $X$ be a solution of the system defined by $T$,
that is, $\pounds (X)T\mid_{P_f}=\alpha\mid_{P_f}$.
Then, we have:
$$
\pounds (X)\varphi^*T\mid_{P_f}=
\varphi^*\pounds (\varphi_*X)T\mid_{P_f}=
\varphi^*\alpha\mid_{P_f}=
\alpha\mid_{P_f}
$$
Therefore, $X$ is also a solution of the system
defined by $\varphi^*T$. But $\varphi^{-1}$ also verifies
the same properties, hence the converse is also true. \hfill$\diamondsuit$
\end{enumerate}

\subsection*{Infinitesimal symmetries}

Next we see how the above concepts can be interpreted in terms of
vector fields, which is to say the infinitesimal version of the
concept of symmetry.

\begin{definition}
A vector field $D \in \vf (M)$ is an {\rm infinitesimal symmetry}
of the system iff
\begin{enumerate}
\item
$D$ is tangent to $P_f$.
\item
If $X \in {\cal S}$ then $\Lie(D)X \in {\cal S}$; that is,
$\Lie(D)X-X \in \ker\, \Re_T$ (\/ $\Lie (D)$ denotes the Lie
derivative with respect to the vector field $D$).
\end{enumerate}
\end{definition}

Observe that this definition is equivalent to saying that,
if $\varphi_t$ is a local uniparametric group associated to $D$,
then $\varphi_t$ are symmetries in the sense of definition \ref{ss}.

In a way analogous to the above subsection,
we first have the following results:

\begin{prop}
\begin{enumerate}
\item
If $X \in {\cal S}$ and $D\in \vf (M)$ is a vector field
tangent to $P_f$ such that $\Lie(D)T=0$ and $\Lie(D)\alpha=0$,
then $\Lie(D)X\mid_{P_f} \in \ker\, \Re_T\mid_{P_f}$.
\item
If $D\in \vf (M)$ is an infinitesimal symmetry and $\Lie(D)T=0$,
then $\Lie(D)\alpha =0$.
\end{enumerate}
\end{prop}

\paragraph{Proof.} \begin{enumerate}
\item
If $X \in {\cal S}$ then $\Lie(X)T\mid_{P_f} =\alpha\mid_{P_f}$.
Therefore
 \begin{eqnarray*} 0 &=& \Lie(D)\alpha\mid_{P_f} = \Lie(D)(\pounds
(X)T)\mid_{P_f}
 \\ &=& \pounds (\Lie(D)X)T\mid_{P_f} + \pounds (X)\Lie(D)T\mid_{P_f}
  = \pounds (\Lie(D)X)T\mid_{P_f}\end{eqnarray*}
\item
If $X \in {\cal S}$ then $\Lie(X)T\mid_{P_f} =\alpha\mid_{P_f}$.
Therefore
 \begin{eqnarray*}
  0 &=& \Lie(D)\alpha\mid_{P_f} = \Lie(D)(\pounds
 (X)T)\mid_{P_f}
  \\ &=& \pounds (\Lie(D)X)T\mid_{P_f} + \pounds
 (X)\Lie(D)T\mid_{P_f}
 \end{eqnarray*} \hfill$\diamondsuit$
\end{enumerate}
\medskip

The main result in relation to this topic is the following:

\begin{prop}
Let $D\in \vf (M)$ be a vector field tangent to $P_f$ satisfying
that $\Lie(D)T\mid_{P_f}=0$ and $\Lie(D)\alpha\mid_{P_f}=0$. Then
the local uniparametric groups $\{ \varphi_t \}$ of $D$ transform
integral curves of vector field solutions of the system into
integral curves of vector field solutions.
\end{prop}

\paragraph{Proof.} \quad
Let $m \in M$ and $\gamma \colon [0,s] \to M$ be
an integral curve of a vector field solution, that is,
$\gamma (t) \in P_f$, $\forall t \in [0,s]$ and
$\pounds (\dot\gamma (t))T\mid_{\gamma (t)} = \alpha \mid_{\gamma (t)}$.

Let $\{ \varphi_t \}$ be the local uniparametric group of $D$
defined in a neighborhood of $m$. Consider now the curve $\sigma
(t) := (\varphi_t \circ \gamma )(t)$. We see that $$ \pounds
(\dot\sigma (t))T\mid_{\sigma (t)} = \alpha \mid_{\sigma (t)} $$
We have that $\dot\sigma (t) = {\varphi_t}_*\dot\gamma (t)$, and
using lemma \ref{lemref} we obtain
 \begin{eqnarray*}
  [\varphi_t^* \circ\pounds (\dot\sigma (t))]T &=&
  [\varphi_t^*\circ\pounds ({\varphi_t}_*\dot\gamma (t))]T =
\pounds (\dot\gamma (t))(\varphi_t^*T)
 \\ &=& \pounds (\dot\gamma (t))T = \alpha\mid_{\gamma (t)}
 \end{eqnarray*}
 because  $T$ is invariant by
$\varphi_t$. Then $$ \pounds (\dot\sigma (t))T =
\varphi_t^*\alpha\mid_{\gamma (t)} = \alpha\mid_{\sigma (t)} $$
because $\Lie (D)\alpha = 0$, hence $\alpha$ is invariant by
$\varphi_t$. \hfill$\diamondsuit$

\paragraph{Comments.}
\begin{itemize}
\item
The vector fields $D \in \vf (M)$ tangent to $P_f$, which
transform integral curve solutions of the system into integral
curve solutions of the system, are called {\sl gauge vector
fields} of the system.
\item
Observe that, if  $D \in \ker\, \Re_T\mid_{P_f}$, then $\alpha
(D)\mid_{P_f} = 0$, since $P_f$ is a submanifold such that
$\langle \vf (P_f)^\perp ,\alpha \rangle = 0$ and $\ker\, {\rm
r}_{T_f}\mid_{P_f} \subset \vf (P_f)^\perp$. Therefrom, if
$\alpha$ is a closed form and $D \in \ker\, {\rm
r}_{T_f}\mid_{P_f}$, then $\Lie(D)\alpha =0$. Hence, if  $D \in
\ker\, {\rm r}_{T_f}\mid_{P_f}$, $\Lie(D)\alpha =0$ and $\alpha$
is closed, then $D$ is a gauge vector field.
\item
Furthermore, if $T \in {\mit\Omega}^2(M)$;
that is, an antisymmetric tensor; and $\alpha$ is closed, then
$D \in \ker\, {\rm r}_{T_f}\mid_{P_f}$
if and only if $\Lie(D)T\mid_{P_f} = 0$,
and all the vector fields belonging to
$\ker\, {\rm r}_{T_f}\mid_{P_f}$ are gauge vector fields.
\item
In this last case, $\ker\, {\rm r}_{T_f}\mid_{P_f}$
parametrizes the set of vector fields
which are solutions of the system, and the
local uniparametric groups of these fields parametrize
the set of integral curve solutions passing through a point.
This is the case of Hamiltonian mechanics.
\end{itemize}

\section{Particular cases}

\subsection*{Presymplectic problems}
\protect\label{pp}

In these cases, the general situation is the following:
$M$ is an $n$-dimensional manifold,
$\alpha \in {\mit\Omega}^1(M)$ (is a $1$-form)
and $T$ is a closed $2$-form
(i.e., $T \in {\mit\Omega}^2(M)$ with $\d T=0$)
such that ${\rm rank}\ T < n$; that is, $T$ is not regular.

These kinds of systems have been studied
in recent years. The  problem of the
compatibility and consistency of the equations of motion
is solved by applying different but equivalent methods,
such as the {\it Presymplectic Constraint Algorithm}
\cite{GNH-pca}, \cite{GN-pls1},
the algorithmic procedure of \cite{Mu-hsc} and \cite{MR-ltps},
or the generalized algorithm of \cite{GP-ggf}.

\subsection*{Hamiltonian problems}
\protect\label{hp}

There are two cases of interest:
\begin{enumerate}
\item
{\bf Singular Hamiltonian systems}.

They can be treated as a particular case of the above, where now
$\dim\ M = n = 2k$ and $\alpha = \d h$ (at least locally) for some
$h \in {\mit\Omega}^0(M)$ called the {\it Hamiltonian function}.
\item
{\bf Regular Hamiltonian systems}.

Once again, $\dim\ M = n = 2k$ and $\alpha = \d h$ (at least
locally) for some $h \in {\mit\Omega}^0(M)$. The difference with
the singular case is that ${\rm rank}\ T = 2k$ (i.e., it is
regular). The consequence of this fact is that $\Re_T$ and ${\rm
l}_T$ are linear isomorphisms, hence the equations of motion are
compatible everywhere in $M$, and determined. This implies that
$P_f = M$ and the solution $X_h \in \vf (M)$ is unique
\cite{AM-fm}. In this case, $T$ is called a {\it symplectic form}
and $(M,T)$ is a {\it symplectic manifold}.

In addition, we have that every symmetry of the Hamiltonian is a
symmetry of the system.
\end{enumerate}

\subsection*{Second Order Differential Equations problems:
            Mechanical systems}

Next we consider the particular case in which $M=\Tan Q$ is the
tangent bundle of some $n$-dimensional manifold $Q$. Then, let
$\pi \colon \Tan Q \to Q$ be the natural projection, $J$ the
vertical endomorphism and $\Delta \in {\cal X}(\Tan Q)$ the
Liouville vector field (i.e., the vector field of dilatations
along the fibers of $\Tan Q$). If $(q^i,v^i)$ are natural
coordinates in $\Tan Q$, the local expressions of these elements
are
 $$
 J=\sum_i\d q^i\otimes\derpar{}{v^i}
 \quad ,\quad
 \Delta=\sum_i v^i\derpar{}{v^i}
 $$
 (see, for instance, \cite{Kl-evm} and \cite{Cr-83}
 for more details about these geometric concepts).

The most interesting cases are the Lagrangian systems in which $T$
is a $2$-form; that is, an antisymmetric $2$-covariant tensor
field. The standard formulation of these models deals with a
dynamical function ${\cal L} \in {\mit\Omega}^0(\Tan Q)$, called
the {\it Lagrangian function}, from which an exact $2$-form
$\omega \in {\mit\Omega}^2(\Tan Q)$ can be constructed, given by
$\omega := -\d (\d {\cal L}\circ J)$, and another function $E \in
{\mit \Omega}^0(\Tan Q)$ (the {\it energy function}), defined as
$E := {\cal L}-\Delta ({\cal L})$ (see, for instance,
\cite{Kl-evm} and \cite{Ca-tsl} for more details about all these
concepts). In this situation, the problem is the same as in the
above cases, where $\omega$ plays the role of $T$ and $\alpha = \d
E$; that is, the equation to be solved is
 \begin{equation} \inn(X)\omega = \d
E \label{eqlag} \end{equation} Nevertheless, physical and variational
considerations lead us to introduce an additional feature: we must
search for solutions satisfying the so-called {\it second order
condition}:
 \begin{equation} \Tan \pi \circ X = {\rm Id}_{\Tan Q} \label{sode}
\end{equation} which can be set equivalently as $$ J(X) = \Delta $$ A vector
field satisfying the second order condition is said to be a {\sl
holonomic vector field} or a {\sl second order differential
equation} (SODE), and is characterized by the fact that its
integral curves are canonical liftings of curves in $Q$.

If the system is regular (i.e., $\omega$ is a non-degenerate form)
then the system (\ref{eqlag}) is compatible and determined
everywhere in $\Tan Q$, and its unique solution automatically
satisfies the SODE condition (\ref{sode}) (see \cite{Ca-tsl}).

If the system is singular ($\omega$ is a degenerate form), then to
the problem of the incompatibility and inconsistency of equations
(\ref{eqlag}) we must add condition (\ref{sode}) for the
solutions. This fact introduces new constraints to those obtained
in the stabilization algorithm applied to the equations
(\ref{eqlag}), and restricts the gauge freedom (for a detailed
discussion on this problem, see \cite{MR-ltps}).

\section{Examples}

\subsection*{Example 1: Control systems}
\subsubsection*{A general situation}

First of all we analyze an example in {\sl Control theory} that
shows how the algorithm behaves, depending on different situations
which can arise.

Consider a {\sl control system} described by the equations $$
A\dot {\bf x} = B{\bf x}+C{\bf u} $$ where ${\bf u}=\{ u_i\}$
($i=1,\ldots ,m$) are the control parameters or {\sl inputs},
${\bf x}=\{ x_i\}\in{\mbox{\tenmsb R}}^n$ are the {\sl outputs},
and $A,B,C$ are constant matrices. In some cases the inputs have
the form ${\bf u}=D\dot{\bf x}$ ($D$ being another constant
matrix), and hence the equations of the system take the form
 \begin{equation} T\dot {\bf x}
= B{\bf x} \label{ecuno} \end{equation} where $T=A-CD$ is sometimes a
singular matrix.

In this case, the manifold $M$ is ${\mbox{\tenmsb R}}^n$, (with
$\{ x_i\}$ as coordinates), the 1-form $\alpha$ is $\alpha
=\sum_{i,j}b^{ij}x_j\d x_i$ (where $B=(b^{ij})$\/) and $T$
represents a constant 2-tensor in $M$. Then we have that \( X =
\sum_if_i(x_j)\derpar{}{x_i}\) and the integral curves of the
vector field satisfying the equality are the solution of the
system; that is, $\dot x_i=f_i({\bf x})$.

For simplicity, we only consider the case $n\geq 4$ and $\dim({\rm
ker}\ T)=2$. Then, let $$ Y^1=\sum_iw^1_i\derpar{}{x_i}\quad
,\quad Y^2=\sum_iw^2_i\derpar{}{x_i} $$ be a basis of ${\rm ker}\
T$ and $$ Z_1=\sum_iv^1_i\derpar{}{x_i}\quad ,\quad
Z_2=\sum_iv^2_i\derpar{}{x_i} $$ a basis of ${\rm ker}\ T^t$.
Thus, the compatibility conditions are
 \begin{equation} \alpha (Z_1) \equiv
\sum_{i,j}v^1_ib^{ij}x_j =0 \quad , \quad \alpha (Z_2) \equiv
\sum_{i,j}v^2_ib^{ij}x_j = 0 \label{ecdos} \end{equation} and, if $X_0$ is a
particular solution of the system, the general one is
 \begin{equation} X =
X_0+g_1Y^1+g_2Y^2 \label{sol} \end{equation} where, at first, $g_1,g_2$ are
arbitrary functions.

We have the following possible situations:
\begin{enumerate}
\item
Both equalities (\ref{ecdos}) hold identically.

Then the system of equations (\ref{ecuno}) {\it is compatible
everywhere in $M$ but undetermined} because its solution is
(\ref{sol}), which {\it depends on two arbitrary functions}.
\item
The first equality does not hold identically, but the second does
(or conversely). Then $$ \xi_1^{(1)}:=\alpha
(Z_1)=\sum_{i,j}v^1_ib^{ij}x_j $$ is the only primary constraint
which defines the submanifold $P_1$, where the system is
compatible and has the vector field (\ref{sol}) as solution.

Now the stability condition of the SGA algorithm must be imposed,
which leads to $$ X(\xi_1^{(1)}) \equiv
X_0(\xi_1^{(1)})+g_1Y^1(\xi_1^{(1)})+g_2Y^2(\xi_1^{(1)}) = 0 \quad
({\rm on}\ P_1\/) $$ Notice that the coefficients $Y^\alpha
(\xi_1^{(1)})$ are constant, since $Y^\alpha$ are constant
vector fields. Then we have the following possibilities: \begin{enumerate}
\item
$Y^1(\xi_1^{(1)})\not= 0$ and $Y^2(\xi_1^{(1)})\not= 0$.

In this case, one function $g$ can be expressed in terms of the
other. For instance,
\begin{eqnarray*}
 g_2\vert_{P_1}
 &=& -g_1\frac{Y^1(\xi_1^{(1)})}{Y^2(\xi_1^{(1)})}-
   \frac{X_0(\xi_1^{(1)})}{Y^2(\xi_1^{(1)})} \\
&=& -\frac{1}{\sum_{i,j}v^1_ib^{ij}w^2_j} \left(
g_1\sum_{i,j}v^1_ib^{ij}w^1_j+X_0(\sum_{i,j}v^1_ib^{ij}x_j)\right)
\end{eqnarray*}
 $P_1$ is the final constraint submanifold, and {\it the
solution is not unique} since {\it it depends on one arbitrary
 function}
 \begin{eqnarray*}
 X\vert_{P_1} &=&
 X_0+g_1Y^1 \\
 &&- \frac{1}{\sum_{i,j}v^1_ib^{ij}w^2_j} \left(
 g_1\sum_{i,j}v^1_ib^{ij}w^1_j+
 X_0(\sum_{i,j}v^1_ib^{ij}x_j)\right)Y^2
 \\ &\equiv&
X'_0+g_1\left( Y^1-
\frac{\sum_{i,j}v^1_ib^{ij}w^1_j}{\sum_{i,j}v^1_ib^{ij}w^2_j}Y^2\right)
\end{eqnarray*}
\item
$Y^1(\xi_1^{(1)}) = 0$ and $Y^2(\xi_1^{(1)})\not= 0$ (or conversely).

In this case, one function $g$ can be completely determined $$
g_2\vert_{P_1} =-\frac{X_0(\xi_1^{(1)})}{Y^2(\xi_1^{(1)})} =
-\frac{X_0(\sum_{i,j}v^1_ib^{ij}x_j)}{\sum_{i,j}v^1_ib^{ij}w^2_j}
$$ $P_1$ is the final constraint submanifold, and {\it the
solution is not unique: it depends on one arbitrary function} $$
X\vert_{P_1} =
X_0-\frac{X_0(\sum_{i,j}v^1_ib^{ij}x_j)}{\sum_{i,j}v^1_ib^{ij}w^2_j}Y^2+
g_1Y^1 \equiv X'_0+g_1Y^1 $$
\item
$Y^1(\xi_1^{(1)})=0$, $Y^2(\xi_1^{(1)})=0$.

We have two possible situations:
\begin{enumerate}
\item
$X_0(\xi_1^{(1)})\vert_{P_1} = 0$.

No arbitrary function can be determined or expressed in terms of
others. $P_1$ is the final constraint submanifold, and {\it the
solution is not unique: it depends on two arbitrary functions} $$
X\vert_{P_1} = X_0+g_1Y^1+g_2Y^2 $$
\item
$X_0(\xi_1^{(1)})\vert_{P_1} \not= 0$.

{\it We have a new constraint} $$
\xi_1^{(2)}:=X_0(\xi_1^{(1)})=X_0(\sum_{i,j}v^1_ib^{ij}x_j) $$
which, together with $\xi_1^{(1)}$, defines the submanifold $P_2$.
Now we are again in the same situation as at the beginning of item
2, and the procedure continues in the same way.
\end{enumerate} \end{enumerate}
\item
Neither equality holds identically, and the functions
$\xi^{(1)}_\alpha$ are linearly independent. Then $$
\xi_1^{(1)}:=\alpha (Z_1)=\sum_{i,j}v^1_ib^{ij}x_j \quad , \quad
\xi_2^{(1)}:=\alpha (Z_2)=\sum_{i,j}v^2_ib^{ij}x_j $$ are the primary
constraints defining the submanifold $P_1$, where the system is
compatible and has the vector field (\ref{sol}) as solution.

Now the stability condition of the SGA algorithm must be imposed,
which leads to the system of equations \begin{eqnarray*} X(\xi_1^{(1)})
&\equiv& X_0(\xi_1^{(1)})+g_1Y^1(\xi_1^{(1)})+g_2Y^2(\xi_1^{(1)})
= 0 \qquad ({\rm on}\ P_1\/)
\\
X(\xi_2^{(1)}) &\equiv&
X_0(\xi_2^{(1)})+g_1Y^1(\xi_2^{(1)})+g_2Y^2(\xi_2^{(1)}) = 0
\qquad ({\rm on}\ P_1\/)\end{eqnarray*} This system can be written in
matrix form as $$ E\left(\matrix{g_1 \cr g_2 \cr}\right) \equiv
\left(\matrix{Y^1(\xi_1^{(1)}) & Y^2(\xi_1^{(1)}) \cr
Y^1(\xi_2^{(1)}) & Y^2(\xi_2^{(1)}) \cr}\right) \left(\matrix{g_1
\cr g_2 \cr}\right) = -\left(\matrix{X_0(\xi_1^{(1)}) \cr
X_0(\xi_2^{(1)}) \cr}\right) \qquad ({\rm on}\ P_1\/) $$ and we
have the following possibilities: \begin{enumerate}
\item
${\rm rank}\ E = 2$.

Both arbitrary functions can be determined by solving the last
linear system. $P_1$ is the final constraint submanifold, and {\it
the solution is unique}.
\item
${\rm rank}\ E = 1$.

One function $g$ can be completely determined or expressed in
terms of the other, and, in general, a new compatibility condition
appears (a function which must vanish on $P_1$). If this condition
holds on $P_1$, then $P_1$ is the final constraint submanifold,
and {\it the solution is not unique: it depends on one arbitrary
function}. Otherwise, a new constraint $\xi^{(2)}_1$, defining the
new submanifold $P_2$, is obtained, and the tangency condition
must be applied to this constraint, as above.
\item
${\rm rank}\ E = 0$.

In this case, both functions are the compatibility conditions for
the system, and we are in the same situation as at the beginning
of the procedure, except that now the vanishing of these functions
must be studied on the submanifold $P_1$. \end{enumerate} \end{enumerate}

\paragraph{Remarks:}
\begin{itemize}
\item
All the submanifolds and constraints appearing are linear.
\item
All the steps in the algorithm can be implemented using the Gauss
method, since all the systems of equations that appear are linear.
\item
The algorithm ends when we arrive at one of the situations marked
in the items 1, 2a, 2b, 2c(i), 3a, and in one of the situations of
3b, or when ${\rm dim}\ P_i =0$. Only in this last case does the
problem have no dynamically consistent solution. \end{itemize}

\subsubsection*{A particular case}

As a particular case of this kind of situation, consider the
following system of equations: \begin{eqnarray*} \dot x_1 &=& A_{11}x_1 +
A_{12}x_2 + u_1
\\
0 &=& A_{21}x_1 + A_{22}x_2 + u_2\end{eqnarray*} where the $A_{ij}$ and
$u_i$ are constants. This kind of system appears in many
technical applications. For instance, when subsystems with widely
separated natural frequencies are coupled (such as in the
modeling of parasitic elements in a system, or when an
electrical generator is connected to an electrical transmission
network). Actually, this is a simplified model of a more general
case in which $x_i$ are vectors and $A_{ij}$ matrices
\cite{VLK-gss}.

The manifold $M$ is ${\mbox{\tenmsb R}}^2$ coordinated by $\{
x_1,x_2 \}$. The general form of the vector field solution will be
$$ X = f_1(x)\derpar{}{x_1} + f_2(x)\derpar{}{x_2} $$ The tensor
$T$ is symmetric, again: $$ T = \d x_1 \otimes \d x_1 $$ that is,
its associated matrix is $$ \Re_T = {\rm l}_T = \left(\matrix{ 1
& 0 \cr 0 & 0 \cr} \right) $$ and the $1$-form $\alpha$ is $$
\alpha = (A_{11}x_1+A_{12}x_2+u_1)\d x_1 +
(A_{21}x_1+A_{22}x_2+u_2)\d x_2 $$

A basis of $\ker\, T$ is made up of the vector field
$$
Z_1 = \derpar{}{x_2}
$$
Now, the primary constraint defining $P_1$ is
$$
\xi_1^{(1)} := \alpha (Z_1) = A_{21}x_1+A_{22}x_2+u_2
$$
and the solution on the points of $P_1$ is
$$
X\vert_{P_1} = (A_{11}x_1+A_{12}x_2+u_1)\derpar{}{x_1} + f_2\derpar{}{x_2}
$$

Using the SGA, we obtain
\begin{equation}
0 = X(\xi_1^{(1)})\vert_{P_1} = A_{21}(A_{11}x_1+A_{12}x_2+u_1)+A_{22}f_2
\label{sex1}
\end{equation}
and the final solution depends on the value of the coefficients $A_{ij}$.
So we have the following options:
\begin{enumerate}
\item
$A_{22} \not= 0$:
then equation (\ref{sex1}) enables us to determine the arbitrary function:
$$
f_2\vert_{P_1} = -\frac{A_{21}}{A_{22}}(A_{11}x_1+A_{12}x_2+u_1)
$$
and the final solution is
$$
X\vert_{P_1} = (A_{11}x_1+A_{12}x_2+u_1)(\derpar{}{x_1}
             - \frac{A_{21}}{A_{22}}\derpar{}{x_2})
$$
Summarizing, the system to be solved has been reduced to
\begin{eqnarray*}
\dot x_1 &=& A_{11}x_1 + A_{12}x_2 + u_1
\\
\dot x_2 &=& -\frac{A_{21}}{A_{22}}(A_{11}x_1 + A_{12}x_2 + u_1)
\end{eqnarray*}
on the submanifold defined by
$$
0 = A_{21}x_1+A_{22}x_2+u_2
$$
The solution of this system is unique (there is no gauge freedom).
\item
$A_{22} = 0$:
in this case  we have:
\begin{enumerate}
\item
If $A_{21} = 0$
(and this implies $u_2 = 0$ in order for the system to be compatible):
then equation $\alpha (Z_1) = 0$ is satisfied
everywhere in $M$ and the solution is
$$
X = (A_{11}x_1+A_{12}x_2+u_1)\derpar{}{x_1} + f_2 \derpar{}{x_2}
$$
In other words, there is gauge freedom.
 From the physical point of view, this means that
the coordinate $x_2$ is an ignorable degree of freedom.
\item
If $A_{21} \neq 0$: then
equation (\ref{sex1}) gives the secondary constraint
$$
\xi_2^{(2)} = A_{11}x_1+A_{12}x_2+u_1
$$
Once again, we have two possible cases:
\begin{enumerate}
\item
$A_{12}=0$: then the constraints
$\xi_1^{(1)}$ and $\xi_2^{(2)}$
define two parallel lines in $M$, that is, $P_f = \hbox{\rm\O}$.
\item
$A_{12} \neq 0$: then the constraints
$\xi_1^{(1)}$ and $\xi_2^{(2)}$
define $P_2$, which is a single point.
Therefore, this is another case with no solution.
\end{enumerate}
\end{enumerate}
\end{enumerate}

\subsection*{Example 2: Sliding control}

\subsubsection*{Single input systems}

Once again, in the framework of {\sl Control theory}, a particular
problem which often arises is the following: let $$ \dot{\bf x} =
F+Gu $$ be a system of differential equations in
$U\subset{\mbox{\tenmsb R}}^n$, where $\{ x_i\}$ ($i=1,\ldots n$)
are the coordinates, $u\colon U\to{\mbox{\tenmsb R}}$ is the {\sl
input} and $$ \dot{\bf x}=\sum_i\dot x_i\derpar{}{x_i} \quad ,
\quad F=\sum_if_i({\bf x})\derpar{}{x_i} \quad , \quad
G=\sum_ig_i({\bf x})\derpar{}{x_i} $$ are vector fields in $U$.
Then the question is to seek an input $u$ such that the evolution
of the system is constrained to be in a submanifold $$ S\equiv\{
{\bf x}\in U\ \vert\ \xi^{(1)}({\bf x})=0\} $$ where
$\xi^{(1)}\colon U\to{\mbox{\tenmsb R}}$ is a differentiable
function satisfying $\nabla \xi^{(1)}({\bf x})\not= 0$ for every
${\bf x}\in U$.

The study of this problem is equivalent to solving the following
singular system in $M\subset{\mbox{\tenmsb R}}^{n+1}$ (with $\{
x_i,u\}$ as coordinates) $$ \left(\matrix{ {\rm Id}_n & 0 \cr 0 &
0 \cr} \right) \left(\matrix{ \dot{\bf x} \cr \dot u \cr} \right)
= \left(\matrix{ F+Gu \cr \xi^{(1)} \cr} \right) $$ Now we apply
the SGA as indicated in the above example. We first have that the
solution on $S$ is $$
X=\sum_i(f_i+g_iu)\derpar{}{x_i}+\gamma\derpar{}{u} \equiv
F+uG+\gamma\derpar{}{u} $$ Then the stability condition
$X(\xi^{(1)})=0$ (on $S$) means that the evolution of the system
must be in $S$. Thus there are two options: \begin{enumerate}
\item
If $G(\xi^{(1)})\not= 0$ (on $S$), then it is said that the system
verifies the {\sl transversality condition}. In this case $$
F(\xi^{(1)})+uG(\xi^{(1)})=0 \quad\Leftrightarrow\quad
u=-\frac{F(\xi^{(1)})}{G(\xi^{(1)})}\quad {\rm (on\ S)} $$ and we
obtain the so-called {\sl equivalent control method} in the study
of the {\sl sliding control}.
\item
If $G(\xi^{(1)})=0$ (on $S$) then $\xi^{(2)}\equiv F(\xi^{(1)})$
is a new constraint, and the stabilization algorithm leads either
to the new condition
 \begin{eqnarray*}
 (F+uG)(F(\xi^{(1)}))=F(F(\xi^{(1)}))+u[G,F](\xi^{(1)})=0
 \\
 \quad\Leftrightarrow\quad
 u=-\frac{F(F(\xi^{(1)}))}{[G,F](\xi^{(1)})}\quad\quad {\rm (on\ S)}
 \end{eqnarray*}
if $[G,F](\xi^{(1)})\not= 0$ (on $S$), or to another constraint
$\xi^{(2)}\equiv F(F(\xi^{(1)}))$, and so on. \end{enumerate}

Observe that, with this method, the problem can be solved even
though the transversality condition does not hold. Compare with
\cite{Sira} and the method of \textsl{equivalent control} for the
solution of this kind of problem.

\subsubsection*{Multiple input systems}

We now suppose that the control system has more than one input. In
this case the system of differential equations is
 \begin{equation}
\dot{\bf x} = F+\sum_jG^ju_j \label{ecc1}
 \end{equation}
  defined in
$U\subset{\mbox{\tenmsb R}}^n$. As above, $\{ x_i\}$ ($i=1,\ldots
,n$) are the coordinates, $u_j\colon U\to{\mbox{\tenmsb R}}$
($j=1,\ldots ,m$) are the inputs, and 
$$ \dot{\bf x}=\sum_i\dot
x_i\derpar{}{x_i} \quad , \quad F=\sum_if_i({\bf x})\derpar{}{x_i}
\quad , \quad G^j=\sum_ig^j_i({\bf x})\derpar{}{x_i} $$ are vector
fields in $U$. Inputs $u_j$ must now be found such that the
evolution of the system is constrained to an $(n-m)$-dimensional
submanifold $$ S\equiv\{ {\bf x}\in U\ \vert\ \xi^{(1)}_j({\bf
x})=0\} $$ where $\xi_j^{(1)}\colon U\to{\mbox{\tenmsb R}}$ are
independent differentiable functions for every ${\bf x}\in U$.

Once again, the study of the problem is equivalent to solving the
following singular system in $M\subset{\mbox{\tenmsb R}}^{n+m}$
(with $\{ x_i,u_j\}$ as coordinates) $$ \left(\matrix{ {\rm Id}_n
& 0 \cr 0 & (0)_m \cr} \right) \left(\matrix{ \dot{\bf x} \cr
\dot{\bf u} \cr} \right) = \left(\matrix{ F+\sum_jG^ju_j \cr {\bf
\xi}^{(1)} \cr} \right) $$ where ${\bf u}=(u_1,\ldots ,u_m)$ and
${\bf \xi}^{(1)}=(\xi^{(1)}_1,\ldots ,\xi^{(1)}_m)$. The solution
on $S$ is $$
X=\sum_i(f_i+\sum_jg_i^ju_j)\derpar{}{x_i}+\sum_j\gamma_j\derpar{}{u_j}
\equiv F+\sum_ju_jG^j+\sum_j\gamma_j\derpar{}{u_j} $$ Now the
stability conditions are $X(\xi^{(1)}_j)=0$ (on $S$) and several
options exist: \begin{enumerate}
\item
If the matrix $G^j(\xi^{(1)}_k)$ ($j,k=1,\ldots ,m$) has rank
equal to $m$, then all the functions $u_j$ can be determined, no
new constraints appear and the procedure ends.
\item
If $0\leq{\rm rank}\ G^j(\xi^{(1)}_k)<m$, then only some (or none)
of the functions $u_j$ can be determined; in addition, new
constraints arise. Therefore, the procedure follows in an
iterative way. \end{enumerate}

\subsubsection*{A particular case}

 As a particular case, consider the following control system:
 a {\sl telescopic robotic arm} in a horizontal plane
 which moves following a determined trajectory,
 that is, a {\sl tracking problem}
 (see \cite{Ma-pfc} for more details on the study of this system).

 The robot is made of two arms of length $l$:
 the first, with mass $m_1$, has one fixed end,
 and the other, with mass $m_2$, slides inside of the first.
 There is a motor in the fixed end which makes the robot
 turn around this point, and another motor, with mass $m$
 and a rotor of radius $R$, at the other end of the first arm,
 which makes the second arm slide. The robot must
 carry a mass $m_0$, placed on the outer end of the
 second arm, from one point to another one along a fixed curve.
 The problem consists in determining the torques
 $\tau_1,\tau_2$ of both motors.

 We take the origin at the fixed end. Then
 $\varphi$ denotes the angle swept by the robot
 and $x$ the length of the second arm which emerges from the first one.
 Then, the dynamical equations are
 \begin{eqnarray*}
 \ddot\varphi &=&
 \frac{\tau_1-(2(m_0+m_2)x+(2m_0+m_2)l) \dot\varphi\dot x}
 {I+(m_0+m_2)x^2+(2m_0+m_2)l x}
 \\
  \ddot x &=& \frac{1}{m_0+m_2}\left(
\frac{\tau_2}{R}+\dot\varphi^2(m_2(x+\frac{l}{2})+m_0(x+l))\right)
\end{eqnarray*}
 (where \(I=\left( m+m_0+\frac{m_1+m_2}{3}\right)l^2\) ).
  This system of second order differential equations can be
  transformed into one of first order by adding the variables
  $\omega$, $v$, and the equations
  $$
 \dot\varphi=\omega \quad , \quad \dot x=v
  $$
  As a trajectory, we take an arc of spiral whose implicit equation is

 \begin{equation}
 l+x-\varphi=0 \label{ligad1}
 \end{equation}

 The new system can be expressed in the form (\ref{ecc1}) (in
 $U\subset{\mbox{\tenmsb R}}^4$),  where $\varphi,\omega,x,v$ are the variables,
 $\tau_1,\tau_2$ are the inputs, and
 \begin{eqnarray*}
 F&=&\omega\derpar{}{\varphi}-
 \frac{(2(m_0+m_2)x+(2m_0+m_2)l) \omega v}
  {I+(m_0+m_2)x^2+(2m_0+m_2)l x}
 \, \derpar{}{\omega} 
 \\ & & +
  v\derpar{}{x}+
  \frac{\omega^2(m_2(x+\frac{l}{2})+m_0(x+l))}{m_0+m_2}\, \derpar{}{v}
\\
 G^1&=&
  \frac{1}{I+(m_0+m_2)x^2+(2m_0+m_2)l x}\, \derpar{}{\omega}
  \\
  G^2 &=& \frac{1}{R(m_0+m_2)}\,\derpar{}{v}\end{eqnarray*}
and subjected to the constraint (\ref{ligad1}).

 Now we write the system in the form of equation $\pounds (X) T =
\alpha$, in $M\subset{\mbox{\tenmsb R}}^6$, which has
 $\{\varphi,\omega,x,v,\tau_1,\tau_2\}$ as coordinates.
 Hence, the tensor $T$ and the $1$-form $\alpha$ are
  \begin{eqnarray*}
   T &=& \d\varphi\otimes\d\varphi +\d\omega\otimes\d\omega +
   \d x\otimes\d x +\d v\otimes\d v
\\
\alpha &=& \omega\d\varphi+
\frac{\tau_1-(2(m_0+m_2)x+(2m_0+m_2)l)\omega v}
 {I+(m_0+m_2)x^2+(2m_0+m_2)l x}\d \omega +  v\d x 
 \\ & &
 +\frac{1}{m_0+m_2}\left(\frac{\tau_2}{R}+
 \omega^2(m_2(x+\frac{l}{2})+m_0(x+l))\right)\d v +
 (l+x-\varphi)\d\tau_1\end{eqnarray*}
 that is, the matrix form of the system is
 $$
  \left(\matrix{
 1 & 0 & 0 & 0 & 0 & 0 \cr 0 & 1 & 0 & 0 & 0 & 0\cr
 0 & 0 & 1 & 0 & 0 & 0 \cr 0 & 0 & 0 & 1 & 0 & 0 \cr
 0 & 0 & 0 & 0 & 0 & 0 \cr 0 & 0 & 0 & 0 & 0 & 0 \cr} \right)
  \left(\matrix{
\dot\varphi \cr \dot\omega \cr \dot x \cr \dot v \cr \dot\tau_1
\cr \dot\tau_2 \cr} \right)
=
\left(\matrix{ \omega \cr
\frac{\tau_1-(2(m_0+m_2)x+(2m_0+m_2)l)\omega v}
 {I+(m_0+m_2)x^2+(2m_0+m_2)l x} \cr v\cr
 \frac{\frac{\tau_2}{R}+
 \omega^2(m_2(x+\frac{l}{2})+m_0(x+l))}{m_0+m_2} \cr
 l+x-\varphi \cr 0 \cr} \right)
  $$
 As $\ker\, T$ is spanned by the vector fields
 \(\derpar{}{\tau_1},\derpar{}{\tau_2}\) ,
  the compatibility condition gives the constraint
 $$
 \xi^{(1)}:= l+x-\varphi =0
 $$
  which defines $P_1\hookrightarrow M$. Then, the vector field
 solution on $P_1$ is
  \begin{eqnarray}
 X &=&\omega\derpar{}{\varphi}+
 \frac{\tau_1-(2(m_0+m_2)x+(2m_0+m_2)l)\omega v}
 {I+(m_0+m_2)x^2+(2m_0+m_2)l x}\, \derpar{}{\omega} + v\derpar{}{x}
  \label{solucvf}
 \\ & &
 +\frac{1}{m_0+m_2}\left(\frac{\tau_2}{R}+
 \omega^2(m_2(x+\frac{l}{2})+m_0(x+l))\right)\derpar{}{v} 
 \nonumber \\ 
 & &  +f_1\derpar{}{\tau_1}+f_2\derpar{}{\tau_2} \nonumber
  \end{eqnarray}
 Next, we have to impose (in an iterative way) the stability
 condition which enables us to obtain a sequence of submanifolds
 $P_1\hookleftarrow P_2\hookleftarrow P_3$
 defined by the constraints
 \begin{eqnarray*}
 \xi^{(2)} &:=& X(\xi^{(1)})=v-\omega
\\
 \xi^{(3)} &:=& X(\xi^{(2)})=
 \frac{\frac{\tau_2}{R}+
 \omega^2[m_2(x+\frac{l}{2})+m_0(x+l)]}{m_0+m_2} -
 \\ & &
  \frac{\tau_1-(2(m_0+m_2)x+(2m_0+m_2)l)\omega v}
 {I+(m_0+m_2)x^2+(2m_0+m_2)l x}
\end{eqnarray*}
  and a relation between the arbitrary functions $f_1,f_2$
 \begin{eqnarray*}
 0 &=& X(\xi^{(3)})
 \\ &=&
 \frac{2\omega [m_2(x+\frac{l}{2})+m_0(x+l)]
 [\tau_1-(2(m_0+m_2)x+(2m_0+m_2)l)\omega v]}
 {(m_0+m_2)[I+(m_0+m_2)x^2+(2m_0+m_2)l x]} +
 \\ & &
  \frac{[\tau_1-(2(m_0+m_2)x+(2m_0+m_2)l)\omega v]
 [2(m_0+m_2)x+(2m_0+m_2)l]v}
 {(I+(m_0+m_2)x^2+(2m_0+m_2)l x)^2}
  \\& &
 + v\omega^2+
 \frac{2\omega v^2(m_0+m_2)}{I+(m_0+m_2)x^2+(2m_0+m_2)l x}+
 \\ & &
 \frac{v[2x(m_0+m_2)+(2m_0+m_2)l]
 [\tau_1-(2(m_0+m_2)x+(2m_0+m_2)l)\omega v]}
 {(I+(m_0+m_2)x^2+(2m_0+m_2)l x)^2}
 \\ & &
 + \frac{[\frac{\tau_2}{R}+\omega^2(m_2(x+\frac{l}{2})+m_0(x+l))]
 [2(m_0+m_2)x+(2m_0+m_2)l]\omega}
 {(m_0+m_2)(I+(m_0+m_2)x^2+(2m_0+m_2)l x)}
 \\ & &
 -\frac{f_1}{I+(m_0+m_2)x^2+(2m_0+m_2)l x}+\frac{f_2}{R(m_0+m_2)}
 \end{eqnarray*}
 Thus, the vector field solution on $P_3$ is (\ref{solucvf}),
 where the variables and the arbitrary functions are
 related by all the above relations.
 So we have the (regular) system of differential equations
  \begin{eqnarray*}
\dot\varphi &=&\omega
\\
\dot\omega &=&
 \frac{\tau_1-(2(m_0+m_2)x+(2m_0+m_2)l)\omega v}
 {I+(m_0+m_2)x^2+(2m_0+m_2)l x}
 \\
 \dot x&=& v
 \\
 \dot v &=& \frac{1}{m_0+m_2}\left(
 \frac{\tau_2}{R}+\dot\varphi^2(m_2(x+\frac{l}{2})+m_0(x+l))\right)
 \\
 \dot\tau_1 &=& f_1
 \\
 \dot \tau_2&=&
  \left(
  \frac{2\omega [m_2(x+\frac{l}{2})+m_0(x+l)]
  [\tau_1-(2(m_0+m_2)x+(2m_0+m_2)l)\omega v]}
 {(m_0+m_2)[I+(m_0+m_2)x^2+(2m_0+m_2)l x]}+ \right.
 \\ & &
 \frac{[\tau_1-(2(m_0+m_2)x+(2m_0+m_2)l)\omega v]
  [2(m_0+m_2)x+(2m_0+m_2)l]v}
 {(I+(m_0+m_2)x^2+(2m_0+m_2)l x)^2}
  \\& &
   + v\omega^2+
 \frac{2\omega v^2(m_0+m_2)}{I+(m_0+m_2)x^2+(2m_0+m_2)l x}+
 \\ & &
 \frac{v[2x(m_0+m_2)+(2m_0+m_2)l]
 [\tau_1-(2(m_0+m_2)x+(2m_0+m_2)l)\omega v]}
 {(I+(m_0+m_2)x^2+(2m_0+m_2)l x)^2}
 \\ & &
 + \frac{[\omega^2(m_2(x+\frac{l}{2})+m_0(x+l))]
 [2(m_0+m_2)x+(2m_0+m_2)l]\omega}
 {(m_0+m_2)(I+(m_0+m_2)x^2+(2m_0+m_2)l x)}
 \\ & &
 \left.-\frac{f_1}{I+(m_0+m_2)x^2+(2m_0+m_2)l x}
 \right)(-R(m_0+m_2))
 \end{eqnarray*}
  Observe that the inputs  $\tau_1,\tau_2$ are not determined
 (their evolution depends on an arbitrary function).
 At this point some criteria can be imposed for
 tracking the trajectory in a predefined way, for instance,
 minimizing the cost (for going from one point to another in
 a given time).
 Of course, the integral curves of $X$ are on the surface $P_3$, and,
  hence, on $P_1$.

\section{Conclusions}

The goal of this work is to give a relatively simple geometric
framework which allows us to describe systems of differential
equations
 $$ A({\bf x})\dot {\bf x} = \alpha({\bf x}) $$
  (where $A$ is a singular matrix which represents
  a $2$-covariant tensor),
as well as solve the problems of incompatibility and inconsistency
of these equations.

Our treatment enables us to overcome
the difficulties arising from other analytic and algebraic
procedures previously developed for these systems.
It is important to point out that the
geometric framework here developed is not as general
as the one in reference \cite{GP-ggf}, but it is simpler.
Both of them are equivalent when
they are used for describing the same system.
In addition, in our treatment we pay special attention to the study
of the symmetries of these systems and we give an accurate description
of the algorithm.

It also represents an improvement on the other geometric
treatments cited in the introduction, since we have developed an
algorithm which solves the above mentioned problems of
incompatibility and inconsistency. In the most interesting cases,
the final result of the algorithm is a submanifold (of the space
of states where the system is defined) and a vector field solution
tangent to this submanifold, whose integral curves are the
trajectories of the system. Consequently, the restriction of the
system of differential equations to the submanifold found can be
integrated by analytic or numerical methods.

In general, the vector field solution is not unique.
In fact, there are two possibilities:
\begin{enumerate}
\item
If the singularity of the initial system of differential equations
arises from a non suitable choice of the variables
(that is, the initial phase space is too large in order to
describe the real degrees of freedom of the problem),
then in the final constraint submanifold the system
has a single solution.
\item
If the singularity of the system is a consequence of the existence
of a certain kind of internal symmetry, then the final system of
equations can be undetermined; that is, the solution of the system
is not unique. This means that for every point in the final
constraint submanifold which is taken as an initial condition, the
evolution of the system is not determined because a multiplicity
of integral curves (of different vector fields solution) pass
through it. This is known as the {\sl gauge freedom} (in the
physical literature). The question of removing this ambiguity has
already been studied for some special cases (see, for instance,
\cite{MW-74}). \end{enumerate}

Another essential point to which we pay special attention is the
study of the symmetries of these systems. This is a subject which
has not previously been treated (at least geometrically), and we
believe our analysis is enlightening.

An interesting subject might be the study of {\it non-autonomous
singular differential equations}; that is, those of the form:
 $$ A({\bf x},t)\dot {\bf x} = \alpha({\bf x},t) $$
 As is known, these systems can be considered as autonomous by
 adding the equation $t'=1$. It is obvious that this equation
 remains unchanged throughout the algorithmic procedure, so the method
 is directly applicable to this kind of system. Observe that the
 constraints may depend on time.

 We further believe that the analysis of {\it second order
singular differential equations} is a subject of interest.
Finally, as we have remarked in the examples, problems on {\sl
Control theory} (such as those related to {\sl sliding control}
and others) could be analyzed in this way.

\paragraph{Acknowledgments}
We acknowledge the financial support of the CICYT project TAP97-0969-C03-01. We
thank Mr. Jeff Palmer for the correction of the English version of
the manuscript.

\begin{thebibliography}{00}

\bibitem{AM-fm}
{\sc R. Abraham, J.E. Marsden},
{\sl Foundations of Mechanics\/} (2nd ed.),
     Addison-Wesley, Reading, (1978).

\bibitem{BG-tps}
{\sc P.G. Bergmann, I. Goldberg},
``Transformations in phase space and Dirac brackets'',
{\sl Phys. Rev} {\bf 98} (1955) 531-538.

\bibitem{BK-ymtcs}
{\sc M.J. Bergvelt, E.A. de Kerf}, ``Yang-Mills theories as
constrained Hamiltonian systems'', {\sl Physica A} {\bf 139}
(1986) 101-124.

\bibitem{Ca-80}
{\sc S.L. Campbell},
{\sl Singular systems of differential equations},
Research Notes in Mathematics,
Pitman Advanced Publishing Program, San Francisco 1980.

\bibitem{Ca-tsl}
{\sc J.F. Cari\~nena},
``Theory of singular Lagrangians'',
{\sl Fortschr. Phys.} {\bf 38} (1990) 641-679.

\bibitem{CO-88}
{\sc L.O. Chua, H. Oka},
``Normal Forms for Constrained Nonlinear Differential Equations.
Part I: Theory'',
{\sl IEEE Trans. Circ. Syst.} {\bf 35}(7) (1988) 881-901.

\bibitem{Co-84}
{\sc D. Cobb},
``Controllability, Observability and Duality in Singular Systems'',
{\sl IEEE Trans. Aut. Control} {\bf AC-29}(12) (1984) 1076-1082.

 \bibitem{Cr-83}
 {\sc M. Crampin},
 ``Tangent Bundle Geometry for Lagrangian Dynamics'',
 {\sl J. Phys A: Math. Gen.} {\bf 16} (1983) 2705-2715.

\bibitem{DD-dn}
{\sc A. Dervisoglu, C.A. Desoer},
``Degenerate Networks and Minimal Differential Equations'',
{\sl I.E.E.E. Trans. Circ. Syst.}
{\bf Cas 22}(10) (1975) 769-775.

\bibitem{Di-lqm}
{\sc P.A.M. Dirac},
{\sl Lectures on Quantum Mechanics},
Yeshiva Univ., New York, 1964.

\bibitem{Di-rmp}
{\sc P.A.M. Dirac},
{\sl Rev. Mod. Phys.}
{\bf 21} (1949) 392.

\bibitem{Go-cm}
{\sc H. Goldstein},
{\sl Classical Mechanics},
Addison-Wesley, Reading, Mass. 1950.

\bibitem{GS-98}
{\sc B.W. Gordon, Sheng Liu},
 ``A Singular Perturbation Approach for Modeling
 Differential-Algebraic Systems'', {\sl J. Dynamic Systems,
 Measurement and Control} {\bf 120} (1998) 541-545.

\bibitem{Go-tesis}
{\sc M.J. Gotay},
{\sl Presymplectic manifolds, Geometric constraint theory
     and the Dirac-Bergmann theory of constraints},
Ph. D. Thesis, Univ Maryland, 1979.

\bibitem{GNH-pca}
{\sc M.J. Gotay, J.M. Nester, G Hinds},
``Presymplectic manifolds and the Dirac-Bergmann theory of constraints'',
{\sl J. Math. Phys.} {\bf 27}  (1978) 2388-2399.

\bibitem{GN-pls1}
{\sc M.J. Gotay, J.M. Nester},
``Presymplectic Lagrangian systems I:
  the constraint algorithm and the equivalence theorem'',
{\sl Ann. Inst. H. Poincar\'e A} {\bf 30}  (1979) 129-142.

\bibitem{GN-pls2}
{\sc M.J. Gotay, J.M. Nester},
``Presymplectic Lagrangian systems II: the second-order equation problem'',
{\sl Ann. Inst. H. Poincar\'e A} {\bf 32}  (1980) 1-13.

\bibitem{GMR-95}
{\sc X. Gr\`acia, M.C. Mu\~noz-Lecanda, N. Rom\'an-Roy},
``Singular Systems: Their Origins, General Features and Non-numerical
  Solutions''.
{\sl Preprints of the IFAC Conference System Structure and Control},
Ed. M. Gugliemi-IFAC (1995) 37-42.

\bibitem{GPR-hols}
{\sc X. Gr\`acia, J.M. Pons, N. Rom\'an-Roy },
``Higher-order Lagrangian systems:
  Geometric structures, dynamics and constraints'',
{\sl J. Math. Phys.}
{\bf 32} (1991), 2744-2763.

\bibitem{GP-ggf}
{\sc X. Gr\`acia, J.M. Pons},
``A generalized geometric framework for constrained systems'',
{\sl Dif. Geom. Appl.} {\bf 2} (1992) 223-247.

\bibitem{HB-84}
{\sc B.C. Haggman, P.P. Bryant},
``Solutions of Singular Constrained Differential Equations:
A Generalization of Circuits Containing Capacitor-Only Loops and
Inductor-Only Cutsets'',
{\sl IEEE Trans. Circ. Syst.}
{\bf CAS-31}(12) (1984) 1015-1025.

\bibitem{HLR-85}
{\sc E. Hairer, C. Lubich, M. Roche},
{\sl The Numerical Solution of Differential-Algebraic Systems
by Runge-Kutta Methods},
Lecture Notes in Mathematics {\bf 1409},
Springer-Verlag, Berlin 1989.

\bibitem{Hairer}
{\sc E. Hairer, G. Wanner}, {\sl Solving Ordinary Differential
Equations, II\/}, Springer-Verlag, Berlin (1991).

\bibitem{Kl-evm}
{\sc J. Klein}, ``Espaces variationneles et Mecanique'', {\sl Ann.
Inst. Fourier Grenoble} {\bf 12} (1962) 1-124.

\bibitem{MMT-92}
{\sc G. Marmo, G. Mendella, W.M. Tulczyjew},
``Symmetries and constant of the motion for dynamics in implicit form'',
{\sl Ann. Inst. H. Poincar\'e A} {\bf 57}(2) (1992) 147-166.

\bibitem{MMT-97}
{\sc G. Marmo, G. Mendella, W.M. Tulczyjew},
``Constrained Hamiltonian systems as implicit differential equations'',
{\sl J. Phys. A} {\bf 30}(1) (1997) 277-293.

\bibitem{MW-74}
{\sc J.E. Marsden, A. Weinstein},
``Reduction of symplectic manifolds with symmetry'',
{\sl Rep. Math. Phys.} {\bf 5} (1974) 121-130.

\bibitem{Ma-pfc}
 {\sc R. Matute-Ramos},
 {\sl Seguimiento de Trayectorias Planas por un Brazo extensible
 mediante Ecuaciones \'Algebro-Diferenciales},
 PFC MAT-UPC (1998).

\bibitem{MMT-95}
{\sc G. Mendella, G. Marmo, W.M. Tulczyjew},
``Integrability of implicit differential equations'',
{\sl J. Phys. A: Math. Gen.} {\bf 28} (1995) 149-163.

\bibitem{Mu-hsc}
{\sc M.C. Mu\~noz-Lecanda},
``Hamiltonian systems with constraints: a geometric approach''.
{\sl Int. J. Theor. Phys.} {\bf 28} (11) (1989) 1405-1417.

\bibitem{MR-ltps}
{\sc M.C. Mu\~noz, N. Rom\'an Roy},
``Lagrangian theory for presymplectic systems'',
{\sl Ann. Inst. H. Poincar\'e: Phys. Th\`eor.} {\bf 57}(1) (1992) 27-45.

\bibitem{RR-91}
{\sc P.J. Rabier, W.C. Rheinboldt},
``A General Existence and Uniqueness Theory for
Implicit Differential-Algebraic Equations'',
{\sl Diff. Int. Eqs.} {\bf 4} (1991) 563-582.

\bibitem{RR-94}
{\sc P.J. Rabier, W.C. Rheinboldt},
``A Geometric Treatment of Implicit Differential-Algebraic Equations'',
{\sl J. Diff. Eqs.} {\bf 109} (1994) 110-146.

\bibitem{Re-84}
{\sc W.C. Rheinboldt},
``Differential-Algebraic Systems as Differential Equations on Manifolds'',
{\sl Math. Comp.} {\bf 43}(168) (1984) 473-482.

\bibitem{SD-81}
{\sc S. Shankar-Sastry, C.A. Desoer},
``Jump Behavior of Circuits and Systems'',
{\sl IEEE Trans. Circ. Syst.} {\bf CAS-28}(12) (1981) 1109-1124.

\bibitem{Sira}
\textsc{H. Sira-Ram\'\i rez}, ``Differential geometric methods in
variable structure control'', \textsl{Int. J. Control}
\textbf{48}(4) (1988) 1359-1390.

\bibitem{Ta-75}
{\sc F. Takens},
``Implicit differential equations: Some open problems'',
in {\sl Singularit\'es d'Applications Differentiables},
Lecture Notes in Mathematics {\bf 535},
Springer, Berlin 1975.

\bibitem{Ta-ce}
{\sc F. Takens},
``Constrained Equations: A study of Implicit Differential
Equations and their discontinuous Solutions'',
in {\sl Lecture Notes in Mathematics} {\bf 525},
Springer-Verlag, Berlin (1976), 143-235.

\bibitem{TV-iss}
{\sc S. Tan, J. Vandewalle},
``Inversion of Singular Systems'',
{\sl I.E.E.E. Trans. Circ. Syst.} {\bf 35}(5) (1988) 583-586.

\bibitem{VLK-gss}
{\sc G.C. Verghese, B. L\'evy, T. Kailath},
``A Generalized State-Space for Singular Systems'',
{\sl I.E.E.E. Trans. Aut. Control}
{\bf 26}(4) (1981) 811-831.

\end{thebibliography} \bigskip

\noindent {\sc Miguel C. Mu\~noz-Lecanda } (e-mail: matmcml@mat.upc.es)\\ 
{\sc Narciso Rom\'an-Roy} (e-mail: matnrr@mat.upc.es)\\
 Departamento de Matem\'atica Aplicada y Telem\'atica\\ 
 Campus Norte U.P.C., M\'odulo C-3\\ 
 C/ Jordi Girona 1\\ 
 E-08034 Barcelona, Spain

\end{document}
