\documentclass[reqno]{amsart}
\usepackage{hyperref}

\AtBeginDocument{{\noindent\small
2014 Madrid Conference on Applied Mathematics in honor of Alfonso Casal,\\
\emph{Electronic Journal of Differential Equations},
Conference 22 (2015),  pp. 111--116.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu}
\thanks{\copyright 2015 Texas State University.}
\vspace{9mm}}

\begin{document} \setcounter{page}{111}
\title[\hfilneg EJDE-2015/Conf/22 \hfil Finite-time stabilization]
{Finite-time stabilization by using degenerate feedback delay}

\author[J. M. Vegas \hfil EJDE-2015/conf/22 \hfilneg]
{Jos\'e M. Vegas}

\address{Jos\'e M. Vegas \newline
Colegio Universitario de Estudios Financieros\\
28040 Madrid, Spain}
\email{jm.vegas@cunef.edu}

\thanks{Published November 20, 2015.}
\subjclass[2010]{35R10, 35R35}
\keywords{Feedback delay control; stabilization; Pyragas control}

\begin{abstract}
 Some examples are studied in which a linear controllable dynamical system can
 be steered towards a specific steady state by using some appropriate linear,
 time-varying delayed feedback controller. The associated linear retarded
 differential equation has a finite-dimensional invariant subspace which
 attracts all orbits in finite time, and this degeneracy property is the reason
 why the target is attained in finite time rather than just asymptotically.

\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{remark}[theorem]{Remark}
\allowdisplaybreaks
 
\section{Introduction}

In previous papers 
\cite{CasalDiazVegas1,CasalDiazVegas2,CasalDiazVegas3,CasalDiazVegas4,CasalVegas}, 
 Casal, D\'{\i}az and the author have considered different variants of
delay-differential equations of the type
\begin{equation}
\dot{x}=Ax-M(t)x(t-\tau),\quad t\geq0, \label{1}
\end{equation}
where $\tau>0$ is a given delay, $A$ is the infinitesimal generator of a
continuous semigroup on some Banach space $X$, $M(t)$ is a $t$-continuous
bounded linear map on $X$ whose main characteristic is that it has
\emph{compact support} contained on $(0,\infty)  $. In this
paper we will only deal with the finite-dimensional case, so $A$ and
 $M(t)  $ will be $n\times n$ matrices and $x(t)  $ an
$n$-dimensional vector.

Equation \eqref{1} arises mainly as the \emph{closed-loop} system associated
to a general linear, time-invariant controllable system
\begin{equation}
\dot{x}=Ax+Bu(t)  \label{2}
\end{equation}
when a \emph{delayed feedback law} $u(t)  =K(t)
x(t-\tau)  $ is applied for any of the usual purposes of
stabilization, tracking, disturbance rejection, etc.

$\bullet$ If $M(t)  =0$ except for $t\in\lbrack\tau,2\tau
]\subset(0,\infty)  $ then one can prove (see
\cite{CasalDiazVegas3}) that \emph{every solution} $x(t)  $
vanishes for $t\geq2\tau$ if matrix $M(t)  $ commutes with
$e^{At}$ (or, equivalently, with $A)$, and 
$\int_{\tau}^{2\tau}M(t)  dt=e^{A\tau}$. 

$\bullet$ Concerning system \eqref{2}, if $M(t)  $ is factorized as
$BK(t)  $ in order to study the closed-loop delayed feedback
system, the situation is much more complicated (see \cite{CasalVegas}) but
a similar conclusion can be attained under some quite general circumstances.
Also, some optimality properties of the delayed control are studied.

$\bullet$ These results mean that, even if $A$ is a completely unstable matrix,
the closed-loop system is, in fact, \emph{superstable}, that is, all its
solutions vanish after some specified finite time. This finite-time exact
recovery of a lost equilibrium is usually called \emph{deadbeat control}
(see, e.g., \cite{Gawthrop}), mostly in the context of the regulator problem. 
Of course, uniqueness
considerations prevent this behavior from happening in 
``standard'' control unless severe discontinuities in the
coefficients of the equations are allowed, and it is the fact that the control
action starts after a given time lapse is what enables us to handle the
problem. In more technical terms, a functional differential equation, which is
inherently infinite-dimensional, is \emph{squashed}
into $\mathbb{R}^{n}$ and that fact simplifies many arguments and computations.

$\bullet$  For more general linear systems like $\dot{x}=Ax+z+B(t)  u$
(with $A$ nonsingular) the equilibrium $x_{\rm eq}=-A^{-1}z$ of the
uncontrolled system can also be reached in finite time by a similar delayed
feedback control. However, its calculation requires previous knowledge of this
equilibrium. This problem can be avoided if, instead of a single-delay
feedback $u(t)  =K(t)  x(t-\tau)  $, we
try a ``Pyragas'' type control 
$u(t)  =K(t)  [x(t-\tau)  -x(t-2\tau)]$
(see \cite{pyragas1,pyragas2})  to
eliminate the effect of the nonzero equilibrium.

$\bullet$ As will be shown here, this can also be done for periodic steady states,
which is, in fact, more akin to Pyragas's original purpose (stabilizing
unstable periodic orbits in chaotic systems).

\section{Main result}

In this article we consider a more general control system of the type
\begin{equation}
\dot{x}=Ax+f(t)  +B(t)  u,  \label{forced}
\end{equation}
with $f(t)  $ a continuous $\tau$-periodic function
which corresponds to an external forcing. The associated 
``unforced system'' 
\begin{equation}
\dot{x}=Ax+B(t)  u,
\end{equation}
will be assumed to be \emph{controllable}.

\begin{theorem} \label{thm1}
Consider the periodically forced linear time-invariant system
\begin{equation}
\dot{x}=Ax+f(t),  \label{forced1}
\end{equation}
and assume that the homogeneous system $\dot{x}=Ax$ has no nonconstant 
$\tau $-periodic solution or, equivalently, the matrix $I-e^{A\tau}$ 
is invertible.
Then this system has a \emph{unique} $\tau$-periodic solution 
$p(t)  $ and there exists a delayed feedback law
\[
u(t)  =K(t)  [x(t-\tau)  -x(t-2\tau)  ]
\]
of Pyragas type with $K(t)  =0$ outside $[2\tau,3\tau]$ and such
that every solution $x(t)  $ of the closed-loop system
\[
\dot{x}=Ax+f(t)  +B(t)  K(t)  [x(t-\tau)  -x(t-2\tau)  ],
\]
is steered toward the \emph{unique} $\tau$-periodic solution
$p(t)  $ of the forced system \eqref{forced}. More specifically
\[
x(t)  =p(t)  \quad\text{for }t\geq3\tau.
\]
\end{theorem}

\begin{remark} \label{rmk1} \rm
The existence and uniqueness of the $\tau$-periodic solution under the
hypothesis that $I-e^{A\tau}$ is invertible can be found, for instance, in
\cite{HaleODE}.
\end{remark}

Again, irrespective of the possible instability of the matrix $A$, the
(possibly unstable) steady state (a periodic solution in this case) is reached
in finite time.

\section{The basic change of variables}

Let us recall the complete control system
\begin{equation}
\dot{x}=Ax+f(t)  +Bu(t), \label{control}
\end{equation}
where $A$ is $n\times n$, $B(t)  $ is $n\times m$ continuous
(written simply as $B)$ together with its associated \emph{uncontrolled system}
(i.e., for $u(t)  =0)$:
\begin{equation}
\dot{x}=Ax+f(t). \label{uncontrolled}
\end{equation}


Let us consider new variables $y(t)  $ and $w(t)  $
related to $x(t)  $ by
\begin{equation}
x(t)  =e^{At}y(t)  +w(t). \label{change_variables}
\end{equation}
Then $\dot{x}(t)  =Ae^{At}y(t)  +e^{At}\dot
{y}(t)  +\dot{w}(t)  $ must equal $A\left[
e^{At}y(t)  +w(t)  \right]  +Bu(t)  $
and this gives the ``reduced'' system
\begin{equation}
\dot{y}=e^{-At}\left[  Aw-\dot{w}+f(t)  +Bu(t)
\right]. \label{reduced}
\end{equation}

Observe that if $w(t)  $ is any solution of the uncontrolled
system \eqref{uncontrolled}, the reduced system \eqref{reduced} is just a
simple control system $\dot{y}=e^{-At}Bu(t)  $ involving only
$\dot{y}$ and $u$ but not $y$.

If $u(t)  $ is prescribed to be given by 
$K(t) [  x(t-\tau) -x(t-2\tau)]$, a delayed feedback law,  we must also 
transform this part, thus obtaining the full
\emph{transformed closed-loop delay system}
\begin{equation} \label{full transformed}
\begin{aligned}
\dot{y}  & =e^{-At}[  Aw-\dot{w}+f(t)]
+e^{-At}BK(t)  \big[  e^{A(t-\tau)}y(t-\tau)\\
&\quad -e^{A(t-2\tau)  }y(t-2\tau)  +w(t-\tau)  -w(t-2\tau)  \big]  .
\end{aligned}
\end{equation}
Assume now that $w(t)  $ (so far an arbitrary function)
is $p(t)$, the unique $\tau$-periodic solution of the
uncontrolled system \eqref{uncontrolled}. Then both
$Aw(t)-\dot{w}(t)  + f(t)  $ and $w(t-\tau) -w(t-2\tau)
$ vanish for every $t$ and thus \eqref{full transformed}
becomes
\begin{equation}
\dot{y}=e^{-At}BK(t)  \big[  e^{A(t-\tau)}y(
t-\tau)  -e^{A(t-2\tau)  }y(t-2\tau)  \big].
\label{simp}
\end{equation}


\subsection*{First proof of Theorem \ref{thm1}}

From the previous discussion, if we can show that for some continuous
$K(t)  $ vanishing outside $[2\tau,3\tau]$ every solution
$y(t)  $ of \eqref{simp} becomes zero for $t\geq3\tau$, then
\[
x(t)  =e^{At}y(t)  +w(t)
\]
will be equal to $w(t)  $ for $t\geq3\tau$ as stated in the theorem.

The most direct way of handling this problem is integrating both sides of
\eqref{simp} on the interval $[2\tau,3\tau]$ taking into account that, since
$K(t)  =0$ for $t\leq2\tau$, $y(t)  $ is constant on
$[0,2\tau]$ and observing that $y(t-\tau)  $ and $y(
t-2\tau)  $ are equal to $y(0)  $ (denoted $y_0)$. Then
\[
y(3\tau)  =y_0+\Big(\int_{2\tau}^{3\tau}
e^{-At}BK(t)  e^{At}[  e^{-A\tau}-e^{-2A\tau}]
dt\Big)  y_0.
\]
Hence $y(3\tau)  $ will equal zero for every initial value $y_0
$ if and only if
\begin{equation}
\int_{2\tau}^{3\tau} e^{-At}BK(t)  e^{At}dt
=[  e^{-2A\tau}-e^{-A\tau}]^{-1}
=[  I-e^{A\tau}]  ^{-1}e^{2A\tau}, \label{K_integral_eq}
\end{equation}
where the inverse is well defined as assumed in the statement of the theorem.

Following an argument similar to Sontag's \cite{sontag}, we see that
controllability is equivalent to the fact that the 
``controllability map''
$\mathcal{C}:C([2\tau,3\tau],\mathbb{R}^{m})  \to
\mathbb{R}^{n}$
given by
\[
\mathcal{C}[u]:=e^{3A\tau}
\int_{2\tau}^{3\tau}e^{-At}Bu(t)  dt
\]
is onto, and so is
\[
u(\cdot)  \mapsto
\int_{2\tau}^{3\tau}
e^{-At}Bu(t)  dt
\]
and so is
\[
U(\cdot)  \mapsto
\int_{2\tau}^{3\tau}
e^{-At}BU(t)  dt
\]
defined on the space of continuous matrices $U(t)  $ on
$[2\tau,3\tau]$. This means that the \emph{matrix Fredholm integral equation}
\[
\int_{2\tau}^{3\tau}e^{-At}BU(t)  dt
=[  I-e^{A\tau}]  ^{-1}e^{2A\tau}
\]
has at least one solution $\hat{U}(\cdot)  $, and setting
$K(t)  :=\hat{U}(t)  e^{-At}$
we finally obtain
\[
\int_{2\tau}^{3\tau}e^{-At}BK(t)  e^{At}dt
=[  I-e^{A\tau}]  ^{-1}e^{2A\tau}
\]
as desired.

\subsection*{Second proof: optimality considerations}

Another possibility is using a well-known explicit expression 
for a special control $u(t)  $ steering any given initial value $x_0$ to any
desired final value $x_1$ over the time interval $[t_0,t_1]$.

\begin{proposition}[Minimum-energy control] \label{prop1}
Consider the control system  
$\dot{z}=Pz+Q(t)  u(t)$, where $P$ is $n\times n$, and 
$Q(t)$ is $n\times m$ continuous on $[t_0,t_1]$:

(1) The system
\[
\dot{x}=Px+Q(t)  u
\]
is controllable on $[t_0,t_1]$ if and only if the so-called
\emph{controllability Gramian}
\[
W:=\int_{t_0}^{t_1}e^{-Pt}Q(t)  Q(t)  ^Te^{-P^Tt}dt
\]
is nonsingular.

(2) Assume $\dot{x}=Px+Q(t)  u$ is controllable and let
$x_0\in\mathbb{R}^{n}$. Then: the control law
\[
u^{\ast}(t,x_0)  :=-Q(t) ^Te^{-P^Tt}W^{-1}x_0
\]
minimizes the ``total energy''
\[
\mathcal{E}[u]  :=
\int_{t_0}^{t_1}u(t)  ^{2}dt
\]
over the set of controls steering $x_0$ to $0$ on $[t_0,t_1]$ (which is
nonempty by assumption).
\end{proposition}

For the proof of the above proposition, see, e.g., Sontag \cite[Section 3.5]{sontag}.
In our case, the controllable system is just
\[
\dot{y}=e^{-At}B(t)  u(t),
\]
for which the matrix $P$ above is the zero matrix and 
$Q(t) =e^{-At}B(t) $. The Gramian is
\[
W:=\int_{t_0}^{t_1}e^{-At}B(t)  B(t)  ^Te^{-A^Tt}dt,
\]
which is nonsingular by assumption and the minimum-energy control is 
$u^{\ast}(t,y_0)  =U^{\ast}(t)  y_0$ where 
$U^{\ast}(t)  $ is the $n\times n$ matrix
\[
U^{\ast}(t)  =-B(t)  ^Te^{-A^Tt}W^{-1},
\]
which ``steers the identity matrix $I$ to the zero matrix $O$
on the interval $[t_0,t_1]$''.

In our case, we need $u^{\ast}(t,y_0)  $ to be of the delayed
feedback type
\[
u^{\ast}(t,y_0)  =K(t)  \big[  e^{A(t-\tau
)}y(t-\tau)  -e^{A(t-2\tau)  }y(t-2\tau)  \big].
\]
As previously pointed out, $y(t-\tau)  =y(t-2\tau)
=y_0=y(0)  $. Therefore, we must find an $m\times n$ matrix
$K^{\ast}(t)  $, vanishing at the endpoints $2\tau$ and $3\tau$,
such that
\[
-B(t)  ^Te^{-A^Tt}W^{-1}=K^{\ast}(t)
e^{At}(e^{-A\tau}-e^{-2A\tau}),
\]
which is just
\begin{equation}
K^{\ast}(t)  =-B(t)  ^Te^{-A^Tt}W^{-1}(
e^{-A\tau}-e^{-2A\tau})  ^{-1}\label{Kstar}.
\end{equation}
The feedback law is thus obtained by extending this $K^{\ast}$ outside of
$[2\tau,3\tau]$ by setting $K^{\ast}(t)=0$.

\section{Final remarks}

(1) Some of the above results work perfectly well for linear, time-varying
systems $\dot{x}=A(t)  x+B(t)  u$ by substituting
$e^{At},\ e^{-As}$ by $\Phi(t)  ,\ \Phi(s)  ^{-1}$,
where $\Phi(t)  $ is the fundamental matrix solution satisfying
$\Phi(0)  =I$. The existence of a unique periodic solution to
$\dot{x}=A(t)  x+f(t)  $ for $\tau$-periodic
$f(t)  $ is, of course, quite a difficult matter, even requiring
the coefficient matrix $A(t)  $ to be also $\tau$-periodic (see
\cite{HaleODE}).

(2) From a practical viewpoint, the question of \emph{robustness} presents
itself immediately. Exact equilibria attained in finite time, deadbeat
control, etc., are not found in real life, since neither the plants nor the
control links are 100\% valid. If all the eigenvalues of matrix 
$A$ have negative real parts, the consequences are
not so bad, since the system's own internal dynamics will drive the state back
to (a neighborhood of) equilibrium. But in the unstable case, this will not be true.

A possible solution to this problem is to extend the control to a stream of
equal actions on intervals 
$[2\tau,3\tau]$, $[5\tau,6\tau]$, $[8\tau,9\tau]$, and so on. The
``inaction intervals'' of length $2\tau$
enable the system to ``forget past history''
 and start all over again. No recovery of equilibrium (or some other periodic
steady state) will happen, but at least we will make sure that the system will
not deviate too far from it. This idea comes quite close to the so-called
``act-and-wait'' control strategy or
``intermittent control'' (see \cite{Gawthrop}).

(3) It is well-known that sparse jump discontinuities in controller
functions are not a real problem (at least in finite-dimensional problems),
and our delayed feedback gain function $K(t)  $ is usually
discontinuous at $2\tau$ and $3\tau$. Yet, if they are considered undesirable
in some specific situation, these jumps can easily be avoided by choosing a
continuous scalar function $\beta:\mathbb{R}\to\mathbb{R}$ such that
$\beta(t)  >0$ on $[2\tau,3\tau]$ and is 0 outside this interval,
and modifying our original system 
$\dot{x}=Ax+B(t)  u(t)  $ by 
$\dot{x}=Ax+\beta(t)  B(t)  u(t)  $. 
If the former system is controllable on $[2\tau,3\tau]$, the
latter will have the same property, as can be easily proven (the assumption
``$\beta>0$ on $[2\tau,3\tau]$'' plays an important role). We then pick the new
$K(t)  $ as in \eqref{Kstar} with $B(t)  ^T$
substituted by $\beta(t)  B(t)  ^T$ and $W$
redefined as 
\begin{gather*}
\int_{2\tau}^{3\tau} \beta(t)  ^{2}e^{-At}B(t)  B(t)
^Te^{-A^Tt}dt;\\
K_{\rm new}^{\ast}(t)  
:=-\beta(t)  B(t)  ^Te^{-A^Tt}W^{-1}(e^{-A\tau}-e^{-2A\tau})  ^{-1}.
\end{gather*}
Some interesting questions arise as to the ``right'' choice of $\beta(t)  $, 
depending on the performance index associated to the problem under study. 
Some results in this line will appear elsewhere.

\subsection*{Acknowledgments}

It is my privilege (and my pleasure) to thank Prof. Alfonso Casal for
introducing me into the world of differential equations as an undergraduate
student and to the world of functional differential equations in graduate
school, both in Madrid. Even more, he introduced me (in all senses of the
word) to Jack K. Hale, both teacher and friend. Jack is now gone, but his
influence will always be with us, thanks to Alfonso's pioneering work. And,
last but not least, let me mention Alfonso's stature as a human being which
has been so commented upon throughout this conference.


\begin{thebibliography}{99}

\bibitem{CasalDiazVegas1} Casal, A.; D\'{\i}az, J. I.; Vegas, J. M.;
Finite extinction time via delayed feedback actions.
\emph{Dyn. Contin. Discrete Impuls. Syst. Ser. A Math. Anal.}, 14 (2007), 
Advances in Dynamical Systems, suppl. S2, 23--27.

\bibitem{CasalDiazVegas2} Casal, A.; D\'{\i}az, J. I.; Vegas, J. M.;
\newblock  Blow-up in some ordinary and partial differential equations 
with time-delay. \emph{Dynam. Systems Appl.}, 18 (2009), no. 1, 29--46.

\bibitem{CasalDiazVegas3} Casal, A.; D\'{\i}az, J. I.; Vegas, J. M.;
 Finite extinction and null controllability via delayed feedback non-local actions.
\emph{Nonlinear Anal.}, 71 (2009), no. 12, e2018--e2022.

\bibitem{CasalDiazVegas4} Casal, A.; D\'{\i}az, J. I.; Vegas, J. M.;
 Blow-up in some ordinary and partial differential equations with time-delay. 
\emph{Dynam. Systems Appl.}, 18 (2009), no. 1, 29--46.

\bibitem{CasalVegas} Casal, A. C.; Vegas, J. M.;
 Finite extinction and control in some delay models. 
\emph{Differ. Equ. Appl.}, 4 (2012), no. 1, 95--110.

\bibitem{Gawthrop} Gawthrop, P.;
 Act-and-wait and intermittent control: some
comments. \emph{IEEE Trans. on Control Systems Technology}, 18(5) (2010), 1195--1198.

\bibitem{HaleODE} Hale, Jack K.;
 \emph{Ordinary Differential Equations}. Wiley-Interscience, 1969.

\bibitem{pyragas1} Pyragas, K.;
 Continuous control of chaos by self-controlling feedback. 
\emph{Phys. Lett. A}, 170 (1992), 421--428.

\bibitem{pyragas2} Pyragas, K.;
 Control of chaos via extended delay feedback.
\emph{Phys. Lett. A}, (1995), 206, no. 5-6, 323--330.

\bibitem{sontag} Sontag, E.;
\emph{Mathematical Control Theory: Deterministic Finite Dimensional Systems}, 2nd ed., Springer-Verlag, 1998.
\end{thebibliography}


\end{document}
