\documentclass[twoside]{article}
\usepackage{amssymb,amsmath}
\pagestyle{myheadings}
\markboth{\hfil Stabilization of linear continuous time-varying systems
\hfil EJDE--2001/67}
{EJDE--2001/67\hfil Vu Ngoc Phat \hfil}
\begin{document}
\title{\vspace{-1in}\parbox{\linewidth}{\footnotesize\noindent
{\sc Electronic Journal of Differential Equations},
Vol. {\bf 2001}(2001), No. 67, pp. 1--13. \newline
ISSN: 1072-6691. URL: http://ejde.math.swt.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.swt.edu (login: ftp)}
\vspace{\bigskipamount} \\
%
Stabilization of linear continuous time-varying systems
with state delays in Hilbert spaces
%
\thanks{ {\em Mathematics Subject Classifications:} 93D15, 93B05, 34K20.
\hfil\break\indent
{\em Key words:} Stabilization, time-varying, delay system, Riccati equation.
\hfil\break\indent
\copyright 2001 Southwest Texas State University. \hfil\break\indent
Submitted August 16, 2001. Published October 19, 2001.} }
\date{}
%
\author{Vu Ngoc Phat}
\maketitle
\begin{abstract}
This paper studies the stabilization of the infinite-dimensional
linear time-varying system with state delays
$$\dot x = A(t)x + A_1(t)x(t-h)+B(t)u\,.$$
The operator $A(t)$ is assumed to be the generator of
a strong evolution operator. In contrast to the previous results,
the stabilizability conditions are obtained via solving a Riccati
differential equation and do not involve any stability property
of the evolution operator. Our conditions are easy to construct
and to verify. We provide a step-by-step procedure for finding
feedback controllers and state stability conditions for some
linear delay control systems with nonlinear perturbations.
\end{abstract}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{proposition}[theorem]{Proposition}
\renewcommand{\theequation}{\thesection.\arabic{equation}}
\catcode`@=11
\@addtoreset{equation}{section}
\catcode`@=12
\section{Introduction}
Consider a linear control system with state delays
\begin{equation}\begin{gathered}
\dot x(t) = A(t)x(t) + A_1(t)x(t-h) + B(t)u(t), \quad t\geq t_0,\\
x(t)= \phi(t), \quad t\in [-h,t_0],
\end{gathered}\end{equation}
where $x\in X $ is the state, $u\in U$ is the control, $h \geq 0$.
The stabilizability question consists in finding a feedback control
$u(t) = K(t)x(t)$ for keeping the closed-loop system
\[
\dot x(t) = [A(t) + B(t)K(t)]x(t) + A_1(t)x(t-h)
\]
asymptotically stable in the Lyapunov sense.
In the qualitative theory of dynamical systems, the stabilizability is one
of the most important properties of the systems and has attracted the attention
of many researchers; see for example \cite{b2,f1,g3,p1,p2,t1} and references therein. It is well known that the main technique for solving stabilizability for control systems is the Lyapunov function method, but finding Lyapunov functions is still a difficult task (see, e.g. \cite{c1,l1,l3,n1,s1,z1}). However, for linear control system (1.1), the system can be made exponentially stabilizable if the underlying system $\dot x(t) = A(t)x(t)$ is asymptotically stable. In other words, if the evolution operator $E(t,s)$ generated by $A(t)$ is stable, then the delay control system (1.1) is asymptotically stabilizable under appropriate
conditions on $A_1(t)$ (see \cite{b2,p2,z1}). For infinite-dimensional control
systems, the investigation of stabilizability is more complicated and
requires sophisticated techniques from semigroup theory. The difficulties
increase considerably when passing from time-invariant to time-varying
systems. Some results have been given in \cite{b1,c2,g2,p2} for time-invariant
systems in Hilbert spaces.
This paper considers linear abstract control systems with both time-varying
and time-delayed states and the object is to find stabilizability conditions
based on the global controllability of undelayed control system $[A(t),B(t)]$.
In contrast to \cite{b2,p2,n1}, the stabilizability conditions obtained in
this paper are derived by solving Riccati differential equations and do not
involve any stability assumption on the evolution operator $E(t,s)$.
New sufficient conditions for the stabilizability of a class of linear
systems with nonlinear delay perturbations in Hilbert spaces are also
established. The main results of the paper are further generalizations to
infinite-dimensional case and can be regarded as extensions of the results
of \cite{f1,k1,l2,t1}.
The paper is organized as follows. In Section 2 we give the notation, and
definitions to be used in this paper. Auxiliary propositions are given in
Section 3. Sufficient conditions for the stabilizability are presented in
Section 4.
\section{Notation and definitions}
We will use the following notation:
$\mathbb{R}^+$ denotes the set of all non-negative real numbers.
$X$ denotes a Hilbert space with the norm $\|.\|_{X}$ and the inner product
$\langle ., .\rangle_{X}$, etc.
$L(X)$ (respectively, $L(X, Y)$) denotes the Banach space of all linear
bounded operators $S$ mapping $X$ into $X$ (respectively, $X$ into $Y$)
endowed with the norm
$$\|S\| = \sup \{\|Sx\|: x\in X, \|x\| \leq 1 \}.$$
$L_2([t,s], X)$ denotes the set of all strongly measurable square integrable
$X$-valued functions on $[t,s]$.
$D(A), Im(A), A^*$ and $A^{-1}$ denote the domain, the image, the adjoint
and the inverse of the operator $A$, respectively. If $A$ is a matrix,
then $A^T$ denotes the conjugate transpose of $A$.
$B_1 =\{x\in X:\quad \|x\| =1\}$.
$\mathop{\rm cl}M$ denotes the closure of a set $M$; $I$ denotes the
identity operator.
$C_{[t,s],X}$ denotes the set of all $X$-valued continuous functions on $[t,s]$.
Let $X, U$ be Hilbert spaces. Consider a linear time-varying control
undelayed system $[A(t),B(t)]$ given by
\begin{equation} \begin{gathered}
\dot x(t)= A(t)x(t) + B(t)u(t), \quad t\geq t_0,\nonumber \\
x(t_0) = x_0,
\end{gathered}\end{equation}
where $x(t)\in X$, $u(t)\in U$; $A(t):X\to X$; $B(t)\in L(U, X)$.
In the sequel, we say that control $u(t)$ is admissible if
$u(t)\in L_2([t_0,\infty),U)$.
We make the following assumptions on the system (2.1):
\begin{enumerate}
\item[(i)] $B(t)\in L(U,X)$ and $B(.)u\in C_{[t_0,\infty),X}$ for all $u\in U$.
\item[(ii)] The operator $A(t): D(A(t))\subset X\to X$,
$\mathop{\rm cl}D(A(t)) = X$
is a bounded function in $t\in [t_0,\infty)$ and generates a strong evolution
operator $E(t,\tau): \{(t,\tau): t\geq \tau \geq t_0\} \to L(X)$
(see, e.g. \cite{c3,d1}):
$$E(t,t) = I, \quad t \geq t_0,\quad
E(t,\tau)E(\tau, r) = E(t, r), \quad \forall t\geq\tau \geq r \geq t_0\,,$$
$E(t,\tau)$ is continuous in $t$ and $\tau$,
$E(t,t_0)x = x + \int_{t_0}^tE(t,\tau)A(\tau)xd\tau$, for all $x\in D(A(t))$,
so that the system (2.1), for every admissible control $u(t)$ has a unique
solution given by
$$x(t) = E(t,t_0)x_0 + \int_{t_0}^t E(t,\tau)B(\tau)u(\tau)d\tau.$$
\end{enumerate}
\paragraph{Definition} The system $[A(t), B(t)]$ is called globally
null-controllable in time $T > 0$, if every state can be transferred to $0$
in time $T$ by some admissible control $u(t)$, i.e.,
$$\mathop{\rm Im} E(T,t_0) \subset L_T(L_2([t_0,T],U)),$$
where
$L_T = \int_{t_0}^TE(T,s)B(s)ds$.
\paragraph{Definition}
The system $[A(t),B(t)]$ is called stabilizable if there exists an operator
function $K(t)\in L(X,U)$ such that the zero solution of the closed loop
system $\dot x = [A(t)+B(t)K(t)]x$ is asymptotically stable in the
Lyapunov sense. \medskip
Following the setting in \cite{b1}, we give a concept of the Riccati
differential equation in a Hilbert space. Consider a differential operator
equation
\begin{eqnarray}
\dot P(t) + A^*(t)P(t) + P(t)A(t) -P(t)B(t)R^{-1}B^*(t)P(t) + Q(t) = 0,
\end{eqnarray}
where $P(t), Q(t)\in L(X)$ and $R > 0$ is a constant operator.
\paragraph{Definition} An operator $P(t)\in L(X)$ is said to be a
solution of the Riccati differential equation (2.2) if for all $t\geq t_0$
and all $x\in D(A(t))$,
$$\langle \dot Px,x\rangle + \langle PAx,x\rangle + \langle Px, Ax\rangle - \langle PBR^{-1}B^*Px, x\rangle
+ \langle Qx, x\rangle = 0\,.$$
An operator $Q\in L(X)$ is said to be non-negative definite
denoted by $Q \geq 0$, if $\langle Qx,x\rangle \geq 0$, for all $x\in X$.
If for some $c > 0, \langle Qx,x\rangle > c\|x\|^2$ for all $x\in X$, then $Q$
is called positive definite and is denoted by $Q > 0$.
Operator $Q\in L(X)$ is called self-adjoint if $Q=Q^*$. The self-adjoint
operator is characterized by the fact that its inner product $\langle Qx,x\rangle$
takes only real values and its spectrum is a bounded closed set on the real
axis. The least segment that contains the spectrum is
$[\lambda_{\rm min}(Q), \lambda_{\rm max}(Q)]$, where
\begin{gather*}
\lambda_{\rm min}(Q) = \inf\{\langle Qx,x\rangle: x\in B_1\},\\
\lambda_{\rm max}(Q) = \sup\{\langle Qx,x\rangle: x\in B_1\} = \|Q\|.
\end{gather*}
We denote by $BC([t,\infty), X^+)$ the set
of all linear bounded self-adjoint non-negative definite operators
in $L(X)$ that are continuous and bounded on $[t,\infty)$.
\section{Auxiliary propositions}
To prove the main results we need the following propositions.
\begin{proposition}[\cite{c3}] \label{prop3.1}
If $Q\in L(X)$ is a self-adjoint positive definite operator, then
$\lambda_{\rm min}(Q) > 0$ and
$$\lambda_{\rm min}(Q)\|x\|^2\leq \langle Qx,x\rangle \leq \lambda_{\rm max}(Q)\|x\|^2,
\quad \forall x\in X.$$
\end{proposition}
\begin{proposition}[\cite{b1,c3}] \label{prop3.2}
The system $[A(t),B(t)]$ is globally null-controllable in time $T > 0$ if and
only if one of the following conditions hold:
\begin{enumerate}
\item[(i)] There is a number $c>0$ such that
$$\int_{t_0}^T\|B^*(s)E^*(T,s)x\|^2ds \geq c\|E^*(T,t_0)x\|^2, \quad
\forall x\in X.$$
\item[(ii)] The operator $\int_{t_0}^TE(T,s)B(s)B^*(s)E^*(T,s)ds$ is positive
definite.
\end{enumerate}
\end{proposition}
Associated with control system $[A(t),B(t)]$ we consider the cost functional
\begin{eqnarray}
J(u) = \int_0^\infty [\langle Ru(t),u(t)\rangle + \langle Q(t)x(t),x(t)\rangle ]dt,
\end{eqnarray}
where $R > 0$, $Q(t)\in BC([t_0,\infty),X^+)$.
The following proposition solves the optimal
quadratic problem (2.1)--(3.1).
\begin{proposition}[\cite{p3}] \label{prop3.3}
Assume that the optimal quadratic problem (2.1)-(3.1) is solved in the sense
that for every initial state $x_0$ there is an admissible control $u(t)$
such that the cost functional (3.1) exists and is finite.
Then the Riccati differential equation (2.2) has a solution
$P(t)\in BC([t_0,\infty), X^+)$. Moreover, the control $u(t)$ is given in the
feedback form
\begin{eqnarray}
u(t) = - R^{-1}B^*(t)P(t)x(t), \quad t\geq t_0
\end{eqnarray}
minimizes functional (3.1).
\end{proposition}
For the finite-dimensional case, it is well known \cite{k1,l2}
that if system $[A,B]$ is globally null-controllable then the control
$$u(t) = -B^TP^{-1}(T)x(t),\quad T > t_0,$$
where $P(T) > 0$ is the solution of the Riccati equation
$$\dot P(t) + AP(t) + P(t)A^T+P(t)QP(t) + BR^{-1}B^T = 0, \quad P(t_0) = 0,$$
for a matrix $Q > 0$, minimizes the cost functional (3.1).
In the proposition below, we extend this assertion to the
infinite-dimensional case based on solving an optimal quadratic control
problem.
\begin{proposition} \label{prop3.4}
If control system $[A(t),B(t)]$ is globally null-controllable in finite time,
then for every operator $Q(t)\in BC([t_0,\infty), X^+)$, Riccati differential
equation (2.2) has a solution $P(t)\in BC([t_0,\infty), X^+)$ and the feedback
control (3.2) minimizes the cost functional (3.1).
\end{proposition}
\paragraph{Proof.} Assume that the system is globally null-controllable in
some $T > t_0$. Let us take operators $R > 0, Q(t)\in BC([t_0,\infty),X^+)$
and consider a linear optimal quadratic control problem for the system
$[A(t),B(t)]$ with the cost functional (3.1).
Due to the global null-controllability, for every initial state $x_0\in X$
there is an admissible control $u(t)\in L_2([t_0,T],U)$ such that the solution
$x(t)$ of the system, according to the control $u(t)$, satisfies
$$x(t_0) = x_0, \quad x(T) = 0.$$
Let $u_x(t)$ denote an admissible control according to the solution
$x(t)$ of the system. Define
\begin{gather*}
\tilde u (t) = u_x(t), \quad t\in [t_0,T], \\
\tilde u(t) = 0,\quad t > T.
\end{gather*}
If $\tilde x(.)$ is the solution corresponding to $\tilde u_{\tilde x} (.)$,
then $\tilde x(t) = 0$ for all $t > T$. Therefore, for every initial state
$x_0$, we have
$$J(u) = \int_{t_0}^\infty[\langle Q(s)\tilde x(s),\tilde x(s)\rangle +
\langle R\tilde u_{\tilde x}(s),\tilde u_{\tilde x}(s)\rangle ]ds < + \infty.
$$
The assumption of Proposition \ref{prop3.3} for the optimal quadratic
problem (2.1), (3.1) is satisfied and hence there is an operator function
$P(t)\in BC([t_0,\infty),X^+)$, which is a solution of the Riccati
equation (2.2) and the control (3.2) minimizes the cost functional (3.1).
The proposition is proved. \hfill$\diamondsuit$\medskip
We conclude this section with a Lyapunov stability result on functional
differential equations. Consider a general functional differential
equation of the form
\begin{equation}\begin{gathered}
\dot x(t)= f(t,x_t), \quad t\geq t_0, \\
x(t) = \phi(t), \quad t\in [-h, t_0],
\end{gathered}\end{equation}
where $\phi(t)\in C_{[-h,t_0],X}$, $x_t(s) = x(t+s),$ $-h\leq s\leq t_0$.
Define
$$\|x_t\| = \sup_{s\in[-h,t_0]}\|x(t+s)\|.$$
\begin{proposition}[\cite{h1}] \label{prop3.5}
Assume that there exist a function $V(t,x_t): \mathbb{R}^+\times C_{[-h,t_0],X}\to \mathbb{R}^+$
and numbers $c_1 > 0, c_2 > 0, c_3 > 0$ such that
\begin{enumerate}
\item[(i)] $c_1\|x(t)\|^2 \leq V(t,x_t)\leq c_2\|x_t\|^2$, for all $t \geq t_0$.
\item[(ii)] $\frac{d}{dt}V(t,x_t) \leq - c_3\|x(t)\|^2$, for all $t\geq t_0$.
\end{enumerate}
Then the system (3.3) is asymptotically stable.
\end{proposition}
\section{Stabilizability conditions }
Consider the linear control delay system (1.1), where $x(t)\in X$,
$u(t)\in U$; $X, U$ are infinite-dimensional Hilbert spaces; $A_1(t):X\to X$
and $A(t), B(t)$ satisfy the assumptions stated in Section 2 so that the
control system (1.1) has a unique solution for every initial condition
$\phi(t)\in C_{[-h,t_0],X}$ and admissible control $u(t)$. Let
$$p = \sup_{t\in [t_0,\infty)}\|P(t)\|.$$
\begin{theorem} \label{thm4.1}
Assume that for some self-adjoint constant positive definite operator
$Q\in L(X)$, the Riccati differential equation (2.2), where $R=I$ has a
solution $P(t)\in BC([t_0,\infty),X^+)$ such that
\begin{equation}
a_1 := \sup_{t\in [t_0,\infty)}\|A_1(t)\|
< \frac{\sqrt{\lambda_{\rm min}(Q)}}{2p}.
\end{equation}
Then the control delay system (1.1) is stabilizable.
\end{theorem}
\paragraph{Proof.} For simplicity of expression, let $t_0 = 0$. Let
$0 < Q\in L(X)$, $P(t)\in BC([0,\infty),X^+)$ satisfy the Riccati equation
(2.2), where $R = I$. Let
\begin{eqnarray}
u(t) = K(t)x(t),
\end{eqnarray}
where
$K(t) = -\frac{1}{2}B^*(t)P(t)$, $t \geq 0$.
For some number $\alpha \in (0, 1)$ to be chosen later, we consider
a Lyapunov function, for the delay system (1.1),
$$
V(t,x_t) = \langle P(t)x(t),x(t)\rangle +\alpha \int_{t-h}^t\langle Qx(s),x(s)\rangle ds.
$$
Since $Q > 0$ and $P(t)\in BC([0,\infty),X^+)$, it is easy to verify that
$$c_1\|x(t)\|^2\leq V(t,x_t)\leq c_2\|x_t\|^2,$$
for some positive constants $c_1, c_2$.
On the other hand, taking the derivative of $V(t,x_t)$ along the solution
$x(t)$ of the system, we have
\begin{equation}\begin{aligned}
\dot V(t,x_t) =& \langle \dot P(t)x(t),x(t)\rangle + 2 \langle P(t)\dot x(t), x(t) \rangle \\
& + \alpha[\langle Qx(t),x(t)\rangle - \langle Qx(t-h),x(t-h)\rangle].
\end{aligned}\end{equation}
Substituting the control (4.2) into (4.3) gives
\begin{eqnarray*}
\dot V(t,x_t) &=& -(1- \alpha )\langle Qx(t),x(t)\rangle
+ 2 \langle P(t)A_1(t)x(t-h),x(t)\rangle \\
&& - \alpha \langle Qx(t-h),x(t-h)\rangle .
\end{eqnarray*}
From Proposition \ref{prop3.1} it follows that
$$
\lambda_{\rm min}(Q)\|x\|^2 \leq \langle Qx,x\rangle \leq
\lambda_{\rm max}(Q)\|x\|^2,\quad x\in X,
$$
where $\lambda_{\rm min}(Q) > 0$. Therefore,
$$
\dot V(t,x_t) \leq -\lambda_{\rm min}(Q)(1-\alpha)\|x\|^2
+ 2pa_1\|x(t-h)\|\|x(t)\| - \lambda_{\rm min}(Q)\alpha \|x(t-h)\|^2.
$$
By completing the square, we obtain
\begin{eqnarray*}
\lefteqn{2pa_1\|x(t-h)\|\|x(t)\| - \lambda_{\rm min}(Q)\alpha \|x(t-h)\|^2}\\
&=& - \Bigl[\sqrt{\alpha \lambda_{\rm min}(Q)}\|x(t-h)\|
- \frac{pa_1}{\sqrt{\alpha \lambda_{\rm min}(Q)}}\|x(t)\|\Bigr]^2
+ \frac{p^2a_1^2}{\alpha \lambda_{\rm min}(Q)}\|x(t)\|^2.
\end{eqnarray*}
Therefore,
\begin{eqnarray*}
\dot V(t,x_t) &\leq& -\lambda_{\rm min}(Q)(1-\alpha)\|x(t)\|^2
+\frac{p^2a_1^2}{\alpha \lambda_{\rm min}(Q)}\|x(t)\|^2 \\
&=& - \Bigl[\lambda_{\rm min}(Q)(1-\alpha)
- \frac{1}{\alpha \lambda_{\rm min}(Q)}p^2a_1^2\Bigr]\|x(t)\|^2.
\end{eqnarray*}
Since the maximum value of $\alpha(1-\alpha)$ in $(0, 1)$ is attained
at $\alpha = 1/2$, from (4.1) it follows that for some $c_3 > 0$,
$$\dot V(t,x_t) \leq - c_3 \|x(t)\|^2,\quad \forall t\geq t_0.$$
The proof is complete by using Proposition \ref{prop3.5}.
\hfill$\diamondsuit$\smallskip
The following theorem shows that if the system $[A(t),B(t)]$ is globally
null-controllable then the delay system (1.1) is stabilizable under an
appropriate condition on $A_1(t)$.
\begin{theorem} \label{thm4.2}
Assume that $[A(t),B(t)]$ is globally null-controllable in finite time.
Then the delay system (1.1) is stabilizable if (4.1) holds, where $Q(t)=I$,
and $P(t)$ satisfies the Riccati equation (2.2).
Moreover, the feedback control is given by
%\begin{equation}
$$u(t) = -\frac{1}{2}B^*(t)P(t)x(t),\quad t\geq 0.$$
%\end{equation}
\end{theorem}
\paragraph{Proof.} By assumption, the system $[A(t), B(t)]$ is globally
null-controllable in some time $T>0$. This means that for every initial
state $x_0\in X$ there is an admissible control $u(t)\in L_2([0,T],U)$
such that the solution $x(t)$ of the system according to the control $
u(t)$ satisfies
$$x(0) = x_0, \quad x(T) = 0.$$
Define an admissible control $\tilde u(t)$, $t\geq 0$ by
\begin{gather*}
\tilde u(t) = u(t), \quad t\in [0,T],\\
\tilde u(t) = 0, \quad t > T.
\end{gather*}
Denoting by $\tilde x(t)$ the solution corresponding to the control $\tilde u(t)$,
we have
\begin{eqnarray*}
J(\tilde u) &=& \int_0^\infty[\|\tilde u(t)\|^2 + \|\tilde x(t)\|^2 ]dt \\
&=& \int_0^T[\|u(t)\|^2 + \|x(t)\|^2]dt < + \infty.
\end{eqnarray*}
Therefore, by Proposition \ref{prop3.4}, there is $P(t)\in BC([0,\infty),X^+)$
satisfying the Riccati differential equation (2.2), where $Q = R = I$.
Based on the condition (4.1) the proof is completed by the same arguments
used in the proof of Theorem \ref{thm4.1}, where we use the same feedback control
operator $K(t)$ and the Lyapunov function $V(t,x_t)$.
\paragraph{Remark} % 4.1
Note that when $Q = I$, then the condition (4.1)
is replaced by the condition
\begin{eqnarray}
\sup_{t\in [0,\infty)}\|A_1(t)\| < \frac{1}{2p}.
\end{eqnarray}
Therefore, when the controllability problem of the linear control system is
solvable, the following step-by-step procedure can be used to find the
feedback controller for system (1.1).
\\
{\it Step 1:} Verify the controllability conditions by Proposition \ref{prop3.2}.
\\
{\it Step 2:} Find a solution $P(t)\in BC([t_0,\infty),X^+)$ to the
Riccati differential equation
\begin{eqnarray}
\dot P(t) + A^*(t)P(t) + P(t)A(t) - P(t)B(t)B^*(t)P(t) + I = 0
\end{eqnarray}
{\it Step 3:} Compute $a_1 = \sup_{t\in[0,\infty)}\{\|A_1(t)\|\}$ and
$p = \sup_{t\in[0,\infty)}\{\|P(t)\|\}$.
\\
{\it Step 4:} Verify the condition (4.4).
\\
{\it Step 5:} The stabilizing controller is then defined by (4.2). \medskip
In the same way, Theorem \ref{thm4.2} can be extended to the system with multiple
delays
\begin{equation}\begin{gathered}
\dot x(t)= A(t)x(t) + \sum_{i=1}^rA_i(t)x(t-h_i) + B(t)u(t), \quad t\geq t_0,\\
x(t) = \phi(t), \quad t\in [-h_r, t_0],
\end{gathered}\end{equation}
where $A_i(t)\in L(X)$, $0 \leq h_1\leq \dots \leq h_r$, $r\geq 1$.
\begin{theorem} \label{thm4.3}
Let the control system $[A(t),B(t)]$ be globally null-controllable in some
finite time. Assume that
$$
\sum_{i=1}^r\sup_{t\in [t_0,\infty)}\|A_i(t)\|^2 < \frac{2-r}{4p^2}.
$$
Then the control delay system (4.6) is stabilizable.
\end{theorem}
The proof is similar to the proof of Theorem \ref{thm4.2}, with
$ Q = I$ and
$$V(t,x_t) = \langle P(t)x(t), x(t)\rangle + \frac{1}{2}
\sum_{i=1}^r\int_{t-h_i}^t\|x(s)\|^2ds.
$$
\paragraph{Remark} It is worth noting that although the Lyapunov function
method is not used, the results obtained in \cite{g1,g2} give us explicit
stabilizability conditions under a dissipative assumption on the operator
$W(t) = A(t) +A_1(t-h)+B(t)K(t)$. In contrast to these conditions, our
conditions are obtained via the controllability assumption and the solution
of Riccati differential equation (4.5) and do not involve the stability
of evolution operator $E(t,s)$ or the dissipative property of the operator $
W(t)$, therefore they can be easily verified and constructed. \smallskip
As an application, we consider the stabilization of the nonlinear
control system in Hilbert spaces
\begin{equation}\begin{gathered}
\dot x(t) = A(t)x(t) + A_1(t)x(t-h)+ B(t)u(t) + f(t,x(t), x(t-h),u(t)),\\
t\geq 0,\\
x(t) = \phi(t), \quad t\in [-h, 0],
\end{gathered}\end{equation}
where $x\in X, u\in U$ and
$f(t,x,y,u): [0,\infty)\times X\times X\times U \to X$ is a given nonlinear
function. We recall that nonlinear control system (4.7) is stabilizable by
a feedback control $u(t) = K(t)x(t)$, where $K(t) \in L(X,U)$,
if the closed-loop system
%\begin{equation}
$$\dot x = [A(t)+B(t)K(t)]x + A_1(t)x(t-h)+f(t, x, x(t-h),
K(t)x),$$
%\end{equation}
is asymptotically stable. Stabilizability of nonlinear control systems has
been considered in \cite{b2,l2,t1} under the stability assumption on the
evolution operator $E(t,s)$ and on the perturbation function $f(t,.)$ that
for all $(t,x,y, u) \in [0,\infty)\times X\times X\times U$,
\begin{equation}
\|f(t,x,y, u)\| \leq a\|x\| + b\|y\| + c\|u\|
\end{equation}
for some positive numbers $a, b, c$.
In the following, in contrast to the mentioned above results, we give
stabilizability conditions for nonlinear control system (4.7) via the global
null-controllability of linear control system (2.1). Let
$$\beta = \sup_{t\in [0,+\infty)}\|B(t)\|,\quad
a_1 = \sup_{t\in [0,+\infty)}\|A_1(t)\|,
\quad p = \sup_{t\in [0,+\infty)}\|P(t)\|.$$
\begin{theorem} \label{thm4.4}
Let the linear control system $[A(t),B(t)]$ be globally null-control\-lable
in finite time. Assume that $a_1\leq 1/(2p)$
and the condition (4.8) holds for positive numbers $a, b, c$ satisfying
\begin{equation}
a < \frac{1-4a^2_1p^2}{4p},\quad
2b^2p^2 + c\beta p^2 +4ba_1p^2 < \frac{1}{2} - 2ap - 2a_1^2p^2.
\end{equation}
Then the nonlinear control system (4.7) is stabilizable.
\end{theorem}
\paragraph{Proof.}
Since the system $[A(t),B(t)]$ is globally null-controllable in
finite time, by Proposition \ref{prop3.4}, for $Q= I$ there is an operator
$P(t)\in BC([0,\infty),X^+)$ satisfying the Riccati equation (4.5).
Let us consider the Lyapunov function
$$
V(t, x_t) = \langle P(t)x(t), x(t)\rangle + \frac{1}{2}\int_{t-h}^t\|x(s)\|^2 ds
$$
for the nonlinear control system (4.7). Taking the derivative of $V(t,x_t)$
along the solution $x(t)$ we have
\begin{eqnarray}
\frac{d}{dt}V(t,x_t) &=& \langle \dot P(t)x(t),x(t)\rangle
+ 2\langle P(t)\dot x(t),x(t)\rangle + \frac{1}{2}(\|x(t)\|^2 -\|x(t-h)\|^2)
\nonumber \\
&\leq & - \frac{1}{2}\|x(t)\|^2 + 2\langle P(t)f(t,x(t), x(t-h), u(t)),
x(t)\rangle \nonumber \\ & & +2\langle P(t)A_1(t)x(t-h),x(t)\rangle
-\frac{1}{2}\|x(t-h)\|^2.
\end{eqnarray}
Substituting the control
$u(t) = - \frac{1}{2}B^*(t)P(t)x(t)$ in (4.10) gives
\begin{eqnarray*}
\frac{d}{dt}V(t,x_t)&\leq & - \frac{1}{2}\|x(t)\|^2 + 2p\Bigl[a\|x(t)\|
+ b\|x(t-h)\| + \frac{c}{2}p\beta \|x(t)\| \Bigr] \|x(t)\| \\
& & +2pa_1\|x(t-h)\|\|x(t)\| - \frac{1}{2}\|x(t-h)\|^2 \\
&\leq & (-\frac{1}{2} + 2ap + c\beta p^2)\|x\|^2
+2p(b+a_1)\|x(t-h)\|\|x(t)\| \\
& &- \frac{1}{2}\|x(t-h)\|^2 \\
&\leq & - \Big[\frac{1}{2} -2ap - c\beta p^2 -2(b+a_1)^2p^2\Big]\|x(t)\|^2.
\end{eqnarray*}
Therefore, from condition (4.9) it follows that there is a number $c_3 > 0$ such that
$$
\frac{d}{dt}V(t,x_t)\leq - c_3 \|x(t)\|^2,\quad \forall t\geq 0.
$$
The proof is then completed by using Proposition \ref{prop3.5}.
\hfill$\diamondsuit$\smallskip
Reasoning as above, Theorem \ref{thm4.4} can be extended to nonlinear systems with
multiple delays. For the system
\begin{eqnarray*}
\dot x(t) &=& A(t)x(t) + \sum_{i=1}^rA_i(t)x(t-h_i)+B(t)u(t) \\
&&+ f(t,x(t), x(t-h_1), \dots, x(t-h_r), u(t)), \quad t\geq 0, \\
x(t) &=& \phi(t), \quad t\in [-h_r,0],
\end{eqnarray*}
where the nonlinear perturbation satisfies the condition
$$
\|f(t,x,y_1,\dots ,y_r, u)\| \leq a\|x\| + \sum_{i=1}^rb_i\|y_i\|
+ c\|u\|
$$
for all $(t,x,y,u) \in [0,\infty)\times X\times X^r\times U$,
the Lyapunov function will be replaced with
$$V(t, x_t) = \langle P(t)x(t), x(t)\rangle
+ \frac{1}{2}\sum_{i=1}^r\int_{t-h_i}^t\|x(s)\|^2 ds.$$
\paragraph{Acknowledgment}
The author was supported by the Australian Research Council and by
the Basic Program in Natural Sciences of Vietnam. The author would like to
thank the anonymous referee for his/her valuable comments and remarks
which greatly improved the final version of the paper.
\begin{thebibliography}{99} \frenchspacing
\bibitem{b2} A. Benabdallah and M.A. Hammami, On the output feedback
stability of nonlinear uncertain control systems. {\it Int. J of Control,}
{\bf 74}(2001), 547-551.
\bibitem{b1} A. Bensoussan, G.Da Prato, M.C. Delfour and S.K. Mitter,
{\it Representation and Control of Infinite-dimensional Systems,}
Vol. II, Birkhauser, 1992.
\bibitem {c1} E. N. Chukwu, {\it Stability and Time-Optimal Control of
Hereditary Systems.} Academic Press, New York, 1992
\bibitem{c2} R. F. Curtain and J.C. Oostveen, Necessary and sufficient
conditions for strong stability of distributed parameter systems.
{\it Systems \& Control Letters,} {\bf 37}(1999), 11-18.
\bibitem{c3} R. F. Curtain and A. J. Pritchard,
{\it Functional Analysis in Modern Applied Mathematics.}
Academic Press, New York, 1977.
\bibitem{d1} J. L. Daleckii and M.G. Krein,
{\it Stability of Solution of Differential Equations in Banach Spaces.}
AMS Publisher, Providence, 1974.
\bibitem{f1} A. Feliachi and A. Thowsen,
Memoryless stabilization of linear delay-differential systems.
{\it IEEE Trans. AC,} {\bf 26}(1981), 586-587.
\bibitem{g1} M. I. Gil, On generalized Wazewski and Lozinskii inequalities
for semilinear abstract differential-delay equations.
{\it J. of Inequalities and Appl.}, {\bf 2}(1998), 255-267.
\bibitem{g2} M. I. Gil, Stability of linear time-variant functional
differential equations in a Hilbert space.
{\it Funkcialaj Ekvacioj,} {\bf 43}(2000), 31-38.
\bibitem{g3} M. I. Gil and A. Ailon,
On exponential stabilization of nonlinear time-varying systems.
{\it Int. J. Control,} {\bf 72}(1999), 430-434.
\bibitem{h1} J. Hale, {\it Theory of Functional Differential Equations.}
Springer-Verlag, New York, 1977.
\bibitem {k1} W. H. Kwon and A.E. Pearson, A note on the feedback
stabilization of a differential delay systems.
{\it IEEE Trans. AC,} June, 1977, 468-470.
\bibitem{l1} V. Lakshmikantham, S. Leela and M. Martynyuk,
{\it Stability Analysis of Nonlinear Systems.} Marcel Dekker, New York, 1989.
\bibitem{l2} C. H. Lee, Simple stabilizability criteria and memoryless state
feedback control design for time-delay systems with time-varying perturbations.
{\it IEEE Trans. AC,} {\bf 45}(1998), 1211-1215.
\bibitem{l3} N. M. Linh and V. N. Phat,
Exponential stability of nonlinear time-varying differential equations and
applications. {\it Elect. J. of Diff. Equations,} 2001, N-34, 1-13.
\bibitem{p1} V. N. Phat, {\it Constrained Control Problems of Discrete
Processes}. World Scientific, Singapore, 1996.
\bibitem{p2} V. N. Phat and T.T. Kiet, On the Lyapunov equation in Banach
spaces and applications to control problems.
{\it Int. J. of Math. and Math. Sci.,} {\bf 26} (2001), N-6.
\bibitem{p3} G. Da Prato and A. Ichikawa, Quadratic control for linear
time-varying systems. {\it SIAM J. Contr. Optim.,} {\bf 28}(1990), 359-381.
\bibitem {n1} P. Niamsup and V. N. Phat, Asymptotic stability of nonlinear
control systems described by differential equations with multiple delays.
{\it Elect. J. of Diff. Equations,} 2000, N-11, 1-17.
\bibitem{s1} Y. J. Sun and J. G. Hsieh, Robust stabilization for a class
of uncertain nonlinear systems with time-varying delay:
Razumikhin-type approach. {\it J. Optim. Theory Appl.,} {\bf 98}(1998), 161-173.
\bibitem{t1} H. Trinh and M. Aldeen, On robustness and stabilization of linear
systems with delayed nonlinear perturbations. {\it IEEE Trans. AC,}
{\bf 42}(1997), 1005-1007.
\bibitem{z1} S. H. Zak, On the stabilization and observation of nonlinear
dynamic systems. {\it IEEE Trans. AC,} {\bf 35}(1990), 604-607.
\end{thebibliography}
\noindent\textsc{Vu Ngoc Phat}\\
School of Electrical Engineering \& Telecommunications\\
Faculty of Engineering, University of New South Wales \\
Sydney 2052, Australia\\
e-mail: phvu@syscon.ee.unsw.edu.au \\
On leave from \\
Institute of Mathematics,
P.O. Box 631, BoHo, Hanoi, Vietnam. \\
e-mail: vnphat@thevinh.ncst.ac.vn
\end{document}