\documentclass[reqno]{amsart}
\usepackage{hyperref}

\AtBeginDocument{{\noindent\small
{\em Electronic Journal of Differential Equations},
Vol. 2007(2007), No. 44, pp. 1--6.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu  (login: ftp)}
\thanks{\copyright 2007 Texas State University - San Marcos.}
\vspace{9mm}}

\begin{document}
\title[\hfilneg EJDE-2007/44\hfil Almost sure exponential stability]
{Almost sure exponential stability of delayed cellular neural
networks}

\author[C. Huang,  Y. He, L. Huang\hfil EJDE-2007/44\hfilneg]
{Chuangxia Huang, Yigang He, Lihong Huang}

\address{Chuangxia Huang \newline
College of Mathematics and Computing Science, Changsha University of
Science and Technology, Changsha, Hunan 410076, China; and College
of Electrical and Information Engineering, Hunan University,
Changsha, Hunan 410082, China} 
\email{huangchuangxia@sina.com.cn}

\address{Yigang He \newline
College of Electrical and Information Engineering, Hunan University,
Changsha, Hunan 410082, China} 
\email{yghe@hnu.cn}

\address{Lihong Huang \newline
College of Mathematics and Econometrics, Hunan University, Changsha,
Hunan 410082, China} 
\email{lhhuang@hnu.cn}

\thanks{Submitted February 27, 2007. Published March 15, 2007.}
\subjclass[2000]{34K50, 60H10}
\keywords{Stochastic; cellular neural networks;
  almost sure exponential stability; \hfill\break\indent
  Lyapunov functional}

\begin{abstract}
 The stability of stochastic delayed Cellular Neural Networks (DCNN)
 is investigated in this paper. Using suitable Lyapunov functional
 and the semimartingale convergence theorem, we obtain some sufficient
 conditions for checking the almost sure exponential stability of the DCNN.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{remark}[theorem]{Remark}

\section{Introduction}

Since the seminal work for Cellular Neural Networks
in \cite{c88a,c88b}, the past nearly two decades have witnessed the
successful applications of Cellular Neural Networks in many areas
such as combinatorial optimization, signal processing and pattern
recognition, see e.g. \cite{c06,l93,r92,v98}. Recently, it has been
realized that the axonal signal transmission delays often occur in
various neural networks, and may cause undesirable dynamic network
behaviors such as oscillation and instability. Consequently, the
stability analysis problems for delayed Cellular Neural
Networks (DCNN) have gained considerable research attention. Up to
now, a great deal of results have been reported in the literature,
see e.g. \cite{c03,h05,z05} and references therein, where the DCNN
has been largely restricted to deterministic differential equations.
These models do not take into account the inherent randomness that
is associated with signal transmission.

Just as pointed out by Haykin \cite{h94}, in real nervous systems
and in the implementation of artificial neural networks, noise is
unavoidable and should be taken into consideration in modelling.
In this paper, we propose a system of stochastic differential
equations for modelling DCNN as follows
\begin{equation}\label{e1.1}
\begin{aligned}
 {\rm d} x_i(t)
&=-c_{i}(t)x_i(t){\rm d}t
 +\sum _{j=1} ^na_{ij}(t)f_{ij}(x_j(t)){\rm d}t\\
&\quad +\sum _{j=1}^nb_{ij}(t)f_{ij}(x_j(t-\tau_{ij})){\rm d}t
 + \sum _{j=1} ^n\sigma_{ij}(x_j(t)){\rm d}w_j(t), \quad t\geq0,
\end{aligned}
\end{equation}
where $i = 1,\dots,n$; $n$ corresponds to the number of units in a
neural network; $x_i(t)$ denotes the potential (or voltage) of the
cell $i$ at time $t$; $f_{ij}(\cdot)$ denotes a non-linear output
function between cell $i$ and $j$; $\inf \{c_i(t)\}>0$ denotes the
rate with which cell $i$ resets its potential to the resting state
when isolated from other cells and inputs at time $t$; $a_{ij}(t)$
and $b_{ij}(t)$ denotes the strengths of connectivity between cell
$i$ and $j$ at time $t$ respectively; $\tau_{ij}$ is time delay
and satisfies $0\leq \tau_{ij}\leq \tau$.
$\sigma(t)=(\sigma_{ij}(t))_{n\times n}$ is the diffusion
coefficient matrix and $\omega(t)=(\omega_1(t), \dots
,\omega_n(t))^T$ is an $n$-dimensional Brownian motion defined on
a complete probability space $(\mathbf{\Omega}, \mathcal{F},
\mathbf{P})$ with a natural filtration ${\{ {\mathcal{F}}_{t}
\}}_{t\geq 0}$ (i.e., $\mathcal{F}_t=\sigma \{w(s): 0\leq s\leq t
\}$).

There are various kinds of convergence concepts to describe limiting
behaviors of stochastic differential equations. The almost sure
convergence is the most useful because it is closer to the real
situation during computation than other forms of convergence (see
\cite{m97}). Therefore it is very important to study almost sure
convergence for stochastic DCNN.

To the best of our knowledge, few authors discuss almost sure
exponential stability for stochastic DCNN. Motivated by the above
discussion, with the help of a suitable Lyapunov functional and
the semimartingale convergence theorem, we obtain some sufficient
criteria ensuring the almost sure exponential stability for the
model.


\section {Preliminaries}

Let $C:= C([-\tau, 0],\mathbb{R}^n)$ be the Banach space of
continuous functions which maps $[-\tau, 0]$ into $\mathbb{R}^n$ with the
topology of uniform convergence. For
$(x_1(t),\dots,x_n(t))^T\in \mathbb{R}^n$, we define $
\|x(t)\|=\sum_{i=1}^n|x_i(t)|$. For any $\varphi(t)\in C$, we
define $ \|\varphi\|=\sum_{i=1}^n|\varphi_{i}|$, where
$|\varphi_{i}|=\sup_{-\tau\leq s\leq 0}|\varphi_i(s)|$.

The initial conditions for system \eqref{e1.1} are
$x(t)=\varphi(t)$, $-\tau\leq t\leq 0$,
$\varphi\in L^2_{{\mathcal{F}}_0}([-\tau,0],\mathbb{R}^n)$, here
$L^2_{{\mathcal{F}}_0}([-\tau,0],\mathbb{R}^n)$ is regarded as a $\mathbb{R}^n-$
valued stochastic process $\varphi(t), -\tau\leq t\leq 0$,
moreover, $\varphi(t)$ is ${\mathcal{F}}_0$ measurable,
$\int^0_{-\tau}E|\varphi(t)|^2dt<\infty$. Throughout this paper, we
always assume that $f_{ij}(0)=\sigma_{ij}(0)=0$ and $f_{ij},
\sigma_{ij}$ are globally Lipschitz, and $c_i(\cdot),
a_{ij}(\cdot), b_{ij}(\cdot)$ are bounded functions. We also
assume there exist positive constants $p_{ij},i,j=1,\dots,n$, such
that $|f_{ij}(u)-f_{ij}(v)|\leq p_{ij}|u-v|,\quad \forall u, v\in
R$. This implies that \eqref{e1.1} has a unique global solution on
$t\geq 0$ for the initial conditions \cite{m97}. Clearly,
\eqref{e1.1} admits an equilibrium solution $x(t)\equiv 0$.

\begin{definition}[\cite{m97}] \label{def2.1} \rm
 The trivial solution of
 \eqref{e1.1} is said to be almost surely exponentially stable
 if for almost all sample paths of the solution $x(t)$, we have
\begin{equation*}
 \limsup_{t\to \infty}\frac{1}{t}\log\|x(t)\|< 0.
\end{equation*}
\end{definition}

\begin{lemma}[Semimartingale convergence theorem \cite{m97}] \label{lem2.1}
Let $A(t)$ and $U(t)$ be two continuous adapted increasing
processes on $t\geq 0$ with $ A(0)=U(0)=0$ a.s. Let $M(t)$ be a
real-valued continuous local martingale with $M(0)=0$ a.s. Let
$\xi$ be a nonnegative ${\mathcal{F}}_0$-measurable random
variable. Define
\begin{equation*}
 X(t)=\xi+A(t)-U(t)+M(t), \quad \text{for } t\geq 0.
\end{equation*}
If $ X(t)$ is nonnegative, then
\begin{equation*}
 \{\lim _{t\to\infty}A(t)<\infty\}\subset
 \{\lim _{t\to\infty}X(t)<\infty\}\cap
 \{\lim _{t\to\infty}U(t)<\infty\},\quad{\rm a.s.,}
\end{equation*}
where $B\subset D$ a.s. means $P(B\cap D^c)=0$. In particular, if
 ${\lim_{t\to\infty}A(t)<\infty}$ a.s., then for almost all
$\omega\in\Omega$
\begin{equation*}
 \lim _{t\to\infty}X(t)<\infty
 \quad\text{and} \quad \lim _{t\to\infty}U(t)<\infty,
\end{equation*}
that is both $X(t)$ and $U(t)$ converge to finite random
variables.
\end{lemma}

\begin{lemma}[\cite{l76}] \label{lem2.2}
If $\rho(K)< 1$ for matrix $K=(k_{ij})_{n\times n}\geq 0$, then
$(E-K)^{-1}\geq 0$, where $E$ denotes the identity matrix of size
$n$.
\end{lemma}

\section{ Main results}

\begin{theorem} \label{thm3.1}
Let $k_{ij}=c_i^{-1}(a_{ji}+b_{ji})p_{ji}$,
 $K=(k_{ij})_{n\times n}$, where,
$ c_{i}=\inf \{c_i(t)\}$, $a_{ji}=\sup\{|a_{ji}(t)|\}$,
$b_{ji}=\sup\{|b_{ji}(t)|\}$. If $\rho(K)< 1$, then the
equilibrium point $O$ of system \eqref{e1.1} is almost surely
exponentially stable.
\end{theorem}

\begin{proof}
From $\rho(K)< 1$, it follows that $(E-K)$ is an
M-matrix \cite{b79}, where $E$ denotes an identity matrix of size
$n$. Therefore, using Lemma \ref{lem2.2}, there exists a diagonal matrix
$M=\operatorname{diag}(m_1,\dots,m_n)$ with positive diagonal elements such
that the product $(E-K)M$ is strictly diagonally dominant with
positive diagonal entries. Namely,
\begin{equation}\label{e3.1}
 m_i>\sum_{j=1} ^nm_jc_i^{-1}(a_{ji}+b_{ji})p_{ji}, \quad
i=1,2,\dots, n.
\end{equation}
 Then, there exists a constant $\mu>0$ such that
\begin{equation}\label{e3.2}
 -m_ic_i+\sum_{j=1} ^nm_j(a_{ji}+b_{ji})p_{ji}<-\mu,\quad
i=1,2,\dots, n.
\end{equation}
 Thus, we can choose a constant $0<\lambda\ll1$ such that
\begin{equation}\label{e3.3}
 m_i(\lambda-c_{i})+\sum_{j=1}^nm_j(a_{ji}+b_{ji})p_{ji}e^{\lambda\tau}<0,
\quad i=1,2,\dots, n.
\end{equation}
 We define a positive definite Lyapunov function
$V(x(t),t)=e^{\lambda t}\sum^n_{i=1}m_i|x_i(t)|$.
 By It\^{o} formula, we can calculate the upper right
 differential $D^+V$ of $V$ along \eqref{e1.1} as follows
\begin{equation}\label{e3.4}
\begin{aligned}
 D^+V(x(t),t)
& = \lambda e^{\lambda t}\sum^n_{i=1}m_i|x_i(t)|{\rm d}t
 +e^{\lambda t}\sum^n_{i=1}m_i{\rm sign}(x_i(t)){\rm d}x_i(t)\\
&\leq e^{\lambda t}\{ \sum^n_{i=1}m_i[(\lambda-c_{i})|x_i(t)|
 +\sum _{j=1} ^na_{ij} p_{ij}|x_j(t)|{\rm d}t\\
&\quad +\sum _{j=1} ^nb_{ij} p_{ij}|x_j(t-\tau_{ij})|]{\rm d}t\}
 +e^{\lambda t}\sum^n_{i=1}m_i \sum _{j=1} ^n|\sigma_{ij}(x_j(t))|
 {\rm d}w_j(t),
\end{aligned}
\end{equation}
 On the other hand, for $T>0$, it is easy to see that
\begin{equation}\label{e3.5}
\begin{aligned}
 \int_0^T e^{\lambda t}|x_j(t-\tau_{ij})|{\rm d}t
 &= \int_{-\tau_{ij}}^{T-\tau_{ij}} e^{\lambda (t+\tau_{ij})}|x_j(t)|{\rm d}t\\
&\leq e^{\lambda\tau}\int_{-\tau}^{0}e^{\lambda t}|x_j(t)|{\rm d}t
 +e^{\lambda\tau}\int_{0}^{T}e^{\lambda t}|x_j(t)|{\rm d}t.
\end{aligned}
\end{equation}
 Calculating the integral of inequality \eqref{e3.4} from 0 to $T$
 and noticing inequality \eqref{e3.3} and inequality \eqref{e3.5}, we have
%\begin{equation}\label{e3.6}
\begin{align*}
 V(x(T),T)
&\leq V(x(0),0)+\int_0^T e^{\lambda t}\sum^n_{i=1}
 \sum_{j=1}^nm_i|\sigma_{ij}(x_j(t))|{\rm d}w_j(t)\\
&\quad+\int_0^T e^{\lambda t}\{
 \sum^n_{i=1}m_i[(\lambda-c_{i})|x_i(t)|
 +\sum _{j=1} ^na_{ij} p_{ij}|x_j(t)|{\rm d}t\\
&\quad +\sum _{j=1} ^nb_{ij} p_{ij}|x_j(t-\tau_{ij})|]{\rm d}t\}{\rm d}t\\
&\leq \sum_{i=1}^nm_i|\varphi_{i}(0)|+\int_0^T e^{\lambda
t}\sum^n_{i=1}
\sum_{j=1}^nm_i|\sigma_{ij}(x_j(t))|{\rm d}w_j(t)\\
&\quad +\int_0^T e^{\lambda t}\{\sum_{i=1}^n m_i[(\lambda-c_{i})|x_i(t)|
 +\sum _{j=1} ^ne^{\lambda \tau}(a_{ij}+b_{ij})p_{ij}|x_j(t)|]{\rm d}t\\
&\quad +\int_{-\tau}^0\sum_{i=1}^n\sum _{j=1} ^ne^{\lambda \tau}m_ib_{ij}p_{ij}|x_j(t)|\}{\rm d}t\\
&\leq \sum_{i=1}^nm_i|\varphi_{i}(0)|+\int_0^T e^{\lambda t}\sum^n_{i=1}
 \sum_{j=1}^nm_i|\sigma_{ij}(x_j(t))|{\rm d}w_j(t)\\
&\quad +\int_0^T e^{\lambda t}\{\sum_{i=1}^n [m_i(\lambda-c_{i})
 +\sum _{j=1} ^ne^{\lambda \tau}m_j(a_{ji}+b_{ji})p_{ji}]|x_i(t)|\}{\rm d}t\\
&\quad +\int_{-\tau}^0\sum_{i=1}^n\sum _{j=1}^ne^{\lambda \tau}m_ib_{ij}p_{ij}|x_j(t)|{\rm d}t\\
&\leq \sum_{i=1}^nm_i|\varphi_{i}(0)|+\int_0^T e^{\lambda t}\sum^n_{i=1}
 \sum_{j=1}^nm_i|\sigma_{ij}(x_j(t))|{\rm d}w_j(t)\\
&\quad +\int_{-\tau}^0\sum_{i=1}^n\sum _{j=1} ^n
 e^{\lambda \tau}m_ib_{ij}p_{ij}|x_j(t)|{\rm d}t.
\end{align*}
%\end{equation}
The right-hand side of the above expression is a nonnegative semimartingale, and
Lemma \ref{lem2.1} shows
$$
\lim _{T\to\infty}X(T)<\infty \quad  \text{a.s.},
$$
where
\begin{align*}
 X(T)&=\sum_{i=1}^nm_i|\varphi_{i}(0)|
 +\int_0^T e^{\lambda t}\sum^n_{i=1}\sum_{j=1}^nm_i|\sigma_{ij}(x_j(t))
  |{\rm d}w_j(t)\\
 & \quad+\int_{-\tau}^0\sum_{i=1}^n\sum _{j=1}^ne^{\lambda\tau}m_ib_{ij}
  p_{ij}|x_j(t)|{\rm d}t.
\end{align*}
It follows that $\lim_{t\to \infty}(e^{\lambda
t}\sum^n_{i=1}m_i|x_i(t)|)<\infty$, which implies
$$
\lim_{t\to \infty}(e^{\lambda t}\sum^n_{i=1}|x_i(t)|)<\infty
 \hspace{1.0cm} \text{a.s.}
$$
That is, $\limsup_{t\to \infty}\frac{1}{t}\log\|x(t)\|\leq-\lambda<0$. This
completes the proof.
\end{proof}

\begin{remark} \label{rmk3.1} \rm
Note that for a given matrix $M$, its spectral
radius $\rho(M)$ is equal to the infimum of all matrix norms of
$M$; i.e., for any norm $\| \cdot \|$, $\rho(M)\leq \| M \|$.
Therefore, we have the following corollary.
\end{remark}

\begin{corollary} \label{coro3.1}
Suppose that there exist positive real
 numbers $m_i$ ($i=1,2,\dots,n$) such that one of the following inequalities
 is satisfied:
\begin{enumerate}
\item  $m_ic_i>\sum_{j=1} ^nm_j(a_{ji}+b_{ji})p_{ji}$, $i=1,2,\dots, n$.

\item $m_ic_i>\sum_{j=1} ^nm_j(a_{ij}+b_{ij})p_{ij}$, $i=1,2,\dots, n$.

\item $\sum_{j=1} ^nm_j(a_{ij}+b_{ij})p_{ij}/ (c_im_i)<1$, $i=1,2,\dots, n$.

\item $\sum_{i=1}^n\sum_{j=1} ^n((a_{ij}+b_{ij})p_{ij}m_j/(c_im_i))^2<1$,

\end{enumerate}
 then the equilibrium point $O$ of system \eqref{e1.1} is almost surely
exponentially stable.
\end{corollary}

\begin{remark} \label{rmk3.2} \rm
By Theorem \ref{thm3.1} and Corollary \ref{coro3.1}, we conclude that if the
 delayed neural network satisfies the conditions, then the stability of
 system \eqref{e1.1} is independent of the magnitude of the noise, and
 therefore, the noise fluctuation is harmless.
\end{remark}

\subsection*{Acknowledgments}
This research was supported by grant NCET-04-0767 from Program for
New Century Excellent  Talents in University of China, by grant
50677014  from Nature Science Foundation Council of China, by grant
20020532016 from Doctoral Special Found of Ministry of Education and
Hunan Postdoctoral  Scientific Program.

\begin{thebibliography}{00}

\bibitem {b79} A. Berman, R. Plemmons;
 \emph{Nonnegative Matrices in  the Mathematical Science},
Academic Press, New York, 1979.

\bibitem {c03} J. Cao; \emph{New results concerning exponential stability
and periodic  solutions of delayed cellular neural networks}, Phys.
 Lett. A, 307 (2003), 136-147.

\bibitem {c06} H. Chen, et al.; \emph{Image-processing algorithms realized by
 discrete-time cellular neural networks and their circuit
 implementations}, Chaos, Solitons, Fractals, 29(2006), 1100-1108.

\bibitem {c88a} L. Chua, L. Yang; \emph{Cellular neural networks:
 Theory}, IEEE Trans. Circuits Syst., 35(1988),
 1257-1272.

\bibitem {c88b} L. Chua, L. Yang; \emph{Cellular neural networks:
 Applications}, IEEE Trans. Circuits Syst., 35(1988),
 1273-1290.

\bibitem{h94} S. Haykin; \emph{Neural Networks}, Prentice-Hall, NJ, 1994.

\bibitem{h05} L. Huang, C. Huang, B. Liu;
\emph{Dynamics of a class of cellular neural networks with time-varying delays},
 Phys. Lett. A,  345(2005), 330-344.

\bibitem {l76} J. LaSalle;
\emph{The Stability of Dynamical System}, SIAM, Philadelphia, 1976.

\bibitem {l93} D. Liu, A. Michel; \emph{Cellular neural networks for
 associative memories}, IEEE Trans. Circuits. Syst., 40
 (1993), 119-121.

\bibitem{m97} X. Mao; \emph{Stochastic Differential Equation and Application},
 Horwood Publishing, Chichester, 1997.

\bibitem {r92} T. Roska, L. Chua; \emph{Cellular neural networks with
 nonlinear and delay-type template}, Int. J. Circuit
 Theor. Appl., 20(1992), 469-481.

\bibitem {v98} P. Venetianer, T. Roska; \emph{Image compression by
 delayed CNNs}, IEEE Trans. Circuits. Syst. I, 45 (1998), 205-215.

\bibitem {z05} H. Zhao, J. Cao;
\emph{New conditions for global exponential stability of cellular neural
 networks with delays}, Neural Networks, 18(2005), 1332-1340.


\end{thebibliography}
\end{document}
