\documentclass[reqno]{amsart}
\usepackage{hyperref}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2010(2010), No. 42, pp. 1--11.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu}
\thanks{\copyright 2010 Texas State University - San Marcos.}
\vspace{9mm}}

\begin{document}
\title[\hfilneg EJDE-2010/42\hfil Stochastic stability]
 {Stochastic stability of Cohen-Grossberg neural networks 
  with unbounded distributed delays}

\author[P. Chen, C. Huang, X. Liang\hfil EJDE-2010/42\hfilneg]
 {Ping Chen, Chuangxia Huang, Xiaolin Liang}  % in alphabetical order

\address{Ping Chen \newline
 College of Mathematics and Computing Science, 
 Changsha University of Science and Technology\\
 Changsha, Hunan 410114, China}
\email{chenping04@gmail.com}

\address{Chuangxia Huang \newline
 College of Mathematics and Computing Science,
 Changsha University of Science and Technology\\
 Changsha, Hunan 410114, China}
\email{huangchuangxia@sina.com.cn}

\address{Xiaolin Liang \newline
 College of Mathematics and Computing Science,
 Changsha University of Science and Technology\\
 Changsha, Hunan 410114, China}
\email{liang@csust.edu.cn}


\thanks{Submitted December 21, 2009. Published March 26, 2010.}
\thanks{Supported by grants 2008 from the Foundation of Chinese 
Society for Electrical Engineering,
     \hfill\break\indent  and 10971240 from the National 
     Natural Science Foundation of China}
\subjclass[2000]{34F05, 93E15}
\keywords{Cohen-Grossberg neural networks; stochastic; distributed delays;
  \hfill\break\indent almost sure exponential stability; Lyapunov functional}

\begin{abstract}
  In this article, we  consider a model that
  describes the dynamics of Cohen-Grossberg neural networks with
  unbounded distributed delays, whose state variables are governed
  by stochastic non-linear integro-differential equations.
  Without assuming the smoothness, monotonicity and boundedness
  of the activation functions, by constructing suitable Lyapunov
  functional, employing the semi-martingale convergence theorem
  and some inequalities, we obtain some sufficient criteria to check
  the almost sure exponential stability of the networks.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{remark}[theorem]{Remark}
\newtheorem{definition}[theorem]{Definition}
\allowdisplaybreaks

\section{Introduction}

Cohen-Grossberg neural networks (CGNN) were first introduced by
Cohen and Grossberg \cite{c9} in 1983, and soon this class of
networks became the subject of extensive investigation because
of their many important applications, such as pattern recognition,
associative memory and combinatorial optimization, etc.
Especially, CGNN with delays have attracted many scientific and
technical works due to their applications for solving a number of
problems in various scientific disciplines; such applications
heavily depend on the dynamic behavior of the networks \cite{z2}; thus,
the analysis of dynamical behaviors such as stability is a
necessary step for the practical design of neural networks. To date,
many important results on the stability have been reported in the
literature, see e.g. \cite{c1,c2,c3,c5,g1,h2,l3,l4,w2,w3,x1,y1} and
references therein. We refer to Cao and Liang \cite{c2} for the
mathematical model of CGNN that consists of $n$ $(n>1)$
interconnected neighboring cells whose dynamical behavior is
described by
\begin{equation}
\frac{dx_i(t)}{dt}=-h_i(x_i(t))[c_i(x_i(t))
-\sum_{j=1}^{n}a_{ij}f_j(x_j(t))
-\sum_{j=1}^{n}b_{ij}f_j(x_j(t-\tau_{ij}))],
                  \label{e1.1}
\end{equation}
where $i=1,2,\dots,n$; $x_i(t)$ denotes the state variable associated
with the $i$th neuron at time $t$; $h_i({\cdot})$ represents an
amplification function; $c_i({\cdot})$ is an appropriately behaved
function; $A=(a_{ij})_{n\times n}$ and $B=(b_{ij})_{n\times n}$
weight the strength of the $j$th unit on the $i$th unit at time $t$;
$f_j({\cdot})$ denotes a non-linear output function, $\tau _{ij}$
corresponds to the time delay required in processing and
transmitting a signal from the $j$th cell to the $i$th cell.

In formulating the network model \eqref{e1.1}, the delays are
assumed to be discrete, however, just as is pointed out in
\cite{c4}, constant fixed delays in the models of delayed feedback
systems serve as a good approximation in simple circuits
consisting of a small number of cells, but neural networks usually
have a spatial extent due to the presence of the presence of an
amount of parallel pathways with variety of axon sizes and
lengths. Therefore, there will be a distribution of conduction
velocities along these pathways and it is of significant
importance to consider continuously distributed delays to the
neural networks (see \cite{c6,c8,w1,z3}). Then model \eqref{e1.1}
can be modified as a system of integro-differential equations of
the form
\begin{equation}
\frac{dx_i(t)}{dt}=-h_i(x_i(t))[c_i(x_i(t))
-\sum_{j=1}^{n}a_{ij}f_j(x_j(t))
-\sum_{j=1}^{n}b_{ij}f_j(\int_{-\infty}^tK_{ij}(t-s)x_j(s)ds)]
  \label{e1.2}
\end{equation}
which, for convenience, can be put in the form
\begin{equation}
\frac{dx_i(t)}{dt}=-h_i(x_i(t))[c_i(x_i(t))
-\sum_{j=1}^{n}a_{ij}f_j(x_j(t))
-\sum_{j=1}^{n}b_{ij}f_j(\int_0^{+\infty}K_{ij}(s)x_j(t-s)ds)]
  \label{e1.3}
\end{equation}
with initial values given by $x_i(s)=\phi_i(s)$ for
$s\in({-\infty},0]$, where each $\phi_i({\cdot})$ is bounded and
continuous on $({-\infty},0]$.

Just as is pointed out by Haykin \cite{h1}, in real nervous systems
and in the implementation of artificial neural networks, noise is
unavoidable and should be taken into consideration in modelling.
Under the effect of the noise, the trajectory of system becomes a
stochastic process. Moreover, it was realized that CGNN could be
stabilized or destabilized by certain stochastic input \cite{b1,m1}.
Therefore it is of significant importance to consider stochastic
effects to the stability of delayed neural networks, and the
existing literature on theoretical studies of stochastic CGNN is
predominantly concerned with constant fixed delay, time-varying
delays and bounded distributed delays
\cite{c8,d1,h3,l1,l2,s1,w4,z1}. To the best of our knowledge, few
authors discuss almost sure exponential stability of stochastic
Cohen-Grossberg neural networks with unbounded distributed delays.

Motivated by the above discussions, in this paper, we investigate
almost sure exponential stability of stochastic Cohen-Grossberg
neural networks with unbounded distributed delays, described by the
following stochastic nonlinear integro-differential equations
\begin{equation}
\begin{aligned}
dx_i(t)&= -h_i(x_i(t))[c_i(x_i(t))
 -\sum_{j=1}^{n}a_{ij}f_j(x_j(t))\\
&\quad -\sum_{j=1}^{n}b_{ij}f_j(\int_0^{+\infty}K_{ij}(s)x_j(t-s)ds)]dt
   +\sum_{j=1}^{n}\sigma_{ij}(x_j(t))d\omega_j(t),
\end{aligned}     \label{e1.4}
\end{equation}
where $t\geq0$, $\sigma(\cdot)=(\sigma_{ij}(\cdot))_{n\times n}$ is the
diffusion coefficient matrix, and $\omega(t)=(\omega_1(t),\dots,
\omega_n(t))^T$ is an $n$-dimensional Brownian motion defined on a
complete probability space $(\Omega,\mathcal{F}, \mathcal{P})$ with a natural
filtration $\{\mathcal{F}_t\}_{t\geq 0}$.

\section{Preliminaries}

Let $\mathcal{C}=C(({-\infty},0],\mathbb{R}^n)$ be the Banach space of
continuous functions which map $({-\infty},0]$ into $\mathbb{R}^n$ with
the topology of uniform convergence. For $x(t)=(x_1(t),\dots,x_n(t))^T$ in
$\mathbb{R}^n$, we define
$\|x(t)\|=(\sum_{i=1}^{n}{|x_i(t)|}^2)^{1/2}$.
For $\varphi \in \mathcal{C}$, we define
$\| \varphi \|=\sup_{-\infty< s\leq 0}\{\|\varphi(s)\| \}$.

 System \eqref{e1.4} can be rewritten in the vector form
\begin{equation}
\begin{aligned}
dx(t)&=-H(x(t))[C(x(t))-AF(x(t))\\
&\quad -BF(\int_0^{+\infty}K(s)x(t-s)ds)]dt
+\sigma(x(t))d\omega(t)
\end{aligned}\label{e2.1}
\end{equation}
where $x(t)=(x_1(t),\dots,x_n(t))^T$,
$H(x(t))=\mathop{\rm diag}(h_1(x_1(t)),\dots,h_n(x_n(t)))$,
\begin{gather*}
A=(a_{ij})_{n\times n}, \quad
B=(b_{ij})_{n\times n},\quad
C(x(t))=(c _1(x_1(t)),\dots,c_n(x_n(t)))^T,
\\
F(x(t)) =(f _1(x_1(t)), \dots,
f_n(x_n(t)))^T,\quad K(s)=(K_{ij}(s))_{n\times n},\\
\sigma(x(t))=(\sigma_{ij}(x_j(t)))_{n\times n}.
\end{gather*}
The initial conditions for \eqref{e2.1} are
$x(s)=\varphi(s)$, $-\infty < s\leq 0$, $\varphi
\in L_{\mathcal{F}_0}^2((-\infty,0],\mathbb{R}^n)$,
where $L_{\mathcal{F}_0}^2((-\infty,0],\mathbb{R}^n)$
is the family of all $\mathbb{R}^n$-valued
stochastic processes $\varphi (s)$, $-\infty< s\leq 0$, such that
$\varphi(s)$ is $\mathcal{F}_0$-measurable and
$\int_{-\infty}^0E{|\varphi(s)|}^2ds<\infty$.

Let $C^{2,1}(\mathbb{R}^n\times \mathbb{R};\mathbb{R}_+)$ denote the family of all
nonnegative functions $V(x,t)$ on $\mathbb{R}^n\times \mathbb{R}$ which are twice
differentiable in $x$ and once differentiable in $t$.
If $V\in C^{2,1}(\mathbb{R}^n\times \mathbb{R};\mathbb{R}_+)$, define an operator
$LV$ associated with \eqref{e2.1} as
\begin{align*}
LV(x,t)&= V_t(x,t)+V_x(x,t)\{-H(x(t))[C(x(t))-AF(x(t))\\
&\quad -BF(\int_0^{+\infty}K(s)x(t-s)ds)]\}
+\frac{1}{2}\mathop{\rm trace}[\sigma^TV_{xx}(x,t)\sigma]
\end{align*}
where $V_t(x,t)=\frac{\partial V(x,t)}{\partial t}$,
\[
V_x(x,t) =\left(\frac{\partial V(x,t)}{\partial x_1},\dots,\frac{\partial
V(x,t)}{\partial x_n}\right),\quad
V_{xx}(x,t)=\left(\frac{{\partial}^2
V(x,t)}{\partial x_i\partial x_j}\right)_{n\times n}.
\]
To establish the main results of the model given in \eqref{e2.1}, some of
the following assumptions will apply:
\begin{itemize}
\item[(H1)]  Each function $h_i(x)$ is bounded, positive and locally
Lipschitz continuous; thus, there exist two positive constants
$\underline{h}_i$ and $\overline{h}_i$, such that
$0<\underline{h}_i\leq h_i(x)\leq\overline{h}_i<{+\infty}$ for all
$x\in R$ and $i=1,2,\dots,n$.

\item[(H2)]  For each $i=1,2,\dots,n$, there exists a constant $\alpha_i>0$,
such that $x_i(t)c_i(x_i(t))\geq\alpha_i x_i^2(t)$;

\item[(H3)]  Both $f_j(\cdot)$ and $\sigma_{ij}(\cdot)$ are globally
Lipschitz, and there exist positive constants
$\beta_j,L_{ij},i,j=1,2,\dots,n$, such that
$$
| f_j(u)-f_j(v)|\leq \beta_j| u-v|,\quad
|\sigma_{ij}(u)-\sigma_{ij}(v)|\leq L_{ij}| u-v|,
$$
for any $u, v\in \mathbb{R}$.
We also assume that $f_j(0)=\sigma_{ij}(0)=0$.

\item[(H4)]  The delay kernels $K_{ij}$, $i,j=1,2,\dots,n$, are
real-valued nonnegative piecewise continuous functions defined on
$[0,+\infty)$ and satisfy
$$
\int_0^{+\infty}K_{ij}(s)ds=1 \quad \text{and} \quad
\int_0^{+\infty}K_{ij}(s)e^{\mu s}ds<+\infty
$$
for some positive constant $\mu$.
\end{itemize}

We notice that the activation functions $f_j(\cdot)$ do not have
to be differentiable and monotonically increasing, which includes
some kinds of typical functions widely used in CGNN designs. This
implies that \eqref{e2.1} has a unique global solution on $t\geq 0$
for the initial conditions \cite{b1}. Clearly, \eqref{e2.1}
admits an equilibrium
solution $x(t)=0$.

\begin{definition}[\cite{m1}] \label{def2.1} \rm
 The trivial solution of \eqref{e2.1} is said to
be almost surely exponentially stable if for almost all sample paths
of the solution $x(t)$, we have
$$
\limsup_{t\to \infty}\frac{1}{t}\log \|
x(t)\|<0.
$$
\end{definition}

\begin{lemma}[Semi-martingale convergence theorem \cite{m1}]
\label{lem2.1}
 Let $A(t)$ and $U(t)$ be two continuous adapted increasing processes on
$t\geq 0$ with $A(0)=U(0)=0$ a.s., and let $M(t)$ be a real-valued
continuous local martingale with $M(0)=0$ a.s. Let $\xi$ be a nonnegative
$\mathcal{F}_0$-measurable random variable. Define
$$
X(t)=\xi +A(t)-U(t)+M(t),\quad\text{for } t\geq 0.
$$
If $X(t)$ is nonnegative, then
$$
\{\lim_{t\to\infty}A(t)<\infty\}\subset
\{\lim_{t\to\infty}X(t)<\infty\}\cap\{\lim_{t\to\infty}U(t)<\infty\}\quad
\text{a.s.},
$$
where $B\subset D$ a.s. means $P(B\cap D^c)=0$. In particular, if
$$
\lim_{t\to\infty}A(t)<\infty\quad \text{a.s.},
$$
then for almost all $\omega\in\Omega$
$$
\lim_{t\to\infty}X(t)<\infty\quad\text{and}\quad
\lim_{t\to\infty}U(t)<\infty,
$$
that is, both $X(t)$ and $U(t)$ converge to finite random
variables.
\end{lemma}

\section{Main Results}

\begin{theorem} \label{thm3.1}
 Under  assumptions {\rm (H1)--(H4)}, if
there exists a set of positive constants
$q_i,q_{ij},p_{ij},r_{ij},q_{ij}^*,p_{ij}^*,r_{ij}^*$ such that
for $i=1,2,\dots,n$,
\begin{equation}
\begin{aligned}
& 2\underline{h}_i\alpha_iq_i-\sum  _{j=1}
 ^n[q_i\overline{h}_i^{2p_{ij}}|
 a_{ij}|^{2q_{ij}}\beta_j^{2r_{ij}}+q_j\overline{h}_j^{2-2p_{ji}}|
 a_{ji}|^{2-2q_{ji}}\beta_i^{2-2r_{ji}}\\
& +q_i\overline{h}_i^{2p_{ij}^*}|b_{ij}|^{2q_{ij}^*}\beta_j^{2r_{ij}^*}
  +q_j\overline{h}_j^{2-2p_{ji}^*}|
  b_{ji}|^{2-2q_{ji}^*}\beta_i^{2-2r_{ji}^*}+q_jL_{ji}^2
]>0,
\end{aligned} \label{e3.1}
\end{equation}
then the trivial solution of system \eqref{e2.1} is almost surely
exponentially stable.
\end{theorem}

\begin{proof}
  From  assumption (H4), we can choose a
constant $0<\lambda <\mu$, such that
\begin{equation}
\int_0^{+\infty} K_{ji}(s)e^{\lambda s}ds\leq \int_0^{+\infty}
K_{ji}(s)e^{\mu s}ds  \label{e3.2}
\end{equation}
and, for $i=1,2,\dots,n$,
\begin{equation}
\begin{aligned}
&2\underline{h}_i\alpha_iq_i-\lambda q_i-\sum  _{j=1}
 ^n[q_i\overline{h}_i^{2p_{ij}}|
 a_{ij}|^{2q_{ij}}\beta_j^{2r_{ij}}+q_j\overline{h}_j^{2-2p_{ji}}|
 a_{ji}|^{2-2q_{ji}}\beta_i^{2-2r_{ji}}\\
&+q_i\overline{h}_i^{2p_{ij}^*}| b_{ij}|^{2q_{ij}^*} \beta_j^{2r_{ij}^*}
 +q_j\overline{h}_j^{2-2p_{ji}^*}|
 b_{ji}|^{2-2q_{ji}^*}\beta_i^{2-2r_{ji}^*}\int_0^{+\infty}
 K_{ji}(s)e^{\lambda s}ds+q_jL_{ji}^2 ]>0,
\end{aligned}\label{e3.3}
\end{equation}
Consider the  Lyapunov functional
\begin{equation}
\begin{aligned}
 V(x(t),t)&= e^{\lambda t}\sum
_{i=1}^nq_ix_i^2(t) +\sum  _{i=1}^n\sum
_{j=1}^nq_i\overline{h}_i^{2-2p_{ij}^*}|
b_{ij}|^{2-2q_{ij}^*}\beta_j^{2-2r_{ij}^*}\\
&\quad \times \int_0^{+\infty} \big[K_{ij}(s)e^{\lambda s}
\int_{t-s}^te^{\lambda \zeta}x_j^2(\zeta)d\zeta\big]ds.
\end{aligned}    \label{e3.4}
\end{equation}

Then the operator $LV(x,t)$ associated with system \eqref{e2.1} has the
form
\begin{align}
&LV(x(t),t)\nonumber \\
&= \lambda e^{\lambda t}\sum_{i=1}^nq_ix_i^2(t)\nonumber \\
&\quad +\sum
_{i=1}^n\sum_{j=1}^nq_i\overline{h}_i^{2-2p_{ij}^*}|
                b_{ij}|^{2-2q_{ij}^*}\beta_j^{2-2r_{ij}^*}\int_0^{+\infty}
                [K_{ij}(s)e^{\lambda s}(e^{\lambda
                t}x_j^2(t)\nonumber \\
 &\quad   -e^{\lambda(t-s)}x_j^2(t-s))]ds+2e^{\lambda
                t}\sum_{i=1}^nq_ix_i(t)\{-h_i(x_i(t))[c_i(x_i(t))\nonumber \\
 &\quad      -\sum_{j=1}^{n}a_{ij}f_j(x_j(t))
                -\sum_{j=1}^{n}b_{ij}f_j(\int_0^{+\infty}K_{ij}(s)x_j(t-s)ds)]\}\nonumber \\
 &\quad +e^{\lambda t}\sum_{i=1}^{n}\sum_{j=1}^{n}q_i\sigma_{ij}^2(x_j(t))\nonumber \\
 &\le \lambda e^{\lambda  t}\sum_{i=1}^nq_ix_i^2(t)\nonumber \\
 &\quad  +e^{\lambda t}\sum  _{i=1}^n\sum
                _{j=1}^nq_j\overline{h}_j^{2-2p_{ji}^*}|
                b_{ji}|^{2-2q_{ji}^*}\beta_i^{2-2r_{ji}^*}x_i^2(t)\int_0^{+\infty}
                K_{ji}(s)e^{\lambda s}ds\nonumber \\
 &\quad -e^{\lambda t}\sum  _{i=1}^n\sum
                _{j=1}^nq_i\overline{h}_i^{2-2p_{ij}^*}|
                b_{ij}|^{2-2q_{ij}^*}\beta_j^{2-2r_{ij}^*}\int_0^{+\infty}
                K_{ij}(s)x_j^2(t-s)ds\nonumber \\
 &\quad  -2e^{\lambda t}\sum_{i=1}^nq_i\underline{h}_i| x_i(t)|^2\alpha_i
             +2e^{\lambda t}\sum_{i=1}^nq_i[\sum
                _{j=1}^n\overline{h}_i| a_{ij}|\beta_j| x_j(t)||
                x_i(t)|\nonumber \\
 &\quad       +\sum _{j=1}^n\overline{h}_i|
                b_{ij}|\beta_j(\int_0^{+\infty}
                K_{ij}(s)x_j(t-s)x_i(t)ds)]\nonumber \\
 &\quad  +e^{\lambda t}\sum_{i=1}^{n}\sum_{j=1}^{n}q_j\sigma_{ji}^2(x_i(t))\nonumber \\
&\le  \lambda e^{\lambda  t}\sum_{i=1}^nq_ix_i^2(t)\nonumber \\
 &\quad   +e^{\lambda t}\sum  _{i=1}^n\sum
                _{j=1}^nq_j\overline{h}_j^{2-2p_{ji}^*}|
                b_{ji}|^{2-2q_{ji}^*}\beta_i^{2-2r_{ji}^*}x_i^2(t)\int_0^{+\infty}
                K_{ji}(s)e^{\lambda s}ds\nonumber \\
 &\quad  -e^{\lambda t}\sum  _{i=1}^n\sum
                _{j=1}^nq_i\overline{h}_i^{2-2p_{ij}^*}|
                b_{ij}|^{2-2q_{ij}^*}\beta_j^{2-2r_{ij}^*}\int_0^{+\infty}
                K_{ij}(s)x_j^2(t-s)ds\nonumber \\
&\quad -2e^{\lambda t}\sum_{i=1}^nq_i\underline{h}_i|
                x_i(t)|^2\alpha_i\nonumber \\
&\quad  +2e^{\lambda t}\sum_{i=1}^n\sum
                _{j=1}^nq_i\overline{h}_i^{p_{ij}}| a_{ij}|^{q_{ij}}\beta_j^{r_{ij}}| x_i(t)|\overline{h}_i^{1-p_{ij}}|
                a_{ij}|^{1-q_{ij}}\beta_j^{1-r_{ij}}|
                x_j(t)|\nonumber \\
&\quad  +2e^{\lambda t}\sum_{i=1}^n\sum
                _{j=1}^nq_i\overline{h}_i^{p_{ij}^*}| b_{ij}|^{q_{ij}^*}\beta_j^{r_{ij}^*}\overline{h}_i^{1-p_{ij}^*}|
                 b_{ij}|^{1-q_{ij}^*}\beta_j^{1-r_{ij}^*}\int_0^{+\infty}
                 K_{ij}(s)x_j(t-s)x_i(t)ds\nonumber \\
&\quad   +e^{\lambda t}\sum_{i=1}^{n}\sum_{j=1}^{n}q_jL_{ji}^2(x_i(t))^2
\nonumber \\
&\le \lambda e^{\lambda
                t}\sum_{i=1}^nq_ix_i^2(t)\nonumber \\
&\quad +e^{\lambda t}\sum  _{i=1}^n\sum
                _{j=1}^nq_j\overline{h}_j^{2-2p_{ji}^*}|
                b_{ji}|^{2-2q_{ji}^*}\beta_i^{2-2r_{ji}^*}x_i^2(t)\int_0^{+\infty}
                K_{ji}(s)e^{\lambda s}ds\nonumber \\
&\quad  -e^{\lambda t}\sum  _{i=1}^n\sum
                _{j=1}^nq_i\overline{h}_i^{2-2p_{ij}^*}|
                b_{ij}|^{2-2q_{ij}^*}\beta_j^{2-2r_{ij}^*}\int_0^{+\infty}
                K_{ij}(s)x_j^2(t-s)ds\nonumber \\
&\quad -2e^{\lambda t}\sum_{i=1}^nq_i\underline{h}_i|
                x_i(t)|^2\alpha_i +e^{\lambda t}\sum_{i=1}^n\sum
                _{j=1}^nq_i\overline{h}_i^{2p_{ij}}| a_{ij}|^{2q_{ij}}\beta_j^{2r_{ij}}|
                x_i(t)|^2\nonumber \\
&\quad +e^{\lambda t}
                \sum_{i=1}^n\sum
                _{j=1}^nq_j\overline{h}_j^{2-2p_{ji}}|
                a_{ji}|^{2-2q_{ji}}\beta_i^{2-2r_{ji}}|
                x_j(t)|^2\nonumber \\
&\quad  +e^{\lambda t}\sum_{i=1}^n\sum
                _{j=1}^nq_i\overline{h}_i^{2p_{ij}^*}| b_{ij}|^{2q_{ij}^*}\beta_j^{2r_{ij}^*}|
                x_i(t)|^2\nonumber \\
&\quad +e^{\lambda t}\sum_{i=1}^n\sum
                _{j=1}^nq_i\overline{h}_i^{2-2p_{ij}^*}|
                 b_{ij}|^{2-2q_{ij}^*}\beta_j^{2-2r_{ij}^*}\int_0^{+\infty}
                 K_{ij}(s)x_j^2(t-s)ds\nonumber \\
&\quad   +e^{\lambda
    t}\sum_{i=1}^{n}\sum_{j=1}^{n}q_jL_{ji}^2(x_i(t))^2\nonumber \\
&=   -e^{\lambda t}\sum_{i=1}^n|
     x_i(t)|^2(-\lambda q_i+2\underline{h}_i\alpha_iq_i-\sum  _{j=1}^nq_i\overline{h}_i^{2p_{ij}}|
     a_{ij}|^{2q_{ij}}\beta_j^{2r_{ij}}\nonumber \\
&\quad   -\sum     _{j=1}^nq_j\overline{h}_j^{2-2p_{ji}}|
    a_{ji}|^{2-2q_{ji}}\beta_i^{2-2r_{ji}}
  -\sum  _{j=1}^nq_i\overline{h}_i^{2p_{ij}^*}|
    b_{ij}|^{2q_{ij}^*}\beta_j^{2r_{ij}^*}\nonumber \\
&\quad  -\sum  _{j=1}^nq_j\overline{h}_j^{2-2p_{ji}^*}|
   b_{ji}|^{2-2q_{ji}^*}\beta_i^{2-2r_{ji}^*}\int_0^{+\infty}
   K_{ji}(s)e^{\lambda s}ds-\sum  _{j=1}^nq_jL_{ji}^2). \label{e3.5}
\end{align}
Using the It\^o formula, for $T>0$, from inequalities
\eqref{e3.2}, \eqref{e3.3}, \eqref{e3.4} and \eqref{e3.5}, we have
\begin{equation}
\begin{aligned}
&V(x(t),t)\\
&\le \sum  _{i=1}^nq_ix_i^2(0)+\sum
   _{i=1}^n\sum  _{j=1}^nq_i\overline{h}_i^{2-2p_{ij}^*}|
                b_{ij}|^{2-2q_{ij}^*}\beta_j^{2-2r_{ij}^*} \\
&\quad\times \int_0^{+\infty}
                [K_{ij}(s)e^{\lambda s}\int_{-s}^0e^{\lambda\zeta}x_j^2(\zeta)d\zeta]ds\\
&\quad  -\int_0^Te^{\lambda t}\sum_{i=1}^n|
     x_i(t)|^2(-\lambda q_i+2\underline{h}_i\alpha_iq_i-\sum  _{j=1}^nq_i\overline{h}_i^{2p_{ij}}|
     a_{ij}|^{2q_{ij}}\beta_j^{2r_{ij}}\\
&\quad   -\sum
     _{j=1}^nq_j\overline{h}_j^{2-2p_{ji}}|
    a_{ji}|^{2-2q_{ji}}\beta_i^{2-2r_{ji}}
  -\sum  _{j=1}^nq_i\overline{h}_i^{2p_{ij}^*}|
    b_{ij}|^{2q_{ij}^*}\beta_j^{2r_{ij}^*}\\
&\quad  -\sum  _{j=1}^nq_j\overline{h}_j^{2-2p_{ji}^*}|
   b_{ji}|^{2-2q_{ji}^*}\beta_i^{2-2r_{ji}^*}\int_0^{+\infty}
   K_{ji}(s)e^{\lambda s}ds-\sum  _{j=1}^nq_jL_{ji}^2)\\
&\quad +2\int_0^Te^{\lambda
t}\sum_{i=1}^n\sum_{j=1}^nq_i|
x_i(t)||\sigma_{ij}(x_j(t))| d\omega_j(t)\\
&\le \sum _{i=1}^nq_i[1+\sum
    _{j=1}^n\frac{q_j}{q_i}\overline{h}_j^{2-2p_{ji}^*}|
     b_{ji}|^{2-2q_{ji}^*}\beta_i^{2-2r_{ji}^*}\int_0^{+\infty}
   K_{ji}(s)e^{\lambda s}ds]\sup_{-\infty\leq
    s\leq 0}\{x_i^2(s)\}\\
&\quad +2\int_0^Te^{\lambda t}\sum_{i=1}^n\sum_{j=1}^nq_i|
x_i(t)||\sigma_{ij}(x_j(t))| d\omega_j(t).
\end{aligned}\label{e3.6}
\end{equation}
Then the right-hand side of \eqref{e3.6} is a nonnegative semi-martingale
and Lemma \ref{lem2.1} shows
$$
\lim_{T\to\infty}X(T)<\infty\quad \text{a.s.}
$$
where
\begin{equation}
\begin{aligned}
 X(T)&=  \sum  _{i=1}^nq_i[1+\sum
    _{j=1}^n\frac{q_j}{q_i}\overline{h}_j^{2-2p_{ji}^*}|
     b_{ji}|^{2-2q_{ji}^*}\beta_i^{2-2r_{ji}^*}\int_0^{+\infty}
     K_{ji}(s)e^{\lambda s}ds]\\
&\quad\times \sup_{-\infty\leq  s\leq 0}\{x_i^2(s)\}
 +2\int_0^T e^{\lambda t}\sum_{i=1}^n\sum_{j=1}^nq_i|
x_i(t)||\sigma_{ij}(x_j(t))| d\omega_j(t).
\end{aligned}\label{e3.7}
\end{equation}
It follows by Lemma \ref{lem2.1} that
\begin{equation}
\lim_{T\to\infty}e^{\lambda
T}\sum_{i=1}^nq_ix_i^2(T)<\infty\quad \text{a.s.}
\label{e3.8}
\end{equation}
which implies
\begin{equation}
\lim_{T\to\infty}e^{\lambda
T}\sum_{i=1}^nx_i^2(T)<\infty\quad \text{a.s.}; \label{e3.9}
\end{equation}
that is,
\begin{equation}
\limsup_{T\to \infty}\frac{1}{T}\log\|
x(T)\|\leq-\frac{\lambda}{2}.\label{e3.10}
\end{equation}
This completes the proof.
\end{proof}

\begin{corollary} \label{coro3.1}
  Under  assumptions {\rm (H1)--(H4)}, if
there exists a set of positive constants $q_i$ such that
\begin{equation}
2\underline{h}_i\alpha_iq_i-\sum_{j=1}^n[2q_i+q_j\overline{h}_j^2|
a_{ji}|^2\beta_i^2+q_j\overline {h}_j^2|
b_{ji}|^2\beta_i^2+q_jL_{ji}^2]>0,\quad i=1,2,\dots,n \label{e3.11}
\end{equation}
then the trivial solution of \eqref{e2.1} is almost surely
exponentially stable.
\end{corollary}

 \begin{corollary} \label{coro3.2}
Under  assumptions {\rm (H1)--(H4)}, if the following inequality
holds
\begin{equation}
2\underline{h}_i\alpha_i-\sum_{j=1}^n[(| a_{ij}|+|
b_{ij}|)\overline{h}_i\beta_j+(| a_{ji}|+|
b_{ji}|)\overline{h}_j\beta_i+L_{ji}^2]>0,\quad i=1,2,\dots,n
\label{e3.12}
\end{equation}
then the trivial solution of \eqref{e2.1} is almost surely
exponentially stable.
\end{corollary}

\begin{proof}
Choose
$q_i=1$, $q_{ij}=p_{ij}=r_{ij}=q_{ij}^*=p_{ij}^*=r_{ij}^*=\frac{1}{2}$
for $i,j=1,2,\dots,n$
in inequality \eqref{e3.1}.
By Theorem \ref{thm3.1}, the proof is complete.
\end{proof}


  To the best of our knowledge, few authors have
considered the almost sure exponential stability for stochastic
CGNN with unbounded distributed delays. We can find the recent
papers \cite{c1,c2,c6,h2,z3} in this direction; however, all
delays in them are discrete and the delay functions appearing in them
are bounded. Obviously, those requirements are relaxed in this
paper, and model \eqref{e2.1} can be viewed as a general case of
interval-delayed recurrent neural networks and delayed Hopfield
networks.


\begin{remark} \label{rmk3.2} \rm
 For system \eqref{e2.1}, when $h_i(x_i(t))=1$, it
reduces to the following stochastic cellular neural networks with
unbounded distributed delays
$$
dx(t)=[-C(x(t))+AF(x(t))+BF(\int_0^{+\infty}K(s)x(t-s)ds)]dt
+\sigma(x(t))d\omega(t),
$$
using Theorem \ref{thm3.1}, one can easily obtain a set of similar corollaries
for checking the almost sure exponential stability of the trivial
solution of this system.
\end{remark}

  From the results in this paper, it is easy to see
that our results are more general than those reported in
\cite{b1,z2}. Moreover, we conclude that the stability of system \eqref{e2.1} is
dependent on the magnitude of the noise, and therefore, noise
fluctuations should be taken into account adequately.

\section{Example}

In this section, we present an example to demonstrate the
correctness and effectiveness of the main  results.
Consider the  stochastic Cohen-Grossberg neural networks
with unbounded distributed delays
\begin{align*}
dx_1(t)&=-(1+\sin x_1(t))[10x_1-\sum_{j=1}^2a_{1j}f_j(x_j(t))\\
&\quad -\sum_{j=1}^2b_{1j}f_j(\int_0^{+\infty} K_{1j}(s)x_j(t-s)ds)]dt
 +\sum_{j=1}^2\sigma_{1j}(x_j(t))d\omega_j(t),
\end{align*}
\begin{equation}
\begin{aligned}
dx_2(t)&=-(1+\cos x_2(t))[4x_2-\sum_{j=1}^2a_{2j}f_j(x_j(t))\\
&\quad -\sum_{j=1}^2b_{2j}f_j(\int_0^{+\infty} K_{2j}(s)x_j(t-s)ds)]dt
+\sum_{j=1}^2\sigma_{2j}(x_j(t))d\omega_j(t).
\end{aligned}\label{e4.1}
\end{equation}
This system satisfies all assumptions in this paper with
$f_1(x)=\tanh x,f_2(x)=\frac{1}{2}(| x-1|-| x+1|)$,
by taking
$a_{11}=a_{12}=a_{21}=a_{22}=1$,
$b_{11}=b_{12}=b_{21}=b_{22}=1$, and
$q_1=q_2=1$, $\alpha_1=18$, $\alpha_2=10$, $\underline{h}_1=1$,
$\overline{h}_1=2$, $\underline{h}_2=3$,
$\overline{h}_2=8$, $\beta_1=\beta_2=1$, $L_{ij}=1$, $i,j=1,2$,
$$
K(s)=\begin{pmatrix}
2e^{-2s}& 4e^{-4s}\\
3e^{-3s}& 5e^{-5s}
\end{pmatrix}.
$$
By a simple computation, one can easily show that
\begin{equation}
2\underline{h}_1\alpha_1-\sum_{j=1}^2[(| a_{1j}|+|
b_{1j}|)\overline{h}_1\beta_j+(| a_{j1}|+|
b_{j1}|)\overline{h}_j\beta_1+L_{j1}^2]=6>0, \label{e4.2}
\end{equation}
and
\begin{equation}
2\underline{h}_2\alpha_2-\sum_{j=1}^2[(| a_{2j}|+|
b_{2j}|)\overline{h}_2\beta_j+(| a_{j2}|+|
b_{j2}|)\overline{h}_j\beta_2+L_{j2}^2]=6>0. \label{e4.3}
\end{equation}
From Corollary \ref{coro3.2} we know that  \eqref{e4.1} is almost surely
exponentially stable.

\subsection*{Conclusions}
 In this paper, we have investigated stochastic Cohen-Grossberg
 neural networks with unbounded distributed delays, whose state variables
 are governed by stochastic non-linear integro-differential equations,
 which is more general than in previously published papers. By
 constructing a suitable Lyapunov functional, employing the semi-martingale
 convergence theorem and some inequalities, we obtain some sufficient criteria
 ensuring the almost sure exponential stability of the networks, and the stability
 of this system is dependent on the magnitude of the noise. Furthermore, the derived
 conditions for stability of stochastic cellular neural networks with unbounded
 distributed delays can be viewed as byproducts of our results.

\subsection*{Acknowledgments}
The authors are grateful to Prof. Julio G. Dix
and to the anonymous reviewer for their constructive and
helpful comments, which have contributed to
improve the presentation of this article.

\begin{thebibliography}{00}

\bibitem{b1}
 S. Blythe, X. Mao and X. Liao, Stability of stochastic delay
neural networks, J. Franklin Inst., 338(4), 2001: 481-495.

\bibitem{c1}
J. Cao and X. Li, Stability in delayed Cohen-Grossberg neural
network, LMI optimization approach, Physica D, 177, 2006: 63-78.

\bibitem{c2}
J. Cao and J. Liang, Boundedness and stability for Cohen-Grossberg
neural networks with time-varying delays, J. Math. Anal. Appl.,
296(2), 2004: 665-685.

\bibitem{c3}
J. Cao, G. Feng and Y. Wang, Multistability and multiperiodicity of
delayed Cohen-Grossberg neural networks with a general class of
activation functions, Physica D: Nonlinear Phenomena, 237(13),
2008: 1734-1749.

\bibitem{c4}
J. Cao, K. Yuan, and H. Li, Global asymptotical stability of
generalized recurrent neural networks with multiple discrete delays
and distributed delays, IEEE Trans. Neural networks, 17(6), 2006:
1646-1651.

\bibitem{c5}
C. Chen and L. Rong, Delay-independent stability of Cohen-Grossberg
neural network, Phys. Lett. A, 317(5-6), 2003: 336-449.

\bibitem{c6}
A. Chen and J. Cao, Existence and attractivity  of almost periodic
solutions for cellular neural networks with distributed delays and
variable coefficients, Appl. Math. Comput. 134, 2003: 125-140.

\bibitem{c7}
Y. Chen, Global stability of neural networks with distributed
delays, Neural Networks, 15(7), 2002: 867-871.

\bibitem{c8}
 S. Chen,  W. Zhao and Y. Xiao, New criteria for globally exponential
stability of Cohen-Grossberg neural network. Mathematics and
Computers in Simulation, 79, 2009: 1527-1543.

\bibitem{c9}
M. Cohen and S. Grossberg, Absolute stability and global pattern
formation and parallel memory storage by competitive neural
networks, IEEE Trans. Syst. Man Cybernet. SMC, 13, 1983: 15-26.

\bibitem{d1}
M. Dong,  H. Zhang and Y. Wang, Dynamics analysis of impulsive
stochastic Cohen-Grossberg neural networks with Markovian jumping
and mixed time delays, Neurocomputing, 72, 2009: 1999-2004.

\bibitem{g1}
S. Guo and L. Huang, Stability of Cohen-Grossberg neural networks,
IEEE Trans. Neural Networks, 17(1),2006: 106-117.

\bibitem{h1}
S. Haykin, Neural networks. Prentice-Hall, NJ, 1994.

\bibitem{h2}
C. Huang and L. Huang, Dynamics of a class of Cohen-Grossberg neural
networks with time-varying delays, Nonlinear Anal. RWA, 8(1), 2007:
40-52.
\bibitem{h3}
C. Huang  and L. Huang, Dynamics of a class of Cohen-Grossberg
neural networks with time-varying delays, Nonlinear Anal RWA, 8,
2007: 40-52.

\bibitem{l1}
T. Li, A. Song and S. Fei, Robust stability of stochastic
Cohen-Grossberg neural networks with mixed time-varying delays,
Neurocomputing, 731(3), 2009: 542-551.

\bibitem{l2}
X.  Li and J. Cao, Exponential stability of stochastic
Cohen-Grossberg neural networks with time-varying delays, LNCS.,
2005, 3496: 162-167.

\bibitem{l3}
X. Liao, C. Li  and K. Wong, Criteria for exponential stability of
Cohen-Grossberg neural networks, Neural Network, 17(10),2004:
1401-1414.

\bibitem{l4}
W. Lu and  T. Chen, $R_+^n$-global stability of a Cohen-Grossberg
neural network system with nonnegative equilibria, Neural Network,
20(6), 2007: 714-722.

\bibitem{m1}
X. Mao, Stochastic Differential Equations and Applications, Horwood
Publishing, Chichester, 1997.

\bibitem{s1}
Q. Song and Z. Wang, Stability analysis of impulsive stochastic
Cohen-Grossberg neural networks with mixed time delays, Physica A:
Statistical Mechanics and its Applications, 387(13), 2008:
3314-3326.

\bibitem{w1}
L.  Wan and J. Sun, Global exponential stability and periodic
solutions Cohen-Grossberg neural networks with continuously
distributed delays, Physica D, 208, 2005: 1-20.

\bibitem{w2}
 L. Wang and X. Zou, Harmless delays in Cohen-Grossberg neural
networks, Physics D, 170(2), 2002: 162-173.

\bibitem{w3}
L. Wang and X. Zou, Exponential stability of Cohen-Grossberg
neural networks, Neural Network, 15(3), 2002: 415-422.

\bibitem{w4}
X. Wang, Q. Guo and D. Xu, Exponential p-stability of impulsive
stochastic Cohen-Grossberg neural networks with mixed delays,
Mathematics and Computers in Simulation, 79(5), 2009: 1698-1710.

\bibitem{x1}
W. Xiong and  J. Cao, Global exponential stability of discrete-time
Cohen-Grossberg neural networks, Neurocomputing, 64, 2005: 433-446.

\bibitem{y1}
Z. Yang and  D. Xu, Impulsive effects on stability of
Cohen-Grossberg neural networks with variable delays, Appl. Math.
Comput., 177, 2006: 63-78.

\bibitem{z1}
 O. Zeynep and A. Sabri, New results for global stability of
Cohen-Grossberg neural networks with multiple time delays,
Neurocomputing, 71, 2008: 3053-3063.

\bibitem{z2}
H. Zhao and  N. Ding, Dynamic analysis of stochastic Cohen-Grossberg
neural networks with time delays,
 Appl. Math. Comput., 183, 2006: 464-470.

\bibitem{z3}
Q. Zhang, X. Wei, and J. Xu, Global exponential stability of
Hopfield neural networks with continuously distributed delays, Phys.
lett. A, 315, 2003: 431-436.

\end{thebibliography}

\end{document}
