\documentclass[reqno]{amsart}
\usepackage{hyperref}

\AtBeginDocument{{\noindent\small
{\em Electronic Journal of Differential Equations},
Vol. 2006(2006), No. 119, pp. 1--7.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu  (login: ftp)}
\thanks{\copyright 2006 Texas State University - San Marcos.}
\vspace{9mm}}

\begin{document}
\title[\hfilneg EJDE-2006/119\hfil Dissipativity of neural networks]
{Dissipativity of neural networks with continuously distributed
delays}

\author[B. Li, D. Xu\hfil EJDE-2006/119\hfilneg]
{Bing Li, Daoyi Xu}  % in alphabetical order

\address{Bing Li \newline
 Department of Applied Mathematics,
College of Science, Chongqing Jiaotong  University, \hfill\break
Chongqing 400074, China} \email{lb150@sina.com.cn}

\address{Daoyi Xu \newline
Yangtze Center of Mathematics, Sichuan University, \hfill\break
Chengdu 610064, China} \email{daoyixucn@yahoo.com.cn}

\date{}
\thanks{Submitted April 22, 2006. Published September 28, 2006.}\par
\thanks{The work is supported by National Natural Science Foundation
of China under Grant 10671133}
\subjclass[2000]{34D40, 34D45, 34K12}
\keywords{Differential inequality; dissipativity;
  stability; \hfill\break\indent global attracting set; spectral radius}

\begin{abstract}
 In this paper, we study the dissipativity and existence of a
 global attracting set for neural networks models with
 continuously distributed delays. We use nonnegative matrix
 and differential inequality techniques to obtain results
 under general conditions. Also, we give an example to
 illustrate our results.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{remark}[theorem]{Remark}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{corollary}[theorem]{Corollary}
\allowdisplaybreaks

\section{Introduction}

 In recent years, neural network models have received much
attention in the literature and have been applied in many fields
such as control, image processing and optimization because of their
good control properties. Many results about the dynamical behavior
of neural network systems without delays have been produced.
As is well known, from a practical point of view, both in
biological and man-made neural networks, the delays arise because
of the processing of information. More specifically, in the
electronic implementation of analog neural networks, the delays
occur in the communication and response of the neurons owing to
the finite switching speed of amplifiers. Thus, studying of neural
networks dynamics with consideration of the delayed problem
becomes very important to manufacture high quality neural
networks models. In practice, although the use of constant
discrete delays in the models serve as a good approximation
in simple circuits consisting of a small number of neurons,
neural networks usually have a spatial extent
due to the presence of a multitude of parallel pathways with a
variety of axon sizes and lengths. Therefore, there will be a
distribution of conduction velocities along these pathways and a
distribution of propagation delays, which cannot be modeled
with discrete delays; a more appropriate way is to incorporate
continuously distributed delays. Then the study of the dynamical behavior of neural network models with continuously distributed delays is more important and appropriate.
In general application, people pay much attention to
the stability of those neural networks models. But at the same time, the
dissipativity is also an important concept which is more general
than stability in chaos, synchronization, system norm estimation
and robust control. There are some results about stability of neural
networks models with continuously distributed delay and some
 results about dissipativity of those
with or without discrete delays \cite{l2,x4}.
To the best of our knowledge, few authors
have discussed dissipativity of the neural network models with
continuously distributed delays. We establish a method for the
dissipativity of neural network models with continuously distributed delays.
This method, based on the properties of nonnegative matrices \cite{h2,l1}
and the differential inequality technique \cite{x3,x4},
yields some new criteria for
dissipativity and the global attracting set. Moreover, these
criteria are easy to check and apply in practice.

\section{Preliminaries}

Throughout this paper, $\mathbb{R}^n$ denotes the $n$-dimensional
Euclidean space,
$\mathbb{R}^n_+=[0,+\infty)\times\dots\times[0,+\infty)$ and $C[X,Y]$
is the class of continuous mappings from the topological space $X$
to the topological space $Y$.

Consider the neural networks system with continuously distributed
delays as follows:
\begin{equation}
\begin{gathered}
\begin{aligned}
\dot{x}_i(t)&=-\mu_ix_i(t)+\sum_{j=1}^{n}a_{ij}f_j(x_j)\\
&\quad +\sum_{j=1}^{n}b_{ij}\int_{-\infty}^{t}K_{ij}(t-s)g_j(x_j(s))d s
+P_i(t),\quad t\geq0
\end{aligned}\\
x_i(t)=\phi_i(t),\quad -\infty<t\leq0,\; i=1,\dots,n
\end{gathered} \label{e1}
\end{equation}
where $n$ denotes the number of
neurons in the neural network, $x_i(t)$ corresponds to the state of
the $i$th neuron. $f_j$ and $g_j$ denote the activation functions of
the $j$th neuron. $a_{ij}$ and $b_{ij}$ represent the constant
connection weight of the $j$th neuron to the $i$th neuron.
$P_i(t)$ is the external input bias on the $i$th neuron. $\mu_i>0$
represents the rate with which the $i$th neuron will reset its
potential to the resting state in isolation when disconnected from
the network and external inputs.  $K_{ij}$ denotes the
refractoriness of the $i,j$th neuron after it has fired or
responded. The initial functions
$\phi_i \in C[(-\infty,0],\mathbb{R}]$ ($i=1,\dots,n$) are bounded.
The delay kernels $K_{ij}$
 with $\int_{0}^{\infty}K_{ij}(s)d s=1$ are real valued nonnegative
continuous functions
defined on $[0,\infty)$. $f_j$, $g_j$ and $P_i$ are continuous functions.

Clearly, the system above is a basic frame of
neural networks. For example, we can obtain models with discrete
delays as in \cite{c1,h1,l1,l2,x1}
when the function $K_{ij}$ is the $\delta$-function.
In addition, we can obtain Hopfield neural networks \cite{h1}
when $b_{ij}=0$, for $i,j=1,\dots,n$. Meanwhile, if $P_i(t)$ is
constant, we will obtain the system in \cite{c1}.

For convenience, in the following we shall rewrite \eqref{e1} in the form
\begin{equation} \label{e2}
\begin{gathered}
\dot{x}(t)=-\mu x(t)+A f(x)+\int_{-\infty}^{t}K_0(t-s)g(x(s))d s
+P(t),\quad t\geq0\\
x_0(s)=\phi(s),\quad -\infty<s\leq0,
\end{gathered}
\end{equation}
in which
$x(t)=\mathop{\rm col}\{x_1(t),\dots,x_n(t)\}$,
 $\mu=\mathop{\rm diag}\{\mu_1,\dots,\mu_n\}$,
\begin{gather*}
f(x)=\mathop{\rm col}\{f_1(x_1),\dots,f_n(x_n)\}, \quad
g(x)=\mathop{\rm col}\{g_1(x_1),\dots,g_n(x_n)\};\\
P(t)=\mathop{\rm col}\{P_1(t),\dots,P_n(t)\}, \quad
A=(a_{ij})_{n\times n}, \quad
K_0(\cdot)=(b_{ij}K_{ij}(\cdot))_{n\times n}.
\end{gather*}
 Let $x_t(s)=x(t+s)$, $-\infty<s\leq0$,
then $x_0(s)=x(s)=\phi(s)$, $-\infty<s\leq0$.

We always assume that system \eqref{e2} has a continuous solution
denoted by $x(t,0,\phi)$ or simply by $x(t)$ if no confusion should occur.
The inequalities between matrices or vectors such as
 $A\leq B$ ($A>B$) mean that each pair of corresponding elements
of $A$ and $B$ satisfies the inequality. In particular, $A$ is called
a nonnegative matrix if $A\geq 0$.

Let $C$ be the set of all functions $\phi\in C[(-\infty,0],\mathbb{R}^n]$,
in which each $\phi(s)=\mathop{\rm col}\{\phi_1(s),\dots,\phi_n(s)\}$
satisfies that
$\sup_{-\infty<s\leq0}|\phi_i(s)|$ always exists as a finite
number. For $x\in \mathbb{R}^n$, we define
$[x]^+=\mathop{\rm col}\{|x_1|,\dots,|x_n|\}$. For
$\phi(s)\in C$,
$[\phi(s)]^+_{\infty}=\mathop{\rm col}\{\|\phi_1(s)\|_\infty,\dots,
\|\phi_n(s)\|_\infty\}$, where
$\|\phi_i(s)\|_\infty=\sup_{-\infty<s\leq0}|\phi_i(s)|$.

In the following, we shall give the same definitions as those for
the networks with discrete delays given in  \cite{a1,l2,x1,x2}.

\begin{definition} \label{def1} \rm
 A set $S\subset C$ is called a positive
invariant set of \eqref{e2}, if for any initial value $\phi \in S$, we
have the solution of \eqref{e2} $x_t(s,0,\phi)\in S$, for
$t\geq0$, $-\infty<s\leq0$.
\end{definition}

\begin{definition} \label{def2} \rm
 The system \eqref{e2} is called dissipative, if there exists a constant
vector $L>0$, such that for any initial value $\phi \in C$,
there is a $T(0,\phi)$ such that, when $t>T(0,\phi)$, the solution $x(t,0,\phi)$
of system \eqref{e2} satisfies $[x(t,0,\phi)]^+\leq L$.
In this case, the set $\Omega=\{\phi \in C|[\phi(s)]_{\infty}^+ \leq L\}$
is said to be the global attracting set of \eqref{e2}.
\end{definition}

Before we discuss the system \eqref{e2} in detail, we need the
following two lemmas.

\begin{lemma}[\cite{l1}] \label{lem1}
If $M\geq0$ and $\rho(M)<1$, then $(I-M)^{-1}\geq0$.
\end{lemma}

\begin{lemma}[\cite{h2}] \label{lem2}
Let $M\geq0$ and let $\tilde{M}$ be any principal sub-matrix of $M$;
then $\rho(\tilde{M})\leq \rho(M)$.
\end{lemma}

The symbol $\rho(M)$ and the matrix $I$ denote the spectral radius
of a square matrix $M$ and a unit matrix, respectively.
In our analysis, we always suppose that:
\begin{itemize}

\item[(A1)]
 There are matrices $\alpha=\mathop{\rm diag}\{\alpha_1,\dots,\alpha_n\}$
 and $\beta=\mathop{\rm diag}\{\beta_1,\dots,\beta_n\}$ with
$\alpha_j>0$ and $\beta_j>0$,
such that for any $x \in \mathbb{R}^n$
$$
[f(x)]^+\leq \alpha[x]^+,\quad [g(x)]^+\leq \beta[x]^+.
$$

\item[(A2)]  $\rho(M)<1$, where $M=\mu^{-1}A^+\alpha+\mu^{-1}B^+\beta$,
$\mu=\mathop{\rm diag}\{\mu_1,\dots,\mu_n\}$,
$A^+=(|a_{ij}|)_{n\times n}$, $B^+=(|b_{ij}|)_{n\times n}$.

\item[(A3)] $[R(t)]^+\leq R$, where $R(t)=\mu^{-1}P(t)$ and
$R=\mathop{\rm col}\{R_1,\dots,R_n\}$ with $R_i\geq0$.
\end{itemize}

\section{Dissipativity analysis}

In this section, combining the inequality
technique \cite{x3,x4} with properties of nonnegative matrices \cite{h2,l1},
we introduce some new
results for the dissipativity of system \eqref{e2}.

\begin{theorem} \label{thm1}
 If (A1)--(A3)  hold, then  the set
$\Omega=\{\phi \in C| [\phi]^+_{\infty}\leq L\}$ is a positive invariant
set of system \eqref{e2},
 where $L=(I-M)^{-1}R=\mathop{\rm col}\{L_1,\dots,L_n\}$.
\end{theorem}

\begin{proof}
According to Definition \ref{def1}, we need to prove that for any
$\phi\in C$ and $[\phi]^+_{\infty}\leq L$, the solution
$x(t)\stackrel{\Delta}{=}x(t,0,\phi)$ of system \eqref{e2} satisfies
\begin{equation} \label{e3}
[x(t)]^+ \leq L,\quad \text{for } t\geq0.
\end{equation}
For the proof of this inequality, we first prove that, for any given
$\gamma>1$, when $[\phi]^+_{\infty}<\gamma L$, the solution
$x(t)$ satisfies
\begin{equation} \label{e4}
[x(t)]^+ < \gamma L,\quad \text{for } t\geq0.
\end{equation}
Otherwise, there must be some $i$, and $t_1>0$, such that
\begin{gather}
|x_i(t_1)|= \gamma L_i,\quad |x_i(t)|< \gamma L_i\quad \text{for } 0\leq t<t_1;
\label{e5}\\
[x(t)]^+< \gamma L,\quad \text{for } 0\leq t<t_1.
\label{e6}
\end{gather}
where $L_i$ is the $i$th component of vector $L$.

Note that $L=(I-M)^{-1}R$, i.e., $L=ML+R$. Then, it follows from
\eqref{e2}, \eqref{e5}, \eqref{e6} that
% \label{e7}
\begin{align*}
[x(t_1)]^+
&\leq e^{-\mu t_1}[\phi]^+_{\infty}+\int_{0}^{t_1}e^{-\mu(t_1-s)}
[A f(x(s))]^+d s \\
&\quad +\int_{0}^{t_1}e^{-\mu(t_1-s)}\{\int_{-\infty}^{s}[K_0(s-\theta)]^+
  [g(x(\theta))]^+d \theta+[P(s)]^+\}d s  \\
&\leq e^{-\mu t_1}[\phi]^+_{\infty}+\int_{0}^{t_1}e^{-\mu(t_1-s)}
  (A^+\alpha[x(s)]^+)d s \\
&\quad + \int_{0}^{t_1}e^{-\mu(t_1-s)}\{\int_{-\infty}^{s}
  [K_0(s-\theta)]^+(\beta[x(\theta)]^+)d \theta+[P(s)]^+\}d s \\
&< e^{-\mu t_1}[\phi]^+_{\infty}+(I-e^{-\mu t_1})[\mu^{-1}(A^+\alpha+B^+
  \beta)\gamma L+R] \\
&\leq e^{-\mu t_1}\gamma L+(I-e^{-\mu t_1})(M\gamma L+R)
 < \gamma L
\end{align*}
This inequality implies $|x_i(t_1)|< \gamma L_i$ ($i=1,\dots,n$),
which contradicts \eqref{e5}. Therefore, \eqref{e4} holds.
Letting $\gamma\to 1^+$, we obtain that
$$
[x(t)]^+\leq L,\quad \text{for } t\geq0.
$$
The proof is complete.
\end{proof}

\begin{theorem} \label{thm2}
 If (A1)--(A3) hold, then the system \eqref{e2} is dissipative and
the set $\Omega=\{\phi \in C|[\phi(s)]_{\infty}^+ \leq L\}$
is the global attracting set of \eqref{e2}.
\end{theorem}

\begin{proof}
Without loss of generality, we assume $L>0$.
First, we prove that for any initial value $\phi \in C$, there exists
a number $\Gamma>0$, large enough, such that
 the solution $x(t)$ satisfies
 \begin{equation} \label{e8}
[x(t)]^+ < \Gamma L,\quad \text{for }t\geq0.
\end{equation}
For a given $\phi\in C$, there must be a large enough positive number
$\Gamma$, such that $[\phi(t)]_{\infty}^+ < \Gamma L$.

If \eqref{e8} is not true, then there must be some $i$ and $t_2>0$, such that
\begin{gather}
|x_i(t_2)|= \Gamma L_i,\,\,\,\,|x_i(t)|< \Gamma L_i\quad
\text{for } 0\leq t<t_2; \label{e9}\\
[x(t)]^+< \Gamma L,\quad \text{for }0\leq t<t_2. \label{e10}
\end{gather}
 From \eqref{e2}, \eqref{e9}, \eqref{e10}, and $L=ML+R$,
we obtain
\begin{align*}
[x(t_2)]^+
&\leq e^{-\mu t_2}[\phi]^+_{\infty}+\int_{0}^{t_2}e^{-\mu(t_2-s)}
 [A f(x(s))]^+d s \\
&\quad + \int_{0}^{t_2}e^{-\mu(t_2-s)}\{\int_{-\infty}^{s}
 [K_0(s-\theta)]^+[g(x(\theta))]^+d \theta+[P(s)]^+\}d s  \\
&\leq e^{-\mu t_2}[\phi]^+_{\infty}+\int_{0}^{t_2}
  e^{-\mu(t_2-s)}(A^+\alpha[x(s)]^+)d s \\
&\quad + \int_{0}^{t_2}e^{-\mu(t_2-s)}\{\int_{-\infty}^{s}[K_0(s-\theta)]^+
  (\beta[x(\theta)]^+)d \theta+[P(s)]^+\}d s \\
&< e^{-\mu t_2}[\phi]^+_{\infty}+(I-e^{-\mu t_2})[\mu^{-1}(A^+\alpha
  +B^+\beta)\Gamma L+R] \\
&\leq e^{-\mu t_2}\Gamma L+(I-e^{-\mu t_2})(M\Gamma L+R)
 <\Gamma L
\end{align*}
 From the above inequality, it follows that
$|x_i(t_2)|< \Gamma L_i$ ($i=1,\dots,n$), which contradicts \eqref{e9},
and so \eqref{e8} is true.

In view of Definition \ref{def2}, for the proof of Theorem \ref{thm2}, we need to prove that
for the above positive vector $L$, any solution $x(t)$ of the
system \eqref{e2} satisfies
\begin{equation} \label{e12}
[x(t)]^+\leq L,\quad \text{as } t\to  +\infty.
\end{equation}
To prove \eqref{e12}, we first verify that
\begin{equation} \label{e13}
\limsup_{t\to  +\infty}[x(t)]^+\leq L.
\end{equation}
It is equivalent to prove that
\begin{equation} \label{e14}
\limsup_{t\to  +\infty}([x(t)]^+-L)= \sigma\leq0.
\end{equation}
If \eqref{e14} is false, then there must exist
some $\sigma_i>0$. Without loss of generality, we denote such
components of $\sigma$ by
$\sigma_{i_{1}}, \sigma_{i_{2}}, \dots, \sigma_{i_{m}}$, where
$\sigma_{i_{j}}>0$, $j=1,\dots,m$.
By the definition of the superior limit and \eqref{e14}, for a sufficiently
small constant $\varepsilon>0$, there is $t_3>0$ such that
\begin{equation} \label{e15}
[x(t)]^+\leq L+\sigma+\epsilon,\quad \text{for }t\geq t_3,
\end{equation}
where $\epsilon=\mathop{\rm col}\{\varepsilon,\dots,\varepsilon\}$.
Meanwhile, since $\int_{0}^{\infty}K_{ij}(s)d s=1$ $(i,j=1,\dots,n)$,
for the above $\epsilon$ and $\Gamma L$ in \eqref{e8}, there
must be a $T>0$ such that, for any $t>T$,
\begin{equation} \label{e16}
\int_{T}^{\infty}K_0(t)\beta\Gamma L d t\leq\epsilon
\end{equation}
When $t>t^*\stackrel{\Delta}{=}t_3+T$, combining \eqref{e2}, \eqref{e15} with
\eqref{e16}, we obtain
\begin{equation} \label{e17}
\begin{aligned}
&D^+[x(t)]^+ +\mu[x(t)]^+\\
&\leq A^+[f(x(t))]^+ +\int_{-\infty}^{t}K_0(t-s)[g(x(s))]^+d s+[P(t)]^+ \\
&= A^+[f(x(t))]^+ +\int_{-\infty}^{t-T}K_0(t-s)[g(x(s))]^+d s\\
&\quad  +\int_{t-T}^{t}K_0(t-s)[g(x(s))]^+d s+[P(t)]^+ \\
&\leq A^+[f(x(t))]^+ +\int_{T}^{+\infty}K_0(s)\beta\Gamma L d s
  +\int_{t-T}^{t}K_0(t-s)\beta(L+\sigma+\epsilon)d s+[P(t)]^+ \\
&\leq A^+(\alpha[x(t)]^+) +\epsilon+B^+\beta(L+\sigma+\epsilon)+[P(t)]^+
\end{aligned}
\end{equation}
in which $D^+[x(t)]^+$ denotes the Dini derivative of the positive
 vector $[x(t)]^+$.

Multiplying both sides of \eqref{e17} by $e^{-\mu(t-s)}$ and integrating
with respect to $s$ from $t^*$ to $t$, we obtain
\begin{equation} \label{e18}
\begin{aligned}
&[x(t)]^+\leq e^{-\mu(t-t^*)}[x(t^*)]^++\big(I-e^{-\mu(t-t^*)}\big) \\
&\times\Big[\mu^{-1}A^+\alpha(L+\sigma+\epsilon)+\mu^{-1}B^+\beta
(L+\sigma+\epsilon)+R+\mu^{-1}\epsilon\Big]
\end{aligned}
\end{equation}
Since $M=\mu^{-1}A^+\alpha+\mu^{-1}B^+\beta$, it follows from \eqref{e8}
and \eqref{e18} that
\begin{equation} \label{e19}
[x(t)]^+\leq e^{-\mu(t-t^*)}\Gamma L+\big(I-e^{-\mu(t-t^*)}\big)
        \big[ML+M\epsilon+M\sigma+R+\mu^{-1}\epsilon\big]
\end{equation}
By the definition of the superior limit and \eqref{e14}, there is a
sequence $t_k\to +\infty$ such that
\begin{equation} \label{e20}
\lim_{t_k\to  +\infty}[x(t_k)]^+=L+ \sigma.
\end{equation}
In \eqref{e19}, let $t=t_k\to \infty$, $\varepsilon\to 0^+$. Then,
from $L=(I-M)^{-1}R$ and \eqref{e20},  it follows that
\begin{equation} \label{e21}
\sigma\leq M\sigma
\end{equation}
Let $\tilde{\sigma}=\mathop{\rm col}\{\sigma_{i_1},\dots,\sigma_{i_m}\}$,
then from \eqref{e21} follows that
\begin{equation}
\tilde{\sigma}\leq \tilde{M}\tilde{\sigma}
\end{equation}
where $\tilde{M}$ is the $m$-by-$m$ principal sub-matrix of $M$
corresponding to the positive vector $\tilde{\sigma}$,
i.e., $\tilde{M}$=($m_{i_j,i_u}$), $j,u=1,\dots,m$.

By Lemma \ref{lem1}, we obtain that $\rho(\tilde{M})\geq1$.
According to Lemma \ref{lem2},
$\rho(M)\geq1$, which contradicts $\rho(M)<1$ in (A2). Then,
for any $i$, $\sigma_i\leq0$, i.e., \eqref{e13} holds. Further, \eqref{e12} holds.
The proof is complete.
\end{proof}

\begin{corollary} \label{coro1}
 If (A1)--(A3) hold and $R=0$ in
(A3), then the system \eqref{e2} has a unique equilibrium point
$x^*=0$ which is globally asymptotically stable.
\end{corollary}

 By comparing this Corollary with \cite[Theorem 4]{z1}, we obtain the
following remark.

\begin{remark} \label{rmk1}  \rm
When $f=g$, $P_i(t)=I_i$, the system \eqref{e2} becomes the model
studied by Zhao \cite{z1}. To obtain the global
asymptotic stability, Zhao assumed (A2), that $f_j$ satisfies
$xf_j(x)>0$ $(x\neq0)$, and that there exists a positive constant $\lambda_j$,
such that $\lambda_j=\sup_{x\neq0}\frac{f_j(x)}{x}$.
These assumptions imply (A1), so that \cite[Theorem 4]{z1} is a special
case of the corollary in this paper.
\end{remark}

\section{Illustrative example}

 We consider the  neural networks model, for $t\geq 0$,
\begin{equation} \label{e23}
\begin{gathered}
\dot{x}_1(t)=-2x_1(t)+\frac{1}{2}\sin x_1
  +\frac{1}{2}\int_{-\infty}^{t}\frac{2}{\pi[1+(t-s)^2]}|x_1(s)|d s+r\cos t\\
\dot{x}_2(t)=-2x_2(t)+\frac{1}{2}\sin x_2
  +\frac{1}{2}\int_{-\infty}^{t}\frac{2}{\pi[1+(t-s)^2]}|x_2(s)|d s
+r\sin t,
\end{gathered}
\end{equation}
It can be obtained easily that
$\mu=\mathop{\rm diag}\{2,2\}$, $A=\mathop{\rm diag}\{\frac{1}{2},\frac{1}{2}\}$,
$B=\mathop{\rm diag}\{\frac{1}{2},\frac{1}{2}\}$.
The delay kernels functions $K_{ij}(s)=\frac{2}{\pi[1+s^2]}\,\,\,(i,j=1,2)$
satisfy $\int_{0}^{\infty}K_{ij}(s)d s=1$.
Meanwhile, since $f_1(x_1)=\sin x_1$, $f_2(x_2)=\sin x_2$,
$g_1(x_1)=|x_1|$, $g_2(x_2)=|x_2|$, the condition (A1) holds.
With the $P(t)=\mathop{\rm col}\{r\cos t,r\sin t\}$, we can get
$R=\mathop{\rm col}\{\frac{1}{2}|r|,\frac{1}{2}|r|\}$.
By calculating, $\rho(M)=\frac{1}{2}<1$, then (A2) holds.

By Theorem \ref{thm2}, when $r\neq0$, the system \eqref{e23} is
dissipative and the set $\{(x_1,x_2) : |x_1|\leq |r|,\; |x_2|\leq
|r|\}$ is the global attracting set of \eqref{e23}. When $r=0$, in
view of Corollary \ref{coro1}, the system \eqref{e23} has
an equilibrium point $x^*=\mathop{\rm col}\{0,0\}$ which is globally
asymptotically stable.

\begin{remark} \label{rmk2} \rm
 In this example, when $r\neq 0$, we cannot solve the problem of the global
attracting set with the results in \cite{a1,l2,x1,x2,x4} because of
the continuously distributed delays.
When $r=0$, the delay
kernels $K_{ij}$ do not possess properties such as
$\int_{0}^{\infty}sK_{ij}(s)d s<\infty$ in \cite{c1}. Then it cannot
be treated with the methods in \cite{c1}.
\end{remark}

\subsection*{Conclusions}
In this paper, we study the dissipativity and the global attracting
set of neural network models with continuously distributed delays,
which form the basic frame of neural networks. Combining the
differential inequality technique with properties of nonnegative
matrices, we introduce some sufficient criteria for
dissipativity and a method to calculate the global attracting set
of a general class of neural network models with continuously
distributed delays. Through the comparison and the illustration of an
example, we can see that the model studied in this paper is more
general and our method can be applied to more general neural network
models than those in the references.

\begin{thebibliography}{00}

\bibitem{a1} S. Arik; On the global dissipativity of dynamical neural
networks with time delays, {\it Phys. Lett. A \bf 326} (2004), 126-132.

\bibitem{c1} Y. Chen; Global stability of neural networks with distributed
delays, {\it Neural Networks \bf 15}  (2002), 867-871.

\bibitem{h1} J. J. Hopfield;
Neurons with graded response have collective computational
properties like those of two-stage neurons,
{\it Natl. Acad. Aci. USA. \bf 81} (1984), 3088-3092.

\bibitem{h2} R. A. Horn, C. R. Johnson;
Matrix Analysis, {\it Cambridge University Press}, (1985).

\bibitem{l1} J. P. Lasalle;
The Stability of Dynamical System, {\it SIAM, Philadelphia},  (1976).

\bibitem{l2} X. Liao, J. Wang;
Global dissipativity of continuous-time recurrent neural networks
with time delays, {\it  Physical Review E \bf (68)} (2003), 016118 [1-7].

\bibitem{x1} D. Xu, H. Zhao;
Invariant set and attractivity of nonlinear differential equations with
delays,  {\it Applied Mathematics letter.  \bf 15} (2002), 321-325.

\bibitem{x2} D. Xu, H. Zhao;
Invairant and attracting sets of Hopfield neural networks with delays,
{\it International Journal of Systems Science. \bf 32} (2001), 863.

\bibitem{x3} D. Xu;
Integro-differential equations and delay integral inequalities,
{\it T\^{o}hoku Math. J. \bf 44} (1992), 365.

\bibitem{x4} D. Xu;
Asymptotic behavior of Hopfield neural networks with delays,
 {\it Differential Equations and Dynamical Systems. \bf 9(3)} (2001), 353-364.

\bibitem{z1} H. Zhao; Global asymptotic stability of Hopfield neural
network involving distributed delays,
{\it Neural Networks.\bf 17}  (2004), 47-53.

\end{thebibliography}
\end{document}
