\documentclass{amsart} 
\begin{document} 
{\noindent\small {\em Electronic Journal of Differential Equations},
Vol.\ 1997(1997), No.~24, pp. 1--20.\newline
ISSN: 1072-6691. URL: http://ejde.math.swt.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.swt.edu (login: ftp) 147.26.103.110 or 129.120.3.113}
\thanks{\copyright 1997 Southwest Texas State University  and 
University of North Texas.} 
\vspace{1.5cm}
\title[\hfilneg EJDE--1997/24\hfil Initial value problems for
nonresonant delay equations]{Initial value problems for nonlinear nonresonant 
delay differential equations with possibly infinite delay} 

\author{Lance D. Drager}
\address{Lance D. Drager \hfil\break
Department of Mathematics and Statistics\\
Texas Tech University \hfil\break
Lubbock, TX  79409-1042 USA}
\email{drager@math.ttu.edu}

\author{William Layton}
\address{William Layton \hfil\break
Department of Mathematics\\
University of Pittsburgh \hfil\break
Pittsburgh, PA  15260 USA}
\email{wjl+@pitt.edu}

\date{}
\thanks{Submitted August 14, 1997. Published December 19, 1997.}
\thanks{The second author was partially supported by NSF Grant DMS--9400057}
\subjclass{Primary 34K05, 34K20, 34K25}
\keywords{Delay differential equation, infinite delay, initial value problem, 
\hfil\break\indent
nonresonance, asymptotic stability, exponential asymptotic stability}

\begin{abstract}
We study initial value problems for scalar, nonlinear, delay
differential equations with distributed, possibly infinite, delays.
We consider the
initial value problem 
\[
\begin{cases}
x(t) = \varphi(t), & t \leq 0\\[2\jot]
{\displaystyle
x'(t) + \int_{0}^{\infty} g(t, s, x(t), x(t-s))\, d \mu(s) = f(t),}
& t\geq 0,
\end{cases}
\]
where $\varphi$ and $f$ are  bounded and $\mu$ is a finite
Borel measure.  Motivated by the nonresonance condition for the
linear case and previous work of the authors, we introduce conditions
on $g$.  Under these conditions, we prove an existence and uniqueness
theorem.  We show that under the same
conditions, the solutions are globally
asymptotically stable and, if $\mu$ satisfies an exponential decay
condition, globally exponentially asymptotically stable.
\end{abstract}

\maketitle
   
% Theorem-like environments (amsthm, loaded by amsart); all share one counter.
\newtheorem{thm}{Theorem}[section]
\newtheorem{lem}[thm]{Lemma}
\newtheorem{lemma}[thm]{Lemma}% duplicate alias of lem; kept for compatibility
\newtheorem{prop}[thm]{Proposition}
\theoremstyle{definition}
\newtheorem{defn}[thm]{Definition}
\numberwithin{equation}{section}
% Notation
\newcommand{\gnr}{\textsf{GNR}}% the generalized nonresonance condition
\newcommand{\reals}{{\mathbb{R}}}
\newcommand{\im}{\operatorname{Im}}% image of a map
\newcommand{\putinvert}[1]{{\lvert #1 \rvert}}
\let\abs=\putinvert% absolute value: \abs{x}
\newcommand{\putinVert}[1]{{\lVert #1\rVert}}
\let \Norm=\putinVert% norm: \Norm{x}
\newcommand{\lset}{\left\{\,}
\newcommand{\rset}{\,\right\}}
\newcommand{\bcspace}[1]{{BC}^{#1}}
\let\bc=\bcspace
\newcommand{\bcz}{\bc{0}}
\renewcommand{\subset}{\subseteq}% all inclusions in this paper are non-strict
\renewcommand{\supset}{\supseteq}
\newcommand{\inv}{{\mathstrut -1}}
\newcommand{\cspace}[1]{{C^{#1}}}
\newcommand{\normi}[1]{\Norm{#1}_{1}}% was \norm{...}: undefined; \Norm is the defined macro
\newcommand{\normbox}[1]{\Norm{#1}_{\infty}}% was \norm{...}: undefined; \Norm is the defined macro
\newcommand{\nop}[1]{{N_{#1}}}
\newcommand{\bee}[1]{{B(#1)}}% closed ball of radius #1 about 0
\newcommand{\scrG}{{\mathcal{G}}}
\newcommand{\gspace}{\scrG}
\newcommand{\gclass}[1]{\gspace_{#1}}% the class \mathcal{G}_p of admissible g
\newcommand{\gcond}[1]{\textsf{G#1}}% hypothesis labels (G1)--(G5)
\newcommand{\bcx}{\bc{}}
\newcommand{\bcv}{\bcx_{0}[0,\infty)}
\newcommand{\varphibar}{\bar{\varphi}}
\newcommand{\zspace}[1]{Z(#1)}
\newcommand{\zspacep}[1]{Z_{+}(#1)}

\section{Introduction}		%%%%%%%%%  section 1

In this paper we will study the initial value problem for scalar,
nonlinear, delay differential equations with possibly infinite
delay.   We will consider problems of the form
\begin{equation}
\label{e:ivp}
\begin{cases}
x(t) = \varphi(t), & t \leq 0\\[2\jot]
{\displaystyle
x'(t) + \int_{0}^{\infty} g(t, s, x(t), x(t-s))\, d \mu(s) = f(t),}
& t\geq 0.
\end{cases}
\end{equation}
We assume that $\varphi$ is bounded and continuous on $(-\infty,0]$,
 $f$ is bounded and continuous on $[0,\infty)$, and that $\mu$ is
a positive, finite Borel measure on $[0,\infty)$.   As usual, 
$x'(0)$ in \eqref{e:ivp} is to be interpreted as a right-hand
derivative.  In all the cases we consider, $g$ will be continuous.


In this paper, we will give conditions on $g$ that will ensure that
the initial value problem \eqref{e:ivp} has a unique maximally defined
solution, which is defined on the entire real line $\reals$.   We will show
that the same conditions on $g$ ensure that the solutions of
\eqref{e:ivp} are asymptotically stable, i.e., if $x_{1}$ and $x_{2}$
are solutions of \eqref{e:ivp} for different initial conditions
$\varphi_{1}$ and $\varphi_{2}$ (but with the same forcing function $f$),
then $x_{1}(t)-x_{2}(t)\to 0$ as $t\to \infty$.  We will also show
that if \eqref{e:ivp} has exponentially fading memory, i.e.,
$\mu$ decays exponentially, then every solution of \eqref{e:ivp} is
exponentially asymptotically stable, i.e., if $x_{1}$ and $x_{2}$ are
solutions of \eqref{e:ivp} for different initial conditions, then
\begin{equation*}
\abs{x_{1}(t)-x_{2}(t)} \leq C e^{-\lambda t}, \qquad t\geq 0,
\end{equation*}
for some constants $C$ and $\lambda >0$.

In the rest of this introduction, we will describe the conditions that
we will place on $g$.    To motivate these conditions, it will
help to recall some previous work of the authors in \cite{DLjde}
(for related work of the authors see \cite{DLlib,DLpol,DLfa,DLnr,
DLM,L}).

In \cite{DLjde}, the authors studied delay differential equations
of the form
\begin{equation}
\label{e:olddde}
x'(t) + \int_{-\infty}^{\infty} g(x(t), x(t-s)) \, d \mu(s) = f(t),
\qquad t\in \reals\,,
\end{equation}
under a generalized nonresonance condition Condition~\gnr.   Under
this condition, it was shown that \eqref{e:olddde} has a unique
solution that is defined and bounded on all of $\reals$.
We want to briefly recall the statement and motivation of
Condition~\gnr. 

A very special case of \eqref{e:olddde} is the linear constant
coefficient equation
\begin{equation}
\label{e:simple}
x'(t) + a x(t) + b x(t-\tau) = 0,
\end{equation}
where $a, b \in\reals$.  This equation can be analyzed by
classical techniques, \cite{BC,El,ElN}.  In particular, \eqref{e:simple}
has a nontrivial bounded solution if and only if the characteristic
equation $z+a + be^{-\tau z}=0$ has a root on the imaginary axis.
Thus, it can be shown that the set $C_{\tau}$ of pairs $(a,b)$ in the
$ab$-plane
for which \eqref{e:simple} has a nontrivial bounded solution
consists of the line $a+b=0$ and the multi-branch parameterized
curve
\begin{equation*}
(a,b)=\frac{1}{\tau}(-\theta \cot(\theta), \theta \csc(\theta)).
\end{equation*}
It is known that if $(a,b)$ lies to the right of $C_{\tau}$
the zero solution of \eqref{e:simple} is globally asymptotically
stable (i.e., all of the roots of the characteristic equation
are in the left half plane), see \cite{El,ElN,H}.   
As $\tau$ varies, $C_{\tau}$ will sweep out the region $R'$
consisting of the two quadrants above and below the lines
$a \pm b=0$, with $a+b=0$ included and $a-b=0$ excluded.
See Figure~\ref{fig:ctau}, which shows a few branches of
$C_{\tau}$ and shows the line $a-b=0$ as dotted.

% Load the plain-TeX epsf macros for EPS figure inclusion.
% NOTE(review): epsf is obsolete; the graphicx package's \includegraphics
% is the modern replacement, but switching requires a preamble change
% (\usepackage{graphicx}) that cannot be made from the document body.
\input epsf 
% \EPSFileScaled[width]{file}: include an EPS file scaled to the given width.
\newcommand{\EPSFileScaled}[2][0pt]{%
\epsfxsize=#1\relax\epsffile{#2}}
\begin{figure}
\centerline{\EPSFileScaled[4.5in]{lfid1.eps}}
\caption{The set $C_{\tau}$\label{fig:ctau}}
\end{figure}

Equation \eqref{e:simple} is a special case of \eqref{e:olddde}
with $g(x,y) = a x + b y$ ($\mu$ is the Dirac measure at
$\tau$).  In this case, $\lset (a,b)\rset$ is the image of
the gradient of $g$, $\nabla{g}$.   In the nonlinear case,
the image of $\nabla{g}$ will be more than a single point.
To get results for all delays, we want the image of $\nabla{g}$ to avoid $R'$.   
As the {\em first part of Condition~\gnr}, we required the somewhat
stronger condition that the image of $\nabla{g}$ be disjoint
from
\begin{equation*}
R = \lset (a,b)\in \reals^{2} \mid \abs{a} \leq \abs{b}\rset,
\end{equation*}
the closure of $R'$ (in this paper, the image will have to lie
to the right of $R$).  

It is possible that $\im(\nabla{g})$, the image of $\nabla{g}$, comes
arbitrarily close to $R$.  We need some control on how fast
$\im(\nabla{g})$ approaches $R$.    This is measured as follows.
For $\rho\geq 0$, define
\begin{equation*}
Q(\rho) = \lset (x,y)\in \reals^{2} \mid \abs{x}\leq \rho,
\abs{y}\leq \rho \rset,
\end{equation*}
and let $G(\rho)=\nabla{g}(Q(\rho))$.   Let $\alpha, \beta\colon
\reals^{2}\to \reals$ be the linear functionals
\begin{align*}
\alpha(a,b) & = a-b\\
\beta(a,b) & = a + b.
\end{align*}
The boundary lines of $R$ are $\alpha=0$ and $\beta=0$.  The region
to the right of $R$ is described by $\alpha>0$ and $\beta>0$,
while the region to the left of $R$ is described by $\alpha<0$
and $\beta<0$.   For $\rho\geq 0$ we define
\begin{equation}
\label{e:astdef}
\begin{aligned}
\alpha_{\ast} (\rho) &= \inf \lset \alpha(a,b)\mid (a,b) \in
G(\rho)\rset \\
\alpha^{\ast}(\rho) & = \sup\lset \alpha(a,b)\mid (a,b)\in G(\rho)
\rset\\
\beta_{\ast}(\rho) & = \inf \lset \beta(a,b)\mid (a,b)\in
G(\rho)\rset\\ 
\beta^{\ast}(\rho) &= \sup \lset \beta(a,b) \mid (a,b)\in G(\rho)\rset.
\end{aligned}
\end{equation}
Consider the case where $\im(\nabla{g})$ lies to the right of $R$.
In this case, we define
\begin{equation}
\label{e:rsdef}
\begin{aligned}
r(\rho) &= \min\lset \alpha_{\ast}(\rho), \beta_{\ast}(\rho)\rset\\
s(\rho) & = \max \lset \alpha^{\ast}(\rho), \beta^{\ast}(\rho)\rset.
\end{aligned}
\end{equation}
See Figure~\ref{fig:abplane} for an illustration.
Clearly $r$ is a positive non-increasing function.   If
$\im(\nabla{g})$ comes arbitrarily close to $R$, we will have
$r(\rho)\to 0$ as $\rho\to \infty$, and the rate at which
$r$ goes to zero is a measure of how fast $\im(\nabla{g})$ approaches
$R$.   As the {\em second part of Condition~\gnr}, we assume that
\begin{equation*}
\sup \lset \rho r(\rho) \mid \rho\geq 0\rset = \infty.
\end{equation*}
Similar definitions can be made in the case where $\im(\nabla{g})$
lies to the left of $R$, but these will not be needed in this
paper.   We do not need to impose any assumptions on $s(\rho)$,
but it will figure in our proofs.

We want to extend Condition~\gnr{} to allow $g$ to depend explicitly
on $t$ and $s$, as in \eqref{e:ivp}.  This is necessary for the
techniques we will use in analyzing the initial value problem,
as well as desirable for greater generality.   For brevity,
we will refer to the case where $g$ does not depend explicitly
on $t$ and $s$ as the ``time independent case.''

Our extended condition also takes into account another consideration.
Since the method of steps does not apply to \eqref{e:ivp}, it is
not clear that we have unique continuation of solutions for
\eqref{e:ivp}.  In order to prove uniqueness, it will be necessary to
consider solutions defined on intervals with a finite upper
endpoint.

These considerations lead us to the following definition of
the class of functions $g$ we will consider.

\begin{defn}
For $0<p\leq \infty$, let $\gclass{p}$ denote the set of functions
\begin{equation*}
g\colon [0,p) \times [0, \infty) \times \reals\times \reals \to
\reals\colon (t,s,x,y) \mapsto g(t,s,x,y)
\end{equation*}
that satisfy the following conditions.
\begin{enumerate}
\item[(\gcond{1})]
$g$ is continuous.
\item[(\gcond{2})]
The function 
\begin{equation*}
g(\cdot,\cdot,0,0)\colon [0,p)\times [0,\infty) \to \reals
\colon (t,s)\mapsto g(t,s, 0, 0)
\end{equation*}
is bounded.
\item[(\gcond{3})]
The partial derivatives $g_{x}$ and $g_{y}$ exist and are continuous.
\end{enumerate}
Since we will not have occasion to differentiate $g$ with respect to
$t$ or $s$, we will use the notation $\nabla{g}$ for the function
$(t,s,x,y)\mapsto (g_{x}(t,s,x,y), g_{y}(t,s,x,y))$.
\begin{enumerate}
\item[(\gcond{4})]
For every compact set $K\subset \reals^{2}$,  the image of
$[0,p)\times [0,\infty)\times K$ under $\nabla{g}$ is a bounded
set whose closure is disjoint from $R$ and lies to the right of
$R$.
\end{enumerate}
For $\rho\geq 0$, let $G(\rho)$ be the image of $[0,p)\times
[0,\infty) \times Q(\rho)$ under $\nabla{g}$.  Let
$\alpha_{\ast}(\rho)$, $\alpha^{\ast}(\rho)$, $\beta_{\ast}(\rho)$,
$\beta^{\ast}(\rho)$, $r(\rho)$ and $s(\rho)$ be defined as
in \eqref{e:astdef} and \eqref{e:rsdef}.  Our last condition
is the following.
\begin{enumerate}
\item[(\gcond{5})]
$g$ satisfies
\begin{equation*}
\sup \lset \rho r(\rho) \mid \rho\geq 0\rset = \infty.
\end{equation*}
\end{enumerate}
\end{defn}

A time independent $g$ that satisfies Condition~\gnr{}
(and has $\im(\nabla{g})$ lying to the right of $R$) is a member
of $\gclass{p}$ for all $p$.  Further examples can be generated
using the  $\scrG$-lemma of the next section.

In the next section we will derive the basic properties of
$\gclass{p}$.  In Section~\ref{s:eu}, we will prove the existence
and uniqueness of a solution to our initial value problem.
In Section~\ref{s:as}, we prove the solutions are globally
asymptotically stable. In Section~\ref{s:eas}, we prove
the solutions are exponentially asymptotically stable under
an exponential decay assumption on $\mu$.


\section{Basic estimates}  %%%%% section 2
\label{s:basic}

In this section, we will establish the basic properties of
functions in $\gclass{p}$ that we will use.
The first thing we need is an elementary geometric estimate,
see \cite{DLjde}.

\begin{lemma}
\label{thm:geoest}
Let $D$ be the region in the $ab$-plane defined by the
inequalities
\begin{equation*}
r \leq \alpha, \beta \leq s
\end{equation*}
for constants $r$ and $s$.  Let $a$ be a fixed real number.  Then
we have
\begin{equation}
\label{e:dest}
\sup \lset \abs{a-h}+ \abs{k} \mid (h,k)\in D \rset
= \max\lset\abs{a-r},\abs{a-s}\rset.
\end{equation}
\end{lemma}
To picture the region $D$, see $D(\rho)$ in
Figure~\ref{fig:abplane}.
The following lemma gives the basic estimate for functions
of class $\gclass{p}$.

\begin{lemma}
\label{thm:basicest}
Suppose that $g\in\gclass{p}$ and let $a$ be a fixed real number.
If $\rho\geq 0$ and $(\xi_{i}, \eta_{i})\in Q(\rho)$, $i=1,2$,
we have the estimate
\begin{multline}
\label{e:basicineq}
\abs{ [a\xi_{1}-g(t,s, \xi_{1},
\eta_{1})]-[a\xi_{2}-g(t,s,\xi_{2},\eta_{2})]}\\
\leq K(a,\rho) \max \lset \abs{\xi_{1}-\xi_{2}}, \abs{\eta_{1}-\eta_{2}}\rset,
\end{multline}
for all $(t,s)\in [0,p)\times [0,\infty)$.  Here $K(a,\rho)$ is
defined as
\begin{equation*}
K(a,\rho) = \max\lset \abs{a-r(\rho)}, \abs{a-s(\rho)}\rset.
\end{equation*}

From this, we get the estimate
\begin{equation}
\label{e:basicsize}
\abs{g(t,s,\xi,\eta)} \leq  K(0,\rho) \max\lset \abs{\xi}, \abs{\eta}
\rset + \abs{g(t,s,0,0)}, \qquad (\xi,\eta) \in Q(\rho).
\end{equation}
\end{lemma}
See Figure~\ref{fig:abplane} for the setting of this lemma.

\begin{figure}
\centerline{\EPSFileScaled[4.5in]{lfid4.eps}}
\caption{The setting for Lemma~\ref{thm:basicest}.\label{fig:abplane}}
\end{figure}

\begin{proof}
Consider the norms defined on $\reals^{2}$ by
\begin{gather*}
\Norm{(a,b)}_{1} = \abs{a} + \abs{b}\\
\Norm{(a,b)}_{\infty} = \max\lset\abs{a},\abs{b}\rset.
\end{gather*}
Let $u_{i}=(\xi_{i},\eta_{i})$, $i=1,2$.  Let $t_{0}\in [0,p)
$ and $s_{0}\in [0,\infty)$ be
fixed but arbitrary, and define $\varphi(\xi,\eta)=a\xi - g(t_{0},s_{0},
 \xi, \eta)$.
This function is continuously differentiable, and by applying
the Mean Value Theorem to $\sigma \mapsto \varphi((1-\sigma)u_{1}+
\sigma u_{2})$, we obtain
\begin{equation}
\label{e:phider}
\varphi(u_{2}) - \varphi(u_{1}) = \nabla{\varphi}(u^{\ast})\cdot (u_{2}-u_{1}),
\end{equation}
for some point $u^{\ast} = (\xi^{\ast}, \eta^{\ast})$ on the line
segment joining $u_{1}$ and $u_{2}$.  Since $Q(\rho)$ is convex,
$u^{\ast}\in Q(\rho)$.  From \eqref{e:phider}, we have
\begin{equation}
\label{e:basic1}
\abs{\varphi(u_{2})-\varphi(u_{1})} \leq \Norm{\nabla{\varphi}(u^{\ast})}_{1}\,
\Norm{u_{2}-u_{1}}_{\infty} .
\end{equation}

Let $D(\rho)$ be the region in the $ab$-plane defined
by the inequalities $r(\rho)\leq \alpha,\beta\leq s(\rho)$.
We may estimate $\Norm{\nabla{\varphi}(u^{\ast})}_{1}$ as follows:
\begin{align*}
\Norm{\nabla{\varphi}(u^{\ast})}_{1}
&= \abs{a - g_{x}(t_{0}, s_{0}, \xi^{\ast},\eta^{\ast})}
+ \abs{-g_{y}(t_{0},s_{0}, \xi^{\ast},\eta^{\ast})}\\
&=\abs{a - g_{x}(t_{0}, s_{0}, \xi^{\ast},\eta^{\ast})}
+ \abs{g_{y}(t_{0},s_{0}, \xi^{\ast},\eta^{\ast})}\\
&\leq \sup \{\, 
\abs{a-g_{x}(t,s, \xi,\eta)} + \abs{g_{y}(t,s,\xi,\eta)}\mid\\
&\qquad\qquad (t,s, \xi,\eta) \in [0,p)\times [0,\infty)\times Q(\rho)\,\}\\
&=\sup \lset
\abs{a - h} + \abs{k} \mid (h,k)\in G(\rho)\rset\\
& \leq\sup \lset
\abs{a - h} + \abs{k} \mid (h,k)\in D(\rho)\rset\\
& = \max\lset\abs{a-r(\rho)}, \abs{a-s(\rho)}\rset,
\end{align*}
using Lemma~\ref{thm:geoest}.  Using the definition of
$K(a,\rho)$ and \eqref{e:basic1}, we have
\begin{equation*}
\abs{\varphi(u_{2})-\varphi(u_{1})} \leq K(a,\rho) \Norm{u_{2}-u_{1}}_{\infty},
\end{equation*}
which translates into \eqref{e:basicineq} when the definitions are
expanded.
The inequality in \eqref{e:basicsize} comes from \eqref{e:basicineq}
by setting $a=0$, $(\xi_{1},\eta_{1})=(0,0)$ and $(\xi_{2},\eta_{2})
=(\xi, \eta)$, and using the triangle inequality.
\end{proof}


For a topological space $X$, we will use $\bcx(X)$ to denote the
space of bounded continuous functions $f\colon X\to \reals$,
equipped with the supremum norm, which will be denoted by
 $\Norm{f}$.
If $X$ is an interval, we omit the outer parentheses.

The next two lemmas show that the class $\gclass{p}$ is closed under
an operation that will be frequently employed in our proofs.

\begin{lemma}
\label{thm:pluginbounded}
Suppose that $g\in \gclass{p}$ and that $x$ and $y$ are bounded
continuous functions on $[0,p)\times [0, \infty)$.  Then the
function
\begin{equation*}
(t,s) \mapsto g(t,s, x(t,s),y(t,s))
\end{equation*}
is bounded on $[0,p)\times [0,\infty)$.
\end{lemma}

\begin{proof}
Choose $\rho$ such that $\Norm{x},\Norm{y}\leq \rho$.  Then
$(x(t,s), y(t,s))\in Q(\rho)$ for all $(t,s)\in [0,p)\times
[0,\infty)$.    Thus, \eqref{e:basicsize} shows that
\begin{equation*}
\abs{g(t,s, x(t,s), y(t,s))} \leq K(0,\rho) \max\lset
\abs{x(t,s)}, \abs{y(t,s)}\rset + \abs{g(t,s,0,0)}.
\end{equation*}
By hypothesis \gcond{2}, the function $g(\cdot,\cdot,0,0)$ is
bounded, and so
\begin{equation*}
\abs{g(t,s,x(t,s), y(t,s))} \leq K(0,\rho)\rho + \Norm{g(\cdot,\cdot,0,0)}.
 \end{equation*}
\end{proof}


\begin{lemma}[The $\scrG$-lemma]
Suppose that $g\in \gclass{p}$ and suppose that
\begin{equation*}
x, y, f \in \bcx([0,p)\times [0,\infty)).
\end{equation*}
Let $h\colon [0,p)\times [0,\infty)\times \reals\times \reals\to \reals$
be defined by
\begin{equation*}
h(t,s, \xi, \eta) = g(t,s, \xi + x(t,s), \eta + y(t,s)) + f(t,s).
\end{equation*}
Then $h$ is in $\gclass{p}$.
\end{lemma}

\begin{proof}
The function $h$ is clearly continuous, so hypothesis \gcond{1} is
satisfied.  The last lemma shows that $h(\cdot,\cdot,0,0)$ is bounded,
so \gcond{2} is satisfied.  The gradient of $h$ is given by
\begin{equation*}
\nabla{h}(t,s,\xi,\eta) = (g_{x}(t,s, \xi+x(t,s), \eta+y(t,s)),
g_{y}(t,s, \xi+ x(t,s), \eta+y(t,s)),
\end{equation*}
and \gcond{3} is satisfied.

Choose $M>0$ such that $\Norm{x},\Norm{y}\leq M$.  Then, if $(\xi,\eta)
\in Q(\rho)$, the point $(\xi + x(t,s), \eta+y(t,s))$ is
in $Q(\rho+M)$ for all $(t,s)$.   Thus, if we let $H(\rho)$ denote
the image of $[0,p)\times [0,\infty)\times Q(\rho)$ under $\nabla{h}$,
we have
\begin{equation}
\label{e:hsubg}
H(\rho) \subset G(\rho+M).
\end{equation}
Thus, $H(\rho)$ is a bounded set whose closure lies to the right of
$R$, so \gcond{4} is satisfied.

If we let $r_{h}$ and $r_{g}$ denote the $r$-functions for $h$ and
$g$ respectively, \eqref{e:hsubg} shows that $r_{g}(\rho+M)\leq
r_{h}(\rho)$.  Thus, to show that $\sup \rho r_{h}(\rho)=\infty$,
it will suffice to show that $\sup \rho r_{g}(\rho + M)=\infty$.
To prove this, let $A>0$ be arbitrary.   Since $\sup \rho
r_{g}(\rho)=\infty$, we can find some $\sigma$ such that
$\sigma r_{g}(\sigma) \geq A + M r_{g}(0)$.  We must have
$\sigma > M$, for otherwise $\sigma r_{g}(\sigma) \leq M r_{g}(0)$,
since $r_{g}$ is non-increasing.   Thus, we can write $\sigma = \rho +
M$ for $\rho>0$.  We have 
\begin{equation*}
(\rho + M) r_{g}(\rho + M) \geq A + M r_{g}(0)
\end{equation*}
which implies that
\begin{equation*}
\rho r_{g}(\rho+M) \geq A + M [ r_{g}(0)-r_{g}(\rho + M)].
\end{equation*}
The right hand side is greater than or equal to $A$, since $r_{g}$
is non-increasing.  Since $A$ was arbitrary, we conclude that
$\sup \rho r_{g}(\rho+M)=\infty$.  Thus, $h$ satisfies $\gcond{5}$.
\end{proof}

We should also observe the following lemma, whose proof is 
straightforward.

\begin{lemma}
\label{thm:restrict}
Suppose that $g\in \gclass{p}$ and that $0<q<p$.  Then, the
restriction of $g$ to $[0,q)\times [0,\infty)\times \reals^{2}$ is
in $\gclass{q}$.
\end{lemma}

\section{Existence and Uniqueness}    %%%%  section 3
\label{s:eu}

In this section we prove the existence and uniqueness of
solutions of the initial value problem \eqref{e:ivp}.   Our
first goal is the following Proposition.

\begin{prop}
\label{thm:eu1}
Suppose that $g\in \gclass{p}$ and that $\mu$ is a finite positive
Borel measure on $[0,\infty)$.   Then, for every $\varphi\in
\bcx(-\infty,0]$ and $f\in \bcx[0,\infty)$, there is a unique
function $x\in \bcx(-\infty,p)$ that satisfies the initial value
problem
\begin{equation}
\label{e:ivpp}
\begin{cases}
x(t) = \varphi(t), & t\in (-\infty, 0]\\[2\jot]
{\displaystyle
x'(t) + \int_{0}^{\infty} g(t,s, x(t), x(t-s))\, d \mu(s) = f(t),
}
& t\in [0,p).
\end{cases}
\end{equation}
\end{prop}

It will be useful to observe that if $x$ is a bounded solution of
\eqref{e:ivpp}, then $x'$ must be continuous and bounded on
$[0,\infty)$.

The first step in proving this proposition is to observe that we can
reduce the problem to the case where the initial condition $\varphi$
is zero.

To see this, let $\varphibar$ be an extension of $\varphi$ to the interval
$(-\infty,p)$, such that $\varphibar$ is bounded, $\varphibar$ is
$\cspace{1}$ on $[0,p)$ (interpreting $\varphibar'(0)$ as a right hand
derivative, as usual) and $\varphibar'$ is bounded on $[0,p)$.  An obvious
choice would be to define $\varphibar(t) = \varphi(0)$ for $t>0$, but
another choice will be useful in our stability analysis.

Suppose that $x$ is a solution of the initial value problem
\eqref{e:ivpp}.   Define $y = x - \varphibar$.  Then $y$ is bounded and
continuous and $y$ is continuously differentiable on $[0,p)$.
Of course, $y(t)=0$ for $t\in (-\infty,0]$.  If we substitute
$x = y + \varphibar$ in the initial value problem, for $t\in [0,p)$ we obtain
\begin{equation}
\label{e:eured1}
y'(t) + \varphibar'(t) + \int_{0}^{\infty}
g(t,s, y(t)+\varphibar(t), y(t-s)+\varphibar(t-s))\, d\mu(s) 
= f(t)\,.
\end{equation}

Let $m = \mu[0,\infty)$ denote the total mass of $\mu$, a
notation that will be used for the rest of the paper.  Then, for 
$t\in [0,p)$ we can rewrite \eqref{e:eured1} as
\begin{equation*}
y'(t) + \int_{0}^{\infty} [g(t,s, y(t)+\varphibar(t),
y(t-s)+\varphibar(t-s)) + \varphibar'(t)/m] \, d\mu(s)
 = f(t)\,.
\end{equation*}

Thus, if we define $h$ by
\begin{equation*}
h(t,s, \xi, \eta) = g(t,s, \xi+\varphibar(t), \eta + \varphibar(t-s))
+ \varphibar'(t)/m\,,
\end{equation*}
we see that $y$ is a solution of the initial value problem
\begin{equation}
\label{e:eunewip}
\begin{cases}
y(t) = 0, & t\in (-\infty,0]\\[2\jot]
{\displaystyle
y'(t) + \int_{0}^{\infty} h(t,s, y(t), y(t-s))\, d\mu(s) = f(t),
}
& t\in [0,p).
\end{cases}
\end{equation}
By the $\scrG$-lemma, $h$ is again in $\gclass{p}$.   

Conversely, if $y$ is a bounded solution of \eqref{e:eunewip},
then $x=y+\varphibar$ is a bounded solution of \eqref{e:ivpp}.
Thus, to prove Proposition~\ref{thm:eu1}, it will suffice
to prove the following Proposition.

\begin{prop}
\label{thm:euprop}
Suppose that $g\in \gclass{p}$ and that $\mu$ is a finite positive
Borel measure on $[0,\infty)$.   Then, for every $f\in
\bcx[0,\infty)$, there is a unique function $x\in \bcx(-\infty,p)$
that satisfies the initial value problem
\begin{equation}
\label{e:ivpp2}
\begin{cases}
x(t) = 0, & t\in (-\infty, 0]\\[2\jot]
{\displaystyle
x'(t) + \int_{0}^{\infty} g(t,s, x(t), x(t-s))\, d \mu(s) = f(t),
}
& t\in [0,p).
\end{cases}
\end{equation}
\end{prop}

The proof of this Proposition will occupy most of the rest of this
section.  We begin by introducing the function spaces we will use.
Let $\bc{1}[0,p)$ denote the space of functions $f\in \bcx[0,p)$
such that $f$ is differentiable and $f'\in \bcx[0,p)$.

Let $X_{p}$ be the space of functions $x\in \bcx(-\infty,p)$ such that
$x=0$ on the negative half-axis $(-\infty, 0]$.  This is a closed
subspace of $\bcx(-\infty,p)$ and hence a Banach space in the
supremum norm.    Finally, let $X_{p}^{1}$ denote the space
of functions $x\in X_{p}$ such that the restriction of $x$
to $[0,p)$ is in $\bc{1}[0,p)$.

The simplest case of the initial value problem \eqref{e:ivpp2}
is the problem
\begin{equation*}
\begin{cases}
x(t) = 0, & t\in (-\infty,0]\\
x'(t) + a x(t) = f(t), & t\in [0, p),
\end{cases}
\end{equation*}
where $a>0$.
This is, of course, easy to solve by elementary means.  The results
are summarized in the next lemma.

\begin{lemma}
For $a>0$, let $L_{a}\colon X_{p}^{1}\to \bcx[0,p)$ be the operator
defined by
\begin{equation*}
L_{a}x(t) = x'(t) + a x(t), \qquad t\in [0,p).
\end{equation*}
Then, $L_{a}$ is invertible, with the inverse given by
\begin{equation*}
L_{a}^{\inv} f(t ) = \begin{cases}
0, & t\in (-\infty, 0]\\[2\jot]
{\displaystyle
\int_{0}^{t} e^{a(s-t)} f(s)\, ds,
}
& t\in [0,p).
\end{cases}
\end{equation*}

From this formula we get the supremum norm estimate
\begin{equation}
\label{e:laest}
\Norm{L_{a}^{\inv} f} \leq \frac{1}{a} \Norm{f}.
\end{equation}
\end{lemma}

We will now introduce some operators that will be useful in the proof.
If $a>0$ and $x\in X_{p}$, define a function $N_{a}(x)$
on $[0,p)\times [0,\infty)$ by
\begin{equation*}
N_{a}(x)(t,s) = a x(t) - g(t,s, x(t),x(t-s)).
\end{equation*}
By Lemma~\ref{thm:pluginbounded}, this function is bounded,
so we have a nonlinear operator $N_{a}\colon X_{p}\to
\bcx([0,p)\times [0,\infty))$.

We will use the notation $\bee{\rho}$ for the closed ball of
radius $\rho$ centered at the origin in $X_{p}$.  If $x\in
\bee{\rho}$, then $(x(t), x(t-s))\in Q(\rho)$ for all $t$ and $s$.
Thus, if $x,y\in \bee{\rho}$ and we apply Lemma~\ref{thm:basicest},
we have 
\begin{eqnarray*}
\lefteqn{ \abs{[a x(t) -g(t,s, x(t), x(t-s))]- [ a y(t) - g(t,s, y(t),
y(t-s))]} } 
 \\ 
&\leq& K(a,\rho) \max\lset\abs{x(t)-y(t)}, \abs{x(t-s)-y(t-s)}\rset\\
 &\leq& K(a,\rho) \Norm{x-y},
\end{eqnarray*}
since both terms in the maximum are bounded by $\Norm{x-y}$.  Thus,
we have the estimate
\begin{equation}
\label{e:naest}
\Norm{N_{a}(x)-N_{a}(y)} \leq K(a,\rho) \Norm{x-y},
\qquad x,y\in \bee{\rho}.
\end{equation}

For $a>0$, we define a nonlinear operator $M_{a}\colon X_{p}\to \bcx[0,p)$ by
\begin{equation*}
M_{a}(x)(t) = a x(t) - \int_{0}^{\infty} g(t,s, x(t),x(t-s))\,
d\mu(s), \qquad t\in [0,p).
\end{equation*}
Recalling the notation $m=\mu[0,\infty)$, we can rewrite this as
\begin{align*}
M_{a}(x)(t) &= \int_{0}^{\infty} [(a/m) x(t) - g(t,s, x(t), x(t-s))] \,
d\mu(s)  \\
&=\int_{0}^{\infty} N_{a/m}(x)(t,s) \, d\mu(s).
\end{align*}
Hence, by applying \eqref{e:naest} we obtain the estimate
\begin{equation}
\label{e:maest}
\Norm{M_{a}(x)-M_{a}(y)} \leq m K(a/m,\rho)\Norm{x-y},
\qquad x,y\in \bee{\rho}.
\end{equation}

We next show that \eqref{e:ivpp2} can be reduced to a fixed point
problem in $X_{p}$.  Suppose that $x\in X_{p}$ is a solution of
\eqref{e:ivpp2}.   Then, if $a>0$ is arbitrary, we have, for
$t\geq 0$,
\begin{equation*}
x'(t) + a x(t) = a x(t) - \int_{0}^{\infty} g(t,s, x(t), x(t-s))\,
d\mu(s) + f(t).
\end{equation*}
We may rewrite this as 
\begin{equation*}
L_{a} x = M_{a}(x) + f.
\end{equation*}
Since $L_{a}$ is invertible, this is equivalent to
\begin{equation*}
x = T_{a}(x)\,,
\end{equation*}
where $T_{a}\colon X_{p}\to X_{p}$ is defined by 
\begin{equation*}
T_{a}(x) = L_{a}^{\inv} M_{a}(x) + L_{a}^{\inv}f.
\end{equation*}

Conversely, if $x\in X_{p}$ and $x=T_{a}(x)$, then $x\in
X_{p}^{1}\subset X_{p}$, and we may reverse the steps to conclude
that $x$ is a solution of \eqref{e:ivpp2}.  Thus, we have the
following lemma.

\begin{lemma}
\label{thm:cae}
The following conditions are equivalent.
\begin{enumerate}
\item
$x\in X_{p}$ is a solution of \eqref{e:ivpp2}.
\item
$x\in X_{p}$ and $T_{a}(x)=x$ for \emph{all} $a>0$.
\item
$x\in X_{p}$ and $T_{a}(x)=x$ for \emph{some} $a>0$.
\end{enumerate}
\end{lemma}

To show that one of the operators $T_{a}$ has a fixed point, we
will use the Contraction Mapping Lemma.
From \eqref{e:maest} and \eqref{e:laest}, we get the estimate
\begin{equation}
\label{e:taest}
\Norm{T_{a}(x)-T_{a}(y)} \leq \frac{m}{a} K(a/m,\rho) \Norm{x-y},
\qquad x,y\in \bee{\rho}\,.
\end{equation}
We now make a specific choice of $a$.  For $\rho\geq 0$, define
\begin{equation*}
a(\rho) = m\frac{r(\rho)+s(\rho)}{2}\,.
\end{equation*}
It is then easily calculated that
\begin{equation*}
K(a(\rho)/m, \rho) = \frac{s(\rho)-r(\rho)}{2}
\end{equation*}
and thus that
\begin{equation}
\label{e:ammk}
\frac{a(\rho)}{m} - K(a(\rho)/m,\rho) = r(\rho) > 0\,.
\end{equation}
In particular, $K(a(\rho)/m, \rho)<a(\rho)/m$, and so
\begin{equation*}
\frac{m}{a(\rho)} K(a(\rho)/m,\rho) < 1\,.
\end{equation*}

If we set $a=a(\rho)$ in \eqref{e:taest}, we have
\begin{equation*}
\Norm{T_{a(\rho)}(x)-T_{a(\rho)}(y)} \leq \frac{m}{a(\rho)} K(a(\rho)/m,\rho)
\Norm{x-y}, \qquad x,y\in \bee{\rho}\,,
\end{equation*}
where the constant is strictly less than one.  This is enough to
prove the uniqueness part of Proposition~\ref{thm:euprop}.
If $x, y\in X_{p}$ are solutions of the initial value problem
\eqref{e:ivpp2}, we can choose $\rho$ such that $x,y\in \bee{\rho}$.
By Lemma~\ref{thm:cae}, $x$ and $y$ are fixed points of
$T_{a(\rho)}$ and hence 
\begin{equation*}
\Norm{x - y} \leq \frac{m}{a(\rho)}K(a(\rho)/m, \rho) \Norm{x-y}\,.
\end{equation*}
Since the constant is strictly less than one, this implies $x=y$.

We have not yet proven the existence of a solution, because we don't
know that $T_{a(\rho)}$ maps $\bee{\rho}$ into itself.   If we can
find a $\rho$ such that $T_{a(\rho)}(\bee{\rho})\subset \bee{\rho}$,
it will follow from the Contraction Mapping Lemma that $T_{a(\rho)}$
has a fixed point, and hence that there is a solution $x\in X_{p}$
of the initial value problem \eqref{e:ivpp2}.

To find such a $\rho$, we make some additional estimates.
Setting $y=0$ in \eqref{e:maest} gives
\begin{equation*}
\Norm{M_{a}(x)} \leq m K(a/m, \rho)\Norm{x} + m \Norm{g(\cdot, \cdot,
0, 0)}, \qquad x\in \bee{\rho}.
\end{equation*}
For brevity, set $\gamma=\Norm{g(\cdot, \cdot, 0, 0)}$.
By the definition of $T_{a}$, we have
\begin{equation*}
\Norm{T_{a}(x)} \leq \frac{1}{a} \Norm{M_{a}(x)} + \frac{1}{a} \Norm{f}.
\end{equation*}
Thus, we have
\begin{equation*}
\Norm{T_{a}(x)} \leq \frac{m}{a} K(a/m, \rho) \Norm{x} +
\frac{m}{a}\gamma + \frac{1}{a} \Norm{f}, \qquad
x\in \bee{\rho}.
\end{equation*}
If we set $a=a(\rho)$ in the last inequality and estimate $\Norm{x}$
by $\rho$, we obtain the estimate
\begin{equation*}
\Norm{T_{a(\rho)}(x)} \leq \frac{m}{a(\rho)} K(a(\rho)/m,\rho)\rho
+ \frac{m}{a(\rho)}\gamma + \frac{1}{a(\rho)} \Norm{f},
\qquad x\in\bee{\rho}.
\end{equation*}
From this inequality, we see that if we can choose $\rho$ such that
\begin{equation}
\label{e:ballendo1}
\frac{m}{a(\rho)} K(a(\rho)/m,\rho)\rho
+ \frac{m}{a(\rho)}\gamma + \frac{1}{a(\rho)} \Norm{f} \leq \rho,
\end{equation}
we will have $T_{a(\rho)}(\bee{\rho})\subset \bee{\rho}$.
If we multiply both sides of \eqref{e:ballendo1} by $a(\rho)/m$
and move the first term on the left to the other side, we have
\begin{equation*}
\gamma + \frac{1}{m} \Norm{f} \leq \biggl[ \frac{a(\rho)}{m}
- K(a(\rho)/m,\rho)\biggr] \rho\,.
\end{equation*}
By \eqref{e:ammk}, this reduces to
\begin{equation}
\label{e:ballendo2}
\gamma + \frac{1}{m} \Norm{f} \leq \rho r(\rho)\,.
\end{equation}
In this inequality, the left hand side is independent of $\rho$,
while the right hand side can be made as large as we like by
our assumption \gcond{5}.  Thus, we can choose $\rho$ to
satisfy \eqref{e:ballendo2}.

This completes the proof of Proposition~\ref{thm:euprop},
and hence the proof of Proposition~\ref{thm:eu1}.


We can now apply Proposition~\ref{thm:eu1} to prove our main
existence and uniqueness theorem.

\begin{thm}
Suppose that $g\in \gclass{p}$, where $0<p\leq \infty$, and that
$\mu$ is a finite positive Borel measure on $[0,\infty)$.  Then
for every $\varphi\in \bcx(-\infty,0]$ and $f\in \bcx[0,p)$, the
initial value problem
\begin{equation}
\label{e:ivpfp}
\begin{cases}
x(t)= \varphi(t), & t\in (-\infty,0]\\[2\jot]
{\displaystyle
x'(t) + \int_{0}^{\infty} g(t,s, x(t), x(t-s))\, d\mu(s) = f(t),
}
& t \in [0,p)
\end{cases}
\end{equation}
has a unique maximally defined solution $x$, which is defined and
bounded on $(-\infty ,p)$.
\end{thm}

\begin{proof}
We know, of course, that there is a solution $x$ of \eqref{e:ivpfp} which
is defined and bounded on $(-\infty, p)$.


To prove the uniqueness assertion, suppose that $y$ is a function,
possibly unbounded,
defined on $(-\infty,q)$, where $0< q \leq p$, that satisfies
\eqref{e:ivpfp} on its domain.   Let $t_{0}\in (0,q)$ be fixed
but arbitrary.  Choose some point $u$ such that $t_{0}<u<q$.
By continuity, both $x$ and $y$ are bounded on $[0,u]$, and
hence on $(-\infty,u)$.  The restriction of $g$ to $[0,u)\times
[0,\infty)\times \reals^{2}$ is in the class $\gclass{u}$ and
the restrictions of $x$ and $y$ to $(-\infty,u)$ are both bounded solutions
of the corresponding initial value problem.  Thus, by the
uniqueness part of Proposition~\ref{thm:eu1}, we must have
$x=y$ on $(-\infty, u)$.  In particular, $x(t_{0})=y(t_{0})$.
Since $t_{0}\in (0,q)$ was arbitrary, we conclude
that $x=y$ on the intersection of their domains.
\end{proof}

We observe that the argument in the proof can be extended to show
the existence of a unique maximally defined solution to
\eqref{e:ivpfp} even in the case where $f$ is unbounded, but
we will not pursue this result here.

It's not hard to trace through our proof to show that the choice
of $\rho$ and the contraction constant can be made uniformly
for $\varphi$ and $f$ in a closed ball in
$\bcx(-\infty,0]\times \bcx[0,p)$, and then to use the Uniform
Contraction Principle \cite[page~25]{CH} to show that the solution $x$
depends continuously on the data $(\varphi,f)$.

Under an additional assumption on $g$,  which is automatically
satisfied in the time independent case and is preserved by the
operations in the $\scrG$-lemma, it can be shown that the dependence
of $x$ on $(\varphi, f)$ is $\cspace{1}$.  The details are sufficiently
involved that we won't pursue them here.


\section{Asymptotic Stability}  %%%  section 4
\label{s:as}

For the rest of the paper, we will be concerned with the case
$p=\infty$ and we will write $\gspace=\gclass{p}$, $X=X_{p}$, etc.
Our goal in this section is the following theorem.

\begin{thm}
\label{thm:as}
Suppose that $g\in \gspace$ and let $\mu$ be a finite positive
Borel measure on $[0,\infty)$.  Suppose that $f\in \bcx[0,\infty)$ and
$\varphi \in \bcx(-\infty,0]$
and consider the initial value problem
\begin{equation}
\label{e:ivpa1}
\begin{cases}
x(t) = \varphi(t), & t\in (-\infty,0]\\[2\jot]
{\displaystyle
x'(t) + \int_{0}^{\infty} g(t,s, x(t), x(t-s))\, d\mu(s) = f(t),
}
& t\in [0, \infty).
\end{cases}
\end{equation}
Suppose that $x_{1}$ and $x_{2}$ are solutions of \eqref{e:ivpa1} for
different initial conditions $\varphi=\varphi_{1}$ and $\varphi=\varphi_{2}$ respectively.
Then,
\begin{equation*}
\lim_{t \to\infty} \abs{x_{1}(t)-x_{2}(t)} = 0.
\end{equation*}
\end{thm}

The proof will occupy the remainder of this section.  We will first
make some reductions.   Let $x_{1}$ and $x_{2}$ be solutions
for initial conditions $\varphi_{1}$ and $\varphi_{2}$.  Define
$y=x_{2}-x_{1}$.  For $t\leq 0$, $y(t)=\varphi_{2}(t)-\varphi_{1}(t)=
\psi(t)$, and $\psi$ is bounded and continuous.  For $t\geq 0$, we
may subtract the delay differential equations satisfied by $x_{1}$
and $x_{2}$ to obtain
$$
x'_{2}(t)-x'_{1}(t) +
 \int_{0}^{\infty} [ g(t,s, x_{2}(t), x_{2}(t-s)) -
g(t,s, x_{1}(t), x_{1}(t-s))] \, d\mu(s) = 0\,.
$$
We may rewrite this as
\begin{multline}
\label{e:asred1}
y'(t) +\\ \int_{0}^{\infty} [g(t,s, y(t)+x_{1}(t), y(t-s)+x_{1}(t-s))
-
 g(t,s, x_{1}(t), x_{1}(t-s))]\, d\mu(s) = 0.
\end{multline}
Thus, if we define $h$ by
\begin{equation*}
h(t,s, \xi, \eta) = g(t,s, \xi+x_{1}(t), \eta+ x_{1}(t-s))
- g(t,s, x_{1}(t), x_{1}(t-s)),
\end{equation*}
we see that $y$ is a solution of the initial value problem
\begin{equation*}
\begin{cases}
y(t)=\psi(t), & t\in (-\infty, 0]\\[2\jot]
{\displaystyle
y'(t) + \int_{0}^{\infty} h(t,s, y(t), y(t-s))\, d\mu(s) = 0,
}
&t\in [0,\infty).
\end{cases}
\end{equation*}

By the $\scrG$-lemma, $h$ is again in $\gspace$.  We also have $h(t,s,0,0)
\equiv 0$.  Thus, in order to prove Theorem~\ref{thm:as}, it will
suffice to prove the following proposition.

\begin{prop}
\label{thm:asp1}
Suppose that $g\in\gspace$ and that $g(\cdot,\cdot,0,0)=0$.  Let $\mu$
be a finite positive Borel measure on $[0,\infty)$.  
Suppose that $\varphi\in \bcx(-\infty,0]$, and let $x$ be the solution of
the initial value problem
\begin{equation}
\label{e:ivpas1}
\begin{cases}
x(t) = \varphi(t), & t\in (-\infty,0]\\[2\jot]
{\displaystyle
x'(t) + \int_{0}^{\infty} g(t,s, x(t), x(t-s))\, d\mu(s) = 0,
}
& t\in [0,\infty).
\end{cases}
\end{equation}
Then $\displaystyle\lim_{t\to\infty} x(t) = 0$.
\end{prop}

In order to prove Proposition~\ref{thm:asp1}, we will reduce to the
case $\varphi=0$, as in the last section.

Thus, let $\varphibar$ be an extension of $\varphi$ to $(-\infty,\infty)$.
We choose $\varphibar$ so that it is continuously 
differentiable on $[0,\infty)$ and
such that the support of $\varphibar$ is bounded above.  Thus, both
$\varphibar$ and $\varphibar'$ are bounded.

Let $x$ be a solution of the initial value problem \eqref{e:ivpas1} and
define $y(t)= x(t)-\varphibar(t)$.  Then $y$ is bounded and continuous,
$y=0$ on $(-\infty,0]$ and by substituting $x=y+\varphibar$
 into \eqref{e:ivpas1}, we see
that for $t\geq 0$, $y$ satisfies
\begin{equation*}
y'(t) + \varphibar'(t) + \int_{0}^{\infty} g(t,s, y(t)+\varphibar(t),
y(t-s)+\varphibar(t-s))\, d\mu(s) = 0.
\end{equation*}
We choose to rewrite this as
\begin{multline}
\label{e:redtoz}
y'(t) + \int_{0}^{\infty}[ g(t,s, y(t)+\varphibar(t), y(t-s)+\varphibar(t-s))
- 
g(t,s, \varphibar(t),\varphibar(t-s))]\, d\mu(s) \\
=-\varphibar'(t) - \int_{0}^{\infty} g(t,s, \varphibar(t),\varphibar(t-s))\,
d \mu(s).
\end{multline}
We claim that the right hand side of this equation goes to $0$ as
$t$ goes to infinity.  This is certainly true of $\varphibar'$, since
the support of $\varphibar$ is bounded above.  

To deal with the other term, hold $s$ fixed for a moment.  Then,
for sufficiently large $t$, both $\varphibar(t)$ and $\varphibar(t-s)$ are
zero.  Since $g(t,s,0,0)\equiv 0$, we conclude that
$g(t,s,\varphibar(t),\varphibar(t-s))=0$ for all sufficiently large $t$.
Thus, in the integral
\begin{equation}
\label{e:asrhs}
\int_{0}^{\infty} g(t,s, \varphibar(t),\varphibar(t-s))\, d\mu(s)
\end{equation}
the integrand goes to zero for each fixed $s$ as $t\to\infty$.
The integrand is also bounded in absolute value by some constant,
and constant functions are integrable with respect to $\mu$, since
$\mu$ is finite.  Thus, \eqref{e:asrhs} goes to zero as $t\to\infty$
by the Dominated Convergence Theorem.

If we now define $h$ by
\begin{equation}
\label{e:redtozfun}
h(t,s,\xi,\eta) = g(t,s,\xi+\varphibar(t),\eta+\varphibar(t-s)) -
g(t,s,\varphibar(t),\varphibar(t-s)),
\end{equation}
$h$ is again in $\gspace$ and $h(t,s,0,0)\equiv 0$.  Thus, $y$ is the
solution of an initial value problem
\begin{equation}
\label{e:ivph2}
\begin{cases}
y(t)=0, &t\in (-\infty,0]\\[2\jot]
{\displaystyle
y'(t) + \int_{0}^{\infty} h(t,s, y(t), y(t-s))\, d\mu(s) = f(t),
}
& t \in [0,\infty),
\end{cases}
\end{equation}
where $f(t)\to 0$ as $t\to \infty$.

If we show that the solution of $y$ of \eqref{e:ivph2} goes to zero at
infinity, then the solution $x=y+\varphibar$ of \eqref{e:ivpas1} will also go
to zero at infinity, since the support of $\varphibar$ is bounded above.

Thus, in order to prove Proposition~\ref{thm:asp1}, it will suffice to
prove the following proposition.

\begin{prop}
\label{thm:as3}
Suppose that $g\in\gspace$ and $g(\cdot,\cdot,0,0)=0$. Let $\mu$ be
a finite positive Borel measure on $[0,\infty)$.  Suppose that
$f\in \bcx[0,\infty)$ and $f(t)\to 0$ as $t\to \infty$.  Let $x$
be the solution of the initial value problem
\begin{equation}
\label{e:ivpasx}
\begin{cases}
x(t) = 0, & t \in (-\infty,0]\\[2\jot]
{\displaystyle
x'(t) + \int_{0}^{\infty} g(t,s, x(t), x(t-s))\, d\mu(s) = f(t),
}
& t\in [0,\infty).
\end{cases}
\end{equation}
Then $\displaystyle\lim_{t\to \infty} x(t) = 0$.
\end{prop}

The proof of Proposition~\ref{thm:as3} will occupy the rest of the
section.

Using the apparatus of the last section, our strategy is as follows.
Let $X_{0}\subset X$ be the space of functions $x\in X$ such
that $x(t)\to 0$ as $t\to \infty$.  It is easy to check that
$X_{0}$ is a closed subspace of $X$.   

Suppose that we show that $X_{0}$ is invariant under all of the
operators $T_{a}$.  Then, as in the last section, we may find some
$\rho$ such that $\bee{\rho}$ is invariant under $T_{a(\rho)}$
and $T_{a(\rho)}$ is a contraction on $\bee{\rho}$.  Thus,
$T_{a(\rho)}$ has a fixed point $x$ in $\bee{\rho}$, which is
precisely the solution of the initial value problem \eqref{e:ivpasx}.
But then $X_{0}\cap \bee{\rho}$ is a closed subset of $X$ which
is invariant under $T_{a(\rho)}$ and on which $T_{a(\rho)}$ is
a contraction.  Thus, the fixed point $x$ must be in $X_{0}$,
i.e., $x(t)\to 0$ as $t\to\infty$.

Let $\bcv$ denote the space of functions $f\in
\bcx[0,\infty)$ such that $f(t)\to 0$ as $t\to\infty$.   
Since $T_{a}(x) = L_{a}^{\inv}M_{a}(x)+L_{a}^{\inv}f$, to show that
$X_{0}$ is invariant under $T_{a}$, it will suffice to show that
$M_{a}$ sends $X_{0}$ into $\bcv$ and $L_{a}^{\inv}$ sends
$\bcv$ into $X_{0}$ (since $f$ in \eqref{e:ivpasx} is in
$\bcv$).

To show that $M_{a}(x)$ sends $X_{0}$ into $\bcv$, it will
plainly suffice to show that the operator $H$ defined by
\begin{equation}
\label{e:hop}
H(x)(t) = \int_{0}^{\infty} g(t,s,x(t), x(t-s))\, d\mu(s),\qquad t\geq 0\,,
\end{equation}
sends $X_{0}$ into $\bcv$.   To show this, suppose that $x\in X_{0}$
and choose $\rho\geq \Norm{x}$.
Since $g(\cdot,\cdot,0,0)=0$, \eqref{e:basicsize} gives
\begin{equation*}
\abs{g(t,s, x(t), x(t-s))} \leq K(0,\rho) \max\lset
\abs{x(t)},\abs{x(t-s)}
\rset\,.
\end{equation*}
If we hold $s$ fixed and let $t$ go to infinity, $x(t)$ and $x(t-s)$
go to zero, and so $g(t,s,x(t),x(t-s))\to 0$.  Thus, the integrand
in \eqref{e:hop} goes to zero as $t\to \infty$ for fixed $s$.  Since
the integrand is bounded by a constant, the Dominated Convergence
Theorem shows that $H(x)(t)\to 0$ as $t$ goes to infinity.

To show that $L_{a}^{\inv}$ maps $\bcv$ into $X_{0}$, suppose that
$f\in\bcv$.  By our formula for $L_{a}^{\inv}$, we have
\begin{equation*}
L_{a}^{\inv} f(t) = \int_{0}^{t} e^{a(s-t)} f(s) \, ds\,.
\end{equation*}
for $t\geq 0$.   By a simple change of variable, we may rewrite this
convolution as
\begin{equation*}
L_{a}^{\inv}f(t) = \int_{0}^{t} e^{-as} f(t-s)\, ds\,.
\end{equation*}
Thus, we have
\begin{equation*}
\abs{L_{a}^{\inv}f(t)} \leq \int_{0}^{t} e^{-as} \abs{f(t-s)} \, ds
\leq \int_{0}^{\infty} e^{-as} \abs{f(t-s)} \, ds\,,
\end{equation*}
where the last integral converges because $f$ is bounded.  In the integral
\begin{equation}
\label{e:laop}
\int_{0}^{\infty} e^{-as} \abs{f(t-s)}\, ds\,,
\end{equation}
the integrand goes to zero as $t\to\infty$ for each fixed $s$, and
the integrand is bounded by the integrable function $e^{-as}\Norm{f} $.
Thus, \eqref{e:laop} goes to zero as $t$ goes to infinity,
and so $L_{a}^{\inv}f$ goes to zero at infinity.

This completes the proof of Proposition~\ref{thm:as3} and hence
the proof of Theorem~\ref{thm:as}.

\section{Exponential Asymptotic Stability}  %%%% section 5
\label{s:eas}

In this section, we  show that under an exponential decay condition
on the measure $\mu$, the solutions of our initial value problem
are globally exponentially asymptotically stable.   Specifically,
we prove the following theorem.

\begin{thm}
\label{thm:eas}
Suppose that $g\in\gspace$ and that $\mu$ is a finite Borel measure
on $[0,\infty)$ such that
\begin{equation}
\label{e:mucond}
\int_{0}^{\infty} e^{\lambda_{0} s} \, d\mu(s) < \infty,
\qquad\text{for some $\lambda_{0}>0$.}
\end{equation}
For $f\in \bcx[0,\infty)$ and $\varphi\in \bcx(-\infty,0]$, consider
the initial value problem
\begin{equation}
\label{e:easivp}
\begin{cases}
x(t) = \varphi(t), & t\in (-\infty, 0]\\[2\jot]
{\displaystyle
x'(t) + \int_{0}^{\infty} g(t,s,x(t),x(t-s))\, d\mu(s) = f(t),
}
& t\in [0,\infty).
\end{cases}
\end{equation}
If $x_{1}$ and $x_{2}$ are two solutions of \eqref{e:easivp}
for different initial conditions $\varphi=\varphi_{1}$ and $\varphi=\varphi_{2}$
respectively, then there are constants $C\geq 0$ and $\lambda >0$
such that
\begin{equation*}
\abs{x_{1}(t)-x_{2}(t)} \leq C e^{-\lambda t}, \qquad t\geq 0.
\end{equation*}
\end{thm}

To prove this theorem, we let $y=x_{2}-x_{1}$ and make the same 
reduction we made in Section~\ref{s:as} in going from Theorem~\ref{thm:as} to
Proposition~\ref{thm:asp1}. Thus, to prove Theorem~\ref{thm:eas}, it will
suffice to prove the following proposition.

\begin{prop}
\label{thm:easp2}
Suppose that $g\in\gspace$ and that $\mu$ is a finite Borel measure
on $[0,\infty)$ which satisfies the condition \eqref{e:mucond}.
Suppose, also, that $g(\cdot,\cdot,0,0)=0$.
If $\varphi\in \bcx(-\infty,0]$ and $x$ is the solution of the
initial value problem
\begin{equation}
\label{e:easivp1}
\begin{cases}
x(t) = \varphi(t), & t\in (-\infty,0]\\[2\jot]
{\displaystyle
x'(t) + \int_{0}^{\infty} g(t,s, x(t),x(t-s))\, d\mu(s) = 0,
}
& t\in [0,\infty),
\end{cases}
\end{equation}
then there are constants $C\geq 0$ and $\lambda >0$ such that
\begin{equation*}
\abs{x(t)} \leq C e^{-\lambda t}, \qquad t\geq 0.
\end{equation*}
\end{prop}

To prove this proposition, we next make the same reduction that
we made in Section~\ref{s:as} in passing from
Proposition~\ref{thm:asp1} to Proposition~\ref{thm:as3}.
Thus, suppose that $x$ is the solution of \eqref{e:easivp1}.
Choose an extension $\varphibar$ of $\varphi$ which is $\cspace{1}$
on $[0,\infty)$ and has support bounded above.  Define $y=x-\varphibar$,
so $y=0$ on $(-\infty,0]$.   As before, we may write the equation
satisfied by $y$ in the form \eqref{e:redtoz}.  In our present
context,  we need to show that the function on the right hand
side of \eqref{e:redtoz} is exponentially decreasing.   This
is no problem for $\varphibar'$, since the support of $\varphibar$ is
bounded above.  Thus, we need the following lemma.

\begin{lemma}
Suppose that $g\in \gspace$ and that $g(\cdot, \cdot, 0, 0)=0$.
Let $\mu$ satisfy Condition \eqref{e:mucond}.  Define a function
$f$ by
\begin{equation}
\label{e:rhsf}
f(t) = \int_{0}^{\infty} g(t,s,\varphibar(t), \varphibar(t-s))\, d\mu(s)\,,
\end{equation}
where $\varphibar\in \bcx(-\infty,\infty)$ and the support of $\varphibar$
is bounded above by $b>0$.  Then there is a constant $C\geq 0$ such
that
\begin{equation*}
\abs{f(t)} \leq C e^{-\lambda_{0} t},\qquad t\geq 0\,.
\end{equation*}
\end{lemma}


\begin{proof}[Proof of Lemma]
Consider first the $\mu$-measure of the interval $[t,\infty)$.
Let $K$ denote the value of the integral in \eqref{e:mucond}.  Then,
for any $t \geq 0$ we have
\begin{align*}
e^{\lambda_{0}t} \mu[t,\infty) &= \int_{t}^{\infty} e^{\lambda_{0}t} \,
d\mu(s)\\
& \leq \int_{t}^{\infty} e^{\lambda_{0}s}\, d\mu(s)\\
& \leq \int_{0}^{\infty} e^{\lambda_{0} s}\, d\mu(s) = K,
\end{align*}
and so
\begin{equation*}
\mu[t,\infty) \leq K e^{-\lambda_{0} t}.
\end{equation*}

Next consider the function $g(t,s,\varphibar(t),\varphibar(t-s))$.
If $t>b$, this function will be zero if $t-s> b$, since
$g(t,s,0,0)\equiv 0$.  Thus, the integrand in \eqref{e:rhsf} is
nonzero only for $s\geq t-b$.  Then we have
\begin{equation*}
\abs{f(t)} \leq \int_{t-b}^{\infty} \abs{g(t,s,
\varphibar(t),\varphibar(t-s))}
\,d\mu(s).
\end{equation*}
The integrand is bounded by some constant $C$, so we have
\begin{equation*}
\abs{f(t)} \leq C \mu[t-b,\infty) \leq CKe^{\lambda_{0}b} e^{-\lambda_{0} t},
\end{equation*}
for $t>b$.  Since $\abs{f}$ is bounded on $[0,b]$, this completes
the proof of the lemma.
\end{proof}


To return to the discussion prior to the lemma,
by applying this lemma, we see that $y=x-\varphibar$ is a solution of an
initial value problem of the form \eqref{e:ivph2}, where $h$
is defined by \eqref{e:redtozfun} and $f$ is exponentially
decreasing.   If we show that the solution of this initial value
problem is exponentially decreasing, $x=y+\varphibar$ will be
exponentially decreasing, since the support of $\varphibar$ is bounded
above.   Thus, to prove Proposition~\ref{thm:easp2}, it will suffice
to prove the following proposition.


\begin{prop}
\label{thm:easp3}
Suppose that $g\in\gspace$ and $g(\cdot,\cdot,0,0)=0$.
Let $\mu$ be a finite positive Borel
measure on $[0,\infty)$ that satisfies \eqref{e:mucond}.  Suppose
that $f\in \bcx[0,\infty)$ and that there is a constant $K$
such that $\abs{f(t)}\leq K e^{-\lambda_{0}t}$.  Let $x$ be the
solution of the initial value problem
\begin{equation}
\label{e:easivpf}
\begin{cases}
x(t)  = 0, & t\in (-\infty, 0]\\[2\jot]
{\displaystyle
x'(t) + \int_{0}^{\infty} g(t,s, x(t),x(t-s))\, d\mu(s) = f(t),
}
& t\in [0,\infty).
\end{cases}
\end{equation}
Then, for $\lambda>0$ sufficiently small, there is a constant $C$
such that
\begin{equation*}
\abs{x(t)} \leq C e^{-\lambda t}.
\end{equation*}
\end{prop}

To prove this proposition, we first introduce some spaces of
exponentially decreasing functions.   For $\lambda>0$, let $\zspace{\lambda}$
denote the space of functions $x\in X$ such that $\abs{x(t)}\leq
C e^{-\lambda t}$ for some constant $C$ and all $t$ (recall that
elements of $X$ are zero on $(-\infty,0]$).

We will be able to show that $\zspace{\lambda}$ is invariant under
the operators $T_{a}$ for sufficiently small $\lambda$.  This is
not sufficient to prove the proposition, since $\zspace{\lambda}$
is not closed in $X$.   Indeed, if $X_{c}\subset X$ denotes
the set of functions in $X$ with compact support, then
$X_{c}\subset \zspace{\lambda} \subset X_{0}$ for all $\lambda$,
but the closure of $X_{c}$ in $X$ is $X_{0}$.   To deal
with this problem, we put a norm on $\zspace{\lambda}$ and
show that an appropriate $T_{a}$ is a contraction in this
norm.

Let $e_{\lambda}(t) = e^{\lambda t}$.  A function $x\in X$ is in
$\zspace{\lambda}$ if and only if $e_{\lambda}x$ is bounded.
We define the norm on
$\zspace{\lambda}$ by
\begin{equation*}
\Norm{x}_{\lambda} = \Norm{e_{\lambda} x} = \sup\lset e^{\lambda t} \abs{x(t)}
\mid t\in\reals\rset.
\end{equation*}
Since $e_{\lambda}\geq 1$ where $x\ne 0$, we see that
$\Norm{x}\leq \Norm{x}_{\lambda}$, so the inclusion of $\zspace{\lambda}$
into $X$ is continuous.   The mapping $\zspace{\lambda}\to X\colon
x \mapsto e_{\lambda} x$ is a (bijective) isometry, so $\zspace{\lambda}$
is a Banach space.   If $\lambda_{1}<\lambda_{2}$, then $e^{\lambda_{1}t}\leq
e^{\lambda_{2}t }$ where $x$ is not zero, so $\Norm{x}_{\lambda_{1}}\leq
\Norm{x}_{\lambda_{2}}$.  Thus, $\zspace{\lambda_{2}}\subset
\zspace{\lambda_{1}}$ and the inclusion is continuous.

By the definition of the norm, if $x\in \zspace{\lambda}$, we have
\begin{equation*}
\abs{x(t)} \leq \Norm{x}_{\lambda} e^{-\lambda t}.
\end{equation*}

We make similar definitions for spaces of exponentially decreasing
functions on $[0,\infty)$.  Thus, $\zspacep{\lambda}$ will denote
the space of functions $f\in \bcx[0,\infty)$ such that $e_{\lambda}f$
is bounded and we equip $\zspacep{\lambda}$ with the norm
$\Norm{f}_{\lambda}=\Norm{e_{\lambda}f}$.

We next make some estimates for our operators on the spaces of exponentially
decreasing functions. 


We first consider the operators $L_{a}^{\inv}$.  Suppose that
$f\in\zspacep{\lambda}$, where $\lambda < a$.  Then $L_{a}^{\inv}f(t)$
is zero for $t\leq 0$, and for $t\geq 0$, we have 
\begin{align*}
\abs{L_{a}^{\inv}f(t)} &\leq \int_{0}^{t} e^{-a s} \abs{f(t-s)}\,
ds \\
& \leq \int_{0}^{t} e^{-a s}\Norm{f}_{\lambda} e^{-\lambda(t-s)}\, ds\\
& = \Norm{f}_{\lambda} e^{-\lambda t} \int_{0}^{t} e^{-as}e^{\lambda s} \,ds.
\end{align*}
The last integral has the value 
\begin{equation*}
\frac{1}{a-\lambda} [1 - e^{-(a-\lambda)t}],
\end{equation*}
which is less than $1/(a-\lambda)$, since $a-\lambda>0$.  Thus, we have
\begin{equation}
\label{e:easla}
\Norm{L_{a}^{\inv} f}_{\lambda} \leq \frac{1}{a-\lambda} \Norm{f}_{\lambda},
\qquad f\in \zspacep{\lambda},\ \lambda < a.
\end{equation}

We next turn to the operators $N_{a}$.  If $x,y \in \zspace{\lambda}\cap
\bee{\rho}$ and $(t,s)\in [0,\infty)\times [0,\infty)$, we may
apply Lemma~\ref{thm:basicest} to conclude
\begin{eqnarray*}
\lefteqn{ \abs{[a x(t) - g(t,s, x(t), x(t-s))] - [a y(t) - g(t,s,y(t),y(t-s))]} }
 \\
&\leq& K(a,\rho) \max\lset \abs{x(t)-y(t)}, 
\abs{x(t-s)-y(t-s)}\rset \\
&\leq& K(a,\rho) \max\lset \Norm{x-y}_{\lambda} e^{-\lambda t},
\Norm{x-y}_{\lambda} e^{-\lambda(t-s)}\rset\\
&=& K(a,\rho) \Norm{x-y}_{\lambda} e^{-\lambda t} e^{\lambda s}.
\end{eqnarray*}
Thus, for $x,y \in \zspace{\lambda} \cap \bee{\rho}$,
\begin{equation}
\label{e:easnaest}
\abs{N_{a}(x)(t,s) - N_{a}(y)(t,s)}  
\leq K(a,\rho) \Norm{x-y}_{\lambda}
e^{-\lambda t} e^{\lambda s}\,.
\end{equation}

Now consider the operator $M_{a}\colon X\to \bcx[0,\infty)$. 
Suppose that $x,y\in \zspace{\lambda}\cap \bee{\rho}$,
 where $\lambda \leq \lambda_{0}$.
Then we have
\begin{align*}
\abs{M_{a}(x)(t)-M_{a}(y)(t)} & \leq
\int_{0}^{\infty} \abs{N_{a/m}(x)(t,s)-N_{a/m}(y)(t,s)}\,d\mu(s)\\
& \leq \int_{0}^{\infty} K(a/m,\rho) \Norm{x-y}_{\lambda} e^{-\lambda t}
e^{\lambda s}\, d\mu(s)\\
& = K(a/m,\rho) \Norm{x-y}_{\lambda} e^{-\lambda t} \int_{0}^{\infty}
e^{\lambda s}\, d\mu(s)\,,
\end{align*}
where the last integral is finite because $\lambda \leq \lambda_{0}$.
In particular, if we set $y=0$ and note that $N_{a}(0)=0$ and
$M_{a}(0)=0$ (because $g(\cdot,\cdot,0,0)=0$), we see that
$M_{a}(x)\in \zspacep{\lambda}$ if $x\in \zspace{\lambda}$ and $\lambda \leq
\lambda_{0}$.   We also conclude that for 
$x,y \in \zspace{\lambda}\cap \bee{\rho}$ and $\lambda \leq \lambda_{0}$,
\begin{equation}
\label{e:easmaest}
\Norm{M_{a}(x)-M_{a}(y)}_{\lambda} \leq K(a/m,\rho) \Norm{x-y}_{\lambda}
\int_{0}^{\infty} e^{\lambda s}\, d\mu(s)\,.
\end{equation}


Consider the initial value problem \eqref{e:easivpf}, so $f\in
\zspacep{\lambda_{0}}$.
Assume that $\lambda\leq \lambda_{0}$ and $\lambda < a$.
If $x\in\zspace{\lambda}$ then, from the results above,
$M_{a}(x)\in\zspacep{\lambda}$, $f\in \zspacep{\lambda}$ and
$L_{a}^{\inv} M_{a}(x)$ and $L_{a}^{\inv}f$  are in $\zspace{\lambda}$.
Thus, $\zspace{\lambda}$ is invariant under the operator
$T_{a}(x) =L_{a}^{\inv}M_{a}(x)+L_{a}^{\inv}f$.   For 
$x,y \in \zspace{\lambda}\cap \bee{\rho}$, we have the
estimate
\begin{equation}
\label{e:eastaest}
\Norm{T_{a}(x)-T_{a}(y)}_{\lambda} \leq 
\frac{1}{a-\lambda}K(a/m,\rho) \Norm{x-y}_{\lambda}\int_{0}^{\infty}
e^{\lambda s} \, d\mu(s)\,.
\end{equation}

We know that we can find a $\rho>0$ such that
$T_{a(\rho)}(\bee{\rho})\subset \bee{\rho}$.  If we fix such a
$\rho$ and assume $\lambda < a(\rho)$ and $\lambda < \lambda_{0}$,
 the set $\zspace{\lambda}\cap \bee{\rho}$ is invariant under
$T_{a(\rho)}$.   The set $\zspace{\lambda}\cap \bee{\rho}$
is closed in $\zspace{\lambda}$, because the inclusion of
$\zspace{\lambda}$ into $X$ is continuous.   For $x,y\in \zspace{\lambda}
\cap \bee{\rho}$, we have
\begin{equation*}
\Norm{T_{a(\rho)}(x)-T_{a(\rho)}(y)}_{\lambda} \leq \frac{1}{a(\rho)-\lambda}
K(a(\rho)/m, \rho) \Norm{x-y}_{\lambda} \int_{0}^{\infty} e^{\lambda s}\, d\mu(s).
\end{equation*}
Thus, $T_{a(\rho)}$ is Lipschitz on $\zspace{\lambda}\cap \bee{\rho}$
with Lipschitz constant
\begin{equation*}
\sigma(\lambda) = \frac{1}{a(\rho)-\lambda} K(a(\rho)/m, \rho)
\int_{0}^{\infty} e^{\lambda s}\, d\mu(s).
\end{equation*}
But $\sigma$ is a continuous, nondecreasing function of $\lambda$
and 
\begin{equation*}
\sigma(0) = \frac{1}{a(\rho)}K(a(\rho)/m, \rho) m,
\end{equation*}
which we know is strictly less than one.  Thus, $\sigma(\lambda)<1$ for
$\lambda >0$ sufficiently small.

We conclude that if we choose $\lambda$ sufficiently small that
$\lambda < \lambda_{0}$, $\lambda < a(\rho)$ and $\sigma(\lambda)<1$, then
$T_{a(\rho)}$ leaves $\zspace{\lambda}\cap \bee{\rho}$ invariant
and is a contraction on this closed subset of $\zspace{\lambda}$.
Thus, $T_{a(\rho)}$ has a fixed point $x\in \zspace{\lambda}$,
which is precisely the solution of the initial value
problem \eqref{e:easivpf}.   This completes the proof
of Proposition~\ref{thm:easp3} and hence the proof 
of Theorem~\ref{thm:eas}.


\begin{thebibliography}{99}

\bibitem{BC}
\textsc{R.~Bellman and K.~L.~Cooke}, 
``Differential-Difference Equations,''
Academic Press, New York, 1963.


\bibitem{CH}
\textsc{S.-N. Chow and J.~K.~Hale}, ``Methods of Bifurcation Theory,''
Springer-Verlag, New York, 1982.


\bibitem{DLlib}
\textsc{L.~D.~Drager and W.~Layton}, On non-linear difference
approximations to non-linear functional differential equations,
\textsl{Libertas Mathematica}, Vol.~III (1983) 45--65.

\bibitem{DLpol}
\textsc{L.~D.~Drager and W.~Layton}, Non-resonance in functional
differential equations with small time lag, \emph{in}
``Functional-Differential Systems and Related Topics~III,''
ed.\mbox{} M.~Kisielewicz, Higher College of Engineering, Zielona,
Gora, Poland, 1983, 65--78.


\bibitem{DLfa}
\textsc{L.~D.~Drager and W.~Layton}, Non-linear delay differential
equations and function algebras, \emph{in} ``Differential
Equations,'' ed.\mbox{} I.~W.~Knowles and R.~T.~Lewis, North Holland,
Amsterdam, 1984.


\bibitem{DLnr}
\textsc{L.~D.~Drager and W.~Layton}, Some results on non-resonant
non-linear delay differential equations, 
\emph{in} ``Trends in the Theory and Practice of Non-linear
Analysis,'' ed.\mbox{} V.~Lakshmikantham, North Holland,
Amsterdam, 1985, 131--136.

\bibitem{DLjde}
\textsc{L.~D.~Drager and W.~Layton}, Bounded solutions of delay
differential equations subject to a generalized nonresonance
condition, \textsl{J. Differential Equations},
\textbf{131}(1996), no.~1, 132--169.

\bibitem{DLM}
\textsc{L.~D.~Drager, W.~Layton and R.~M.~Mattheij}, Asymptotics
of numerical methods for non-linear evolution equations,
\emph{in} ``Trends in the Theory and Practice of Non-linear
Analysis,'' ed. V.~Lakshmikantham, North Holland,
Amsterdam, 1985, 137--144.

\bibitem{El}
\textsc{L.~E.~El'sgol'ts},
``Introduction to the Theory of Differential Equations with Deviating
Arguments,'' Holden-Day, San Francisco, 1966.

\bibitem{ElN}
\textsc{L.~E.~El'sgol'ts and S.~B.~Norkin},
``Introduction to the Theory and Application of Differential
Equations with Deviating Arguments,''  Academic Press, New York,
1973.

\bibitem{H}
\textsc{J.~K.~Hale}, ``Theory of Functional Differential Equations,''
Second Edition, Springer-Verlag, New York, 1977.

\bibitem{L}
\textsc{William Layton}, Existence of almost periodic solutions to
delay differential equations with Lipschitz nonlinearities,
\textsl{J. Differential Equations}, \textbf{55}(1984),
no.~2, 151--164.

\end{thebibliography}
\end{document}






