\documentclass[reqno]{amsart}
\usepackage{hyperref}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2008(2008), No. 67, pp. 1--9.\newline
ISSN: 1072-6691. URL: \url{http://ejde.math.txstate.edu} or \url{http://ejde.math.unt.edu}
\newline ftp ejde.math.txstate.edu  (login: ftp)}
\thanks{\copyright 2008 Texas State University - San Marcos.}
\vspace{9mm}}

\begin{document}
\title[\hfilneg EJDE-2008/67\hfil Impulsive dynamic equations]
{Impulsive dynamic equations on a time scale}

\author[E. R. Kaufmann, N. Kosmatov and Y. N. Raffoul\hfil EJDE-2008/67\hfilneg]
{Eric R. Kaufmann, Nickolai Kosmatov, Youssef N. Raffoul}  % in alphabetical order

\address{Eric R. Kaufmann \newline
Department of Mathematics \& Statistics\\
University of Arkansas at Little Rock, Little Rock, AR 72204, USA}
\email{erkaufmann@ualr.edu}

\address{Nickolai Kosmatov \newline
Department of Mathematics \& Statistics\\
University of Arkansas at Little Rock, Little Rock, AR 72204, USA}
\email{nxkosmatov@ualr.edu}

\address{Youssef N. Raffoul \newline
Department of Mathematics\\
University of Dayton, Dayton, OH 45469-2316, USA}
\email{youssef.raffoul@notes.udayton.edu}

\thanks{Submitted November 20, 2007. Published May 1, 2008.}
\subjclass[2000]{34A37, 34A12, 39A05}
\keywords{Fixed point theory; nonlinear dynamic equation; stability;
impulses}

\begin{abstract}
 Let $\mathbb{T}$ be a time scale such that $0, t_i, T \in \mathbb{T}$,
 $i = 1, 2, \dots, n$, and $0 < t_i < t_{i+1}$. Assume each $t_i$ is dense.
 Using a fixed point theorem due to Krasnosel'ski\u{\i}, we show that the
 impulsive dynamic equation
 \begin{gather*}
    y^{\Delta}(t) = -a(t)y^{\sigma}(t)+ f ( t, y(t) ),\quad t \in (0, T],\\
    y(0) = 0,\\
    y(t_i^+) = y(t_i^-) + I (t_i, y(t_i) ), \quad i = 1, 2, \dots, n,
 \end{gather*}
 where $y(t_i^\pm) = \lim_{t \to t_i^\pm} y(t)$, and $y^\Delta$ is
 the $\Delta$-derivative on $\mathbb{T}$, has a solution.
 Under a slightly more stringent inequality we show that the solution
 is unique using the contraction mapping principle. Finally, with the
 aid of the contraction mapping principle we study the stability of
 the zero solution on an unbounded time scale.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{example}[theorem]{Example}

\section{Introduction}

Let $\mathbb{T}$ be a time scale such that $0, t_i, T \in \mathbb{T}$,
for $i = 1, 2, \dots, n$, $0 < t_i < t_{i+1}$, and assume that $t_i$
is dense in $\mathbb{T}$ for each $i = 1,2, \dots, n$. We will show the
existence of solutions for the nonlinear impulsive dynamic equation
\begin{equation}\label{e1.01}
\begin{gathered}
    y^{\Delta}(t) = -a(t)y^{\sigma}(t)+ f ( t, y(t) ),\quad t \in (0, T],\\
    y(0) = 0,\\
    y(t_i^+) = y(t_i^-) + I (t_i, y(t_i) ),\quad i = 1, 2, \dots, n,
\end{gathered}
\end{equation}
where $y(t_i^\pm) = \lim_{t \to t_i^\pm} y(t)$, $y(t_i) = y(t_i^-)$, and $[0, T] = \{t \in \mathbb{T} : 0 \leq t
\leq T \}$. Note that the intervals $[a,b)$, $(a, b]$, and $(a, b)$ are defined similarly.

In 1988, Stephan Hilger \cite{hil} introduced the theory of time
scales (measure chains) as a means of unifying discrete and
continuum calculi. Since Hilger's initial work there has been
significant growth in the theory of dynamic equations on time
scales, covering a variety of different problems;  see \cite{mbgg,
mbap, mbap2} and references therein. The study of impulsive
initial and boundary value problems is extensive. For the theory
and classical results, we direct the reader to the monographs
\cite{BS,LBS,SP}. Recent works of D. Guo on the topic include
\cite{DG1,DG2,DG3} (and the references therein) and are devoted to
the existence of solutions to integro-differential equations using
the fixed point index of operators in ordered Banach spaces and
other techniques.

In Section 2 we present some preliminary material that we will
need to  show the existence of a solution of \eqref{e1.01}. We
will state some facts about the exponential function on a time
scale as well as a fixed point theorem due to Krasnosel'ski\u{\i}.
We present our main results in Section 3. In Section 4 we give
sufficient conditions for the stability of the zero solution of
\eqref{e1.01}.

\section{Preliminaries}

We assume the reader is familiar with the notation and basic
results for dynamic equations on time scales. For a review of this
topic we direct the reader to the monographs \cite{mbap,mbap2}. We
begin with a few definitions.

A function $p : \mathbb{T} \to \mathbb{R}$ is said to be
\emph{regressive}  provided $1 + \mu(t) p(t) \neq 0$ for all
$t \in \mathbb{T}^\kappa$. The set of all regressive rd-continuous
functions $f : \mathbb{T} \to \mathbb{R}$ is denoted by
$\mathcal{R}$.

Let $p \in \mathcal{R}$ and $\mu(t) \ne 0$ for all $t \in \mathbb{T}$. The \emph{exponential function} on
$\mathbb{T}$, defined by
\[
    e_p(t,s) = \exp \Big( \int_s^t \! \frac{1}{\mu(z)}\log
( 1 + \mu(z)p(z)) \,    \Delta z \Big),
\]
is the solution to the initial value problem $y^\Delta = p(t) y, \, y(s)
= 1$. Other properties of the exponential function are given in the
following lemma, \cite[Theorem 2.36]{mbap}.

\begin{lemma}\label{lemma2.3}
Let $p \in \mathcal{R}$. Then
\begin{itemize}
    \item[(i)] $e_0(t,s) \equiv 1$ and $e_p(t, t) \equiv 1$;
    \item[(ii)] $e_p(\sigma(t), s) = (1 + \mu(t) p(t)) e_p(t, s)$;
    \item[(iii)] $\frac{1}{e_p(t,s)} = e_{\ominus p}(t, s)$, where
    $\ominus p(t) = - \frac{p(t)}{1 + \mu(t) p(t)}$;
    \item[(iv)] $e_p(t, s) = \frac{1}{e_p(s,t)} = e_{\ominus p}(s, t)$;
    \item[(v)] $e_p(t,s) e_p(s, r) = e_p(t,r)$;
    \item[(vi)] $\big( \frac{1}{e_p( \cdot, s)} \big) ^\Delta = -
    \frac{p(t)}{e_p^\sigma(\cdot, s)}$.
\end{itemize}
\end{lemma}

Lastly in this section, we state Krasnosel'ski\u{\i}'s fixed point
theorem \cite{mak} which enables us to prove
the existence of a solution of \eqref{e1.01}.

\begin{theorem}[Krasnosel'ski\u{\i}] \label{thm2.4}
Let $\mathbb{M}$ be a closed convex nonempty subset of a Banach
space $\big ( \mathcal{B}, \| \cdot\| \big )$.
Suppose that
\begin{itemize}
\item[(i)] the mapping $A: \mathbb{M} \to \mathcal{B}$ is completely continuous,
\item[(ii)] the mapping $B: \mathbb{M} \to \mathcal{B}$ is a contraction, and
\item[(iii)] $x,y \in \mathbb{M}$ implies $Ax + By \in \mathbb{M}$.%
\end{itemize}
Then the mapping $A+B$ has a fixed point in $\mathbb{M}$.
\end{theorem}

\section{Existence Of Solutions}

Define $t_{n+1} \equiv T$ and let $J_0 = [0, t_1]$ and for
$k = 1, 2, \dots , n$, let $J_k = (t_k, t_{k+1}]$.
Define
\[
    PC  =  \lbrace y: [0, T] \to \mathbb{R} \, | \, y \in C(J_k), \,
y(t_k^\pm) \text{ exist and }y(t_k^-) = y(t_k), k = 1, \dots, n  \rbrace
\]
and
\[
    PC^1 = \lbrace y: [0, T] \to \mathbb{R} \, | \, y \in C^1(J_k), k = 1,
 \dots, n \rbrace
\]
where $C(J_k)$ is the space of all real valued continuous functions on $J_k$ and $C^1(J_k)$ is the space of all
continuously delta-differentiable functions on $J_k$. The set $PC$ is a Banach space when it is endowed with the
supremum norm
\[
    \| u \| = \max_{0 \leq k \leq n} \big \{ \|u\|_k \big \},
\]
where $\|u\|_k = \sup_{t \in J_k} |u(t)|$.


We will assume that the following conditions hold.
\begin{itemize}
\item[(A)] $a \in \mathcal{R}$.

\item[(F1)] $f \in C(\mathbb{T} \times \mathbb{R}, \mathbb{R})$.

\item[(F2)] There exist $g$ and $h$ with $\alpha := \max_{t \in [0, T]} \int_0^t \! |e_{\ominus a}(t, s)|
g(s) \, \Delta s < \infty$, and
$\beta := \max_{t \in [0, T]} \int_0^t \! |e_{\ominus a}(t, s)| h(s) \,
\Delta s < \infty$, such that
\[
    |f(t, y)| \leq g(t) + h(t) |y|, \quad t \in \mathbb{T},\; y \in \mathbb{R}.
\]

\item[(I)] There exists a positive constant $E$ such that
\[
    |I(t, x) - I(t, y)| \leq E |x - y|, \quad \text{for } x, y \in \mathbb{R}.
\]
\end{itemize}

\begin{lemma} \label{lemma3.1}
The function $y \in PC^1$ is a solution of equation \eqref{e1.01} if and
only if $y \in PC$ is a solution of
\begin{equation}\label{e3.03}
    y(t) = \int_{0}^{t} \! e_{\ominus a}(t, s) f(s, y(s)) \, \Delta s + \sum_{\{i:t_i < t\}} \!
    e_{\ominus a}(t, t_i) I\big(t_i, y(t_i) \big ).
\end{equation}
\end{lemma}

\begin{proof}
For $t \in J_0$, the solution of \eqref{e1.01} satisfying $y(0) = 0$ is
\[
    y(t) = \int_0^t \! e_{\ominus a}(t, 0) f(s, y(s)) \, \Delta s.
\]
See \cite{mbap} for details.
To find the solution of \eqref{e1.01} on $J_1$ we consider the initial
value problem
\begin{gather*}
    y^\Delta(t)  = -a(t) y^\sigma(t) + f(t, y(t)), \quad t \in J_1,\\
    y(t_1^+)  = \int_0^{t_1} \! e_{\ominus a}(t_1, s) f(s, y(s)) \, \Delta s + I(t_1, y(t_1)).
\end{gather*}
The solution to this initial value problem is
\[
    y(t) = e_{\ominus a}(t, t_1) I(t_1, y(t_1))
+ \int_0^t \! e_{\ominus a} (t, s) f(s, y(s)) \, \Delta s.
\]
We proceed inductively to obtain that if $y \in PC^1$ is a solution of
\eqref{e1.01}, then $y \in PC$ is a solution of
\[
    y(t) = \int_{0}^{t} \! e_{\ominus a}(t, s) f(s, y(s)) \, \Delta s + \sum_{\{i:t_i < t\}} \!
    e_{\ominus a}(t, t_i) I\big(t_i, y(t_i) \big ).
\]
The converse statement follows trivially and the proof is complete.
\end{proof}

Define the mapping $H : PC \to PC$ by
\begin{equation}\label{e3.04}
    (H\varphi)(t) = \int_{0}^{t} \! e_{\ominus a}(t, s)
f(s, \varphi(s)) \, \Delta s + \sum_{\{i:t_i <
    t\}} \! e_{\ominus a}(t, t_i) I\big(t_i, \varphi(t_i) \big ).
\end{equation}
By Lemma \ref{lemma3.1}, a fixed point of $H$ is a solution of \eqref{e1.01}.
The form of \eqref{e3.04} suggests that we construct two mappings, one of which is completely continuous and the other is a contraction. We express equation \eqref{e3.04} as
\[
    (H\varphi)(t)=(A\varphi)(t)+(B\varphi)(t)
\]
where, $A, B$ are given by
\begin{gather}\label{e3.05}
    (A\varphi)(t) = \int_{0}^{t} \! e_{\ominus a}(t, s) f(s, \varphi(s))
\, \Delta s, \\
\label{e3.06}
        (B\varphi)(t) = \sum_{\{i:t_i < t\}} \! e_{\ominus a}(t, t_i)
I\big(t_i, \varphi(t_i) \big ).
\end{gather}

\begin{lemma} \label{lemma3.2}
Suppose {\rm (A), (F1), (F2)} hold. Then $A : PC \to PC$, as defined
 by \eqref{e3.05}, is completely
continuous.
\end{lemma}

\begin{proof}
It is clear that $A : PC \to PC$.
To see that $A$ is continuous, let $\{\varphi_i\} \subset PC$ be
such that $\varphi_i \to \varphi$ as $i \to \infty$. By (F2) and
the continuity of $f$ we have, for each $t \in [0, T]$,
\begin{align*}
    \lim_{i \to \infty} \big | A\varphi_i(t) - A\varphi(t) \big |
    & \leq \lim_{i \to \infty} \int^T_0 \! \left | e_{\ominus a}(t, s) \big ( f(s, \varphi_i(s)) -
    f(s,\varphi(s)) \big ) \right | \, \Delta s\\
    & \leq \int_0^T \! \lim_{i \to \infty} | e_{\ominus a}(t, s)| \big | f(s, \varphi_i(s)) -
    f(s,\varphi(s)) \big | \, \Delta s\\
    & \to 0.
\end{align*}
Thus $A$ is continuous.
A standard application of the Arzel\`a--Ascoli theorem shows
 that $A$ is compact.
\end{proof}

\begin{lemma} \label{lemma3.4}
Let {\rm (A)} and {\rm (I)} hold and let $B$ be defined by \eqref{e3.06}.
Suppose that
\begin{equation}\label{e3.08}
    E \max_{t \in [0, T]} \sum_{i = 1}^n |e_{\ominus a}(t, t_i)| \leq \zeta <1.
\end{equation}
Then $B: PC \to PC$ is a contraction.
\end{lemma}

\begin{proof}
Since $e_{\ominus a}(t, t_i)$ is continuous for all $i = 1, \dots , n$,
it follows trivially that $B: PC \to PC$.
For $\varphi, \psi \in PC$, we have
\begin{align*}
    \| B\varphi - B\psi \|
& = \max_{0 \leq i \leq n} \Big\{ |B\varphi(t) -   B\psi(t)| : t \in J_i \Big\}\\
& \leq \max_{0 \leq i \leq n} \Big\{ \sum_{\{i:t_i < t\}} \! |e_{\ominus a}(t, t_i)| \Big | I(t_i,
    \varphi(t_i)) - I(t_i, \psi(t_i)) \Big | : t \in J_i \Big\}\\
&\leq  \Big( E \max_{t\in[0,T]} \sum_{i = 1}^n |e_{\ominus a}(t, t_i)|\Big) |\varphi(t_i)-\psi(t_i)|\\
&\leq\zeta\|\varphi-\psi\|.
\end{align*}
Hence $B$ defines a contraction mapping with contraction constant $\zeta$.
\end{proof}

We now state and prove our first existence theorem.

\begin{theorem}\label{thm3.5}
Assume $\eta := \max_{t \in [0, T]} \sum_{i = 1}^n | e_{\ominus a}(t, t_i)|
\, |I(t_i, 0)| < \infty$. Suppose {\rm (A), (F1), (F2), (I)}
 and \eqref{e3.08} hold. Let $J$ be a positive constant satisfying the
inequality
\begin{equation}\label{e3.12}
    \alpha + \eta + \Big( \beta + E \max_{t \in [0, T]}
\sum_{i = 1}^n |e_{\ominus a}(t, t_i)| \Big) J \leq   J.
\end{equation}
Then \eqref{e1.01} has a solution $\varphi$ such that $\|\varphi\| \leq J$.
\end{theorem}

\begin{proof}
Define $\mathbb M = \{ \varphi \in PC: \|\varphi \| \leq  J\}$.
By Lemma \ref{lemma3.2}, $A: PC \to PC$ is completely continuous.
Also, from Lemma \ref{lemma3.4}, the mapping $B: PC \to PC$ is a contraction.
The first and second conditions of Theorem \ref{thm2.4} are satisfied.

We need to show that if $\varphi, \psi \in \mathbb M $, then $\|A\varphi +B\psi\|\leq J$. Let $\varphi, \psi \in
\mathbb{M}$. Then, $\|\varphi\|, \|\psi\|\leq J$ and

\begin{align*}
&| A\varphi(t) + B\psi(t) | \\
& \leq \int_0^t \! |e_{\ominus a} (t, s)| \, |f(s, \varphi(s))| \, \Delta s
  + \sum_{\{i : t_i < t\}} |e_{\ominus a}(t, t_i)| \, |I(t_i, \psi(t_i))|\\
& \leq \int_0^t \! |e_{\ominus a}(t, s)| g(s) \, \Delta s
  + \int_0^t |e_{\ominus a}(t, s)| h(s) \, \Delta s \, \|\varphi\|\\
&\quad + \sum_{i = 1}^n |e_{\ominus a}(t, t_i)| |I(t_i, 0)|
   + \sum_{i = 1}^n |e_{\ominus a}(t, t_i)| |I(t_i, \psi(t_i))
     - I(t_i, 0)|\\
& \leq \alpha + \beta J + \eta + E \max_{t \in [0, T]}
  \sum_{i = 1}^n |e_{\ominus a}(t, t_i)| J \leq J.
\end{align*}
Hence $\|A \varphi + B \psi\| \leq J$ and so $A\varphi + B\psi \in \mathbb{M}$.
 All the conditions of Krasnosel'ski\u{\i}'s theorem are satisfied.
Thus there exists a fixed point $z$ in $\mathbb M$ such that $z=Az+Bz$.
By Lemma \ref{lemma3.1}, this fixed point is a solution of \eqref{e1.01}
and the proof is complete.
\end{proof}

The conditions (F2) and (I) are global conditions on the functions $f$
and $I$. In the next theorem we replace these conditions with the following
local conditions.
\begin{itemize}
\item[(F2')] There exist $g$ and $h$ with
 $\alpha := \max_{t \in [0, T]} \int_0^t \! |e_{\ominus a}(t, s)|
g(s) \, \Delta s < \infty$, and
 $\beta := \max_{t \in [0, T]} \int_0^t \! |e_{\ominus a}(t, s)| h(s) \, \Delta s
< \infty$, such that
\[
    |f(t, y)| \leq g(t) + h(t) |y|, \quad t \in \mathbb{T}, |y| < J.
\]

\item[(I')] There exists a positive constant $E$ such that
\[
    |I(t, x) - I(t, y)| \leq E |x - y|, \quad \text{for } |x|, |y| < J.
\]
\end{itemize}

\begin{theorem}\label{thm3.6}
Assume $\eta := \max_{t \in [0, T]} \sum_{i = 1}^n |
e_{\ominus a}(t, t_i)| \, |I(t_i, 0)| < \infty$. Suppose
{\rm (A), (F1)}, and \eqref{e3.08} hold. Let $J$ be a positive constant
such that conditions {\rm (F2')} and {\rm (I')} hold and such that
\begin{equation}\label{e3.14}
    \alpha + \eta + \Big( \beta + E \max_{t \in [0, T]}
\sum_{i = 1}^n |e_{\ominus a}(t, t_i)| \Big) J \leq     J
\end{equation}
is satisfied. Then \eqref{e1.01} has a solution $\varphi$ such that
$\|\varphi\| \leq J$.
\end{theorem}

The proof of Theorem \ref{thm3.6} parallels that of Theorem \ref{thm3.5}
and hence is omitted.
In our last theorem in this section, we give conditions for which the
solution of \eqref{e1.01} is unique.

\begin{theorem}\label{thm3.7}
Suppose {\rm (A), (F1), (F2), (I)} and \eqref{e3.08} hold. If
$\beta + \zeta < 1$,
then there exists a unique solution to the impulsive initial value
problem \eqref{e1.01}.
\end{theorem}

\begin{proof}
Let $\varphi, \psi \in PC$. For $t \in [0, T]$
\begin{align*}
    | H\varphi(t) - H\psi(t) |
& \leq  \Big| \int_0^t \! e_{\ominus a}(t,s) \Big ( f(s, \varphi(s)) - f(s,
    \psi(s)) \Big ) \, \Delta s \Big|\\
&\quad  + \sum_{\{i : t_i < t \}} \! |e_{\ominus a}(t, t_i)| \big| I(t_i, \varphi(t_i)) - I(t_i,
    \psi(t_i)) \big| \\
& \leq  \int_0^T \! |e_{\ominus a}(t, s)| h(s) \, \Delta s \, \|\varphi
  - \psi\| + \zeta \|\varphi -  \psi\| \\
& \leq   (\beta + \zeta ) \|\varphi - \psi\|.
\end{align*}
Hence $\|H\varphi - H \psi \| \leq (\beta + \zeta) \|\varphi - \psi \|$.
 By the contraction mapping principle, $H$ has a fixed point in $PC$.
By Lemma \ref{lemma3.1}, this fixed point is a solution of \eqref{e1.01}
and the proof is complete.
\end{proof}

\section{Stability}

Assume that $\mathbb{T}$ is unbounded above. In this section,
we study the stability of the zero solution of the dynamic equation
\begin{equation}\label{e4.01}
\begin{gathered}
    y^{\Delta}(t) = -a(t)y^{\sigma}(t)+ f\big( t, y(t)\big),\\
    y(0) = y_0,\\
    y(t_i^+) = y(t_i^-) + I\big(t_i, y(t_i) \big), \quad i = 1, 2, \dots, n.
\end{gathered}
\end{equation}
In addition to assumptions (A), (F1) and (I), we assume that $a$, $I$ and $f$
satisfy
\begin{equation}\label{e4.04}
    I(t, 0) = 0, \quad f(t, 0) = 0,
\end{equation}
for all $t \in \mathbb{T}$ and
\begin{equation}\label{e4.05}
    e_{\ominus a}(t, 0) \to 0 \quad \text{as } t \to \infty.
\end{equation}
By Lemma \ref{lemma2.3} and \eqref{e4.05} we have that
$e_{\ominus a}(t, t_i) \to 0$ as $t \to \infty$.

We replace condition (F2) with the following condition.
\begin{itemize}
\item[(F3)] There exist continuous functions $g$ and $h$ with
\begin{gather*}
    \alpha := \max_{t \in \mathbb{T}} \int_0^t \! |e_{\ominus a}(t, s)| g(s) \, \Delta s < \infty,\\
    \beta := \max_{t \in \mathbb{T}} \int_0^t \! |e_{\ominus a}(t, s)| h(s) \, \Delta s < \infty,\\
    \int_0^t \! |e_{\ominus a}(t, s)| g(s) \, \Delta s \to 0, \text{ and}\\
    \int_0^t \! |e_{\ominus a}(t, s)| h(s) \, \Delta s \to 0,
\end{gather*}
such that
\[
    |f(t, y)| \leq g(t) + h(t) |y|, \quad t \in \mathbb{T}, |y| < J.
\]
\end{itemize}
Lastly, we assume that
\begin{equation}\label{e4.09}
    \lim_{t \to \infty} \int_0^t \! |e_{\ominus a}(t, s)| g(s) \, \Delta s = 0.
\end{equation}

\noindent\textbf{Remark:} Lyapunov's direct method has been used widely
 when $\mathbb{T} = \mathbb{R}$. However, the extension of the theory
 of Lyapunov functions to time scales has not been fully developed.
When Lyapunov's direct method is used, one must impose point-wise
conditions on the coefficients in order to get the derivative of the
constructed Lyapunov function to be negative along the solutions of the
differential equation of interest. Since we are using fixed point theory,
 our conditions on the functions $a, g$ and $h$ are of averaging type.
For an excellent reference of collections of recent results on the use
of fixed point theory in the study of stability, periodicity and boundedness,
 we refer the reader to the texts \cite {tb1, tb2}.

As in Lemma \ref{lemma3.1}, we can show that $y \in PC^1$ is a solution
of \eqref{e4.01} if and only if $y \in PC$ satisfies
\begin{equation}\label{e4.06}
    y(t) = e_{\ominus a}(t, 0)y_0 + \int_{0}^{t} \! e_{\ominus a}(t,
    s) f(s, y(s)) \, \Delta s + \! \sum_{\{i:t_i < t\}} \! e_{\ominus
    a}(t, t_i) I\big(t_i, y(t_i) \big ).
\end{equation}
Define the set $S$ by
\[
    S = \{ \varphi \in PC : \varphi(0) = y_0, \varphi(t) \to 0
\text{ as } t \to \infty, \text{ and  } \varphi \text{ is bounded}\}.
\]
Then $( S, \| \cdot \| )$ is a complete metric space under the
norm $\|y\| = \sup_{t \in \mathbb{T}} |y(t)|$.

Define the mapping $H_2$ by
\begin{equation}\label{e4.07}
    (H_2\varphi)(t) =  e_{\ominus a}(t, 0)y_0 + \int_{0}^{t}
\! e_{\ominus a}(t, s) f(s, \varphi(s)) \, \Delta   s
   + \sum_{\{i:t_i < t\}} \! e_{\ominus a}(t, t_i) I\big(t_i, \varphi(t_i)
\big ).
\end{equation}

We say that the zero solution of \eqref{e4.01} is stable if for each
$\varepsilon > 0$ there exists a $\delta=\delta(\varepsilon) > 0$
 and a $t^* > 0$ such that if $|y_0| < \delta$ then $|y(t)| < \varepsilon$
for all $t > t^*$.

\begin{theorem} \label{thm4.1}
Assume that {\rm (A), (F1), (F3), (I)}, and
\eqref{e4.04}--\eqref{e4.09} hold. Suppose that
\[
     \beta + E \max_{t \in \mathbb{T}} \sum_{\{i:t_i < t\}} |e_{\ominus a}(t, t_i)| < 1.
\]
Then every solution $y(t)$ of \eqref{e4.01} with small initial value $y_0$
is bounded and goes to $0$ as $t \to \infty$. Moreover, the zero solution
is stable.
\end{theorem}

\begin{proof}
We first show that $H_2: S \to S$. Note that if $\varphi \in PC$ then $H_2 \varphi \in PC$. Let $\varphi \in PC$ be such that $\| \varphi \|
\leq K$ and let $M = \max_{t \in \mathbb{T}} e_{\ominus a}(t, 0)$. Then
\begin{align*}
    |H_2\varphi(t)|
& \leq  |e_{\ominus a}(t, 0)| |y_0| + \int_{0}^{t} \! |e_{\ominus a}(t,
    s)| |f(s, \varphi(s))| \, \Delta s\\
& \quad  +  \sum_{\{i:t_i < t\}} \! |e_{\ominus a}(t, t_i)| |I\big(t_i,
    \varphi(t_i) \big )|\\
& \leq  |e_{\ominus a}(t, 0)| |y_0| + \int_0^t \! |e_{\ominus a}(t,
    s)| g(s) \, \Delta s\\
& \quad + \int_0^t \! |e_{\ominus a}(t,s)| h(s) |\varphi(s)| \, \Delta s
    +  \sum_{\{i:t_i < t\}} \! E |e_{\ominus a}(t, t_i)| |\varphi(t_i)|\\
& \leq  M |y_0| + \alpha + \beta K + E \max_{t \in \mathbb{T}}
 \sum_{\{i:t_i < t\}} \!    |e_{\ominus a}(t, t_i)| K.
\end{align*}
Since $\max_{t \in \mathbb{T}}
\sum_{\{i:t_i < t\}} \! |e_{\ominus a}(t, t_i)| < \infty$,
$H_2 \varphi$ is bounded whenever $\varphi$ is bounded.

Conditions \eqref{e4.05} and {\rm (F3)} imply that
$( H_2 \varphi )(t) \to 0$ as $t \to \infty$.
Let $\varphi, \theta \in S$. Then
\begin{align*}
    |H_2 \varphi(t) - H_2 \theta(t)|
&  \leq  \int_0^t \! |e_{\ominus a}(t, s)| h(s)
    |\varphi(s) - \theta(s)| \, \Delta s\\
&\quad  + \sum_{i : t_i < t} \! |e_{\ominus a}(t, t_i)| E
    |\varphi(t_i) - \theta(t_i) |\\
&\leq  \Big[ \beta + E \max_{t \in \mathbb{T}} \sum_{\{i:t_i < t\}}
    |e_{\ominus a}(t, t_i)| \Big] \| \varphi - \theta\|.
\end{align*}
Since $\beta + E \max_{t \in \mathbb{T}} \sum_{\{i:t_i < t\}} |e_{\ominus a}(t, t_i)| < 1$, the mapping $H_2$ is a contraction. By the contraction mapping principle, there exists a unique fixed point in $S$ which solves
\eqref{e4.01}.

Since \eqref{e4.09} holds, we can find $t^* \in \mathbb{T}$ such that
if $t > t^*$ then
\[
    \int_0^t \! |e_{\ominus a}(t, s)| g(s) \, \Delta s
< \frac{\varepsilon}{3}.
\]
Fix $\varepsilon > 0$ and let $\varphi \in PC$ be such that
$\| \varphi \| \leq \max\{\varepsilon/3, \varepsilon/(3M)\}$.
As above we have,
\begin{align*}
    |H_2\varphi(t)|
& \leq  |e_{\ominus a}(t, 0)| |y_0| + \int_0^t \! |e_{\ominus a}(t, s)| g(s) \, \Delta s\\
& \quad + \int_0^t \! |e_{\ominus a}(t,s)| h(s) |\varphi(s)| \, \Delta s
+  \sum_{\{i:t_i < t\}}
    \! E |e_{\ominus a}(t, t_i)| |\varphi(t_i)|\\
& \leq  \varepsilon.
\end{align*}
The zero solution is stable and the proof is complete.
\end{proof}

\begin{thebibliography}{00}

\bibitem{BS}  D. D. Bainov and P. S. Simeonov;
 \emph{Systems with Impulse Effect: Stability, Theory and
Applications}, Ellis Horwood Series: Mathematics and its Applications,
Ellis Horwood, 1989.

\bibitem{mbgg} M. Bohner and G. Sh. Guseinov;
 Line integrals and Green's formula on time scales, \emph{J. Math.
Anal. appl.}, \textbf{326} (2007), 1124--1141.

\bibitem{mbap} M. Bohner, and A. Peterson;
 \emph{Dynamic Equations on Time Scales, An introduction with
Applications}, Birkh\"{a}user, Boston, Massachusetts, 2001.

\bibitem{mbap2} M. Bohner, and A. Peterson;
 \emph{Advances in Dynamic Equations on Time Scales},
Birkh\"{a}user, Boston, Massachusetts, 2003.

\bibitem{tb1} T. A. Burton;
 \emph{Stability and Periodic Solutions of Ordinary and Functional Differential
Equations}, Dover Publications, Mineola, New York, 2005.

\bibitem{tb2} T. A. Burton;
 \emph{Stability by Fixed Point Theory for Functional Differential Equations}, Dover
Publications, Mineola, New York, 2006.

\bibitem{DG1} D. Guo;
 Existence of positive solutions for nth-order nonlinear impulsive singular
integro-differential equations in Banach spaces, \emph{Nonlinear Anal.}, in press.

\bibitem{DG2} D. Guo;
 A class of second-order impulsive integro-differential equations on unbounded domain in a
Banach space, \emph{Appl. Math. Comput.} {\bf 125} (1) (2002), 59--77.

\bibitem{DG3} D. Guo;
 Multiple positive solutions of a boundary value problem for nth-order impulsive
integro-differential equations in Banach spaces, \emph{Nonlinear Anal.} {\bf 63} (2005), No. 4, 618--641.

\bibitem{hil} S. Hilger;
 Ein Masskettenkalk\"{u}l mit Anwendung auf Zentrumsmanningfaltigkeiten. PhD thesis,
Universit\"{a}t W\"{u}rzburg, 1988.

\bibitem{nk} N. Kosmatov;
 Multi-point boundary value problems on time scales at resonance, \emph{J. Math. Anal.
Appl.}, \textbf{323} (2006), 253--266.

\bibitem{kkr} E. R. Kaufmann, N. Kosmatov, and Y. N. Raffoul;
 A Second Order Boundary Value Problem with
Impulsive Effects on an Unbounded Domain, \emph{Nonlinear Anal.}, in press.

\bibitem{er} E. R. Kaufmann, and Y. N. Raffoul;
 Periodic solutions for a neutral nonlinear dynamical equation
on a time scale, \emph{J. Math. Anal. Appl.}, 319 (2006), no. 1, 315--325.

\bibitem{mak} M. A. Krasnosel'ski\u{\i};
 Some problems in nonlinear analysis, \emph{Amer. Math. Soc. Transl.,
Ser. 2} \textbf{10} (1958), 345--409.

\bibitem{LBS} V. Lakshmikantham, D. D. Bainov, and P. S. Simeonov;
 \emph{Theory of Impulsive Differential
Equations}, Series in Modern Applied Mathematics, \textbf{6}, World Scientific, New Jersey, 1994.

\bibitem{SP}  A. M. Samo\u{\i}lenko and N. A. Perestyuk;
 \emph{Impulsive Differential Equations}, World
Scientific Series on Nonlinear Science, Series A: Monographs and Treatises, \textbf{14}, World Scientific, New
Jersey, 1995.

\end{thebibliography}

\end{document}
