\documentclass[reqno]{amsart}
\usepackage{hyperref}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2012 (2012), No. 57, pp. 1--14.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu}
\thanks{\copyright 2012 Texas State University - San Marcos.}
\vspace{9mm}}

\begin{document}
\title[\hfilneg EJDE-2012/57\hfil Periodic solutions]
{Periodic solutions for neutral functional differential equations
with impulses on time scales}

\author[Y. Li, X. Dou, J. Zhou\hfil EJDE-2012/57\hfilneg]
{Yongkun Li, Xiaoyan Dou, Jianwen Zhou}  

\address{Yongkun Li \newline
Department of Mathematics,
Yunnan University\\
Kunming, Yunnan 650091, China}
\email{yklie@ynu.edu.cn}

\address{Xiaoyan Dou \newline
Department of Mathematics,
Yunnan University\\
Kunming, Yunnan 650091, China}
\email{douxy21@163.com}

\address{Jianwen Zhou \newline
Department of Mathematics,
Yunnan University\\
Kunming, Yunnan 650091, China}
\email{zhoujianwen2007@126.com}

\thanks{Submitted August 22, 2011. Published April 10, 2012.}
\thanks{Supported by grant 10971183 from the
National Natural Sciences Foundation of China}
\subjclass[2000]{34N05, 34K13, 34K40, 34K45}
\keywords{Positive periodic solution; neutral functional
differential equations; \hfill\break\indent 
impulses; Krasnoselskii fixed point; time scales}

\begin{abstract}
 Let $\mathbb{T}$ be a periodic time scale. We use  Krasnoselskii's
 fixed point theorem to show that the neutral functional differential
 equation with impulses
 \begin{gather*}
  x^{\Delta}(t)=-A(t)x^\sigma(t)+g^\Delta(t,x(t-h(t)))+f(t,x(t),x(t-h(t))),\\
  t\neq t_j,\;t\in\mathbb{T},\\
   x(t_j^+)= x(t_j^-)+I_j(x(t_j)), \quad j\in \mathbb{Z}^+
  \end{gather*}
  has a periodic solution. Under slightly more stringent conditions
  we show that the periodic solution is unique using the contraction
  mapping principle.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{remark}[theorem]{Remark}
\newtheorem{definition}[theorem]{Definition}
\allowdisplaybreaks

\section{Introduction}

The study of differential equations on time scales, which was created in order
to unify the study of differential and difference equations, is an area of mathematics
that has recently gained a lot of attention; moreover, many results on this issue have
been well documented in the monographs \cite{b1,b2,l3}.

Recently, Kaufmann and Raffoul \cite{k1} investigated the existence of periodic
solutions for the neutral dynamical equation on a time scale
\begin{equation} \label{e1.1}
x^\Delta(t)=-a(t)x^\sigma(t)+c(t)x^\Delta(t-k)+q(t,x(t),x(t-k)),\quad t\in\mathbb{T},
\end{equation}
where $k$ is a fixed constant if $\mathbb{T}=\mathbb{R}$ and is a multiple of
the period of $\mathbb{T}$ if $\mathbb{T}\neq\mathbb{R}$.

Differential equations with impulses provide an adequate
mathematical model of many evolutionary processes that suddenly change
their state at certain moments. Therefore, the study of this class
of dynamical systems has gained prominence, and it is a rapidly growing
field. See, for instance, the monographs \cite{l1,l2,z1,z2,z3}.

In this article, we are concerned with the  system
\begin{equation} \label{e1.2}
  \begin{gathered}
   x^{\Delta}(t)=-A(t)x^\sigma(t)+g^\Delta(t,x(t-h(t)))+f(t,x(t),x(t-h(t))),
 \\ t\neq      t_j,\;t\in \mathbb{T},\\
  x(t_j^+)= x(t_j^-)+I_j(x(t_j)), \quad j\in \mathbb{Z}^+,
 \end{gathered}
\end{equation}
where $\mathbb{T}$ is an $\omega$-periodic time scale and
$0\in\mathbb{T}$. For each interval $U$ of $\mathbb{R}$, we denote
by $U_{\mathbb{T}}=U\cap\mathbb{T}$, $x(t_j^+)$ and $x(t_j^-)$
represent the right and the left limit of $x(t_j)$ in the sense of
time scales, in addition, if $t_j$ is left-scattered, then
 $x(t_j^-)=x(t_j)$,
$A(t)= \operatorname{diag}(a_{i}(t))_{n\times n}$ $(a_{i}\in
C(\mathbb{T},\mathcal{R}^+))$  is a diagonal matrix
 with continuous real-valued functions as its elements,
 $\mathcal{R}^+=\{a(t)\in  C(\mathbb{T},\mathbb{R}): 1+\mu(t)a(t)>0\}$,
 $h\in C(\mathbb{T},\mathbb{T})$,
$g=(g_1,g_2,\dots,g_n)\in C(\mathbb{T}\times\mathbb{R}_0^n,\mathbb{R}_0^n)$,
$f=(f_1,f_2,\dots,f_n)\in C(\mathbb{T}\times\mathbb{R}_0^n\times\mathbb{R}_0^n,
\mathbb{R}_0^n)$,
$I_j=(I_j^{(1)},I_j^{(2)},\dots,I_j^{(n)})\in
C(\mathbb{R}_0^n, \mathbb{R}_0^n)$,
$\mathbb{R}_0^n=\{(t_1,t_2,\dots,t_n)
 :t_i\in \mathbb{R},\,t_i\geq0,\,i=1,2,\dots,n\}$ and
$A(t),h(t),g(t,u(t-h(t))),f(t,u(t),u(t-h(t)))$ are all
$\omega$-periodic
 functions with respect to $t$, where $\omega>0$ is a constant.
There exists a positive integer $p$ such
 that $t_{j+p}=t_j+\omega$, $I_{j+p}=I_j$, $j\in\mathbb{Z}^+$.
 Without loss of generality,
we also assume that
$[0,\omega)_\mathbb{T}\cap\{t_j,j\in\mathbb{Z}^+\}=\{t_1,t_2,\dots,t_p\}$.

Our main purpose in this paper is  using Krasnoselskii's fixed point theorem
 to study the existence of positive periodic solutions to  system \eqref{e1.2}.

 The organization of this paper is as follows. In Section 2, we introduce some
 notations and definitions, and state some preliminary
results needed in later sections, then we give the Green's function
of \eqref{e1.2}, which plays an important role in this paper. In Section 3,
we establish our main results for positive periodic solutions by
applying  Krasnoselskii's fixed point theorem, and
provide an example to illustrate the effectiveness of our results
obtained in the previous sections.


\section{Preliminaries}

 In this section, we shall recall some basic definitions and lemmas
which are used in what follows.

 Let $\mathbb{T}$ be a nonempty closed subset (time scale) of $\mathbb{R}$.
The forward and backward jump  operators
 $\sigma, \rho:\mathbb{T}\to\mathbb{T}$ and the graininess
$\mu:\mathbb{T}\to\mathbb{R}^+$  are defined, respectively, by
 \[
 \sigma(t)=\inf\{s\in\mathbb{T}:s>t\},\quad
\rho(t)=\sup\{s\in\mathbb{T}:s<t\}, \quad  \mu(t)=\sigma(t)-t.
 \]
A point $t\in\mathbb{T}$ is called left-dense if $t>\inf\mathbb{T}$
  and $\rho(t)=t$, left-scattered if $\rho(t)<t$,
right-dense if $t<\sup\mathbb{T}$
   and $\sigma(t)=t$, and right-scattered if $\sigma(t)>t$. If $\mathbb{T}$ has
   a left-scattered maximum $m$, then $\mathbb{T}^k=\mathbb{T}\backslash\{m\}$;
otherwise $\mathbb{T}^k=\mathbb{T}$.


 A function $f:\mathbb{T}\to\mathbb{R}$ is right-dense continuous provided
it is continuous at right-dense point in $\mathbb{T}$ and its
left-side limits exist at left-dense points in $\mathbb{T}$. If $f$
is continuous at each right-dense point and each left-dense point,
then $f$ is said to be a continuous function on $\mathbb{T}$. The
set of continuous functions $f:\mathbb{T}\to\mathbb{R}$ will
be denoted by $C(\mathbb{T})$.

 For $x:\mathbb{T}\to\mathbb{R}$ and $t\in\mathbb{T}^k$, we
define the delta derivative of $x(t)$, $x^{\Delta}(t)$, to be the
number (if it exists) with the property that for a given
$\varepsilon>0$, there exists a neighborhood $U_{\mathbb{T}}$ of $t$
such that
\[
|[x(\sigma(t))-x(s)]-x^{\Delta}(t)[\sigma(t)-s]|<\varepsilon|\sigma(t)-s|
\]
for all $s\in U_{\mathbb{T}}$.

If $x$ is continuous, then $x$ is
right-dense continuous, and if $x$ is delta differentiable at $t$,
then $x$ is continuous at $t$.

\begin{remark} \label{rmk2.1} \rm
$x:\mathbb{T}\to\mathbb{R}^n$ is delta differentiable or right-dense
continuous or continuous if each entry of $x$ is delta differentiable or
right-dense continuous or continuous, respectively.
\end{remark}

 Let $x$ be right-dense continuous. If $X^{\Delta}(t)=x(t)$, then we
 define the delta integral by
\begin{align*}
\int_a^t x(s)\Delta{s}=X(t)-X(a).
\end{align*}

\begin{definition}[\cite{k1}] \label{def2.1} \rm
We say that a time scale $\mathbb{T}$ is periodic if there exists
$p>0$ such that if $t\in\mathbb{T}$, then $t\pm p\in\mathbb{T}$. For
$\mathbb{T}\neq\mathbb{R}$, the smallest positive $p$ is called
the period of the time scale.

Let $\mathbb{T}\neq\mathbb{R}$ be a periodic time scale with period
$p$. We say that the function $f:\mathbb{T}\to\mathbb{R}$ is
periodic with period $\omega$ if there exists a natural number $n$
such that $\omega=np$, $f(t+\omega)=f(t)$ for all $t\in\mathbb{T}$
and $\omega$ is the smallest positive number such that $f(t+\omega)=f(t)$.

If $\mathbb{T}=\mathbb{R}$, we say that $f$ is periodic with period
$\omega>0$ if $\omega$ is the smallest positive number such that
$f(t+\omega)=f(t)$ for all $t\in\mathbb{T}$.
\end{definition}

\begin{remark} \label{rmk2.2} \rm
According to \cite{k1}, if $\mathbb{T}$ is a periodic time scale with period $p$,
then $\sigma(t+np)=\sigma(t)+np$ and
the graininess function $\mu$  is a periodic
function with period $p$.
\end{remark}

\begin{definition}[\cite{b2}] \label{def2.3} \rm
An $n\times n$-matrix-valued function $A$ on a time scale $\mathbb{T}$
is called regressive (with respect to $\mathbb{T}$) provided
$$I+\mu(t)A(t)$$
is invertible for all $t\in\mathbb{T}^k$.
\end{definition}


Let $A,B:\mathbb{T}\to\mathbb{R}^{n\times n}$ be two
$n\times n$-matrix-valued regressive functions on $\mathbb{T}$, we define
\begin{gather*}
(A\oplus B)(t):=A(t)+B(t)+\mu(t)A(t)B(t),\\
(\ominus A)(t):=-[I+\mu(t)A(t)]^{-1}A(t)=-A(t)[I+\mu(t)A(t)]^{-1}, \\
 (A(t))\ominus (B(t)):=(A(t))\oplus(\ominus (B(t))),
\end{gather*}
 for all $t\in\mathbb{T}^k$.

\begin{theorem}[\cite{b2}] \label{thm2.1}
 Let $A$ be a regressive and rd-continuous
$n\times n$-matrix-valued function on $\mathbb{T}$ and suppose that
$f:\mathbb{T}\to\mathbb{R}^n$ is rd-continuous. Let
$t_0\in\mathbb{T}$ and $y_0\in\mathbb{R}^n$. Then the initial value problem
\[
y^{\Delta}=A(t)y+f(t),\quad y(t_0)=y_0
\]
has a unique solution $y:\mathbb{T}\to\mathbb{R}^n$.
\end{theorem}

\begin{definition}[\cite{b2}] \label{def2.4} \rm
Let $t_0\in\mathbb{T}$ and assume that $A$ is a regressive and rd-continuous
$n\times n$-matrix-valued function. The unique matrix-valued solution of the
initial value problem
$$
x^{\Delta}(t)=A(t)x(t),\quad x(t_0)=I,
$$
where $I$ denotes as usual the $n\times n$-identity matrix, is called the
matrix exponential function (at $t_0$), and it is denoted by $e_A(\cdot,t_0)$.
\end{definition}

\begin{remark} \label{rmk2.3} \rm
 Assume that $A$ is a constant $n\times n$-matrix. If
$\mathbb{T}=\mathbb{R}$, then
\begin{align*}
e_A(t,t_0)=e^{A(t-t_0)},
\end{align*}
while if $\mathbb{T}=\mathbb{Z}$ and $I+A$ is invertible, then
\begin{align*}
e_A(t,t_0)=(I+A)^{(t-t_0)}.
\end{align*}
\end{remark}

In the following lemma, we give some properties of the matrix exponential function.

\begin{lemma}[\cite{b2}] \label{lem2.1}
Assume that $A,B:\mathbb{T}\to\mathbb{R}^{n\times n}$ are
regressive and rd-continuous matrix-valued functions on $\mathbb{T}$. Then
\begin{itemize}
  \item [(i)]
 $e_0(t,s)\equiv I$ and $e_A(t,t)\equiv I$;
  \item [(ii)]
  $e_A(\sigma(t),s)=(I+\mu(t)A(t))e_A(t,s)$;
  \item [(iii)]
  $e_A^{-1}(t,s)=e_{{\ominus A}^*}^*(t,s)$;
  \item [(iv)]
  $e_A(t,s)=e_A^{-1}(s,t)=e_{{\ominus A}^*}^*(s,t)$;
  \item [(v)]
  $e_A(t,s)e_A(s,r)=e_A(t,r)$;
  \item [(vi)]
  $e_A(t,s)e_B(t,s)=e_{A\oplus B}(t,s)$, if $e_A(t,s)$ and  $B(t)$
  commute,
\end{itemize}
where $A^*$ denotes the conjugate transpose of $A$.
\end{lemma}

\begin{lemma}[\cite{b2}] \label{lem2.2}
Suppose $A$ and $B$ are regressive matrix-valued functions, then
\begin{itemize}
  \item [(i)]
  $A^*$ is regressive;
  \item [(ii)]
  $\ominus A^*=(\ominus A)^*$;
  \item [(iii)]
  $(A^*)^{\Delta}=(A^{\Delta})^*$ holds for any
 differentiable matrix-valued function $A$.
  \end{itemize}
\end{lemma}

Next, we state  Krasnoselskii's fixed point theorem
which enables us to prove the existence of a periodic solution of
\eqref{e1.2}. For its proof we refer the reader to \cite{s1}.

\begin{theorem}[Krasnoselskii] \label{thm2.2}
Let $\mathbb{M}$ be a closed convex nonempty subset of Banach space
$(\mathbb{B},\|\cdot\|)$. Suppose that $\Phi$ and $\Psi$ map
$\mathbb{M}$ into $\mathbb{B}$ such that
\begin{itemize}
  \item [(i)]
  $x,y\in\mathbb{M}$ imply $\Phi x+\Psi y\in\mathbb{M}$;
  \item [(ii)]
  $\Psi$ is compact and continuous;
  \item [(iii)]
  $\Phi$ is a contraction mapping.
\end{itemize}
Then there exists $z\in\mathbb{M}$ with $z=\Phi z+\Psi z$.
\end{theorem}


\begin{lemma} \label{lem2.3}
A function $x(t)$ is an $\omega$-periodic solution of \eqref{e1.2} if and
only if $x(t)$ is an $\omega$-periodic solution of the equation
\begin{align*}
x(t)&= g(t,x(t-h(t)))+\int_t^{t+\omega}G(t,s)[f(s,x(s),x(s-h(s)))\\
&\quad -(\ominus A(t))g^{\sigma}(s,x(s-h(s)))]\Delta{s} 
 +\sum_{j:t_j\in[t,t+\omega)}G(t,t_j)I_j(x(t_j)),
\end{align*}
where
\begin{gather*}
G(t,s)=\operatorname{diag}(G_i(t,s))_{n\times n},\quad G_i(t,s)=\big(1-e_{\ominus
a_i}(\omega,0)\big)^{-1}e_{\ominus a_i}(t,s),\\
\ominus A (t)=\operatorname{diag}(\ominus a_i(t))_{n\times n}.
\end{gather*}
\end{lemma}

\begin{proof}
Suppose $x$ is an $\omega$-periodic solution of \eqref{e1.2}. For any
$t\in\mathbb{T}$, there exists $j\in\mathbb{Z}$ such that $t_j$ is
the first impulsive point after $t$. Then for $i=1,2,\dots,n,$
$x_i$ is an $\omega$-periodic solution of the equation
\begin{equation} \label{e2.1}
x_i^{\Delta}(t)+a_i(t)x_i^{\sigma}(t)
=g_i^{\Delta}(t,x_i(t-h(t)))+f_i(t,x_i(t),x_i(t-h(t))).
\end{equation}
Multiply both sides of \eqref{e2.1} by $e_{a_i}(t,0)$ and then integrate
from $t$ to $s\in[t,t_j]_{\mathbb{T}}$, we obtain
\begin{align*}
&\int_t^s[e_{a_i}(\tau,0)x_i(\tau)]^{\Delta}\Delta{\tau}\\
&=\int_t^s e_{a_i}(\tau,0)[g_i^{\Delta}(\tau,x_i(\tau-h(\tau)))
+f_i(\tau,x_i(\tau),x_i(\tau-h(\tau)))]\Delta{\tau},
\end{align*}
or
\begin{align*}
e_{a_i}(s,0)x_i(s)
&=  e_{a_i}(t,0)x_i(t)
+\int_t^s e_{a_i}(\tau,0)[g_i^{\Delta}(\tau,x_i(\tau-h(\tau)))\\
&\quad +f_i(\tau,x_i(\tau),x_i(\tau-h(\tau)))]\Delta{\tau},
\end{align*}
then
\begin{align*}
x_i(s)&= e_{\ominus a_i}(s,t)x_i(t)
+\int_t^se_{\ominus a_i}(s,\tau)[g_i^{\Delta}(\tau,x_i(\tau-h(\tau)))\\
&\quad +f_i(\tau,x_i(\tau),x_i(\tau-h(\tau)))]\Delta{\tau},\quad i=1,2,\dots,n,
\end{align*}
hence
\begin{equation} \label{e2.2}
\begin{aligned}
x_i(t_j)&= e_{\ominus a_i}(t_j,t)x_i(t)
+\int_t^{t_j} e_{\ominus a_i}(t_j,\tau)[g_i^{\Delta}(\tau,x_i(\tau-h(\tau))) \\
&\quad +f_i(\tau,x_i(\tau),x_i(\tau-h(\tau)))]\Delta{\tau},\quad i=1,2,\dots,n.
\end{aligned}
\end{equation}
Similarly, for $s\in(t_j,t_{j+1}]$, we have
\begin{align*}
x_i(s)
&= e_{\ominus a_i}(s,t_j)x_i(t_j^+)+\int_{t_j}^s
 e_{\ominus a_i}(s,\tau)[g_i^{\Delta}(\tau,x_i(\tau-h(\tau)))\\
&\quad +f_i(\tau,x_i(\tau),x_i(\tau-h(\tau)))]\Delta{\tau}\\
&= e_{\ominus a_i}(s,t_j)x_i(t_j^-)+\int_{t_j}^s e_{\ominus a_i}(s,\tau)
 [g_i^{\Delta}(\tau,x_i(\tau-h(\tau)))\\
&\quad +f_i(\tau,x_i(\tau),x_i(\tau-h(\tau)))]
 \Delta{\tau}+e_{\ominus a_i}(s,t_j)I_{ij}(x_i(t_j))\\
&= e_{\ominus a_i}(s,t_j)x_i(t_j)+\int_{t_j}^s e_{\ominus a_i}(s,\tau)
 [g_i^{\Delta}(\tau,x_i(\tau-h(\tau)))\\
&\quad +f_i(\tau,x_i(\tau),x_i(\tau-h(\tau)))]\Delta{\tau}
 +e_{\ominus a_i}(s,t_j)I_{ij}(x_i(t_j)),
\end{align*}
for $i=1,2,\dots,n$.
Substituting \eqref{e2.2} in the above equality, we obtain
\begin{align*}
x_i(s)
&= e_{\ominus a_i}(s,t)x_i(t)+\int_t^s e_{\ominus a_i}(s,\tau)
[g_i^{\Delta}(\tau,x_i(\tau-h(\tau)))\\
&\quad +f_i(\tau,x_i(\tau),x_i(\tau-h(\tau)))]\Delta{\tau}+e_{\ominus
a_i}(s,t_j)I_{ij}(x_i(t_j)).
\end{align*}
Repeating the above process for $s\in[t,t+\omega]_{\mathbb{T}}$, we
have
\begin{align*}
x_i(s)
&= e_{\ominus a_i}(s,t)x_i(t)+\int_t^s e_{\ominus a_i}(s,\tau)
 [g_i^{\Delta}(\tau,x_i(\tau-h(\tau)))\\
&\quad +f_i(\tau,x_i(\tau),x_i(\tau-h(\tau)))]\Delta{\tau}
 +\sum_{j:t_j\in[t,t+\omega)}e_{\ominus a_i}(s,t_j)I_{ij}(x_i(t_j)),
\end{align*}
for $i=1,2,\dots,n$.
Let $s=t+\omega$ in the above equality, we have
\begin{align*}
x_i(t+\omega)
&= e_{\ominus a_i}(t+\omega,t)x_i(t)+\int_t^{t+\omega}
e_{\ominus a_i}(t
+\omega,\tau)[g_i^{\Delta}(\tau,x_i(\tau-h(\tau)))\\
&\quad +f_i(\tau,x_i(\tau),x_i(\tau-h(\tau)))]\Delta{\tau}+\sum_{j:t_j\in[t,t+\omega)}e_{\ominus
a_i}(t+\omega,t_j)I_{ij}(x_i(t_j)),
\end{align*}
$i=1,2,\dots,n.$
Noticing that $x_i(t+\omega)=x_i(t)$ and $e_{\ominus
a_i}(t+\omega,t)=e_{\ominus a_i}(\omega,0)$, we obtain
\begin{equation} \label{e2.3}
\begin{aligned}
(1-e_{\ominus a_i}(\omega,0))x_i(t)
&= \int_t^{t+\omega} e_{\ominus a_i}(t
+\omega,\tau)[g_i^{\Delta}(\tau,x_i(\tau-h(\tau))) \\
&\quad +f_i(\tau,x_i(\tau),x_i(\tau-h(\tau)))]\Delta{\tau} \\
&\quad +\sum_{j:t_j\in[t,t+\omega)} e_{\ominus
a_i}(t+\omega,t_j)I_{ij}(x_i(t_j)),
\end{aligned}
\end{equation}
for $i=1,2,\dots,n$.
Notice that
\begin{equation} \label{e2.4}
\begin{aligned}
&\int_t^{t+\omega}e_{\ominus a_i}(t,\tau)g_i^{\Delta}
 (\tau,x_i(\tau-h(\tau)))\Delta{\tau} \\
&= e_{\ominus a_i}(t,t+\omega)g_i(t+\omega,x_i
 (t+\omega-h(t+\omega)))-e_{\ominus a_i}(t,t)g_i(t,x_i(t-h(t))) \\
&\quad -\int_t^{t+\omega}e_{\ominus a_i}(t,\tau)(\ominus
a_i(t))g_i^{\sigma}(\tau,x_i(\tau-h(\tau)))\Delta{\tau} \\
&= [e_{\ominus a_i}(0,\omega)-1]g_i(t,x_i(t-h(t))) \\
&\quad -\int_t^{t+\omega}e_{\ominus a_i}(t,\tau)(\ominus
a_i(t))g_i^{\sigma}(\tau,x_i(\tau-h(\tau)))\Delta{\tau},\quad i=1,2,\dots,n.
\end{aligned}
\end{equation}
It follows from \eqref{e2.3} and \eqref{e2.4} that
\begin{align*}
x_i(t)
&= g_i(t,x_i(t-h(t))) +\int_t^{t+\omega}[1-e_{\ominus
a_i}(\omega,0)]^{-1}e_{\ominus
a_i}(t,\tau)\\
&\quad\times [f_i(\tau,x_i(\tau),x_i(\tau-h(\tau))) 
 -(\ominus a_i(t))g_i^{\sigma}(\tau,x_i(\tau-h(\tau)))]\Delta{\tau}\\
&\quad +\sum_{j:t_j\in[t,t+\omega)}[1-e_{\ominus a_i}(\omega,0)]^{-1}
e_{\ominus a_i}(t,t_j)I_{ij}(x_i(t_j)) \\
&= g_i(t,x_i(t-h(t)))+\int_t^{t+\omega}G_i(t,\tau)[f_i(\tau,x_i(\tau),x_i(\tau-h(\tau))) \\
&\quad -(\ominus a_i(t))g_i^{\sigma}(\tau,x_i(\tau-h(\tau)))]\Delta{\tau}
+\sum_{j:t_j\in[t,t+\omega)}G_i(t,t_j)I_{ij}(x_i(t_j)),
\end{align*}
for $i=1,2,\dots,n$.
Next, we prove the converse. Let
\begin{align*}
x_i(t)
&= g_i(t,x_i(t-h(t)))+\int_t^{t+\omega}G_i(t,s)[f_i(s,x_i(s),x_i(s-h(s)))\\
&\quad -(\ominus a_i(t))g_i^{\sigma}(s,x_i(s-h(s)))]\Delta{s} 
 +\sum_{j:t_j\in[t,t+\omega)}G_i(t,t_j)I_{ij}(x_i(t_j)),
\end{align*}
where
\[
G_i(t,s)=(1-e_{\ominus a_i}(\omega,0))^{-1}e_{\ominus
a_i}(t,s),\quad i=1,2,\dots,n.
\]
Then if $t\neq t_i,i\in\mathbb{Z}^+$, we have
\begin{align*}
&x_i^{\Delta}(t)\\
&= g_i^{\Delta}(t,x_i(t-h(t)))\\
&\quad +\int_t^{t+\omega}\{G_i(t,s)[f_i(s,x_i(s),x_i(s-h(s)))
-(\ominus a_i(t))g_i^{\sigma}(s,x_i(s-h(s)))]\}^{\Delta}\Delta{s}\\
&\quad +G_i(t,t+\omega)[f_i(t+\omega,x_i(t+\omega),x_i(t+\omega-h(t+\omega)))\\
&\quad -(\ominus a_i(t))g_i^{\sigma}(t+\omega,x_i(t+\omega-h(t+\omega)))]\\
&\quad -G_i(t,t)[f_i(t,x_i(t),x_i(t-h(t)))-(\ominus a_i(t))g_i^{\sigma}(t,x_i(t-h(t)))]\\
&= g_i^{\Delta}(t,x_i(t-h(t)))+f_i(t,x_i(t),x_i(t-h(t)))\\
&\quad +\int_t^{t+\omega}\{G_i(t,s)[f_i(s,x_i(s),x_i(s-h(s)))\\
&\quad -(\ominus a_i(t))g_i^{\sigma}(s,x_i(s-h(s)))]\}^{\Delta}\Delta{s}
 -(\ominus a_i(t))g_i^{\sigma}(t,x_i(t-h(t)))\\
&= g_i^{\Delta}(t,x_i(t-h(t)))+f_i(t,x_i(t),x_i(t-h(t)))-a_i(t)x_i^{\sigma}(t)\\
&= -a_i(t)x_i^{\sigma}(t)+g_i^{\Delta}(t,x_i(t-h(t)))+f_i(t,x_i(t),x_i(t-h(t))),
\; i=1,2,\dots,n.
\end{align*}
If $t=t_i,i\in\mathbb{Z}^+$, we obtain
\begin{align*}
&x_i(t_i^+)-x_i(t_i^-)\\
&= \sum_{j:t_j\in[t_i^+,t_i^++\omega)}G_i(t_i,t_j)I_{ij}(x_i(t_j))
-\sum_{j:t_j\in[t_i^-,t_i^-+\omega)}G_i(t_i,t_j)I_{ij}(x_i(t_j))\\
&= G_i(t_i,t_i+\omega)I_i(x_i(t_i+\omega))-G_i(t_i,t_i)I_i(x_i(t_i))\\
&= I_i(x_i(t_i)),\,i=1,2,\dots,n.
\end{align*}
So we know that, $x$ is also an $\omega$-periodic solution of \eqref{e1.2}.
This completes the proof.
\end{proof}

Throughout this paper, we make the following assumptions:
\begin{itemize}
\item[(H1)] The function $g=(g_1,g_2,\dots,g_n)$ satisfies a Lipschitz condition
in $x$. That is, for $i\in\{1,2,\dots,n\}$,
 there exists a positive constant $L_i$ such that
$$
|g_i(t,x)-g_i(t,y)|\leq L_i\|x-y\|, \quad \text{for all }
  t\in\mathbb{T},x,y\in\mathbb{R}_0^n.
$$
\item[(H2)]
The function $f=(f_1,f_2,\dots,f_n)$ satisfies a Lipschitz condition
in $x$ and $y$.
 That is, for $i\in\{1,2,\dots,n\}$, there exist positive constants
$M_i$ and $N_i$ such that
$$
|f_i(t,x,y)-f_i(t,\xi,\zeta)|\leq M_i\|x-\xi\|+N_i\|y-\zeta\|,
 \text{ for all } t\in\mathbb{T},
(x,y),(\xi,\zeta)\in\mathbb{R}_0^n\times\mathbb{R}_0^n.
$$
\item[(H3)]
For $j\in \mathbb{Z}^+$, $I_j=(I_j^{(1)},I_j^{(2)},\dots,I_j^{(n)})$ satisfies a
Lipschitz condition.
 That is, for $i\in\{1,2,\dots,n\}$ there exists a positive constant
$P_j^{(i)}$ such that
$$
|I_j^{(i)}(x)-I_j^{(i)}(y)|\leq P_j^{(i)}\|x-y\|,\quad \text{for all }
 x,y\in\mathbb{R}_0^n.
$$
\end{itemize}


To apply Theorem \ref{thm2.2} to  \eqref{e1.2}, we define
\begin{align*}
PC(\mathbb{T})=\{x:\mathbb{T}\to\mathbb{R}^n:x|_{(t_j,t_{j+1})_{\mathbb{T}}}
\in C(t_j,t_{j+1})_{\mathbb{T}},\exists
x(t_j^-)=x(t_j),x(t_j^+),j\in \mathbb{Z}^+\}.
\end{align*}

Consider the Banach space
\begin{align*}
X=\{x(t)\in PC(\mathbb{T}):x(t+\omega)=x(t)\}
\end{align*}
with the norm 
$\|x\|=\max_{t\in[0,\omega]_{\mathbb{T}}}|x(t)|_0$, where
$|x(t)|_0=\max_{1\leq i\leq n}|x_i(t)|$.


\begin{lemma}[\cite{k1}] \label{lem2.4}
Let $x\in X$. Then $\|x^\sigma\|$ exists, and $\|x^\sigma\|=\|x\|$.
\end{lemma}

Noticing that
\[
G_i(t,s)\leq(1-e_{\ominus a_i}(\omega,0))^{-1}:=\eta_i,
\]
for convenience, we introduce the notation
\begin{gather*}
\bar{\eta}:=\max_{1\leq i\leq n}\eta_i, \quad
\gamma:=\max_{1\leq i\leq n}\max_{t\in[0,\omega]_\mathbb{T}}|\ominus a_i(t)|,\quad
L:=\max_{1\leq i\leq n}L_i,\quad M:=\max_{1\leq i\leq n}M_i,
\\
N:=\max_{1\leq i\leq n}N_i,\quad
P_j:=\max_{1\leq i\leq n}P_j^{(i)},\quad P:=\max_{1\leq j\leq p}P_j.
\end{gather*}
Define the mapping $H:X\to X$ by
\begin{equation} \label{e2.5}
 \begin{aligned}
(H\varphi)(t)
&= g(t,\varphi(t-h(t)))+\int_t^{t+\omega}G(t,s)[f(s,\varphi(s),\varphi(s-h(s))) \\
&\quad -(\ominus A(t))g^{\sigma}(s,\varphi(s-h(s)))]\Delta{s}
 +\sum_{j:t_j\in[t,t+\omega)}G(t,t_j)I_j(\varphi(t_j)).
\end{aligned}
\end{equation}

To apply Theorem \ref{thm2.2}, we need to construct two mappings: one  is a
contraction and the other is continuous and compact. We express
\eqref{e2.5} as
\[
(H\varphi)(t)=(\Phi\varphi)(t)+(\Psi\varphi)(t),
\]
where
\begin{gather}
(\Phi\varphi)(t)=g(t,\varphi(t-h(t))), \label{e2.6}\\
\begin{aligned}
(\Psi\varphi)(t)
&= \int_t^{t+\omega}G(t,s)[f(s,\varphi(s),\varphi(s-h(s)))
-(\ominus A(t))g^{\sigma}(s,\varphi(s-h(s)))]\Delta{s} \\
&\quad +\sum_{j:t_j\in[t,t+\omega)}G(t,t_j)I_j(\varphi(t_j)).
\end{aligned} \label{e2.7}
\end{gather}

\begin{lemma} \label{lem2.5}
Suppose {\rm (H1)} holds and $L<1$, then $\Phi:X\to X$, as
defined by \eqref{e2.6}, is a contraction.
\end{lemma}

\begin{proof}
Trivially, $\Phi:X\to X$. For $\varphi,\psi\in X$, we have
\begin{equation} \label{e2.8}
\begin{aligned}
\|\Phi(\varphi)-\Phi(\psi)\|
&= \max_{t\in[0,\omega]_{\mathbb{T}}}\max_{1\leq
i\leq n}|g_i(t,\varphi(t-h(t)))-g_i(t,\psi(t-h(t)))| \\
&\leq  L\|\varphi-\psi\|.
\end{aligned}
\end{equation}
Hence $\Phi$ defines a contraction mapping with contraction constant $L$.
\end{proof}

\begin{lemma} \label{lem2.6}
Suppose {\rm (H1)--(H3)} hold, then $\Psi:X\to X$, as defined by \eqref{e2.7},
is continuous and compact.
\end{lemma}

\begin{proof}
\begin{align*}
&(\Psi\varphi)(t+\omega)\\
&= \int_{t+\omega}^{t+2\omega}G(t+\omega,s)[f(s,\varphi(s),\varphi(s-h(s)))
-(\ominus A(t+\omega))g^{\sigma}(s,\varphi(s-h(s)))]\Delta{s}\\
&\quad +\sum_{j:t_j\in[t+\omega,t+2\omega)}G(t+\omega,t_j)I_j(\varphi(t_j))\\
&= \int_t^{t+\omega}G(t+\omega,u+\omega)[f(u+\omega,\varphi(u+\omega),
 \varphi(u+\omega-h(u+\omega)))\\
&\quad -(\ominus A(t+\omega))g^\sigma(u+\omega,
 \varphi(u+\omega-h(u+\omega)))]\Delta{u}
+\sum_{k:t_k\in[t,t+\omega)}G(t+\omega,t_k+\omega)I_k(\varphi(t_k))\\
&= \int_t^{t+\omega}G(t,u)[f(u,\varphi(u),\varphi(u-h(u)))\\
&\quad -(\ominus A(t))g^\sigma(u,\varphi(u-h(u)))]\Delta{u}
+\sum_{k:t_k\in[t,t+\omega)}G(t,t_k)I_k(\varphi(t_k))\\
&= (\Psi\varphi)(t).
\end{align*}
That is, $\Psi:X\to X$.

Now, we show that $\Psi$ is continuous. Let $\varphi,\psi\in X$,
given $\varepsilon>0$, take
$$
\delta=\frac{\varepsilon}{\overline{\eta}[\omega(M+N+L\gamma)+P]}
$$
and suppose that $\|\varphi-\psi\|\leq\delta$. By using the
Lipschitz condition, we obtain
\begin{align*}
&\|\Psi\varphi-\Psi\psi\|\\
&\leq \max_{t\in[0,\omega]_\mathbb{T}}|
\int_t^{t+\omega}G(t,s)[f(s,\varphi(s),\varphi(s-h(s)))
-f(s,\psi(s),\psi(s-h(s)))]\Delta{s}|_0\\
&\quad +\max_{t\in[0,\omega]_\mathbb{T}}|\int_t^{t+\omega}G(t,s)(\ominus
A(t))[g(s,\varphi(s-h(s)))-g(s,\psi(s-h(s)))]\Delta{s}|_0\\
&\quad +\max_{t\in[0,\omega]_\mathbb{T}}\sum_{j:t_j\in[t,t+\omega)}|G(t,t_j)
[I_j(\varphi(t_j))-I_j(\psi(t_j))]|_0\\
&\leq \overline{\eta}\int_0^\omega|f(s,\varphi(s),\varphi(s-h(s)))
 -f(s,\psi(s),\psi(s-h(s)))|_0\Delta{s}\\
&\quad +\overline{\eta}\gamma\int_0^\omega|g(s,\varphi(s-h(s)))
 -g(s,\psi(s-h(s)))|_0\Delta{s}\\
&\quad +\overline{\eta}\max_{1\leq j\leq p}|I_j(\varphi(t_j))-I_j(\psi(t_j))|_0\\
&\leq \overline{\eta}[\omega(M+N+L\gamma)+P]\|\varphi-\psi\|
< \varepsilon.
\end{align*}
This proves $\Psi$ is continuous. Next, we need to show that $\Psi$
is compact. Consider the sequence of periodic functions
$\{\varphi_n\}\subset X$ and assume that the sequence is uniformly
bounded. Let $ \Theta>0$ be such that $\|\varphi_n\|\leq\Theta$, for
all $n\in \mathbb{N}$. In view of (H1)--(H3), we arrive at
\begin{equation} \label{e2.9}
\begin{aligned}
\|g^\sigma(t,x)\|
&\leq  \|g^\sigma(t,x)-g^\sigma(t,0)\|+\|g^\sigma(t,0)\| \\
&=  \max_{t\in[0,\omega]_{\mathbb{T}}}\max_{1\leq i\leq n}|g_i^\sigma(t,x)
               -g_i^\sigma(t,0)|+\alpha_g \\
&\leq L\|x\|+\alpha_g;
\end{aligned}
\end{equation}
\begin{equation} \label{e2.10}
 \begin{aligned}
\|f(t,x,y)\|
&\leq  \|f(t,x,y)-f(t,0,0)\|+\|f(t,0,0)\| \\
&=  \max_{t\in[0,\omega]_{\mathbb{T}}}\max_{1\leq i\leq n}|f_i(t,x,y)-f_i(t,0,0)|
  +\alpha_f \\
&\leq M\|x\|+N\|y\|+\alpha_f;
\end{aligned}
\end{equation}
\begin{equation} \label{e2.11}
\begin{aligned}
\|I_j(x)\|
&\leq  \|I_j(x)-I_j(0)\|+\|I_j(0)\| \\
&= \max_{1\leq i\leq n}|I_j^{(i)}(x)-I_j^{(i)}(0)| +\alpha_{I_j} \\
&\leq  P_j\|x\|+\alpha_{I_j},\quad \text{for } j\in \mathbb{Z}^+,
\end{aligned}
\end{equation}
where $\alpha_g=\|g^\sigma(t,0)\|$, $\alpha_f=\|f(t,0,0)\|$ and
$\alpha_{I_j}=\|I_j(0)\|$. Hence,
\begin{equation} \label{e2.12}
\begin{aligned}
&\|\Psi\varphi_n\|\\
&\leq \max_{t\in[0,\omega]_{\mathbb{T}}}|\int_t^{t
+\omega}G(t,s)[f(s,\varphi_n(s),\varphi_n(s-h(s)))\\
&\quad -(\ominus A(t))g^{\sigma}(s,\varphi_n(s-h(s)))]\Delta{s}|_0 
 +\max_{t\in[0,\omega]_{\mathbb{T}}}\sum_{j:t_j\in[t,t+\omega)}|G(t,t_j)I_j(\varphi_n(t_j))|_0 \\
&\leq \overline{\eta}\int_0^\omega|f(s,\varphi_n(s),\varphi_n(s-h(s)))|_0\Delta{s}+
\overline{\eta}\gamma\int_0^\omega|g^\sigma(s,\varphi_n(s-h(s)))|_0\Delta{s} \\
&\quad +\overline{\eta}\sum_{j=1}^p|I_j(\varphi_n(t_j))|_0 \\
&\leq \overline{\eta}\omega(M\|\varphi_n\|+N\|\varphi_n\|+\alpha_f)\\
&\quad +\overline{\eta}\gamma\omega(L\|\varphi_n\|+\alpha_g)
+\overline{\eta}(\max_{1\leq j\leq p}(P_j\|\varphi_n\|+\alpha_{I_j})) \\
&\leq \overline{\eta}\omega\Theta(M+N+\gamma
L)+\overline{\eta}(\omega\alpha_f+\gamma\omega\alpha_g+P\Theta+\alpha):=D,
\end{aligned}
\end{equation}
where $\alpha=\max_{1\leq j\leq p}\alpha_{I_j}$. Thus the sequence
$\{\Psi\varphi_n\}$ is uniformly bounded. Now, it can be easily
checked that
\begin{align*}
(\Psi\varphi_n)^\Delta(t)
&=-A(t)(\Psi\varphi_n)^{\sigma}(t)+f(t,\varphi_n(t),\varphi_n(t-h(t)))\\
&\quad +\sum_{j:t_j\in[t,t+\omega)}G^{\Delta}(t,t_j)I_j(\varphi_n(t_j)).
\end{align*}
Consequently, it follows from \eqref{e2.10}, \eqref{e2.11}, \eqref{e2.12}
and Lemma \ref{lem2.4} that
\begin{align*}
|(\Psi\varphi_n)^\Delta(t)|_0
&\leq  \|A\|\|(\Psi\varphi_n)^{\sigma}\|+\|f(t,\varphi_n(t),\varphi_n(t-h(t)))\|\\
&\quad +|\ominus A(t)|_0\sum_{j:t_j\in[t,t+\omega)}|G(t,t_j)I_j(\varphi_n(t_j))|_0\\
&\leq \|A\|\|(\Psi\varphi_n)\|+(M+N)\|\varphi_n\|+\alpha_f
 +\gamma\overline{\eta}\sum_{j=1}^p|I_j(\varphi_n(t_j))|_0\\
&\leq \|A\|D+(M+N)\|\varphi_n\|+\alpha_f
 +\gamma\overline{\eta}\sum_{j=1}^p\|I_j(\varphi_n)\|\\
&\leq \|A\|D+(M+N)\|\varphi_n\|+\alpha_f
 +\gamma\overline{\eta}\sum_{j=1}^p(P_j\|\varphi_n\|+\alpha_{I_j})\\
&\leq \|A\|D+(M+N)\Theta+\alpha_f+\gamma\overline{\eta}(P\Theta+\alpha),
\end{align*}
for all $n$. That is,
 $\|(\Psi\varphi_n)^\Delta\|\leq \|A\|D+(M+N)\Theta+\alpha_f
+\gamma\overline{\eta}(P\Theta+\alpha)$, thus the sequence
$\{\Psi\varphi_n\}$ is uniformly bounded and equicontinuous. The
Arzel\`a--Ascoli theorem implies that $\Psi$ is compact.
\end{proof}

\section{Main Result}

Our main result reads as follows.

\begin{theorem}  \label{thm3.1}
Assume that {\rm (H1)--(H3)} hold and $L<1$. Suppose that there is a positive
constant $G$ such that all solutions $x(t)$ of  \eqref{e1.2}, $x(t)\in
X$, satisfy $\|x\|\leq G$, and the inequality
\begin{equation} \label{e3.1}
\frac{\gamma\omega\alpha_g+\omega\alpha_f+\alpha}{1/\overline{\eta}-\omega(\gamma
L+M+N)-L/\overline{\eta}-P}<G
\end{equation}
holds. Then \eqref{e1.2} has an $\omega$-periodic solution.
\end{theorem}

\begin{proof}
Define $\mathbb{M}=\{\varphi\in X:\|\varphi\|\leq G\}$.
Then Lemma \ref{lem2.6} implies $\Psi:X\to X$  and $\Psi$ is compact and continuous.
Also, from Lemma \ref{lem2.5}, the mapping $\Phi$ is a contraction and
$\Phi:X\to X$. We need to show that if $\varphi,\psi\in \mathbb{M}$,
then $\|\Phi\varphi+\Psi\psi\|\leq G$. Let $\varphi,\psi\in \mathbb{M}$ with
$\|\varphi\|,\|\psi\|\leq G$, from \eqref{e2.9}--\eqref{e2.11}, we have
\begin{align*}
\|\Phi\varphi+\Psi\psi\|
&\leq  \|\Phi\varphi\|+\|\Psi\psi\|\\
&\leq  LG + \overline{\eta}\omega G(\gamma L+M+N)
       +\overline{\eta}(\gamma\omega\alpha_g+\omega\alpha_f+GP+\alpha)
\leq  G.
\end{align*}
Thus $\Phi\varphi+\Psi\psi\in\mathbb{M}$. We see that all the
conditions of Krasnoselskii's theorem are satisfied on
the set $\mathbb{M}$. Hence there exists a fixed point $z$ in
$\mathbb{M}$ such that $z=\Phi z+\Psi z$. By Lemma \ref{lem2.3}, this fixed
point is a solution of \eqref{e1.2}.
\end{proof}

\begin{theorem} \label{thm3.2}
Suppose that {\rm (H1)--(H3)}  hold. If
\begin{align*}
\Upsilon:=\overline{\eta}[\omega(\gamma L+M+N)+P]<1,
\end{align*}
then  \eqref{e1.2} has a unique $\omega$-periodic solution.
\end{theorem}

\begin{proof}
For $\varphi,\psi\in X$, we have
\begin{align*}
\|H\varphi-H\psi\|
&\leq \overline{\eta}\int_0^\omega|f(s,\varphi(s),\varphi(s-h(s)))
-f(s,\psi(s),\psi(s-h(s)))|_0\Delta{s}\\
&\quad +\overline{\eta}\gamma\int_0^\omega|g^\sigma(s,\varphi(s-h(s)))-g^\sigma(s,\psi(s-h(s)))|_0\Delta{s}\\
&\quad +\overline{\eta}\sum_{j=1}^p|I_j(\varphi(t_j))-I_j(\psi(t_j))|_0\\
&\leq \overline{\eta}\omega(M\|\varphi-\psi\|+N\|\varphi-\psi\|)+\overline{\eta}\gamma\omega L\|\varphi-\psi\|
+\overline{\eta}P\|\varphi-\psi\|\\
&= \overline{\eta}[\omega(\gamma L+M+N)+P]\|\varphi-\psi\|\\
&= \Upsilon\|\varphi-\psi\|.
\end{align*}
Since $\Upsilon<1$, the mapping $H$ is a contraction on $X$; by the
contraction mapping principle, $H$ has a unique fixed point in $X$, which,
by Lemma \ref{lem2.3}, is the unique $\omega$-periodic solution of
\eqref{e1.2}. This completes the proof.
\end{proof}


The next corollary shows that $G$ in Theorem \ref{thm3.1} can be attained.

\begin{corollary} \label{coro3.1}
Consider \eqref{e1.2} and suppose that {\rm (H1)--(H3)}
hold and $L<1$. Set
$\rho=\min_{t\in[0,\omega]_\mathbb{T}}\max_{1\leq i\leq n}|a_i(t)|$. If
\[
\rho>\frac{M+N}{1-\omega(\|A\|+L+M+N)}
\]
holds and the constant $G$ defined by
\[
G=\frac{\alpha_f+\rho\omega(\alpha_f+\alpha_g)}{\rho-(M+N)-\rho\omega(\|A\|+L+M+N)}
\]
satisfies \eqref{e3.1}, then \eqref{e1.2} has an $\omega$-periodic solution.
\end{corollary}

\begin{proof}
Let $x\in X$. Then, for $i=1,2,\dots,n$, integrating \eqref{e1.2} from
0 to $\omega$, we obtain
\begin{align*}
x_i(\omega)-x_i(0)
&= -\int_0^\omega a_i(t)x_i^\sigma(t)\Delta{t}
 +\int_0^\omega g_i^\Delta(t,x_i(t-h(t)))\Delta{t}\\
&\quad +\int_0^\omega f_i(t,x_i(t),x_i(t-h(t)))\Delta{t}.
\end{align*}
Then
\begin{align*}
0&= -\int_0^\omega a_i(t)x_i^\sigma(t)\Delta{t}
 +g_i(\omega,x_i(\omega-h(\omega)))-g_i(0,x_i(0-h(0)))\\
&\quad +\int_0^\omega f_i(t,x_i(t),x_i(t-h(t)))\Delta{t},\,i=1,2,\dots,n;
\end{align*}
that is,
\[
\int_0^\omega a_i(t)x_i^\sigma(t)\Delta{t}=\int_0^\omega
f_i(t,x_i(t),x_i(t-h(t)))\Delta{t},\quad i=1,2,\dots,n.
\]
\textbf{Claim.} There exists $t^*\in[0,\omega]_{\mathbb{T}}$ such that
\[
\omega a_i(t^*)x_i^\sigma(t^*)\leq\int_0^\omega a_i(t)x_i^\sigma(t)\Delta{t}.
\]
Suppose the Claim is false. Define
$S_i:=\int_0^\omega a_i(t)x_i^\sigma(t)\Delta{t}$, $i=1,2,\dots,n$.
Then there exists $\varepsilon_i>0$ such that
\[
\omega a_i(t)x_i^\sigma(t)>S_i+\varepsilon_i
\]
for all $t\in[0,\omega]_{\mathbb{T}}$. So
\[
S_i=\int_0^\omega a_i(t)x_i^\sigma(t)\Delta{t}
>\frac{1}{\omega}\int_0^\omega(S_i+\varepsilon_i)\Delta{t}
=S_i+\varepsilon_i,\quad i=1,2,\dots,n,
\]
which is a contradiction.

As a consequence of the claim, we have
\begin{align*}
\omega|A(t^*)|_0|x^\sigma(t^*)|_0
&\leq \int_0^\omega|f(t,x(t),x(t-h(t)))|_0\Delta{t}\\
&\leq \int_0^\omega(M\|x\|+N\|x\|+\alpha_f)\Delta{t}.
\end{align*}
So,
\[
|A(t^*)|_0|x^\sigma(t^*)|_0\leq(M+N)\|x\|+\alpha_f,
\]
 which implies
\[
|x^\sigma(t^*)|_0
\leq \frac{(M+N)\|x\|+\alpha_f}{|A(t^*)|_0}
\leq \frac{(M+N)\|x\|+\alpha_f}{\rho}.
\]
Since for all $t\in[0,\omega]_{\mathbb{T}}$,
\[
x^\sigma(t)=x^\sigma(t^*)+\int_{t^*}^t x^\Delta(\sigma(s))\Delta{s},
\]
it follows that
\begin{align*}
|x^\sigma(t)|_0
&\leq |x^\sigma(t^*)|_0+\int_0^\omega|x^\Delta(\sigma(s))|_0\Delta{s}\\
&\leq \frac{(M+N)\|x\|+\alpha_f}{\rho}+\omega\|x^\Delta\|.
\end{align*}
This implies
\begin{equation} \label{e3.2}
\|x\|\leq\frac{1}{\rho}(M+N)\|x\|+\frac{\alpha_f}{\rho}+\omega\|x^\Delta\|.
\end{equation}
From \eqref{e1.2}, we have
\begin{equation} \label{e3.3}
\|x^\Delta\|\leq\|A\|\|x\|+L\|x\|+\alpha_g+(M+N)\|x\|+\alpha_f.
\end{equation}
Substituting \eqref{e3.3} in \eqref{e3.2} yields
\begin{align*}
\|x\|\leq\frac{1}{\rho}(M+N)\|x\|+\frac{\alpha_f}{\rho}
+\omega\big[(\|A\|+L+M+N)\|x\|+\alpha_g+\alpha_f\big].
\end{align*}
Then
\begin{align*}
\|x\|\leq\frac{\alpha_f+\rho\omega(\alpha_g+\alpha_f)}{\rho-(M+N)
-\rho\omega(\|A\|+L+M+N)}=G.
\end{align*}
Thus, for all $x\in X$, $\|x\|\leq G$. Define
$\mathbb{M}=\{\varphi\in X:\|\varphi\|\leq G\}$.
Then by Theorem \ref{thm3.1}, Equation \eqref{e1.2} has an $\omega$-periodic solution.
 The proof is complete.
\end{proof}

\section{Example}

On time scale $\mathbb{T}=\bigcup_{k=-\infty}^\infty[k\pi, k\pi+\frac{\pi}{2}]$,
consider the neutral dynamical equation,  with period $\omega=\pi$,
\begin{equation} \label{e4.1}
\begin{gathered}
x^{\Delta}(t)= -A(t)x^\sigma(t)+g^\Delta(t,x(t-h(t)))+f(t,x(t),x(t-h(t))),\quad
t\neq  t_j,\; t\in \mathbb{T},\\
x(t_j^+)= x(t_j^-)+I_j(x(t_j)), \quad j\in \mathbb{Z}^+,
\end{gathered}
\end{equation}
where
\begin{gather*}
A(t)=\begin{pmatrix}
0.001\sin 2t & 0\\
0 & 0.003\cos 2t
\end{pmatrix},\quad
g_1(t,u)=g_2(t,u)=0.0002|\sin t|\cos u,
\\
f_1(t,u,v)=0.0002|\sin t|(\sin u+\cos v),\quad
f_2(t,u,v)=0.0003|\cos t|(\sin u+\cos v),
\\
I_j^1(u)=I_j^2(u)=0.0009 \begin{pmatrix}
u\\
u
\end{pmatrix}, \quad j\in \mathbb{Z}^+,\\
t_1=\frac{\pi}{6},\quad  t_2=\frac{\pi}{4},\quad
 t_{j+2}=t_j+\pi,\quad j\in \mathbb{Z}^+.
\end{gather*}
By simple calculation, we have $L=0.0002$, $M=0.0003$, $N=0.0003$,
$P=0.0009$, $\alpha_f=0.0003$, $\alpha_g=0.0002$, $\alpha=0$,
$\|A\|=0.003$. For $t\in\mathbb{T}$, if $t\neq k\pi+\frac{\pi}{2}$, we have
$\mu(t)=0$ and if $t=k\pi+\frac{\pi}{2}$, we have $\mu(t)=\pi$.

When $\mu(t)=0$, we have $\ominus a_i=-a_i$, then
\[
e_{\ominus a_i}(\omega,0)=e_{-a_i}(\pi, 0)\leq
e_{0.003}(\pi,0),\quad i=1,2.
\]
When $\mu(t)=\pi$, we have $\ominus a_i=-\frac{a_i}{1+\pi a_i}$,
then
\[
e_{\ominus a_i}(\omega,0)\leq e_{0.003}(\pi,0),\quad i=1,2
\]
and $\overline{\eta}=\frac{1}{0.0094}$, $\gamma\simeq 0.003$. It is easy to
show that all conditions in Theorem \ref{thm3.1} and Corollary \ref{coro3.1} are
satisfied. Therefore, \eqref{e4.1} has a $\pi$-periodic solution.

\begin{thebibliography}{00}

\bibitem{b1} M. Bohner, A. Peterson;
\emph{Dynamic Equations on Time Scales, An Introduction with
Applications},  Birkh\"auser, Boston, 2001.

\bibitem{b2} M. Bohner, A. Peterson;
\emph{Advances in Dynamic Equations on Time Scales},  Birkh\"auser, Boston, 2003.

\bibitem{k1} E. R. Kaufmann, Y. N. Raffoul;
\emph{Periodic solutions for a neutral nonlinear dynamical
 equation on a time scale}, J. Math. Anal. Appl. 319 (2006), 315--325.

\bibitem{l1} Y. K. Li, Z. W. Xing;
\emph{Existence and global exponential stability of periodic solution of CNNS
with impulses}, Chaos, Solitons and Fractals 33 (2007), 1686--1693.

\bibitem{l2} X. Li, X. Lin, D. Jiang, X. Zhang;
\emph{Existence and multiplicity of positive periodic solutions
to functional differential equations with impulse effects}, Nonlinear
Anal. 62 (2005), 683--701.

\bibitem{l3} V. Lakshmikantham, S. Sivasundaram, B. Kaymarkcalan;
\emph{Dynamic Systems on Measure Chains},
 Kluwer Academic Publishers, Dordrecht, 1996.

\bibitem{s1} D. R. Smart;
\emph{Fixed Point Theorems}, Cambridge Univ. Press, Cambridge, UK, 1980.

\bibitem{z1} H. Zhang, Y. K. Li;
\emph{Existence of positive solutions for
functional differential equations with impulse effects on time
scales}, Commun. Nonlinear Sci. Numer. Simulat. 14 (2009), 19--26.

\bibitem{z2} N. Zhang, B. Dai, X. Qian;
\emph{Periodic solutions for a class of higher-dimension functional
differential equations with impulses}, Nonlinear Anal. 68 (2008),
629--638.

\bibitem{z3} X. Zhang, J. Yan, A. Zhao;
\emph{Existence of positive periodic solutions for an impulsive
differential equation}, Nonlinear Anal. 68 (2008), 3209--3216.

\end{thebibliography}

\end{document}

