\documentclass[reqno]{amsart}
\usepackage{hyperref}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2015 (2015), No. 285, pp. 1--23.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu}
\thanks{\copyright 2015 Texas State University.}
\vspace{9mm}}

\begin{document}
\title[\hfilneg EJDE-2015/285\hfil 
 Exponential $P$-stability]
{Exponential $P$-stability of stochastic $\nabla$-dynamic equations on
disconnected sets}

\author[H. D. Nguyen, T. D. Nguyen,  A. T. Le \hfil EJDE-2015/285\hfilneg]
{Huu Du Nguyen, Thanh Dieu Nguyen, Anh Tuan Le}

\address{Huu Du Nguyen  \newline
Faculty of Mathematics, Mechanics, and Informatics,
University of Science-VNU,
334  Nguyen Trai, Thanh Xuan, Hanoi, Vietnam}
\email{dunh@vnu.edu.vn}

\address{Thanh Dieu Nguyen   \newline
Department of Mathematics, Vinh University, 182 Le Duan, Vinh, Nghe An, Vietnam}
\email{dieunguyen2008@gmail.com}

\address{Anh Tuan Le \newline
Faculty of Fundamental Science, Hanoi University of Industry,
 Tu Liem district, Ha Noi, Vietnam}
\email{tuansl83@yahoo.com}

\thanks{Submitted April 4, 2013. Published November 11, 2015.}
\subjclass[2010]{60H10, 34A40, 34D20, 39A13, 34N05}
\keywords{Differential operator;  dynamic equation; exponential stability;
\hfill\break\indent  It\^{o}'s formula; Lyapunov function}

\begin{abstract}
 The aim of this article is to consider the existence of solutions, 
 finiteness of moments, and exponential $p$-stability of stochastic 
 $\nabla$-dynamic  equations on an arbitrary closed subset of $\mathbb{R}$,
 via Lyapunov functions. This work can be considered  as a unification 
 and generalization of works dealing  with random difference and 
 stochastic differential equations.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{remark}[theorem]{Remark}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{example}[theorem]{Example}
\allowdisplaybreaks

\section{Introduction}

The direct method  has become the most widely used tool for studying the exponential
stability of stochastic  equations. For differential equations, we mention
the very interesting book by Khas'minskii \cite{Has}
in which the author uses Lyapunov functions to study stability.
 Foss and  Konstantopoulos \cite{FK} presented an overview of stochastic
stability methods, mostly motivated by stochastic network applications.
Socha \cite {So} considered the exponential $p$-stability of singularly
perturbed stochastic systems for the ``slow" and ``fast" components of the
full-order system.  Govindan \cite{Go}  proved the existence and
uniqueness of a mild solution under two sets of hypotheses and  considered
the exponential second moment stability of the solution process for
stochastic semilinear functional differential equations in a Hilbert space.
We also refer to \cite{Mao,Mao1}, in which the authors considered stochastic
asymptotic stability and boundedness  for stochastic differential equations
 with respect to semimartingale via multiple Lyapunov functions.
The long-time behavior of densities of the solutions is studied in  \cite{Ru}
 by using Khas'minskii function.
For random difference systems, we can refer the reader to \cite{Pa,Sh,Sh1}, for
stability of nonlinear systems.

Recently, a method for the unified analysis of equations of
motion in continuous and discrete cases within the framework of the theory of
time scales has drawn a lot of attention. For deterministic cases, in \cite{Da},
the author used a Lyapunov function of quadratic form to study the stability of
linear dynamic equations.  Hoffacker and  Tisdell examined the stability
and instability of the equilibrium point of nonlinear dynamic equations
\cite{HT}.  Martynyuk   presented systematically the stability theory of dynamic
 equations in \cite{Mar}.

While the stability of deterministic dynamic equations on time scales has been
investigated for a long time,   as far as we know,  there is not much in
mathematical literature for the stochastic case, and no work dealing with
the stability of  stochastic dynamic equations. Here, we mention some of the
first attempts in this direction.
In \cite{GS}, the authors developed the theory of Brownian motion.
 Sanyal, in his Ph.D. dissertation \cite{San}, tried to define
\emph{stochastic integrals and stochastic dynamic equations} on time
scales with positive graininess.  Lungan and  Lupulescu in \cite{LL}
consider random dynamical systems with random $\Delta$-integral.
Gravagne and Robert deal with the bilateral Laplace transforms in \cite{GR}.
The Doob-Meyer decomposition theorem and the definition of the stochastic
$\nabla$-integral with respect to a square integrable martingale on an
arbitrary time scale, as well as It\^{o}'s formula, are studied in \cite{NHDu1,NHDu2}.
 Recently,  Bohner et al \cite{Bo} investigate stochastic dynamic equations
on time scale by considering an integral with respect to the restriction
of a standard Wiener process on time scales. However, this approach cannot be
applied to define the stochastic integral in the general case since, when one
deals with a martingale defined on a time scale, we do not know whether
it can be extended to a regular martingale on $\mathbb{R}$.

The aim of this article is to use Lyapunov functions to consider the existence,
 finiteness of moments, and long term behavior  of solutions for
$\nabla$-stochastic dynamic equations on arbitrary closed subset of $\mathbb{R}$.
We study
\begin{gather*}
d^\nabla X(t)=f(t, X(t_-))d^\nabla  t+g(t,X(t_-))d^\nabla M(t)\\
X(a)=x_a\in\mathbb{R}^d, \quad  t\in\mathbb{T}_a,
 \end{gather*}
where $(M_t)_{t\in \mathbb{T}_a}$  is an $\mathbb{R}$-valued  square integrable
martingale and $f: \mathbb{T}_a\times\mathbb{R}\to\mathbb{R}$ and
  $g:\mathbb{T}_a\times\mathbb{R}\to\mathbb{R}$ are two Borel
functions. We emphasize that the martingale $M$ is defined only on $\mathbb{T}_a$.
This work can be considered as a unification and generalization of works dealing
 with these areas of stochastic difference  and  differential equations.

In working on stochastic multi-dimensional dynamic equations with respect to
discontinuous martingales on time scales, many difficulties arise,
especially in the complicated calculations, and they require some improvements.
Besides, some estimates of stochastic calculus for continuous time
are not automatically valid on  arbitrary time scale and we need to change
them into a suitable  form to obtain similar results.

The organization of this paper is as follows. We introduce some basic
notion and definitions for time scale and for square integrable martingales
in Section \ref{sec2}.  Section \ref{S1} deals with the existence and the
finiteness of moments of solutions for  stochastic dynamic equations with
respect to a square integrable martingale in case  the coefficients satisfy
locally Lipschitz conditions. Section \ref{S2} is concerned with
 necessary and sufficient conditions  for the exponential $p$-stability of
stochastic dynamic equations.

\section{Preliminaries} \label{sec2}

Let $\mathbb{T}$ be a closed subset of $\mathbb{R}$, enclosed with the topology
inherited from  the standard topology on $\mathbb{R}$.
 Let $\sigma(t)=\inf\{s\in\mathbb{T}: s>t\}, \mu(t)=\sigma(t)-t$  and
 $\rho(t)=\sup\{s\in\mathbb{T}: s<t\}, \nu(t)=t-\rho(t)$ (supplemented by
$\sup\emptyset=\inf\mathbb{T}, \inf\emptyset=\sup\mathbb{T}$).
A point $ t\in\mathbb{T}$ is said to be {\it right-dense} if $\sigma(t)=t$,
{\it right-scattered} if $\sigma(t)>t$, {\it left-dense} if $\rho(t)=t$,
{\it left-scattered} if $\rho(t)<t$ and {\it isolated} if $t$ is simultaneously
right-scattered and left-scattered.
The set $_k\mathbb{T}$ is defined to be $\mathbb{T}$ if $\mathbb{T}$
does not have a right-scattered minimum; otherwise it is $\mathbb{T}$ without
this right-scattered minimum.
Similarly,  $\mathbb{T}^k$ is defined to be $\mathbb{T}$ if $\mathbb{T}$ does
not have a left-scattered maximum; otherwise it is $\mathbb{T}$ without this
left-scattered maximum.
A function $f$ defined on $\mathbb T$  is {\it regulated} if there exist the
left-sided limit at every left-dense point and right-sided limit at every
right-dense point. A regulated function is called {\it $ld$-continuous} if
it is continuous at every left-dense point. Similarly, one has the notion
of $rd$-continuous.
For every $a,b\in\mathbb{T}$, by $[a,b]$, we mean the set
$\{t\in\mathbb{T}: a\leq t\leq b\}$.
Denote $\mathbb{T}_a=\{t\in \mathbb{T}: t\geq a\}$ and by
$\mathcal{R}$ (resp.\ $\mathcal{R}^+$)
the set of all
$rd$-continuous and  regressive (resp. positive regressive)  functions.
For any function  $f$ defined on $\mathbb{T}$, we
write  $f^{\rho}$ for the function
$f\circ\rho$; i.e., $f^{\rho}_t = f(\rho(t))$ for all
$t \in _k\mathbb{T}$ and  $\lim_{\sigma(s)\uparrow t}f(s)$ by $f({t_-})$
or $f_{t_-}$ if this limit exists. It is easy to see that if $t$ is
left-scattered then $f_{t_-}=f^\rho_t$.
 Let
   \begin{equation*}
   \mathbb{I}=\{ t: \text{$t$ is left-scattered}\}.
\end{equation*}
Clearly, the set $\mathbb{I}$ of all left-scattered points of $\mathbb{T}$
is at most countable.

Throughout this paper, we suppose that the time scale $\mathbb{T}$ has
bounded graininess, that is $\nu_*=\sup\{\nu(t):t\in  _k\mathbb{T}\}<\infty$.

Let $A$ be an increasing right continuous function defined on $\mathbb{T}$.
We denote by $\mu_{\nabla}^A$ the Lebesgue $\nabla$-measure  associated with  $A$.
For any $\mu_\nabla^A$-measurable function $f: \mathbb{T}\to \mathbb{R}$ we
write $\int_a^tf_\tau\nabla A_\tau$ for the  integral of $f$  with respect to
the measures $\mu_{\nabla}^A$  on $(a,t]$. It is seen  that the function
$t\to \int_a^tf_\tau\nabla A_\tau$ is cadlag. It is continuous if $A$ is continuous.
In case $A(t)\equiv t$ we write simply $\int_a^tf_\tau\nabla \tau$ for
$\int_a^tf_\tau\nabla A_\tau$.
For  details, we can refer to \cite{Den}.

In general, there is no relation between the $\Delta$-integral and $\nabla$-integral.
However, in case the integrand $f$ is regulated  one has
\begin{equation*}
\int_a^bf(\tau_-)\nabla \tau
=\int_a^bf(\tau)\Delta \tau \quad \forall  a, b\in\mathbb{T}^k.
\end{equation*}
Indeed, by \cite[Theorem 6.5]{Den},
\begin{align*}
\int_a^bf(\tau)\Delta \tau&=\int_{[a;b)} f(\tau)d\tau+\sum_{a\leq s<b} f(s)\mu(s)\\
&=\int_{(a,b]} f(\tau_-)d\tau+\sum_{a<s\leq b} f(s_-)\nu(s)
=\int_a^bf(\tau_-)\nabla \tau.
\end{align*}
Therefore, if $p\in\mathcal{R}$ then the exponential function $e_p(t, t_0)$,
defined by \cite[Definition 2.30, p. 59]{Pet}, is the solution of the initial
value problem
\begin{equation}\label{e1.1}
 y^\nabla(t) =p(t_-)y(t_-),\;\;
y(t_0)=1, \;\;t>t_0.
\end{equation}
Also if $p\in\mathcal{R}$,  $e_{\ominus p}(t, t_0)$ is the solution of
the equation
\begin{equation*}
 y^\nabla(t) =-p(t_-)y(t),\quad y(t_0)=1, \quad t>t_0,
\end{equation*}
where $\ominus p(t)=\frac{-p(t)}{1+\mu(t)p(t)}$.
Later, we need the following lemma.

\begin{lemma}[\cite{Pet,NHDu2}] \label{lem1.1}
Let $u(t)$ be a regulated  function and $ u_ a, \alpha\in\mathbb{R}_+$.
Then, the inequality
$$
u(t)\leq u_a+\alpha\int_{a}^t u(\tau_-)\nabla \tau\quad \forall  t\in\mathbb{T}_a
$$
implies
$$
u(t)\leq u_a{e}_{\alpha}(t,a)\quad \forall t\in\mathbb{T}_a.
$$
\end{lemma}

Let $(\Omega, \mathcal{F},\{\mathcal{F}_t\}_{t\in\mathbb{T}_a}, \mathbb{P})$
be a probability space  with filtration $\{\mathcal{F}_t\}_{t\in\mathbb{T}_a}$
satisfying the usual conditions (i.e., $\{\mathcal{F}_t\}_{t\in\mathbb{T}_a}$
is increasing and $\cap\{ \mathcal{F}_{\rho(s)}:s\in \mathbb{T}, s>t\}=\mathcal{F}_t$
for all $t\in \mathbb{T}_a$ while $\mathcal{F}_a$ contains all $P$-null sets).
Denote  by $\mathcal{M}_2$ the set of the square integrable
$\mathcal{F}_t$-martingales and by $\mathcal{M}_2^r$ the subspace of the space
$\mathcal{M}_2$ consisting of martingales with continuous characteristics.
For any   $M\in\mathcal{M}_2$, set
\begin{equation*}
\widehat{M}_t=M_t-\sum_{s\in (a, t]}(M_s-M_{\rho(s)}).
\end{equation*}
It is clear  that  $\widehat M_t $ is an $\mathcal{F}_t$-martingale and
$\widehat{M}_t=\widehat{M}_{\rho(t)}$ for any $t\in\mathbb{T}$. Further,
\begin{equation}\label{e1.2}
\langle \widehat M\rangle_t=\langle M\rangle_t-\sum_{ s\in (a, t]}
(\langle M\rangle_s-\langle M\rangle_{\rho(s)}).
\end{equation}
Therefore, $M\in\mathcal{M}_2^r$ if and only if $\widehat{M}\in\mathcal{M}_2^r$.
In this case, $\widehat{M}$ can be extended to a regular martingale defined on
$\mathbb{R}$.

Denote by $\mathfrak{B}$ the class of Borel sets in $\mathbb{R}$ whose closure
does not contain the point $0$. Let $\delta(t,A)$ be the number of jumps
of the martingale $M$ on the interval $(a,t]$ whose values fall into the set $A\in\mathfrak{B}$.
Since the sample functions of the martingale $M$ are cadlag, the process
$\delta(t, A)$ is defined  with probability $1$ for all
$t\in\mathbb{T}_a, A\in\mathfrak{B}$. We extend its definition over the whole
$\Omega$ by setting $\delta(t, A)\equiv 0$ if the sample $t\to M_t(\omega)$
is not cadlag. Clearly the process $\delta(t, A)$ is $\mathcal{F}_t$-adapted
and its sample functions are nonnegative, monotonically nondecreasing, continuous
from the right and take on integer values.
We also define  $\widehat{\delta}(t,A)$ for $\widehat{M}_t$ in a similar way.
Let $\widetilde \delta(t,A)=\sharp \{s\in (a, t]: M_s-M_{\rho(s)}\in A\}$.
It is evident that
\begin{equation}\label{e1.3b}
\delta(t,A)=\widehat \delta(t,A)+\widetilde \delta(t,A).
\end{equation}
Further, for fixed $t$,  $\delta(t,\cdot),\widehat \delta(t,\cdot)$ and
$\widetilde \delta(t,\cdot)$ are measures.

 The functions $\delta(t, A), \widehat \delta(t,A)$ and
$\widetilde \delta(t,A), t\in\mathbb{T}_a$ are   $\mathcal{F}_t$-regular
 submartingales for fixed $A$. By Doob-Meyer decomposition, each process
has a unique representation of the form
\begin{gather*}
\delta (t, A)=\zeta(t, A)+\pi(t, A),\quad
\widehat\delta (t, A)=\widehat\zeta(t, A)+\widehat\pi(t, A),\\
\widetilde\delta (t, A)=\widetilde\zeta(t, A)+\widetilde\pi(t, A),
\end{gather*}
where $\pi(t, A), \widehat\pi(t, A)$ and $ \widetilde\pi(t, A)$ are natural
increasing integrable processes  and $\zeta(t, A), \widehat\zeta(t, A)$,
$\widetilde\zeta(t, A)$ are martingales.
We find a version  of these processes such that they are measures when
$t$ is fixed.
By denoting
\begin{equation*}
\widehat{M}^c_t=\widehat{M}_t-\widehat{M}^d_t,
\end{equation*}
where
\begin{equation*}
\widehat{M}_t^d=\int_a^t\int_{\mathbb{R}}u\widehat\zeta(\nabla \tau, du),
\end{equation*}
we obtain
\begin{equation}\label{e1.3}
\langle \widehat{M}\rangle_t
=\langle \widehat{M}^c\rangle_t+\langle \widehat{M}^d\rangle_t, \quad
\langle \widehat{M}^d\rangle_t
=\int_a^t\int_{\mathbb{R}}u^2\widehat\pi(\nabla \tau, du).
\end{equation}
Throughout this article, we suppose that $\langle M\rangle_t$
is absolutely continuous with respect to Lebesgue measure $\mu_\nabla$,
i.e., there exists $\mathcal{F}_t$-adapted progressively measurable process
$K_t$ such that
\begin{equation}\label{e1.4}
\langle M\rangle_t=\int_{a}^tK_\tau\nabla \tau.
\end{equation}
Further, for any $T\in \mathbb{T}_a$,
\begin{equation}\label{e1.5}
\mathbb{P}\{\sup_{a\leq t\leq T}|K_t|\leq N\}=1,
\end{equation}
where $N$ is a constant (possibly depending on $T$).

The relations \eqref{e1.2}, \eqref{e1.3}
 imply that $\langle\widehat{M}^c\rangle_t$ and $\langle \widehat{M}^d\rangle_t$ are
absolutely continuous with respect to $\mu_\nabla$ on $\mathbb{T}$.
Thus,  there exist $\mathcal{F}_t$-adapted, progressively measurable bounded
processes $\widehat{K}^c_t$ and $\widehat{K}^d_t$ satisfying
\begin{equation*}
\langle\widehat{M}^c\rangle_t=\int_a^t\widehat{K}^c_\tau\nabla\tau,\quad
\langle\widehat{M}^d\rangle_t=\int_a^t\widehat{K}^d_\tau\nabla\tau,
\end{equation*}
and the following relation holds
\begin{equation*}
\mathbb{P}\{\sup_{a\leq t\leq T}(\widehat{K}_t^c+\widehat{K}^d_t)\leq {N}\}=1.
\end{equation*}
 Moreover, it is easy to show that  $\widehat{\pi}(t, A)$ is absolutely continuous
with respect to $\mu_\nabla$ on $\mathbb{T}$,
 that is, it can be expressed as
 \begin{equation}\label{e1.6}
 \widehat{\pi}(t, A)=\int_a^t\widehat{\Upsilon}(\tau, A)\nabla\tau,
\end{equation}
with an $\mathcal{F}_t$-adapted, progressively measurable process
$\widehat{\Upsilon}(t, A)$.
Since $\mathfrak{B}$ is generated by a countable family of Borel sets,
we can find a version of $\widehat{\Upsilon}(t, A)$ such that the map
$t\to \widehat{\Upsilon}(t, A)$ is measurable and for $t$ fixed,
$\widehat{\Upsilon}(t, \cdot)$ is a measure.
Hence, from \eqref{e1.3} we see that
$$
\langle\widehat{M}^d\rangle_t
=\int_a^t\int_{\mathbb{R}}u^2\widehat{\Upsilon}(\tau, du)\nabla\tau.
$$
This means that
$$
\widehat{K}^d_t=\int_{\mathbb{R}}u^2\widehat{\Upsilon}(t, du).
$$
For the process  $\widetilde\pi(t, A)$ we can write
\begin{equation*}
\widetilde\pi(t, A)
=\sum_{s\in (a, t]}\mathbb{E} [1_A(M_s-M_{\rho(s)})\big |\mathcal{F}_{\rho(s)}].
\end{equation*}
Putting
\[
\widetilde{\Upsilon}(t, A)
=\begin{cases}
\frac{\mathbb{E} [1_A(M_t-M_{\rho(t)})
|\mathcal{F}_{\rho(t)}]}{\nu(t)} &\text{if }\nu(t)>0,\\
0 &\text{if } \nu(t)=0
\end{cases}
\]
yields
\begin{equation}\label{e1.7}
\widetilde\pi(t, A)=\int_a^t \widetilde {\Upsilon}(\tau, A)\nabla \tau.
\end{equation}
Further, by the definition if $\nu(t)>0$ we have
\begin{equation}\label{bs}
\int_\mathbb{R} u{\widetilde\Upsilon}(t, du)=\frac{
\mathbb{E} [M_t-M_{\rho(t)} \big|\mathcal{F}_{\rho(t)}]}{\nu(t)}=0,
\end{equation}
and
$$
\int_\mathbb{R} u^2{\widetilde\Upsilon}(t, du)=\frac{
\mathbb{E} [(M_t-M_{\rho(t)})^2 \big|\mathcal{F}_{\rho(t)}]}{\nu(t)}
=\frac{\langle M\rangle_t-\langle M\rangle_{\rho(t)}}{\nu(t)}.
$$
Let  ${\Upsilon}(t, A)=\widehat {\Upsilon}(t, A)+\widetilde{\Upsilon}(t, A)$.
From \eqref{e1.3b} we see that
$$
\pi(t, A)=\int_a^t{\Upsilon}(\tau, A)\nabla\tau.
$$
Denote by $\mathcal{L}_1^{\rm loc}(\mathbb{T}_a, \mathbb{R})$ the family of
real valued, $\mathcal{F}_t$-progressively measurable  processes $f(t)$ with
$\int_a^T|f(\tau)|\nabla\tau<+\infty$\; a.s. for every $T>a$ and
by $\mathcal{L}_2(\mathbb{T}_a; M)$ the space of all real valued,
$\mathcal{F}_t$-predictable processes $\phi(t)$ satisfying
$\mathbb{E}\int_a^T\phi^2(\tau)\nabla\langle M\rangle_\tau<\infty$, for any $T>a$.
Consider a $d$-tuple of semimartingales $X(t) = ( X _1(t) , \dots, X_d(t))$
defined by
$$
X_i(t)=X_i(a)+\int_a^t f_i(\tau)\,\nabla \tau + \int_a^t g_i(\tau)\nabla M_\tau,
$$
where $f_i\in \mathcal{L}_1^{\rm loc}(\mathbb{T}_a, \mathbb{R})$ and
$g_i\in\mathcal{L}_2(\mathbb{T}_a;M)$ for $i=\overline{1,d}$.
For any twice differentiable function $V$, put
\begin{equation}\label{e1.8}
\begin{aligned}
&\mathcal{A}V(t,x)\\
&=\sum_{i=1}^d \frac{\partial V(t,x)}{\partial x_i}(1-1_{\mathbb{I}}(t))f_i(t)
+\Big(V(t, x+f(t)\nu(t))-V(t,x)\Big)\Phi(t)\\
&\quad +\frac{1}{2}\sum_{i,j} \frac{\partial^2 V(t,x)}{\partial x_ix_j}g_i(t)g_j(t)
 \widehat{K}^c_t
- \sum_{i=1}^d \frac{\partial V(t,x)}{\partial x_i}g_i(t)
 \int_\mathbb{R} u\widehat{\Upsilon}(t, du)\\
&\quad +\int_{\mathbb{R}}(V\big(t,x+f(t)\nu(t)+g(t)u\big)
 -V(t, x+f(t)\nu(t))){\Upsilon}(t, du),
\end{aligned}
\end{equation}
with $f=(f_1, f_2,\dots, f_d)$; $ g=(g_1, g_2,\dots, g_d)$
and
$$
\Phi(t)=\begin{cases}
0&\text{if $t$ is left-dense}\\
1/\nu(t) &\text{if $t$ is left-scattered}.
\end{cases}
$$
 Let $C^{1,2}(\mathbb{T}_a\times \mathbb{R}^d; \mathbb{R})$ be the set of
 all functions $V(t, x)$ defined on $\mathbb{T}_a\times \mathbb{R}^d$,
having continuous $\nabla$-derivative in $t$ and continuous second derivative
in $x$. Using the It\^{o}'s formula in  \cite{NHDu2} we see that
for any $V\in C^{1,2}(\mathbb{T}_a\times \mathbb{R}^d; \mathbb{R}_+)$
\begin{equation}\label{e1.9}
V(t,X(t))-V(a, X(a))
-\int_a^t\Big(V^{\nabla _\tau}(\tau,X({\tau_-}))+\mathcal{A}V(\tau,X({\tau_-}))
\Big)\nabla\tau
\end{equation}
is a locally integrable  martingale, where $V^{\nabla _t}$ is partial
$\nabla$-derivative of $V(t,x)$ in $t$.

\section{Existence of solutions and finiteness of moments for stochastic
dynamic equations} \label{S1}

Consider a {$\nabla$-stochastic dynamic equations on $\mathbb{T}$} of the form
\begin{equation}\label{e1.10}
\begin{gathered}
d^\nabla X(t)=f(t, X(t_-))d^\nabla  t+g(t,X(t_-))d^\nabla M(t)
\\
X(a)=x_a\in\mathbb{R}^d, \quad  t\in\mathbb{T}_a,
\end{gathered}
 \end{equation}
where $f: \mathbb{T}_a\times\mathbb{R}^d\to\mathbb{R}^d$ and
$g:\mathbb{T}_a\times\mathbb{R}^d\to\mathbb{R}^d$ are two Borel   functions.
Under the global Lipschitz and linear growth rate conditions of the coefficients
 $f, g$, there exists a unique solution of the Cauchy problem \eqref{e1.10}
(see \cite{NHDu2}).
We now consider the case where the coefficients are  locally Lipschitz.

\begin{theorem}\label{thm1.2}
Suppose that for any $k>0$ and $T>a$,
there exists a constant $L_{T,k}>0$ such that
\begin{equation}\label{e1.11}
\|f(t,x)-f(t,y)\|^2\vee \|g(t,x)-g(t,y)\|^2\leq L_{T,k}\|x-y\|^2,
\end{equation}
 for all $x,y\in\mathbb{R}^d$ with $\|x\|\vee\|y\|\leq k$  and $t\in[a, T]$.
Further, there are   positive constants $c=c(T); b=b(T)$
and a nonnegative  function $V\in  C^{1,2}([a, T]\times \mathbb{R}^d; \mathbb{R}_+)$
  satisfying
\begin{equation}\label{e1.12}
V^{\nabla_t}(t,x)+\mathcal{A}V(t,x)\leq {c}V(t_-,x)+b \quad
 \forall (t,x)\in[a, T]\times\mathbb{R}^d,
\end{equation}
and $\lim_{x\to\infty}\inf_{t\in[a, T]} V(t,x)=\infty$.
Then,  \eqref{e1.10} has a unique solution $X_{a,x_a}(t)$ defined on $\mathbb{T}_a$.
In  addition, if there exists a positive constant  $c_1=c_1(T)$ such that
\begin{equation}\label{e1.13}
c_1\|x\|^p\leq V(t, x)\quad \forall (t,x)\in [a, T]\times\mathbb{R}^d,
\end{equation}
then
$$
\mathbb{E} \|X_{a,x_a}(t)\|^p\leq \frac{1}{c_1}(V(a,x_a)
+\frac{b}{c})e_{c}(t,a)\quad\forall  t\in [a, T].
$$
\end{theorem}

\begin{proof}
For each $k\geq k_0=[\|x_a\|]+1$, define the truncation function
$$
f_k(t,x)=\begin{cases}
f(t,x) &\text{if }\|x\|\leq k\\
f(t, \frac{kx}{\|x\|}) &\text{if }\|x\|> k,
\end{cases}
$$
and $g_k(t,x)$ is defined in a similar way.
The functions $f_k$ and $g_k$ satisfy the global Lipschitz condition
and the linear growth rate  condition. Hence, by \cite[Theorem 3.2]{NHDu2}
there exists a unique solution $X_k(\cdot)$ to the equation
\begin{equation}\label{e1.14}
\begin{gathered}
d^\nabla X(t)=f_k(t, X(t_-))d^\nabla  t+g_k(t,X(t_-))d^\nabla M(t)\\
X(a)=x_a\in\mathbb{R}^d,  \;\forall   t\in[a, T].
\end{gathered}
 \end{equation}
Define the stopping time
$$
\theta_k= \inf\{t\in[a, T]: |X_k(t)|\geq k\}, \quad \theta_{k_0}=a.
$$
It is easy to see that $\theta_k$ is increasing and
\begin{equation}\label{e1.15}
X_k(t)=X_{k+1}(t)\quad \text{if } a\leq  t\leq \theta_k.
\end{equation}
Let $\theta_\infty=\lim_{k\to\infty}\theta_k$ and the process
$X_{a,x_a}(t)=X(t)$, $a\leq  t\leq \theta_\infty$ be given by
$$
X(t)=X_k(t),\quad \theta_{k-1}\leq t< \theta_k, \quad  k\geq k_0.
$$
Using \eqref{e1.15} one gets $X(t\wedge\theta_k)=X_k(t\wedge\theta_k)$.
It follows from \eqref{e1.14} that
\begin{align*}
X(t\wedge\theta_k)
&=x_a+\int_a^{t\wedge\theta_k} f_k(\tau, X(\tau_-))
 \nabla\tau+\int_a^{t\wedge\theta_k}g_k(\tau, X(\tau_-)) \nabla M_\tau
\\
&=x_a+\int_a^{t\wedge\theta_k}f(\tau, X(\tau_-))\nabla\tau
 +\int_a^{t\wedge\theta_k}g(\tau, X(\tau_-))\nabla M_\tau,
\end{align*}
 for any $t\in [a, T]$ and $k\geq 1$.
We show that $\lim_{k\to\infty}\theta_k=T$ a.s. Indeed, from \eqref{e1.9} we obtain
\begin{align*}
\mathbb{E}[V(\theta_k\wedge t, X(\theta_k\wedge t))]
&=V(a,x_a) +\mathbb{E} \int_a^{t\wedge\theta_k}\Big({V^{\nabla_\tau}}(\tau,X({\tau_-}))
+\mathcal{A}V(\tau,X({\tau_-}))\Big)\nabla\tau
\\
&\leq V(a,x_a)+ \int_a^{t}( c\mathbb{E} V(\theta_k\wedge
 \tau_-,X({\theta_k\wedge\tau_-}))+b)\nabla\tau.
\end{align*}
Using Lemma \ref{lem1.1} with the function
 $u(t)=\mathbb{E}[V(\theta_k\wedge t, X(\theta_k\wedge t))]+\frac{b}c$ we get
\begin{equation*}
\mathbb{E} V(\theta_k\wedge t, X(\theta_k\wedge t))
\leq \big(V(a,x_a)+\frac{b}c\big)e_{c}(t,a).
\end{equation*}
On the other hand, on the set $\{\theta_\infty<T\}$ we have
$\limsup_{t\to \theta_\infty}\|X(t)\|=\infty$.
Therefore,  the assumption $\lim_{x\to\infty}\inf_{t\in[a, T]} V(t,x)=\infty$
implies $\mathbb{P}\{\theta_\infty<T\}=0$, i.e., the solution $X_{a,x_a}(t)$
is defined on $\mathbb{T}_a$.

The uniqueness follows immediately from the uniqueness of solutions of \eqref{e1.14}.
When the condition \eqref{e1.13} is satisfied we see that
$$
c_1\mathbb{E} \|X_{a,x_a}(t\wedge\theta_k)\|^p
\leq \mathbb{E}[V(t\wedge\theta_k, X_{a,x_a}(t\wedge\theta_k))]
\leq (V(a,x_a)+\frac{b}c)e_{c}(t,a).
$$
Letting $k\to\infty$ yields
$$
c_1\mathbb{E} \|X_{a,x_a}(t)\|^p\leq (V(a,x_a)+\frac{b}c)e_{c}(t,a)
$$
or
$$
\mathbb{E} \|X_{a,x_a}(t)\|^p\leq \frac{1}{c_1}(V(a,x_a)+\frac{b}c)e_{c}(t,a).
$$
The proof is complete.
\end{proof}

\begin{corollary} \label{coro3.2}
Suppose that the conditions \eqref{e1.4}; \eqref{e1.5} and  \eqref{e1.11}
hold and the linear growth condition
\begin{equation} \label{e1.16}
\|f(t,x)\|^2\vee \|g(t,x)\|^2\leq G(1+\|x\|^2)\quad  \forall
(t, x)\in [a, T]\times \mathbb{R}^d,
\end{equation}
 is satisfied. We suppose further that
 $\int_\mathbb{R} |u|\widehat\Upsilon(t,du)\leq m_1$ a.s., where $m_1$ is a constant.
Then \eqref{e1.10} has a unique solution $X_{a,x_a}(t)$ defined on $\mathbb{T}_a$
 satisfying
$$
\mathbb{E} \|X_{a,x_a}(t)\|^2\leq (1+\|x_a\|^2)e_{c}(t,a),
$$
where $c$ is a constant.
\end{corollary}

\begin{proof}
From \eqref{e1.3}, \eqref{e1.6},  it follows that
$\int_\mathbb{R} u^2\Upsilon(t,du)<N$ for all $t\in [a, T]$.
Using the Lyapunov function $V(t, x)=1+ \|x\|^2$ we get
\begin{align*}
&\mathcal{A}V(t,x)\\
&= 2(1-1_{\mathbb{I}}(t))x^T f(t,x) +\|g(t,x)\|^2\widehat K_t^c
\\
&\quad +(\|x+f(t,x)\nu(t)\|^2-\|x\|^2)\Phi(t)
 -2x^Tg(t,x)\int_\mathbb{R} u\widehat{\Upsilon}(t, du)
\\
&\quad +\int_{\mathbb{R}}(\|x+f(t,x)\nu(t)
 +g(t,x)u\|^2-\|x+f(t,x)\nu(t)\|^2){\Upsilon}(t, du)
\\
&=2x^T f(t,x)+2x^Tg(t,x)\int_\mathbb{R} u\widetilde{\Upsilon}(t, du)
 +2\nu(t)f(t,x)^Tg(t,x)\int_\mathbb{R} u\widehat{\Upsilon}(t, du)\\
&\quad +\|g(t,x)\|^2\widehat K_t^c +\|f(t,x)\|^2\nu(t)
+\|g(t,x)\|^2\int_{\mathbb{R}}u^2{\Upsilon}(t, du)
\\
&\leq (1+G(1+2N+2m_1\nu_*+\nu_*))(1+\|x\|^2)
= cV(x),
\end{align*}
where $c=1+G(1+2N+2m_1\nu_*+\nu_*)$.
Moreover, $\|x\|^2\leq1+\|x\|^2=V(x)$.
Thus,  \eqref{e1.12} and \eqref{e1.13} are satisfied. Using Theorem \ref{thm1.2}
we can complete the proof.
\end{proof}

We note that in the continuous case, if the linear growth rate condition
\eqref{e1.16} holds and the boundedness  conditions \eqref{e1.4},  \eqref{e1.5}
of characteristic $\langle M\rangle_t$ are satisfied,
then all moments of the solutions are finite.
This property may no longer be valid on time scales, as is shown in the following
example.

\begin{example}  \rm
Consider two random variables $\xi_1, \xi_2$ valued in $\mathbb{Z}\setminus\{0\}$
with
$$
\mathbb{P}\{\xi_1=\pm i\}=\frac k{|i|^5},\quad
\mathbb{P}[\xi_2= j\mid \xi_1= i]=C_i|j|^{-(4+\frac{1}{|i|})}.
$$
It is seen that
$8/3\geq\sum_{j\in \mathbb{Z}\setminus\{0\}}|j|^{-4}>C^{-1}_i
>\sum_{j\in \mathbb{Z}\setminus\{0\}}|j|^{-5}>1$ and
$\mathbb{E} [\xi_2\mid \xi_1]=0$. Therefore, the sequence
$M_1=\xi_1$ and $M_2=\xi_1+\xi_2$ is a martingale. Further,
\begin{align*}
 \mathbb{E} [\xi_2^2\mid \xi_1=i]
&=\sum_{j\in \mathbb{Z}\setminus\{0\}}  j^2\mathbb{P}[\xi_2= j\mid \xi_1= i]\\
&=C_i \sum_{j\in \mathbb{Z}\setminus\{0\}}\frac{j^2}{|j|^{4+\frac{1}{|i|}}}
\leq C_i  \sum_{j\in \mathbb{Z}\setminus\{0\}} \frac1{|j|^{2}}.
\end{align*}
Thus $\langle M\rangle_t$ is bounded. On the other hand,
\begin{align*}
\mathbb{E}  |\xi_2|^3
&=\sum_{i,j\in \mathbb{Z}\setminus\{0\}}|j|^3\mathbb{P}[\xi_2= j\mid \xi_1= i]
 \mathbb{P}\{\xi_1=i\}
\\
&=k \sum_{i,j\in \mathbb{Z}\setminus\{0\}}C_i\frac 1{|j|^{(1+\frac{1}{|i|})}|i|^5}\\
&\leq 4k\sum_{i\in \mathbb{Z}\setminus\{0\}}\frac 1{i^4}<\infty,
\end{align*}
which implies
$$
\mathbb{E} |M_1|^3< \infty,\; \mathbb{E} |M_2|^3
\leq 4 (\mathbb{E} \xi_1^3+\mathbb{E} \xi_2^3)<\infty.
$$
Consider the dynamic equation on the time scale $\mathbb{T}=\{1,2\}$
\begin{gather*}
d^\nabla X_{t}=-X_{t_-}d^{\nabla}t+X_{t_-}d^\nabla M_t \\
X_1=\xi_1.
\end{gather*}
This equation has a unique solution $X_1=\xi_1$ and $X_2=\xi_1\xi_2$. However,
\begin{align*}
\mathbb{E}  |X_2|^3
&=\mathbb{E}  |\xi_1\xi_2|^3
=\sum_{i,j\in \mathbb{Z}\setminus\{0\}}|ij|^3
\mathbb{P}[\xi_2= j\mid \xi_1= i]\mathbb{P}\{\xi_1=i\}
\\
&\geq \frac {3k}8\sum_{i,j\in \mathbb{Z}\setminus\{0\}}
 \frac 1{i^2|j|^{1+\frac{1}{|i|}}}\\
&\geq \frac {3k}8 \sum_{i\in \mathbb{Z}\setminus\{0\}}|i|\frac 1{i^2}=\infty.
\end{align*}
\end{example}

In the following we give conditions ensuring the finiteness of  $p$-moment of
the solution of \eqref{e1.10}.

\begin{theorem}\label{thm1.6}
Suppose that linear growth condition \eqref{e1.16}  and the conditions
 \eqref{e1.4}, \eqref{e1.5} hold. Further, there are two constants $m_1, m_p$
such that
\begin{equation}\label{e1.17}
\int_{\mathbb{R}}|u|{\widehat{\Upsilon}}(t, du)\leq m_1,\quad
\int_{\mathbb{R}}|u|^p{\Upsilon}(t, du)\leq m_p\quad \forall  t\in [a, T]
\end{equation}
almost surely. Then, the solution $X_{a,x_a}(t)$ of   \eqref{e1.10} starting
in $x_a$  satisfies the estimate
\begin{equation}\label{e1.18}
\mathbb{E}\|X_{a,x_a}(t)\|^p\leq (\|x_a\|^p+1)e_{H}(t,a),\quad a\leq  t\leq T
\end{equation}
where $H$ is a constant.
 \end{theorem}

\begin{proof}
Since $\int_{\mathbb{R}}|u|^2{\Upsilon}(t, du)=\langle M \rangle_t\leq N:=m_2$,
we can suppose that $p\geq 2$.  Applying  \eqref{e1.8} to the Lyapunov function
$V(t, x)=\|x\|^{p}$ we have
\begin{align*}
\mathcal{A}V(t,x)
&=p\|x\|^{p-2}(1-1_{\mathbb{I}}(t))x^Tf(t,x)
+(\|x+f(t,x)\nu(t)\|^{p}-\|x\|^{p})\Phi(t)
\\
&\quad +\frac{p}{2}\|x\|^{p-2}\|g(t,x)\|^2\widehat{K}^c_t+\frac{p(p-2)}{2}
\|x\|^{p-4}|x^Tg(t,x)|^2\widehat{K}^c_t
\\
&\quad +\int_{\mathbb{R}} [\|x+f(t,x)\nu(t)+g(t,x)u\|^{p}
 -\|x+f(t,x)\nu(t)\|^{p}]{\Upsilon}(t, du)
\\
&\quad -p\|x\|^{p-2}x^Tg(t,x)\int_{\mathbb{R}}u\widehat{\Upsilon}(t, du).
\end{align*}
Using Taylor's expansion for the function $\|x+y\|^{p}$ at $y=0$, we obtain
\begin{align*}
&\|x+f(t,x)\nu(t)\|^{p}-\|x\|^{p}\\
&=p\|x\|^{p-2}x^\top f(t,x)\nu(t)
+ \frac p2\|x+\theta f(t,x)\nu(t)\|^{p-2}\|f(t,x)\|^2\nu(t)^2\\
&\quad +\frac{p(p-2)}2\|x+\theta f(t,x)\nu(t)\|^{p-4}|(x+\theta f(t,x)\nu(t))^\top
f(t,x)|^2\nu(t)^2,
\end{align*}
where $0\leq\theta\leq 1$. It is seen that
\begin{align*}
\|x+\theta f(t,x)\nu(t)\|^{p-2}\|f(t,x)\|^2
&\leq (\|x\|+\|f(t,x)\nu(t)\|)^{p-2}\|f(t,x)\|^2
\\
&\leq (\sqrt{1+\|x\|^{2}}+\sqrt {G(1+\|x\|^2)}\nu_*)^{p-2}G(1+\|x\|^2)\\
&=G(1+\sqrt G \nu_*)^{p-2}(1+\|x\|^2)^{p/2},
\end{align*}
and
\begin{align*}
&\|x+\theta f(t,x)\nu(t)\|^{p-4}|(x+\theta f(t,x)\nu(t))^\top f(t,x)|^2
\\
&\leq \|x+\theta f(t,x)\nu(t)\|^{p-2}\| f(t,x)\|^2\\
&\leq G(1+\sqrt G \nu_*)^{p-2}(1+\|x\|^2)^{p/2}.
\end{align*}
Similarly, Taylor's expansion of the function
$\|x+f(t,x)\nu(t)+y\|^{p}$ at $y=0$ leads to
\begin{align*}
&\|x+f(t,x)\nu(t)+g(t,x)u\|^{p}-\|x+f(t,x)\nu(t)\|^{p}\\
&=p\|x+f(t,x)\nu(t)\|^{p-2}(x+f(t,x)\nu(t))^\top g(t,x)u\\
&\quad + \frac p2\|x+f(t,x)\nu(t)+\eta g(t,x)u\|^{p-2}\|g(t,x)u\|^2\\
&\quad +\frac{p(p-2)}2\|x+f(t,x)\nu(t)+\eta g(t,x)u\|^{p-4}\\
&\quad\times |(x+f(t,x)\nu(t) +\eta g(t,x)u)^\top g(t,x)u|^2,
\end{align*}
where $0\leq\eta\leq 1$. By defining
$c_p =2^{p-1}$ \text{  if } $p>1$ and $c_p=1$ \text{  if } $p\leq 1$
we  have
\begin{align*}
&\|x+f(t,x)\nu(t)\|^{p-2}(x+f(t,x)\nu(t))^\top g(t,x)u
\\
&\leq\|x+f(t,x)\nu(t)\|^{p-1} \|g(t,x)u\|\\
&\leq \sqrt G(1+\sqrt G \nu_*)^{p-1}|u|(1+\|x\|^2)^{p/2},
\end{align*}
and
\begin{align*}
&\|x+f(t,x)\nu(t)+\eta g(t,x)u\|^{p-2}\|g(t,x)u\|^2\\
&\leq c_{p-2}(\|x+f(t,x)\nu(t)\|^{p-2}+(\|g(t,x)\|u)^{p-2})\|g(t,x)u\|^2\\
&\leq c_{p-2}(G(1+\sqrt G \nu_*)^{p-2}u^2+G^{p/2}|u|^p)(1+\|x\|^2)^{p/2}.
\end{align*}
Further,
\begin{align*}
&\|x+f(t,x)\nu(t)+\eta g(t,x)u\|^{p-4}|(x+f(t,x)\nu(t)+\eta g(t,x)u)^\top g(t,x)u|^2\\
&\leq \|x+f(t,x)\nu(t)+\eta g(t,x)u\|^{p-2}\|g(t,x)u\|^2\\
&\leq c_{p-2}(G(1+\sqrt G \nu_*)^{p-2}u^2+G^{p/2}|u|^p)(1+\|x\|^2)^{p/2}.
\end{align*}
Therefore, by using \eqref{bs}, \eqref{e1.17} we obtain
\begin{align*}
&\mathcal{A}V(t,x)\\
&=p\|x\|^{p-2}(1-1_{\mathbb{I}}(t))x^Tf(t,x)\\
&\quad +(p\|x\|^{p-2}x^\top f(t,x)\nu(t)
+ \frac p2\|x+\theta f(t,x)\nu(t)\|^{p-2}\|f(t,x)\|^2\nu(t)^2\\
&\quad +\frac{p(p-2)}2\|x+\theta f(t,x)\nu(t)\|^{p-4}|(x+\theta f(t,x)\nu(t))^\top f(t,x)|^2\nu(t)^2)\Phi(t)
\\
&\quad +\frac{p}{2}\|x\|^{p-2}\|g(t,x)\|^2\widehat{K}^c_t+\frac{p(p-2)}{2}
\|x\|^{p-4}|x^Tg(t,x)|^2\widehat{K}^c_t
\\
&\quad +p \int_{\mathbb{R}}\|x+f(t,x)\nu(t)\|^{p-2}(x+f(t,x)\nu(t))^\top
 g(t,x)u{\widetilde{\Upsilon}}(t, du)\\
&\quad +p \int_{\mathbb{R}}\|x+f(t,x)\nu(t)\|^{p-2}(x+f(t,x)\nu(t))^\top
 g(t,x)u{\widehat{\Upsilon}}(t, du)\\
&\quad +\frac p2 \int_{\mathbb{R}}\|x+f(t,x)\nu(t)
 +\eta g(t,x)u\|^{p-2}\|g(t,x)u\|^2{\Upsilon}(t, du)\\
&\quad +\frac{p(p-2)}2\int_{\mathbb{R}}\|x+f(t,x)\nu(t)
 +\eta g(t,x)u\|^{p-4}|(x+f(t,x)\nu(t)\\
&\quad +\eta g(t,x)u)^\top g(t,x)u|^2{\Upsilon}(t, du)
-p\|x\|^{p-2}x^Tg(t,x)\int_{\mathbb{R}}u\widehat{\Upsilon}(t, du)
\\
&\leq\Big\{p\sqrt G (1+m_1)
+\frac{p(p-1)}{2}
G(N +(1+\sqrt G\nu_*)^{p-2}(\nu_*+c_{p-2}N))
\\
&\quad +p\sqrt G(1+\sqrt G\nu_*)^{p-1}m_1
+c_{p-2}\frac{p(p-1)}{2}G^{p/2}m_p\Big\}(1+\|x\|^2)^{p/2}\\
&\leq HV(x)
\end{align*}
 where $H$ is defined by
\begin{equation}\label{e2.11bs}
\begin{aligned}
H&= c_{p/2}\Big\{p\sqrt G (1+m_1)
+\frac{p(p-1)}{2}G\Big(N +(1+\sqrt G\nu_*)^{p-2}(\nu_*+c_{p-2}N)\Big)
\\
&\quad +p\sqrt G(1+\sqrt G\nu_*)^{p-1}m_1
+c_{p-2}\frac{p(p-1)}{2}G^{p/2}m_p\Big\}.
\end{aligned}
\end{equation}
 By  Theorem \ref{thm1.2}, we obtain
$$
\mathbb{E}\|X_{a,x_a}(t)\|^p\leq(\|x_a\|^p+1)e_{H}(t,a), \quad a\leq t\leq T.
$$
The proof is complete.
\end{proof}

\section{Exponential $p$-stability}\label{S2}

By \eqref{e1.1}, the $\Delta$-exponential function $e_p$ is also a solution of
a $\nabla$-dynamic equation. Therefore, in the following, instead of
using $\widehat e_p$, we use $e_p$ to define the exponential stability although
 we are working with  stochastic $\nabla$-dynamic equations.
Let the process $K_t$ be bounded on $\mathbb{T}_a$, i.e., the constant $N$
in \eqref{e1.4} does not depend on $T>a$.
 Suppose that for any  $s
\geq a; x_s\in \mathbb{R}^d$, the solution $X_{s, x_s}(t)$
 with initial condition $X_{s, x_s}(s)=x_s$ of \eqref{e1.10} exists uniquely
and it is defined on $\mathbb{T}_s$.  Further,
 \begin{equation}\label{e2.1}
 f(t, 0)\equiv 0; \quad  g(t, 0)\equiv 0.
\end{equation}
This assumption implies that  \eqref{e1.10} has the trivial solution
$X_{s, 0}(t)\equiv 0$.

\begin{definition} \rm
The trivial solution of \eqref{e1.10} is said to be exponentially $p$-stable
if there is a positive constant  $\alpha$ such that for any $s>a$ there
exists $\Gamma=\Gamma(s)>1$, such that
\begin{equation}\label{e2.2}
\mathbb{E}\|X_{s,x_s}(t)\|^p\leq \Gamma\|x_s\|^pe_{\ominus\alpha}(t, s)\quad
 \text{on } t\geq s,
\end{equation}
holds for all $x_s\in\mathbb{R}^d$.
\end{definition}

If one  can choose $\Gamma$ independent of $s$, the trivial solution of
\eqref{e1.10} is said to be uniformly exponentially  $p$-stable.

\begin{remark} \rm
Since  $\ominus \alpha(t) \leq -\frac{\alpha}{1+\alpha \nu_*}$ for all
$t\in \mathbb{T}$,
$0<e_{\ominus\alpha}(t,s)\leq e_{-\frac{\alpha}{1+\alpha \nu_*}}(t,s)$   and
$ e_{-\frac{\alpha}{1+\alpha \nu_*}}(t,s)\to 0 \text{ as } t\to\infty$.
Thus, if $\alpha>0$ then $\lim_{t\to\infty}e_{\ominus\alpha}(t,s)=0$.
The advantage of using $e_{\ominus\alpha}(t,s)$ is that the requirement
 $-\alpha \in \mathcal R^+$ is not necessary.
\end{remark}

\begin{theorem}\label{thm2.2}
Suppose that there exist  a function
$V(t,x)\in C^{1,2}(\mathbb{T}_a\times\mathbb{R}^d; \mathbb{R}_+)$,
positive constants $\alpha_1,\alpha_2, \alpha_3$  such that
  \begin{gather}\label{e2.3}
\alpha_1\|x\|^p\leq V(t, x)\leq \alpha_2\|x\|^p, \\
\label{e2.4}
{ V^{\nabla_t}(t,x)}+ \mathcal{A}V(t, x)\leq -\alpha_3V(t_-, x)\quad
\forall  (t,x)\in\mathbb{T}_a\times\mathbb{R}^d,
\end{gather}
where the differential operator $\mathcal{A}$ is defined with respect
to  \eqref{e1.10}.
 Then, the trivial solution $x\equiv 0$ of \eqref{e1.10} is uniformly
exponentially $p$-stable.
\end{theorem}

\begin{proof}
Let $\alpha$ be a positive number satisfying
$\frac \alpha{1+\alpha\nu(t)}<\alpha_3$ for all $t\in \mathbb{T}$ and let
$s\geq a, x_s\in \mathbb{R}^d$.  To simplify notations, we write $X(t)$
for $X_{s,x_s}(t)$. For each $n> \|x_s\|$, define the stopping time
$$
\theta_n=\inf\{t\geq s: \quad \|X(t)\|\geq n\}.
$$
Obviously, $\theta_n\to\infty$ as $n\to\infty$ almost surely.
By \eqref{e2.9}, calculating expectations
 we obtain
\begin{align*}
&\mathbb{E}[e_{\alpha}(t\wedge\theta_n, s)V(t\wedge\theta_n, X(t\wedge\theta_n))]\\
&=V(s,x_s) +\mathbb{E}\int_s^{t\wedge\theta_n}
 e_{\alpha}(\theta_n\wedge \tau_-, s)\Big[\alpha V(\tau_-, X(\tau_-))\\
&\quad +(1+\alpha\nu(\tau))(V^{\nabla_\tau}(\tau,X(\tau_-))
 + \mathcal{A}V(\tau,X(\tau_-)))\Big]\nabla\tau.
\end{align*}
Using \eqref{e2.4} and the inequality $\frac \alpha{1+\alpha\nu(t)}<\alpha_3$, we obtain
 $$
\alpha V(\tau_-, X(\tau_-))
+(1+\alpha\nu(\tau))\big(V^{\nabla_\tau}(\tau,X(\tau_-))
+ \mathcal{A}V(\tau,X(\tau_-))\big)\leq0.
$$
Therefore,
\begin{align*}
\alpha_1e_{\alpha}(t\wedge\theta_n, s)\mathbb{E} \|X(t\wedge\theta_n)\|^p
&\leq \mathbb{E}[e_{\alpha}(t\wedge\theta_n, s)V(t\wedge\theta_n,
 X(t\wedge\theta_n))]\\
&\leq V(s,x_s)\leq \alpha_2\|x_s\|^p.
\end{align*}
Letting $n\to\infty$ yields
$$
\alpha_1e_{\alpha}(t, s)\mathbb{E} \|X(t)\|^p\leq \alpha_2\|x_s\|^p.
$$
Hence,
$$
\mathbb{E} \|X_{s,x_s}(t)\|^p
\leq \frac{\alpha_2}{\alpha_1}\|x_s\|^pe_{\ominus \alpha}(t, s).
$$
The proof is complete.
\end{proof}

We now consider  the converse problem by showing that if the trivial solution
of \eqref{e1.10} is uniformly exponentially $p$-stable then such a Lyapunov
function exists. Firstly, we study the differentiability of solutions with
respect to the initial conditions and the continuity with respect to coefficients.

\begin{lemma}[Burkholder inequality on time scales]\label{lem2.3}
For any $p\geq 2$ there exists a positive constant $B_p$ such that if
 $\{M_t\}_{t\in\mathbb{T}_a}$ is an $\mathcal{F}_t$-martingale with
$\mathbb{E}|M_t|^p<\infty$ and $ M_a=0$ then
\begin{equation*}
\mathbb{E}\sup_{a\leq s\leq t}|M_s|^p\leq B_p
\Big(\mathbb{E}\langle M\rangle_t^{p/2}
+\mathbb{E}\sum_{a\leq s\leq t}|\nabla^*M_s|^p\Big),
\end{equation*}
where $\nabla^*M_s=M_s-M_{s_-}$.
\end{lemma}

\begin{proof}
By Doob's inequality, we have
$$
\mathbb{E}\sup_{a\leq s\leq t}|M_s|^p\leq\Big(\frac{p}{p-1}\Big)^p\mathbb{E}|M_t|^p.
$$
On the other hand, we see that the martingale $\widehat M_t$ can be extended to
a regular martingale on $[a; \infty)_{\mathbb{R}}$.
Therefore, by using proof of  \cite[Lemma 5]{Tar} we obtain
$$
\mathbb{E}|\widehat{M}_t|^p
\leq \widehat{B}_p \Big(\mathbb{E}\langle \widehat{M}\rangle_t^{p/2}
+\mathbb{E}\sum_{a\leq s\leq t}|\nabla^*\widehat{M}_s|^p\Big),
$$
for a constant $\widehat{B}_p$.
Further, the martingale  $\widetilde{M}_t$ is a sum of random variables.
Then, applying \cite[Theorem 13.2.15, pp.416]{Ath} yields
\begin{equation*}
\mathbb{E}|\widetilde{M}_t|^p
\leq \widetilde{B}_p\Big(\mathbb{E}\langle \widetilde{M}\rangle_t^{p/2}
+\mathbb{E}\sum_{a\leq s\leq t}|\nabla^*\widetilde{M}_s|^p\Big).
\end{equation*}
Consequently,
\begin{align*}
\mathbb{E}\sup_{a\leq s\leq t}|M_s|^p
&\leq 2^{p-1}\Big(\frac{p}{p-1}\Big)^p
 \Big(\mathbb{E}|\widehat{M}_t|^p+\mathbb{E}|\widetilde{M}_t|^p\Big)\\
&\leq 2^{p-1}\Big(\frac{p}{p-1}\Big)^p\Big[\widehat{B}_p
 \Big(\mathbb{E}\langle \widehat{M}\rangle_t^{p/2}
+\mathbb{E}\sum_{a\leq s\leq t}|\nabla^*\widehat{M}_s|^p\Big)
+\widetilde{B}_p\Big(\mathbb{E}\langle \widetilde{M}\rangle_t^{p/2}\\
&\quad  +\mathbb{E}\sum_{a\leq s\leq t}|\nabla^*\widetilde{M}_s|^p\Big)\Big]
\\
&\leq B_p\Big(\mathbb{E}(\langle \widehat{M}\rangle_t
 +\langle \widetilde{M}\rangle_t)^{p/2}
+\mathbb{E}\sum_{a\leq s\leq t}(|\nabla^*\widehat{M}_s|^p
 +|\nabla^*\widetilde{M}_s|^p)\Big)
\\
&=B_p\Big(\mathbb{E}\langle M\rangle_t^{p/2}
+\mathbb{E}\sum_{a\leq s\leq t}|\nabla^*{M}_{s}|^p\Big)
\end{align*}
where $B_p=2^{p}(\frac{p}{p-1})^p \max\big\{\widehat{B}_p, \widetilde{B}_p\big\}$.
The proof is complete.
\end{proof}

\begin{theorem}\label{thm2.4}
Let $p\geq 2, M\in\mathcal{M}_2$ such that  the conditions \eqref{e1.4}, \eqref{e1.5}
 and \eqref{e1.17} hold
and let $  g\in\mathcal{L}_2((a,T];M)$ with
$$
\int_a^t\mathbb{E}|g(\tau)|^p\nabla\tau<\infty \;\; \forall  t\in \mathbb{T}_a.
$$
Then
\begin{equation*}
\mathbb{E}\sup_{a\leq t\leq T}\Big|\int_a^t g(\tau)\nabla M_\tau\Big|^p
\leq  C_p
\int_a^T\mathbb{E}|g(\tau)|^p\nabla\tau,
\end{equation*}
where $C_p=B_p\{(T-a)^{\frac{p}{2}-1}N^{p/2}+ m_p\}$.
\end{theorem}

\begin{proof}
Set
$$
x_t=\int_a^tg(\tau)\nabla M_\tau,\quad t\in[a, T].
$$
The process $x_t$ is a square integrable martingale with the characteristic
$$
\langle x\rangle_t=\int_a^t |g(\tau)|^2\nabla \langle M\rangle_\tau.
$$
Since $\langle M\rangle_t$ is continuous, so is $\langle x\rangle_t$.
Applying
 Lemma \ref{lem2.3} to the martingale $(x_t)$ obtains
\begin{align*}
&\mathbb{E}\sup_{a\leq r\leq t}|x_r|^p\\
&\leq B_p\Big\{\mathbb{E}\langle x\rangle_t^{p/2}
+\mathbb{E}\sum_{a\leq s\leq t}|\nabla^*x_{s}|^p\Big\}
\\
&= B_p\Big\{\mathbb{E}\langle x\rangle_t^{p/2}
+\mathbb{E}\int_a^t\int_{\mathbb{R}} |g(\tau)u|^p\delta(\nabla \tau,du)\Big\}
\\
&= B_p\Big\{\mathbb{E}\Big[\int_a^t|g(\tau)|^2\nabla\langle M\rangle_\tau\Big]^{p/2}
+\mathbb{E}\int_a^t\int_{\mathbb{R}} |g(\tau)u|^p\pi(\nabla\tau,du)\Big\}
 \\
&\leq  B_p\Big\{(t-a)^{\frac{p}{2}-1}N^{p/2}\int_a^t\mathbb{E}|g(\tau)|^p\nabla\tau
+\mathbb{E}\int_a^t|g(\tau)|^p\int_{\mathbb{R}}|u|^p\Upsilon(\tau, du)\nabla\tau\Big\}
\\
&\leq  B_p\big\{(t-a)^{\frac{p}{2}-1}N^{p/2}+ m_p\big\}
\int_a^t\mathbb{E}|g(\tau)|^p\nabla\tau.
\end{align*}
By putting $C_p=B_p\big[(T-a)^{\frac{p}{2}-1}N^{p/2}+ m_p\big]$ we complete the proof.
\end{proof}

\begin{lemma}\label{lem2.5'}
Let $T,s\in\mathbb{T}_a$ with $T>s$, and let $p\geq2$ be fixed. Suppose that the condition
\eqref{e1.17} holds and process  $\zeta(t)$ is the solution of the stochastic
equation
\begin{equation}\label{e3.5'}
\zeta(t)=\varphi(t)+\int_s^t\psi(\tau)\zeta(\tau_-)\nabla\tau+
\int_s^t\chi(\tau)\zeta(\tau_-)\nabla M_\tau, \;\; \forall  t\in[s, T].
\end{equation}
We assume that the functions $ \varphi(t), \psi(t)$ and $\chi(t)$ are
 $\mathcal{F}_t$-adapted  and that there exists a constant $K>0$ such
that with probability $1$, $\|\psi(t)\| \leq K$ and $\| \chi (t) \| \leq K$.
Then
\begin{equation}\label{2.6b2}
\mathbb{E}\sup_{s\leq t\leq T} \|\zeta (t)\|^p\leq 3^{p-1}\mathbb{E}
\sup_{s\leq t\leq T} \|\varphi(t)\|^p e_{H_1}(T,s),
\end{equation}
where $H_1=3^{p-1}K^p((T-s)^{p-1} +C_p)$.
\end{lemma}

\begin{proof} For any $n>0$ denote
$\theta_n=\inf\{t>s: \|\zeta(t)\|>n\}$. From \eqref{e3.5'} we have
\begin{align*}
&\mathbb{E}\sup_{s\leq r\leq t}\|\zeta(r\wedge \theta_n)\|^p\\
&\leq 3^{p-1}\Big(\mathbb{E}\sup_{s\leq r\leq T}\|\varphi(r)\|^p
 +\mathbb{E}\sup_{s\leq r\leq t}\Big\|\int_s^{r\wedge \theta_n}
 \psi(\tau)\zeta(\tau_-)\nabla\tau\Big\|^p\\
&\quad + \mathbb{E}\sup_{s\leq r\leq t}
 \Big\|\int_s^{r\wedge \theta_n}\chi(\tau)\zeta(\tau_-)\nabla M_\tau\Big\|^p\Big)\\
&\leq 3^{p-1}\Big(\mathbb{E}\sup_{s\leq r\leq T}\|\varphi(r)\|^p
+K^p(T-a)^{p-1}\int_s^{t\wedge \theta_n}\mathbb{E}\|
\zeta(\tau_-)\|^p\nabla\tau\\
&\quad + C_pK^p\int_s^{t\wedge \theta_n}\mathbb{E}\|\zeta(\tau_-)\|^p\nabla \tau\Big)
 \quad \text{(by Theorem \ref{thm2.4})} \\
&=3^{p-1}\Big(\mathbb{E}\sup_{s\leq r\leq T}\|\varphi(r)\|^p+K^p\Big((T-a)^{p-1}
 +C_p\Big)\int_s^{t\wedge \theta_n}\mathbb{E}\|\zeta(\tau_-)\|^p\nabla \tau\Big)\\
&=3^{p-1}\mathbb{E}\sup_{s\leq r\leq T}\|\varphi(r)\|^p+H_1\int_s^{t}
 \sup_{s\leq r\leq \tau_-}\mathbb{E}\|\zeta(r\wedge \theta_n)\|^p\nabla \tau,
\end{align*}
where $H_1=3^{p-1}K^p((T-s)^{p-1} +C_p)$.
Using Lemma \ref{lem1.1} one gets
\begin{equation*}
\mathbb{E}\sup_{s\leq t\leq T} \|\zeta (t\wedge \theta_n)\|^p\leq 3^{p-1}\mathbb{E}\sup_{s\leq t\leq T} \|\varphi(t)\|^p e_{H_1}(T,s).
\end{equation*}
Letting $n\to\infty$ yields  \eqref{2.6b2}.
 The proof is complete.
\end{proof}

\begin{lemma}\label{lem2.5}
Suppose that the coefficients of \eqref{e1.10} are continuous in $s, x$ and
they have continuous bounded first and second partial derivatives and
condition \eqref{e1.17} holds for $p\geq 4$.
Then, the solution $X_{s,x}( t)$,   $s\leq t\leq T$, with initial condition
$X_{s,x}( s)=x$ of \eqref{e1.10} is twice differentiable
with respect to $x$. Further, the derivatives
\[
 \frac{\partial}{\partial x_i}(X_{s, x}(t)),\quad
 \frac{\partial^2}{\partial x_i\partial x_j}(X_{s, x}(t))
 \]
 are continuous in $x$ in mean square.
\end{lemma}

 \begin{proof}
Suppose that the derivatives $f_x'(t,x), g_x'(t,x),f_{xx}''(t,x), g_{xx}''(t,x) $
are bounded by a constant $\lambda$.
To simplify notations we put $Y_{s,\Delta x}(t)=X_{s, x+\Delta x}(t) - X_{s, x}(t)$.
Using Lagrange theorem we see that for any $i=1,2,\dots,d$, there exists
$ \theta_i, \xi_i\in[0; 1]$ such that
\begin{equation}\label{e2.6b1}
\begin{aligned}
&f_i(t,X_{s, x}(t_-)+Y_{s, \Delta x}(t_-))-f_i(t,X_{s, x}(t_-))\\
&=\sum_{j=1}^d\frac {\partial f_i}{\partial x_j}(t,X_{s, x}(t_-)
 +\theta_iY_{s, \Delta x}(t_-))Y_{i,s, \Delta x}(t_-),
\\
&g_i(t,X_{s, x}(t_-)+Y_{s, \Delta x}(t_-))-g_i(t,X_{s, x}(t_-))
\\
&=\sum_{j=1}^d\frac {\partial g_i}{\partial x_j}(t,X_{s, x}(t_-)
 +\xi_iY_{s, \Delta x}(t_-))Y_{i,s, \Delta x}(t_-).
\end{aligned}
\end{equation}
Let $A_{s,\Delta x}(t)$ be the matrix with entries 
$a^{ij}_{s,\Delta x}(t)=\frac {\partial f_i}{\partial x_j}(t,X_{s, x}(t_-)
+\theta_iY_{s, \Delta x}(t_-))$,
and let  $B_{s,\Delta x}(t)$ be the matrix with entries
$b^{ij}_{s,\Delta x}(t)
=\frac {\partial g_i}{\partial x_j}(t, X_{s, x}(t_-)+\xi_iY_{s, \Delta x}(t_-))$.
Then \eqref{e2.6b1} can be rewritten
\begin{gather*}
f(t, X_{s, x}(t_-)+Y_{s, \Delta x}(t_-))-f(t, X_{s, x}(t_-))
 =A_{s,  \Delta x}(t)Y_{s, \Delta x}(t_-),\\
g(t, X_{s, x}(t_-)+Y_{s, \Delta x}(t_-))-g(t, X_{s, x}(t_-))
 =B_{s,  \Delta x}(t)Y_{s, \Delta x}(t_-).
\end{gather*}
Hence,
\begin{align*}
 Y_{s,\Delta x}(t)=
\Delta x +\int_s^tA_{s,  \Delta x}(\tau)Y_{s, \Delta x}(\tau_-)\nabla \tau
+\int_s^tB_{s,  \Delta x}(\tau)Y_{s, \Delta x}(\tau_-)\nabla M_\tau.
\end{align*}
Since $A_{s,  \Delta x}(t)$ and $B_{s,  \Delta x}(t)$ are bounded by a
constant $\lambda$, by using Lemma \ref{lem2.5'} one has
 \begin{equation}\label{e2.6b}
 \mathbb{E}\sup_{s\leq t\leq T}\|Y_{s,\Delta x}(t)\|^{2}
\leq 3\|\Delta x\|^{2} e_{H_2}(T, s),
 \end{equation}
where $H_2=3\lambda^2(T-s +C_2)$.
As a consequence, $ \mathbb{E}\sup_{s\leq t\leq T}\|Y_{s,\Delta x}(t)\|^{2}\to 0$ as
$\|\Delta x\|\to 0$ in probability.
Let $\zeta_{s,x}(t)$ be the solution of the variation dynamic equation
\[
\zeta_{s,x}(t)
= I+\int_s^tf'_x(\tau,X_{s, x}(\tau_-))\zeta_{s,x}(\tau_-)\nabla\tau
 +\int_s^tg'_x(\tau, X_{s, x}(\tau_-))\zeta_{s,x}(\tau_-)\nabla M_\tau,
\]
for all $ s\leq t\leq T$. Since $f'_x$ and $g'_x$ are bounded by constant $\lambda$,
\begin{equation}\label{e2.7b1}
\mathbb{E}\sup_{s\leq t\leq T}\|\zeta_{s,x}(t)\|^4
\leq 27e_{H_3}(T,s),
\end{equation}
where $H_3=27\lambda^4((T-s)^{3}+C_4)$.
Define
 $$
 \zeta_{\Delta x}(t)=Y_{s,\Delta x}(t)-\zeta_{s,x}(t){\Delta x}\;\;\;\forall \, s\leq t\leq T.
 $$
The process $\zeta_{\Delta x}(t)$ satisfies the equation
\[
\zeta_{\Delta x}(t)
=\phi_{\Delta x}(t)+\int_s^tA_{s,\Delta x}(\tau)\zeta_{\Delta x}(\tau_-)
\nabla \tau+
\int_s^tB_{s,\Delta x}(\tau)\zeta_{\Delta x}(\tau_-)\nabla M_\tau,
\]
where,
\begin{align*}
\phi_{\Delta x}(t)
&=\int_s^t[(A_{s, \Delta x}(\tau)-f'_x(\tau,X_{s, x}(\tau_-)))\zeta_{s,x}
(\tau_-)\Delta x]\nabla \tau
\\
&\quad +\int_s^t[(B_{s, \Delta x}(\tau)-g'_x(\tau,X_{s, x}(\tau_-)))
\zeta_{s,x}(\tau_-)\Delta x]\nabla M_\tau.
\end{align*}
Applying Lemma \ref{lem2.5'} again one gets
\begin{equation}\label{e2.6}
\mathbb{E}\sup_{s\leq t\leq T}\|\zeta_{\Delta x}(t)\|^2
\leq 3\mathbb{E}\sup_{s\leq t\leq T}\|\phi_{\Delta x}(t)\|^2e_{H_2}(T, s).
\end{equation}
Since $f_x'(t,x), g_x'(t,x)$  are continuous  and
$ \mathbb{E}\sup_{s\leq t\leq T}\|Y_{s,\Delta x}(t)\|^{2}\to 0$ as
 $\|\Delta x\|\to 0$
in probability,
$$
\lim_{\Delta x\to 0}(\|A_{s, \Delta x}(t)-f'_x(t ,X_{s, x}(t_-))\|
+\|B_{s, \Delta x}(t)-g'_x(t ,X_{s, x}(t_-))\|)=0
$$
in probability. Hence, by the boundedness of $A,B,f', g'$, we obtain
\begin{equation}\label{e3.10b}
\begin{aligned}
&\mathbb{E}\big[\sup_{s\leq t\leq T}\frac{\|\phi_{\Delta x}(t)\|^2}{\|\Delta x\|^2}
 \big]\\
&\leq 2(T-s)\int_s^T \mathbb{E}\|(A_{s, \Delta x}(\tau)
 -f'_x(\tau,X_{s, x}(\tau_-)))\zeta_{s,x}(\tau_-)\|^2\nabla \tau
\\
&\quad +8\int_s^T\mathbb{E}\|(B_{s,\Delta x}(\tau)-g'_x(\tau,X_{s, x}(\tau_-)))\zeta_{s,x}(\tau_-)\|^2\nabla \langle M\rangle_\tau
\to 0
\end{aligned}
\end{equation}
as $\|\Delta x\|\to 0$.
Thus, \eqref{e2.6} and \eqref{e3.10b} imply 
$$
\mathbb{E}\sup_{s\leq t\leq T}\frac{\|\zeta_{\Delta x}(t)\|}{\|\Delta x\|}\to 0
\quad \text{as } \Delta x\to 0.
$$
This means
$$
\zeta_{s,x}(t)=\frac{\partial}{\partial x}X_{s, x}(t)\quad \forall s\leq t\leq T.
$$
The mean square continuity of $\zeta_{s,x}(t)$  with respect to $x$ again follows
from the continuity of
$f'_x(t, X_{s, x}(t))\quad \text{and}\quad  g'_x(t, X_{s, x}(t))$.

We prove the existence of $\frac{\partial^2 X_{s,x}(t) }{\partial x^2}$.
To simplify notations, if $F$ is a bilinear mapping, we write $F h^2$
for $F(h,h)$. Let the bilinear mapping $\eta_{s,x}(t)$ be the solution of the
second variation dynamic equation
 \begin{align*}
 \eta_{s,x}(t)
&=\int^t_sf^{''}_{xx}(\tau, X_{s,x}(\tau_-))\zeta_{s,x}^2(\tau_-)\nabla\tau
  +\int^t_s f'_x(\tau, X_{s,x}(\tau_-))\eta_{s,x}(\tau_-)\nabla \tau\\
&\quad + \int^t_sg^{''}_{xx}(\tau, X_{s,x}(\tau_-))\zeta_{s,x}^2(\tau_-)\nabla M_\tau
 + \int^t_s g'_x(\tau, X_{s,x}(\tau_-))\eta_{s,x}(\tau_-)\nabla M_\tau,
 \end{align*}
 for all $ s\leq t\leq T$. Using Lemma \ref{lem2.5'} and \eqref{e2.7b1} we see that
\begin{equation}\label{e2.12b}
\mathbb{E} \sup_{s\le t\leq T}\|\eta_{s,x}(t)\|^2< \infty.
\end{equation}
Define
 $$
 \eta_{\Delta x}(t)=\zeta_{s,x+\Delta x}(t)\Delta x-\zeta_{s,x}(t)
\Delta x-\eta_{s,x}(t)(\Delta x)^2,\quad  s\leq t\leq T.
 $$
The process $\eta_{\Delta x}(t)$ satisfies the equation
  \begin{equation}\label{e2.9}
\begin{aligned}
\eta_{\Delta x}(t)
&=\psi_{\Delta x}(t)+ \int_s^tf'_x(\tau,X_{s, x+\Delta x}(\tau_-))
 \eta_{\Delta x}(\tau_-)\nabla \tau
\\
&\quad +\int_s^tg'_x(\tau, X_{s, x+\Delta x}(\tau_-))
 \eta_{\Delta x}(\tau_-)\nabla M_\tau,
\end{aligned}
\end{equation}
where,
\begin{align*}
\psi_{\Delta x}(t)
&=\int_s^t\Big[\big(f'_x(\tau,X_{s, x+\Delta x}(\tau_-))-f'_x(\tau,X_{s, x}(\tau_-))\\
&\quad -f''_{xx}(\tau,X_{s, x}(\tau_-))
\zeta_{s,x}(\tau_-)\Delta x\big)\zeta_{s,x}(\tau_-)\Delta x\\
&\quad +\big(f'_x(\tau,X_{s, x+\Delta x}(\tau_-))
-f'_x(\tau,X_{s, x}(\tau_-))\big)\eta_{s,x}(\tau_-)(\Delta x)^2\Big]\nabla \tau
\\
&\quad +\int_s^t\Big[\big(g'_x(\tau,X_{s, x+\Delta x}(\tau_-))
 -g'_x(\tau,X_{s, x}(\tau_-))\\
&\quad -g''_{xx}(\tau,X_{s, x}(\tau_-))\zeta_{s,x}(\tau_-)\Delta x\big)
 \zeta_{s,x}(\tau_-)\Delta x
\\
&\quad +\big(g'_x(\tau,X_{s, x+\Delta x}(\tau_-))
-g'_x(\tau,X_{s, x}(\tau_-))\big)\eta_{s,x}(\tau_-)(\Delta x)^2\Big]\nabla M_\tau.
\end{align*}
Using Lemma \ref{lem2.5'} one obtains
\begin{equation}\label{e2.9b}
\mathbb{E}\|\eta_{\Delta x}(t)\|^2
\leq \mathbb{E}\sup_{s\leq t\leq T}\|\psi_{\Delta x}(t)\|^2 e_{H_2}(T,s),
\end{equation}
where $H_2=3\lambda^2(T-s+4N)$. It is easy to see that
\begin{align*}
&\mathbb{E}\sup_{s\leq t\leq T}\big\|\int_s^t\Big[\big(f'_x(\tau,X_{s, x+\Delta x}(\tau_-))-f'_x(\tau,X_{s, x}(\tau_-))
-f''_{xx}(\tau,X_{s, x}(\tau_-)) \\
&\times \zeta_{s,x}(\tau_-)\Delta x\big)
\zeta_{s,x}(\tau_-)\Delta x\Big]\nabla\tau\big\|^2 \\
&\leq 2(T-s)\mathbb{E}\int_s^T\big\|\big(f'_x(\tau,X_{s, x+\Delta x}(\tau_-))\\
&-f'_x(\tau,X_{s, x}(\tau_-))-f''_{xx}(\tau,X_{s, x}(\tau_-))Y_{s,\Delta x}(\tau_-)
\big)\zeta_{s,x}(\tau_-)\Delta x\big\|^2\nabla \tau
\\
&\quad +2(T-s)\mathbb{E}\int_s^T\big\|f''_{xx}(\tau,X_{s, x}(\tau_-))(Y_{s,\Delta x}
 (\tau_-)-\zeta_{s,x}(\tau_-)\Delta x)\\
&\quad \times\zeta_{s,x}(\tau_-)\Delta x\|^2\nabla \tau=o(\|\Delta x\|^4);
\end{align*}
\begin{align*}
&\int_s^T\mathbb{E}\Big\|\big(f'_x(\tau,X_{s, x+\Delta x}(\tau_-))
-f'_x(\tau,X_{s, x}(\tau_-))\big)\eta_{s,x}(\tau_-)(\Delta x)^2\Big\|^2\nabla \tau\\
&=o(\|\Delta x\|^4);
\end{align*}
\begin{align*}
&\mathbb{E}\sup_{s\leq t\leq T}\Big\|\int_s^t
\Big[\big(g'_x(\tau,X_{s, x+\Delta x}(\tau_-))-g'_x(\tau,X_{s, x}(\tau_-))
-g''_{xx}(\tau,X_{s, x}(\tau_-))\\
&\times\zeta_{s,x}(\tau_-)\Delta x\big)
\zeta_{s,x}(\tau_-)\Delta x\Big]\nabla M_\tau\Big\|^2\\
&\leq 4N\mathbb{E}\int_s^T\big\|\big(g'_x(\tau,X_{s, x+\Delta x}(\tau_-))
\\
&\quad -g'_x(\tau,X_{s, x}(\tau_-))-g''_{xx}(\tau,X_{s, x}(\tau_-))
 Y_{s,\Delta x}(\tau_-)\big)\zeta_{s,x}(\tau_-)\Delta x\big\|^2\nabla \tau\\
&\quad +4N\mathbb{E}\int_s^T\big\|g''_{xx}(\tau,X_{s, x}(\tau_-))
 (Y_{s,\Delta x}(\tau_-)-\zeta_{s,x}(\tau_-)\Delta x)\zeta_{s,x}(\tau_-)
 \Delta x\|^2\nabla \tau\\
&=o(\|\Delta x\|^4);
\end{align*}
\begin{align*}
&\mathbb{E}\sup_{s\leq t\leq T}\hskip -.05cm\Big\|\int_s^t
\Big[\big(g'_x(\tau,X_{s, x+\Delta x}(\tau_-))
-g'_x(\tau,X_{s, x}(\tau_-))\big)\eta_{s,x}(\tau_-)(\Delta x)^2\Big]
\nabla M_\tau\Big\|^2
\\
&\leq 4N\mathbb{E}\int_s^T\big\|(g'_x(\tau,X_{s, x+\Delta x}(\tau_-))
-g'_x(\tau,X_{s, x}(\tau_-)))\eta_{s,x}(\tau_-)(\Delta x)^2\big\|^2\nabla \tau\\
&=o(\|\Delta x\|^4).
\end{align*}
Combining these results we obtain
$\mathbb{E}\sup_{s\leq t\leq T}\|\psi_{\Delta x}(t)\|^2=o(\|\Delta x\|^4)$,
which implies that
$$
\mathbb{E}\|\eta_{\Delta x}(t)\|^2=o(\|\Delta x\|^4).
$$
Thus, $\frac {\|\eta_{\Delta x}(t)\|}{\|\Delta x\|^2}\to 0$ as $\Delta x\to 0$, or
$$
\frac {\partial^2}{\partial x^2}X_{s,x}(t)=\eta_{s,x}(t).
$$
The proof is complete.
 \end{proof}

\begin{lemma}\label{lem2.6'}
Let $p\geq4$ and $2\leq \beta\leq p$. Then, the map
$F:\phi\mapsto \mathbb{E}|\phi|^\beta$ from
$L_p(\Omega,\mathcal{F},\mathbb{P})$ to $\mathbb{R}$ is twice differentiable
at every $\phi_0\ne 0$ and
$$
F'(\phi_0)(\phi)=\beta \mathbb{E}[ |\phi_0|^{\beta-1}\phi];\quad
F''(\phi_0)(\phi,\psi)=\beta(\beta-1) \mathbb{E} [|\phi_0|^{\beta-2}\phi\psi].$$
\end{lemma}

\begin{proof}
We have
\begin{align*}
&\big|F(\phi_0+\Delta \phi)-F(\phi_0)-\beta \mathbb{E} |\phi_0|^{\beta-1}\Delta\phi
\big|\\
&= \big|\mathbb{E} |\phi_0+\Delta\phi|^{\beta}
 - \mathbb{E} |\phi_0|^{\beta}-\beta \mathbb{E} |\phi_0|^{\beta-1}\Delta\phi\big|\\
&= \beta(\beta-1)\mathbb{E} [|\eta|^{\beta-2}(\Delta\phi)^2]\\
&\leq \beta(\beta-1)[\mathbb{E} |\eta|^{m(\beta-2)}]^{1/m}
 [\mathbb{E}|\Delta\phi|^p]^{2/p},
\end{align*}
where $\eta \in (\phi_0, \phi_0+\Delta \phi)$ if $\phi_0+\Delta \phi>\phi_0$ and 
$\eta \in (\phi_0+\Delta \phi, \phi_0)$ if $\phi_0+\Delta \phi<\phi_0$.
Hence, with $\frac1m+\frac 2p=1$ we have
\begin{align*}
&\big |F(\phi_0+\Delta \phi)-F(\phi_0)-\beta \mathbb{E} |\phi_0|^{\beta-1}
 \Delta\phi\big|\\
&\leq \beta(\beta-1)[\mathbb{E} |\eta|^{m(\beta-2)}]^{1/m}
 [\mathbb{E}|\Delta\phi|^p]^{2/p}\\
&\leq \beta(\beta-1)[\mathbb{E} \max\{|\phi_0|,|\phi_0+\Delta\phi|\}^{m(\beta-2)}
]^{1/m}[\mathbb{E}|\Delta\phi|^p]^{2/p}.
\end{align*}
 The relation $\frac 1m+\frac 2p=1$ implies  $m(\beta-2)<p$. Thus, 
$\mathbb{E} \max\{|\phi_0|,|\phi_0+\Delta\phi|\}^{m(\beta-2)}<\infty$. Therefore,
\begin{align*}
&|F(\phi_0+\Delta \phi)-F(\phi_0)-\beta \mathbb{E} |\phi_0|^{\beta-1}\Delta\phi|\\
&\leq \beta(\beta-1)[\mathbb{E} |\eta|^{m(\beta-2)}]^{1/m}
 [\mathbb{E}(\Delta\phi)^p]^{2/p}\\
&=O(1)|\Delta\phi|_p^2\quad \text{as }|\Delta\phi|_p\to 0.
\end{align*}
This means  $F'(\phi_0)(\phi)=\beta \mathbb{E}| \phi_0|^{\beta-1}\phi$.
 The existence and continuity of the second derivative $F''$ can be proved in 
a similar way.
\end{proof}

\begin{lemma}\label{lem2.6}
 Let the coefficients of \eqref{e1.10} be continuous in $t, x$ and satisfy 
the conditions \eqref{e2.1}.
Suppose  also that conditions of Lemma \ref{lem2.5}  are satisfied and
 $2\leq \beta\leq p$.
Then, for fixed $t>a$, the function
 $u(s,x)=\mathbb{E}\|X_{s,x}(t)\|^\beta;\; a<s<t$ is twice 
continuously differentiable with respect to $x$ except perhaps at $x=0$.
\end{lemma}

\begin{proof}
The map $x\to X_{s,x}(t)$ is twice differentiable in $x$ by Lemma \ref{lem2.5}. 
The map $X \to \|X\|$ from $\mathbb{R}^d$ to $\mathbb{R}$ and the map 
$F(\phi)=\mathbb{E} |\phi|^\beta$ from  $L_p(\Omega,\mathcal{F},\mathbb{P})$ 
to $\mathbb{R}$  are also twice  differentiable. Therefore by chain rule, the map 
$u(s,x)= \mathbb{E}\|X_{s,x}(t)\|^\beta$ is  twice  differentiable. Further,
\begin{gather}\label{e2.12}
u'_x(s,x)h=\beta\mathbb{E}[\|X_{s,x}(t)\|^{\beta-2}< X_{s,x}(t),\zeta_{s,x}(t)h >]\\
\begin{aligned}
u''_{xx}(s,x)h^2
&=\beta\mathbb{E}\Big[(\beta-2)\|X_{s,x}(t)\|^{\beta-4}
\langle  X_{s,x}(t),\zeta_{s,x}(t)h \rangle^2 \\
&\quad +\|X_{s,x}(t)\|^{\beta-2}\|\zeta_{s,x}(t)h\|^2
+\|X_{s,x}(t)\|^{\beta-2}\langle  X_{s,x}(t),\eta_{s,x}(t)h^2\rangle \Big]
\end{aligned} \notag.
\end{gather}
The proof is complete. 
\end{proof}

 \begin{theorem}\label{thm2.7}
Let $M$  have independent increments and the conditions of Lemma \ref{lem2.5} 
hold and  $2\leq \beta\leq p$. Suppose further that $\mathcal{A}V(t,x)$ is
 $ld$-continuous in $(t,x)$ for all 
$V\in C^{1,2}(\mathbb{T}_a\times \mathbb{R}^d; \mathbb{R})$.
Then,  the function $u(s,x)=\mathbb{E} \|X_{s,x}(t)\|^\beta$, $ a<s< t$ is 
$\nabla$-differentiable in $s$,  twice continuously differentiable with 
respect to $x$ and satisfies the equation
\begin{equation}\label{e2.17}
u^{\nabla_s}(s,x)+\mathcal{A}u(s,x)=0.
\end{equation}
 \end{theorem}

 \begin{proof}
 By Lemma \ref{lem2.5}, $u(s, x)$ is twice differentiable in $x$.
 From \eqref{e2.2},  \eqref{e2.6b}, \eqref{e2.7b1},  \eqref{e2.12b} and  
\eqref{e2.12}, it follows that  
$\int_s^t\mathcal{A}u(h, X_{s,x}(\tau_-))\nabla\tau$ is integrable. Therefore,
\begin{equation*}
 u(h, X_{\rho(s),x}(r))-u(h, x)-\int_{\rho(s)}^{r}
\mathcal{A}u(h, X_{\rho(s),x}(\tau_-))\nabla\tau, \quad s\leq r\leq h\leq t
\end{equation*}
is an $\mathcal{F}_r$-martingale. In particular,
$$ 
\mathbb{E} u(h, X_{\rho(s),x}(h))-u(h, x)
=\int_{\rho(s)}^{h} \mathbb{E}  \mathcal{A}u(h, X_{\rho(s),x}(\tau_-))\nabla\tau.
$$
Since $M_t$ has  independent increments, $X_{h, y}(t)$ is independent of  
$X_{\rho(s),x}(h)$ when $s\leq h\leq t$ and $y\in \mathbb{R}^d$, which implies that
$\mathbb{E} u(h, X_{\rho(s),x}(h))= u(\rho(s),x)$. Thus,
  \begin{equation*}
  \frac{u(\rho(s), x)-u(h,x)}{\rho(s)-h}
=\frac{1}{\rho(s)-h}\int_{\rho(s)}^h\mathbb{E} \mathcal{A}u(h, X_{\rho(s),x}
(\tau_-))\nabla\tau.
\end{equation*}
If $s$ is left-scattered,  then
$$
u^{\nabla _s}(s,x)=-\frac 1{\nu(s)}\int_{\rho(s)}^s\mathcal{A}u(s, X_{\rho(s),x}(\tau_-))\nabla\tau=-\mathcal{A}u(s,x).
$$
In the case that $s$ is left-dense, we let $h\to s$ to obtain
$$
u^{\nabla _s}(s,x)=-\mathcal{A}u(s,x).
$$
The proof is complete.
 \end{proof}

\begin{theorem}\label{thm2.9}
Let the conditions in Theorem \ref{thm2.7} hold. Suppose that
for any fixed $T>0$, there exists a function  $\gamma(T,\cdot):\mathbb{T}\to\mathbb{T}$ 
with $\gamma(T, s)\geq s+T$ for all   $s\in\mathbb{T}$ such that 
 $\gamma(T, s)$ and the $\nabla$-derivative $\gamma^{\nabla_s}(T, s)$ are bounded.
If the trivial solution of \eqref{e1.10} is uniformly exponentially 
$\beta$-stable, then there exists a function 
$V(s, x)\in C^{1,2}(\mathbb{T}_a\times\mathbb{R}^d; \mathbb{R}_+)$  
satisfying inequalities \eqref{e2.3}, \eqref{e2.4} (with the power $\beta$).
\end{theorem}

\begin{proof}
By Lemma \ref{lem2.6} and Theorem \ref{thm2.7}, the function
\begin{equation}\label{e3.16}
V(s,x)=\int_s^{\gamma(T, s)}\mathbb{E}\|X_{s,x}(\tau_- )\|^\beta\nabla\tau,
\end{equation}
is in class  $C^{1,2}(\mathbb{T}_a\times\mathbb{R}^d; \mathbb{R}_+)$.
From  \eqref{e2.2},
$$
V(s_-,x)\leq \int_{s_-}^{\gamma(T, {s_-})}\Gamma\|x\|^\beta 
e_{\ominus\alpha}(\tau_-, s_-)\nabla\tau\leq\alpha_1\|x\|^\beta,
$$
where $\alpha_1=\frac{\Gamma(1+\nu_*\alpha)}{\alpha}$.
Since, by assumption, the trivial solution of \eqref{e1.10} is uniformly 
exponentially $\beta$-stable and $\gamma^{\nabla_s}(T,s)$ is bounded, we can 
choose $T>0$ such that
\begin{equation}\label{e2.20}
\mathbb{E}\|X_{s, x}(\gamma(T, s))\|^\beta<\frac{1}{2}\|x\|^\beta,\quad
\mathbb{E}\|X_{s, x}(\gamma(T,s))\|^\beta\gamma^{\nabla_s}(T, s)
 <\frac{1}{2}\|x\|^\beta.
\end{equation}
Since  $f$ and $g$ have bounded partial derivatives  and $f(t,0)=0,\; g(t,0)=0$,
$$
\|f(t,x)\|\leq G \|x\|,\quad 
\|g(t,x)\|\leq G \|x\|, \quad t\geq a,\; x\in \mathbb{R}^d.
$$  Therefore,
\begin{equation}\label{e2.19}
\|\mathcal{A}[\|x\|^\beta](s,x)\|<c_1\|x\|^\beta,
\end{equation}
for a certain constant $c_1$. Applying It\^{o}'s formula to the function 
$\|x\|^\beta$ and using \eqref{e2.19} yields
\begin{align*}
\mathbb{E}\|X_{s, x}(\gamma(T, s))\|^\beta-\|x\|^\beta
&=\int_{s}^{\gamma(T, {s})}\mathbb{E} \mathcal{A}(\|X_{s, x}(\tau_-)\|^\beta)
 \nabla\tau\\
&\geq -c_1\int_{s}^{\gamma(T, s)}\mathbb{E} \|X_{s, x}(\tau_-)\|^\beta\nabla\tau
=-c_1V(s,x).
\end{align*}
Combining with \eqref{e2.20} we obtain the inequality 
$V(s,x)>\alpha_2\|x\|^\beta$ with $\alpha_2=\frac{1}{2c_1}$.
Thus, the function $V$ satisfies  condition  \eqref{e2.3}.
Using  \cite[Theorem 5.80]{Pet} to calculate $\nabla$-differential  of $V$ 
with respect $s$ and applying Theorem  \ref{thm2.7} we obtain
\begin{equation*}
V^{\nabla_s}(s,x)+\mathcal{A}V(s,x)
=\mathbb{E}\|X_{s_-, x}(\gamma(T,s_-))\|^\beta\gamma^{\nabla_s}(T, s_-)-\|x\|^\beta.
\end{equation*}
Using \eqref{e2.20} again we have
\begin{equation*}
V^{\nabla_s}(s,x)+\mathcal{A}V(s,x)
\leq -\frac{1}{2}\|x\|^\beta\leq -\frac{1}{2\alpha_1}V(s_-,x).
\end{equation*}
Thus, the function $V$ satisfies all conditions \eqref{e2.3}, \eqref{e2.4} 
with $\alpha_3=\frac{\alpha}{2\Gamma(1+\nu_*\alpha)}$. The proof is complete.
\end{proof}

\begin{example} \rm
Consider the linear stochastic dynamic equation
\begin{equation}\label{e2.21}
\begin{gathered}
d^\nabla X(t)=aX(t_-)d^\nabla  t+bX(t_-)d^\nabla M(t) \quad\forall  t\in\mathbb{T}_s
\\
X(s)=x,
\end{gathered}
\end{equation}
where $a,b$ are two constants, $a$ is regressive and $M$ is a square integrable  
martingale having independent increments.
By direct calculation we have
 \begin{equation}\label{3.19}
  \mathbb{E} X^2_{s,x}(t)=x^2+\int_{s}^t{q}(\tau)\mathbb{E} X_{s,x}^2(\tau_-)
\nabla\tau,
  \end{equation}
where
\begin{align*}
q(t)&=2a +b^2 \widehat K_t^c +a^2\nu(t)
+2b(1+a\nu(t))\int_{\mathbb{R}}u{\Upsilon}(t, du)\\
&\quad +b^2\int_{\mathbb{R}}u^2{\Upsilon}(t, du)
 -2b\int_{\mathbb{R}}u{\widehat\Upsilon}(t, du)\\
&=2a +b^2 \widehat K_t^c +a^2\nu(t)
+2b\int_{\mathbb{R}}u{\widetilde \Upsilon}(t, du)\\
&\quad +b^2\int_{\mathbb{R}}u^2{\Upsilon}(t, du)
+2ab\nu(t)\int_{\mathbb{R}}u{\Upsilon}(t, du).
\end{align*}
Since  $\int_{\mathbb{R}}u{\widetilde \Upsilon}(t, du)
=\mathbb{E}[M_t-M_{\rho(t)}|\mathcal{F}_{\rho(t)}]=0$ 
and $\nu(t)\int_{\mathbb{R}}u{\Upsilon}(t, du)=0$,
\begin{equation}\label{e3.19'}
q(t)=2a +b^2 \widehat{K}^c_t
+a^2\nu(t)+b^2\int_{\mathbb{R}}u^2{\Upsilon}(t, du).
\end{equation}
We define the function $\bar q(t)=\lim_{\rho(s)\downarrow t}q(s)$. 
It is seen that $\bar q$ is $rd$-continuous and $\bar q(t)=q(\sigma(t))$ if $t$ 
is right scattered. Since $\{t:\mu(t)>0\}$ is countable and 
$\operatorname{meas}\{t:\mathbb{E} X_{s,x}^2(t_-)\ne \mathbb{E} X_{s,x}^2(t)\}=0$,
\begin{align*}
\int_{s}^t{q}(\tau)\mathbb{E} X_{s,x}^2(\tau_-)\nabla\tau
&=\int_{(s,t]}{q}(\tau)\mathbb{E} X_{s,x}^2(\tau_-)\,d\tau
+\sum_{s<\tau\leq t}q(\tau)\mathbb{E} X_{s,x}^2(\tau_-)\nu(\tau)\\
&=\int_{[s,t)}{\bar q}(\tau)\mathbb{E} X_{s,x}^2(\tau)\,d\tau
 +\sum_{s\leq\tau< t}q(\sigma(\tau))\mathbb{E} X_{s,x}^2(\tau)\mu(\tau)\\
&=\int_{s}^t\overline{q}(\tau)\mathbb{E} X_{s,x}^2(\tau)\Delta\tau,
\end{align*}
from which it follows that
\begin{align}\label{e2.19'}
 \mathbb{E} X^2_{s,x}(t)=x^2e_{\overline{q}}(t,s),\quad t\geq s.
\end{align}
Further, it is known that
$$
 0<e_{\overline{q}}(t,s)=\exp\big\{\int_s^t\lim_{h\searrow\mu(\tau)}
\frac{\ln(1+\overline{q}(\tau)h)}{h}\Delta \tau\big\}.
$$
Then,  condition \eqref{e2.2} implies
$$
\int_s^t\lim_{h\searrow\mu(\tau)}\frac{\ln(1+\overline{q}(\tau)h)}{h} 
\Delta \tau\leq \ln \Gamma-\theta(t-s) \quad \forall  t>s.
$$
Choosing $T>0$ such that $\ln \Gamma-\frac{\theta T}2<0$, we obtain
$$
\int_s^t\lim_{h\searrow\mu(\tau)}\frac{\ln(1+\overline{q}(\tau)h)}{h}
\Delta \tau\leq -\frac{\theta(t-s)}2\quad \forall  t>s+T.
$$
Thus, the exponential square stability of \eqref{e2.21} implies 
 \begin{align}\label{e2.20bs}
\sup\big\{\frac 1{t-s}\int_s^t\lim_{h\searrow\mu(\tau)}
\frac{\ln(1+\overline{q}(\tau)h)}{h}\Delta \tau:t>s+T\big\}<0.
\end{align}
Conversely, supposing that \eqref{e2.20bs} holds,   
there are $\alpha>0, K^*>0$ such that
$0<e_{\bar q}(t,s)\leq K^* e_{-\alpha}(t,s)$.
 By using \eqref{e2.19'} we see that the trivial solution of \eqref{e2.21}
is exponentially square stable. To illustrate the argument used in the proof of
Theorem \ref{thm2.9} for constructing a Lyapunov function, we put
$$
V(s,x)=x^2\int_s^\infty e_{\bar q}(\tau_- ,s)\nabla\tau=x^2Q(s).
$$
By direct calculation we have
\begin{equation*}
Q^{\nabla_s}(s) =-1-q(s)Q(s).
\end{equation*}
Hence,
\begin{equation}\label{e2.21bs}
\begin{aligned}
&V^{\nabla_s}(s,x)+\mathcal{A}V(s,x)=Q^{\nabla_s}(s)x^2+q(s)Q(s)x^2\\
&=(-q(s)Q(s)-1)x^2+q(s)Q(s)x^2=-x^2.
\end{aligned}
\end{equation}
Using \eqref{3.19} and the fact $\lim_{t\to\infty}\mathbb{E} X^2_{s,x}(t)=0$ 
we can show that
$V(s,x)\geq \alpha_1 x^2$ with $\alpha_1=(\sup_t |q(t)|)^{-1}$. 
Further,  $e_{\bar q}(t,s)\leq K^*e_{-\alpha}(t,s)$. Thus, 
$V(s,x)\leq \frac{K^*}\alpha x^2$.
Combining \eqref{e2.21bs} and these inequalities, we obtain
$$
V^{\nabla_s}(s,x)+\mathcal{A}V(s,x)\leq-\frac{\alpha}{K^*}V(s_-,x).
$$
Thus, the conditions of Theorem \ref{thm2.2} are satisfied. The proof is complete.
\end{example}

\subsection*{Acknowledgements}
This research was supported in part by the
Vietnam National Foundation for Science and Technology Development (NAFOSTED)
under grant No. 101.03-2014.58, and in part by the Foundation for Science and
Technology Development of Vietnam's Ministry of Education and
Training, under grant No. B2015-27-15.


\begin{thebibliography}{10}

\bibitem{Ath}  K. B. Athreya, S. N. Lahiri;
\emph{Measure theory and probability theory}, Springer Science+Business Media,
LLC, 2006.

\bibitem{Pet}  M.  Bohner,   A. Peterson; 
\emph{Dynamic equations on time scales,} Birkh\"{a}user Boston, Massachusetts, 2001.

\bibitem{Bo} M. Bohner, O. M. Stanzhytskyi, A. O. Bratochkina; 
\emph{Stochastic dynamic equations on general time scales}, 
Electron. J. Differential Equations,  \textbf{2013}  (2013), No. 57, 15 pp.

  \bibitem{Da}  J. J. DaCunha; 
\emph{Stability for time varying linear dynamic systems on time scales}, 
J. Comput. Appl. Math., \textbf{176}  (2005), No. 2, 381-410.

 \bibitem{Den}  A. Deniz,  \"{U}. Ufuktepe;
\emph{Lebesgue - Stieltjes measure on time scales}, Turkish J. Math.,  
\textbf{33}  (2009), No. 1, 27-40.

\bibitem{NHDu1}N. H. Du,   N. T.  Dieu; 
\emph{The first attempt on the  stochastic calculus on time scale, } 
 Stoch. Anal. Appl. \textbf{29}  (2011), No. 6, 1057-1080.

\bibitem{NHDu2} N. H. Du, N. T.  Dieu; 
\emph{Stochastic dynamic equation on time scale}, Acta Math. Vietnam., 
\textbf{38}  (2013), No. 2, 317-338.

\bibitem{FK}   S. Foss, T.  Konstantopoulos;
\emph{An overview of some stochastic stability methods}, 
J. Oper. Res. Soc. Japan,  \textbf{47}  (2004), No. 4, 275-303.

\bibitem{Go}T. E. Govindan;
\emph{ Existence and stability of solutions of stochastic semilinear functional
 differential equations}, Stoch. Anal. Appl., \textbf{20} (2002), No. 6, 1257-1280.

\bibitem{GR} I. A. Gravagne, R. J. Robert;
\emph{Bilateral Laplace transforms on time scales: convergence, convolution, 
and the characterization of stationary stochastic time series},  
Circuits Systems Signal Process. \textbf{ 29} (2010), No. 6, 1141-1165.

\bibitem{GS} D. Grow, S. Sanyal;  
\emph{Brownian motion indexed by a time scale},  
Stoch. Anal. Appl., \textbf{29}  (2011), No. 3, 457-472.

\bibitem{Has} R. Z. Has'minskii;
 \emph{Stochastic stability of differential equations,} Sijthoff \& Noordhoff,
1980.

\bibitem{HT}  J. Hoffacker, C. C. Tisdell;
\emph{Stability and instability for dynamic equations on time scales}, 
Comput. Math. Appl., \textbf{49} (2005), No. 9-10, 1327-1334.

\bibitem{LL} C. Lungan, V. Lupulescu; 
\emph{ Random dynamical systems on time scales},
 Electron. J. Differential Equations,  \textbf{2012}, (2012),  No. 86,   14 pp.

\bibitem{Mao} X. Mao; 
\emph{Some contributions to stochastic asymptotic stability and boundedness 
via multiple Lyapunov functions}, J. Math. Anal. Appl.,  \textbf{260} (2001), 
No. 2, 325-340.

\bibitem{Mao1} X. Mao; 
\emph{ Exponential stability for stochastic differential equations with 
respect to semimartingale}, Stochastic Process. Appl.,  
\textbf{35} (1990), No. 2,   267-277.

\bibitem{Mar} A. A. Martynyuk;
\emph{Stability theory of solutions of dynamic equations  on time scales}.
Phoenix Publishers, Kiev 2012.

\bibitem{Pa}  B. Paternoster; 
\emph{Application of the general method of Lyapunov functionals construction 
for Difference Volterra Equations}, Comput. Math. Appl., 
\textbf{47} (2004), No. 8-9,  1165-1176.

\bibitem{Tar} A. Tartakovsky;
\emph{Asymptotically optimal sequential tests for nonhomogeneous processes},
Sequential Anal., \textbf{17}  (1998), No. 1, 33-61.

\bibitem{Ru} R. Rudnicki;
\emph{Long-time behaviour of a stochastic prey-predator model},
Stochastic Process. Appl., \textbf{108} (2003), No. 1, 93-107.

 \bibitem{San} S.  Sanyal;
\emph{Stochastic dynamic equations.} Ph.D. Dissertation, Applied Mathematics,
 Missouri University of Science and Technology, 2008.

\bibitem{Sh}  B. L. Shaikhet; 
\emph{Stability in probability of nonlinear stochastic difference equations}, 
Stab. Control Theory Appl.,  \textbf{2}  (1999), No. 1-2, 25-39.

\bibitem{Sh1} B. L. Shaikhet; 
\emph{About stability of nonlinear stochastic difference equations}, 
Appl. Math. Lett., \textbf{13}  (2000), No. 5, 27-32.

\bibitem{So} L. Socha;  
\emph{Exponential stability of singularly perturbed stochastic systems},  
IEEE Trans. Automat. Control, \textbf{45}  (2000), No. 3, 576-580.

\end{thebibliography}

\end{document}
