\documentclass[reqno]{amsart}
\usepackage{hyperref}
\usepackage{epic}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2014 (2014), No. 178, pp. 1--16.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu}
\thanks{\copyright 2014 Texas State University - San Marcos.}
\vspace{9mm}}

\begin{document}
\title[\hfilneg EJDE-2014/178\hfil Existence and uniqueness]
{Existence and uniqueness of  M-solutions for backward stochastic Volterra
integral equations}

\author[W. Li, R. Wu, K. Wang \hfil EJDE-2014/178\hfilneg]
{Wenxue Li, Ruihua Wu, Ke Wang}  % in alphabetical order

\address{Wenxue Li \newline
Department of Mathematics,
Harbin Institute of Technology (Weihai), Weihai 264209, China}
\email{wenxuetg@hitwh.edu.cn, Phone +86 0631 5687035, fax +86 0631 5687572}

\address{Ruihua Wu \newline
Department of Mathematics,
Harbin Institute of Technology (Weihai), Weihai 264209, China}
\email{wu\_ruihua@hotmail.com}

\address{Ke Wang \newline
Department of Mathematics,
Harbin Institute of Technology (Weihai), Weihai 264209, China}
\email{wangke@hitwh.edu.cn}

\thanks{Submitted August 2, 2013. Published August 21, 2014.}

\subjclass[2000]{45D05, 60H17, 34A12, 60H20}
\keywords{Backward stochastic Volterra integral
equations; existence;\hfill\break\indent  uniqueness; dynamic risk measure}

\begin{abstract}
 In this article, we study general backward stochastic Volterra integral
 equations (BSVIEs). Combining the contractive-mapping principle,
 step-by-step iteration method and mathematical induction, we
 establish the existence and uniqueness theorem of M-solution for the
 BSVIEs. This theorem could be applied directly to many models, for
 example, using the result to a kind of financial models provides a
 new and easy method to discuss the existence of dynamic risk
 measure. 
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{definition}[theorem]{Definition}
\allowdisplaybreaks

\section {Introduction}

Backward stochastic differential equations (BSDEs) and backward
stochastic Volterra integral equations (BSVIEs) are applied widely in
finance, stochastic control, etc.~\cite{yi,yong,121}. The
theoretical foundation of BSDEs has been established by Pardoux
and Peng~\cite{peng}. The development of BSDEs greatly promoted
the evolution of economics and finance. For example, the economists
Duffie and Epstein introduced BSDEs into economics in 1992, and
the stochastic analyst El Karoui et al.~\cite{N.E} discovered the
important role of BSDEs in finance. In 2006, Yong~\cite{yy}
introduced BSVIEs, which play a major role in studying the
properties of forward stochastic Volterra integral equations, which
describe stochastic optimal control problems with memory, and
in the proof of the stochastic Pontryagin maximum principle \cite{121},
etc. Besides, in \cite{122}, a class of dynamic convex and coherent
risk measures are identified as a component of the adapted
M-solutions to certain BSVIEs.

In this article, we consider general backward stochastic Volterra
integral equation (BSVIE) 
\begin{equation}\label{53}
\begin{split}
  Y(t)&=F(t,Y(t))+\int_t^Tg(t,s,Y(s),Y(t),Z(t,s),Z(s,t))\,\mathrm{d}s \\
  &\quad -\int_t^Th(t,s,Y(s),Y(t),Z(t,s))\,\mathrm{d}W(s),\quad t\in
[0,T].
\end{split}
\end{equation}
A class of important BSVIEs \eqref{53} is the following
\begin{equation}\label{3}
Y(t)=\xi+\int_t^Tg(s,Y(s),Z(s))\,\mathrm{d}s+\int_t^TZ(s)\,\mathrm{d}W(s) ,\quad t\in
[0,T].
\end{equation}
 Yong  \cite{yy} introduced another form of BSVIE \eqref{53}:
\begin{equation}\label{7}
Y(t)=\phi(t)-\int_t^Tg\Big(t,s,Y(s),Z(t,s),Z(s,t)\Big)\,\mathrm{d}s
-\int_t^TZ(t,s)\,\mathrm{d}W(s),\quad t\in [0,T],
\end{equation}
and gave the following definition of adapted solution.

\begin{definition}\label{de1} \rm
Any pair of stochastic processes $(Y(\cdot),Z(\cdot,\cdot))\in
\mathcal {H}^{1}[S,T]$ (defined as \eqref{124}) satisfying \eqref{7}
is called an adapted solution of \eqref{7}.
\end{definition}

Also, the conditions of the existence and uniqueness of the adapted
solution to \eqref{7} are given. However, it is difficult to
consider the uniqueness of the adapted solution to \eqref{7} under
Definition \ref{de1}. For example, for the  BSVIE
\begin{equation}\label{88}
Y(t)=\int_t^T g\Big(t,s,Y(s),Z(t,s)\Big)\,\mathrm{d}s-
\int_t^TZ(t,s)\,\mathrm{d}W(s),\quad t\in [0,T],
\end{equation}
in which $g$ satisfies the conditions of the existence and uniqueness
theorem, suppose $(Y(\cdot),Z(\cdot,\cdot))$ is the unique
adapted solution of \eqref{88}. But it is easy to check that
$(\hat{Y}(\cdot),\hat{Z}(\cdot,\cdot))$ defined by
\begin{gather*}
  \hat{Y}(t)=Y(t), \quad t\in [0,T] ,\\
  \hat{Z}(t,s)=Z(t,s),\quad (t,s)\in [0,T]\times[t,T] ,\\
  \hat{Z}(t,s)=\varsigma(t,s),\quad (t,s)\in [0,T]\times[0,t],
\end{gather*}
is also an adapted solution of \eqref{88} for any
$\varsigma(\cdot,\cdot)\in L_{\mathbb{F}}^2(0,T;\mathbb{R})$. This is
a contradiction.

The purpose of this article is to discuss the existence and uniqueness
of  adapted M-solutions (defined later), rather than the adapted 
solution to \eqref{53} in Definition \ref{de1}.
 We give some sufficient conditions for the existence and uniqueness 
of M-solution to \eqref{53}, by combining contractive-mapping principle, 
step-by-step iteration method and mathematical induction. 
These results could be applied directly to many models, 
such as those described as BSVIEs, in which a component of the M-solution of
BSVIEs has a close relation with the dynamic risk measure. By
applying the main result to the financial models, it provides a new
and easy method to discuss the existence of dynamic risk measure.

\section{Motivation and main results}

\subsection{Experimental motivation}
In this article, we use \eqref{53} to describe a class of economic
problems as certain portfolio, such as European option, some current
cash flows, mutual funds, etc. Here, $Y(t)$ denotes the price of
merchandise, $F(t,Y(t))$ stands for the total wealth of a certain
portfolio and $g$ is referred to as the generator of \eqref{53}.
Since the component $Y(t)$ of the M-solution to \eqref{53} has a
close relationship with the dynamic risk measure, it is significant
to consider the existence and uniqueness of the M-solution. Here
the dynamic risk measure is defined as follows:

\begin{definition}[\cite{122}] \rm
A map $\rho:L^2_{\mathcal {F}_{T}}(0,T)\to
L^2_{\mathbb{F}}(0,T)$ is called a dynamic risk measure if the
following conditions hold:
\begin{itemize}
  \item[1.] For any $\psi(\cdot), \bar{\psi}(\cdot) \in L^2_{\mathcal
  {F}_{T}}(0,T)$, if $\psi(\cdot)=\bar{\psi}(\cdot)$, a.s. $s\in [t,T]$,
  for some $t\in [0,T)$, then
  $$
\rho(t,\psi(\cdot))=\rho(t,\bar{\psi}(\cdot)),\quad\text{a.s.}.
$$

  \item[2.] For any $\psi(\cdot), \bar{\psi}(\cdot) \in L^2_{\mathcal
  {F}_{T}}(0,T)$, if $\psi(\cdot)\leq\bar{\psi}(\cdot)$, a.s. $s\in [t,T]$,
  for some $t\in [0,T)$, then
  $$
\rho(t,\psi(\cdot))\geq\rho(t,\bar{\psi}(\cdot)),\quad\text{a.s. }s\in [t,T].
$$
\end{itemize}
\end{definition}

In fact, the model is on the basis of some classical financial
models.  El Karoui et al \cite{N.E} described the problem of
European option pricing by applying BSDE \eqref{3}. In this case,
$\xi$ represents square-integrable contingent claim and
 $Y(t)$ represents the price of European option. For their model, 
if there exists unique
solution, then the map $\rho:\xi\to Y(t)$ defined by
\eqref{3} is a dynamic risk measure. Yong~\cite{122} extended
\eqref{3} into \eqref{7}. There $\phi(t)$ represents the total
wealth of certain portfolio. The author gave dynamic risk measure
 $\rho(t,\phi(t))=Y(t)$ for \eqref{7}.

\subsection {Model assumptions and novelty}
Now we give some assumptions for \eqref{53} such that it has unique
solution.
\begin{itemize}
  \item [(H1)]  Let $g:\Delta ^{c}\times \mathbb{R}^{m}\times
\mathbb{R}^{m}\times \mathbb{R}^{m\times d}\times
\mathbb{R}^{m\times d}\times \Omega\to \mathbb{R}^{m} $ be
$\mathfrak{B}(\Delta ^{c}\times \mathbb{R}^{m}\times
\mathbb{R}^{m}\times \mathbb{R}^{m\times d}\times
\mathbb{R}^{m\times d}) \otimes \mathcal {F}_{T}$-measurable, and
for all $(t,\zeta,\eta,\xi,\varsigma)\in[0,T]\times
\mathbb{R}^{m}\times \mathbb{R}^{m}\times \mathbb{R}^{m\times
d}\times \mathbb{R}^{m\times d}$,
$g(t,\cdot,\zeta,\eta,\xi,\varsigma)$ is $\mathbb{F}$-adapted and
satisfies
\begin{equation}\label{22} 
E\int_{0}^T\Big(\int_t^T|
g(t,s,0,0,0,0)| \,\mathrm{d}s\Big)^2\,\mathrm{d}t<\infty,
\end{equation}
and for all $(t,s)\in \Delta
^{c},\zeta,\bar{\zeta},\eta,\bar{\eta}\in
\mathbb{R}^{m},\xi,\bar{\xi},\varsigma,\bar{\varsigma}\in
\mathbb{R}^{m\times d}$
\begin{equation}\label{23}
\begin{split}
 &|g(t,s,\zeta,\eta,\xi,\varsigma)
-g(t,s,\bar{\zeta},\bar{\eta},\bar{\xi},\bar{\varsigma})|\\
&\leq L_{\zeta}(t,s)| \zeta-\bar{\zeta}|+L_{\eta}(t,s)|
\eta-\bar{\eta}|
+L_{\xi}(t,s)|\xi-\bar{\xi}|
+L_{\varsigma}(t,s)|\varsigma-\bar{\varsigma}|~~\text{a.s.},
\end{split}
\end{equation}
where for some $\varepsilon>0$,
$L_{\zeta}(t,s),L_{\eta}(t,s),L_{\xi}(t,s),L_{\varsigma}(t,s):\Delta
^{c}\to \mathbb{R}$ satisfy $$\sup_{t\in
[0,T]}\int_t^T\Big[L_{\zeta}(t,s)^{2+\varepsilon}+L_{\eta}(t,s)^{2+\varepsilon}
+L_{\xi}(t,s)^{2+\varepsilon}+
L_{\varsigma}(t,s)^{2+\varepsilon}\Big]\,\mathrm{d}s=A<\infty,$$ and
\begin{equation}\label{24}
\sup_{t\in [0,T]}\Big(\int_{0}^TL_{\eta}(t,s)
\,\mathrm{d}s\Big)^2=K<\frac{1}{8C^2},
\end{equation}
in which $C$ is the same as the one in \eqref{15}.

  \item [(H2)] Let $F:\mathbb{R}^{1}\times \mathbb{R}^{m}\times
\Omega\to \mathbb{R}^{m} $ be
$\mathfrak{B}(\mathbb{R}^{1}\times \mathbb{R}^{m})\otimes\mathcal
{F}_{T}$-measurable, and
\begin{equation}\label{45}
E\int_{0}^T| F(t,0)|\,\mathrm{d}t<\infty.
\end{equation}
Moreover,
\begin{equation}\label{46}
  \begin{gathered}
| F(t,\varsigma) -F(t,\bar{\varsigma})|\leq L_{\varsigma}(t)|
\varsigma-\bar{\varsigma}|,\quad t\in
\mathbb{R},\varsigma,\bar{\varsigma}\in
\mathbb{R}^{m} \text{ a.s.},  \\
\sup_{t\in[0,T]}|
L_{\varsigma}(t)|^2\leq D,  \\
2C_{L}D<1.
\end{gathered}
\end{equation}
hold.  Here $C_{L}$ is determined by \eqref{25}.

  \item [(H3)]  Let $h:\Delta ^{c}\times \mathbb{R}^{m}\times
\mathbb{R}^{m}\times \mathbb{R}^{m\times d}\times \Omega\to
\mathbb{R}^{m\times d} $ be $\mathfrak{B}(\Delta ^{c}\times
\mathbb{R}^{m}\times \mathbb{R}^{m}\times \mathbb{R}^{m\times d})
\otimes \mathcal {F}_{T}$-measurable, and for all
$(t,\zeta,\eta,\xi)\in[0,T]\times \mathbb{R}^{m}\times
\mathbb{R}^{m}\times \mathbb{R}^{m\times d}$,
$h(t,\cdot,\zeta,\eta,\xi)$ is $\mathbb{F}$-adapted and has the
following relations:
\begin{equation}\label{54}
\begin{gathered}
  |h(t,s,\zeta,\eta,\xi)
-h(t,s,\bar{\zeta},\bar{\eta},\bar{\xi})-(\xi-\bar{\xi})|^2\leq
L_{\zeta}|\zeta-\bar{\zeta}|^2+L_{\eta}|
\eta-\bar{\eta}|^2+L_{\xi}| \xi-\bar{\xi}|^2,  \\
    \forall(t,s)\in \Delta ^{c},\zeta,\bar{\zeta},\eta,\bar{\eta}\in
\mathbb{R}^{m},\; \xi,\bar{\xi}\in \mathbb{R}^{m\times d},\text{ a.s.},  \\
 E\int_{0}^T\int_t^T|
h(t,s,0,0,0)|^2\,\mathrm{d}s\,\mathrm{d}t <\infty, \\
\max\Big\{4C_{F}L_{\xi},C_{F}(L_{\zeta}+L_{\eta})T\Big\}<1,
\end{gathered}
\end{equation}
where $C_{F}$ is determined by \eqref{47}.
\end{itemize}
It is easy to verify that conditions (H1)--(H3) degenerate into
(H) in \cite{zzoo} when \eqref{53} reduces to \eqref{7}. In the rest of
the subsection, we will show the novelty of this paper from the following
two points of view: theory and application.

(1) It is noted that \eqref{3} can not show the
rule of the total wealth changing with the time; and both \eqref{3}
and \eqref{7} cannot build up the relation between the total wealth
with the price of merchandise. To get rid of the two defects, we
reconstruct the model as BSVIE \eqref{53}.

 In \cite{N.E} (or
\cite{122}), by building the relation between $\xi$ and $Y(t)$, (or
$\phi(t)$ and $Y(t)$), the dynamic risk measure for \eqref{3} (or
\eqref{7}) is found. However, model \eqref{53} gives directly the
explicit relation between $\varphi(t)$ and $F(t,Y(t))$, i.e.,
$\varphi(t)=F(t,Y(t))$. Hence, if $F(t,\cdot)$ has good properties,
we can arrive at a dynamic risk measure $F^{-1}:
\varphi(t)\to Y(t)$. Subsequently, in order to study the
properties of the dynamic risk measure, such as dynamic convex or
coherent risk measures, it only remains to restrict $F(t,\cdot)$
further. Then, applying the main results to this model, it is
convenient to find the dynamic risk measure.
(2) It is well known that the step-by-step
iteration method and the fixed point theorem are important methods to
prove the existence and uniqueness of solutions to equations
\cite{kh,maox,peng,yy,zzoo}. But sometimes, for equations having
complicated forms, it is hard to derive the existence and uniqueness
theorem by using only one of these methods. However, in this paper we
combine the step-by-step iteration method, the fixed point theorem,
mathematical induction and the martingale representation theorem to
provide a proof of the existence and uniqueness of the M-solution to
\eqref{53}. For different domains of definition of $Z(\cdot,\cdot)$,
we use different methods, as in Figure~\ref{fig1}.

\begin{figure}[ht]
\begin{center}
\setlength{\unitlength}{1mm}
\begin{picture}(75,45)(-5,0)
\put(0,35){\line(1,0){64}}
\dashline{1}(0,22)(64,22)
\put(-5,5){\line(1,0){74}}
\put(68.5,4.1){$\to$}
\put(0,0){\line(0,1){40}}
\put(-.85,40){$\uparrow$}
\dashline{1}(32,5)(32,35)
\put(64,5){\line(0,1){30}}
\put(3,30){Step-by-step}
\put(3,26){iteration method}
\put(37,30){Fixed point}
\put(37,26){theorem}
\put(3,17){Mathematical}
\put(3,13){induction}
\put(37,17){Martingale}
\put(37,13){representation}
\put(37,8){theorem}
\put(31,1){$S$}
\put(62.6,1){$T$}
\put(70,1){$t$}
\put(-4,21){$S$}
\put(-4,34){$T$}
\end{picture}
\end{center}
\caption{Diagram of methods used in different domains
  of definition of $Z(\cdot,\cdot)$ to prove the main result} \label{fig1}
\end{figure}


\subsection{Main results}

Firstly, we introduce the definition of M-solution, and then give
the main results of the paper: existence and uniqueness theorem of
M-solution to BSVIE \eqref{53}. The proof of it is left in the next
section.

\begin{definition}\label{M} \rm
A pair $(Y(\cdot),Z(\cdot,\cdot))\in \mathcal {H}^{1}[S,T]$ is
called an adapted M-solution of BSVIE \eqref{53} on $[S,T]$, if
$(Y(\cdot),Z(\cdot,\cdot))$ satisfies \eqref{53} in the usual
It\^{o} sense for almost all $t\in[S,T]$ and, in addition, the
following holds:
$$
Y(t)=E(Y(t)| \mathcal{F}_{S})+\int_{S}^{t}Z(t,s)\,\mathrm{d}W(s),\quad
\text{a.e. }t\in [S,T].
$$
\end{definition}

\begin{theorem}\label{main}
Let {\rm(H1)--(H3)} hold, then \eqref{53} admits a unique adapted
M-solution $(Y(\cdot),Z(\cdot,\cdot))$, and the following estimate
holds:
\begin{equation}\label{55}
\begin{split}
&\| (Y(\cdot),Z(\cdot,\cdot))\| ^2_{\mathcal {H}^2[R,T]}\\
&\leq C_{h}E\Big\{\int_{R}^T|
F(t,0)|^2\,\mathrm{d}t+ \int_{R}^T\Big(\int_t^T|
g(t,s,0,0,0,0)|
  \,\mathrm{d}s\Big)^2\,\mathrm{d}t\\
&\quad +\int_{R}^T\int_t^T|
h(t,s,0,0,0)|^2\,\mathrm{d}s\,\mathrm{d}t\Big\},\quad
R\in[0,T].
\end{split}
\end{equation}
Furthermore, if $\bar{g}, \bar{F}, \bar{h}$ satisfy {\rm (H1)--(H3)},
respectively, and $(\bar{Y}(\cdot),\bar{Z}(\cdot,\cdot))$ is the
M-solution, in which $(g,F,h)$ is replaced by
$(\bar{g},\bar{F},\bar{h})$. Then
\begin{equation}\label{56}
\begin{split}
 &E\Big[\int_{R}^T| Y(t)-\bar{Y}(t)|^2\,\mathrm{d}t+
\int_{R}^T\int_{R}^T| Z(t,s)-\bar{Z}(t,s)|^2\,\mathrm{d}s\,\mathrm{d}t\Big] \\
&\leq C_{h}E\Big\{\int_{R}^T|
F(t,Y(t))-\bar{F}(t,Y(t))|^2\,\mathrm{d}t\\
&\quad +\int_{R}^T\Big(\int_t^T|g(t,s,Y(s),Y(t),Z(t,s),Z(t,s))\\
&\quad -\bar{g}(t,s,Y(s),Y(t),Z(t,s),Z(t,s))|
  \,\mathrm{d}s\Big)^2\,\mathrm{d}t\\
&\quad +\int_{R}^T\int_t^T|
h(t,s,Y(s),Y(t),Z(t,s))-\bar{h}(t,s,Y(s),Y(t),Z(t,s))|^2
\,\mathrm{d}s\,\mathrm{d}t\Big\},
\end{split}
\end{equation}
for $R\in[0,T]$.
\end{theorem}

\section{Proof of the main theorem}

Before proving Theorem \ref{main}, we recall some useful
notions from \cite{zzoo}, and some lemmas. Let
\begin{gather*}
\begin{aligned}
   L^{p}_{\mathcal {F}_{S}}(\Omega; L^{q}(0,T))
=\Big\{&\phi:(0,T)\times\Omega\to \mathbb{R}^{m}
:\phi(\cdot) \text{ is }\mathcal {B}([   0,T])\otimes \mathcal {F}_{S}
 \text{-measurable}, \\
   & E\Big(\int_{0}^T| \phi(t)|^{q}\,\mathrm{d}t\Big)^{\frac{p}{q}}<
\infty\Big\}.
\end{aligned}
\\
 L^{p}_{\mathbb{F}}(\Omega;
L^{q}(0,T))=\Big\{\phi(\cdot)\in L^{p}(\Omega; L^{q}(0,T)):
\phi(\cdot)\text{ is } \mathbb{F}\text{-adapted} \Big\}.
\end{gather*}
For any $p,q \geq1$, let
$L^{q}(0,T;L^{p}_{\mathbb{F}}(\Omega;L^2(0,T)))$ be the set of all
processes $Z:[0,T]^2\times \Omega\to \mathbb{R}^{m\times
d} $, such that for almost all $t\in [0,T],Z(t,\cdot)\in
L^{p}_{\mathbb{F}}(\Omega;L^2(0,T))$, there is
$$
\int_{0}^T\Big\{E\Big(\int_{0}^T
|Z(t,s)|^2\,\mathrm{d}s\Big)
^{p/2}\Big\}^{q/p}\,\mathrm{d}t<\infty.
$$
For  convenience, denote
\begin{gather*} %\label{123}
\Delta[R,S]=\{(t,s)\in [R,S]^2: R\leq s\leq t\leq S\},  \\
\Delta ^{c}[R,S]=\{(t,s)\in [R,S]^2: R\leq t<s\leq S\},
\end{gather*}
and for any $0\leq R\leq S\leq T$,
\begin{equation}\label{124}
\mathcal{H}^{p}[R,S]= L^{p}_{\mathbb{F}}(\Omega; L^{p}(0,T))\times
L^{p}(0,T;L^{p}_{\mathbb{F}}(\Omega;L^2(0,T))).
\end{equation}
If we define
$$
\| y(\cdot),z(\cdot,\cdot)\|_{\mathcal {H}^2[R,S]} \equiv  \Big\{
E\big[\int_{R}^{S}|
y(t)|^2\,\mathrm{d}t+\int_{R}^{S} \int_{R}^{S}|
z(t,s)|^2\,\mathrm{d}s\,\mathrm{d}t\big]\Big\}^{1/2},
$$
then $\|\cdot\|_{\mathcal {H}^2[R,S]}$ defines a metric on
$\mathcal {H}^2[R,S]$, and clearly the space is complete under this
metric.

For any $R,S\in[0,T)$, consider the stochastic integral
equation
\begin{equation}\label{9}
\lambda(t,r)=\psi(t)+\int_{r}^Tk(t,s,\mu(t,s))\,\mathrm{d}s
-\int_{r}^T\mu(t,s)\,\mathrm{d}W(s),
\end{equation}
for $ r\in [R,T]$ and $t\in [S,T]$,
where $k:[S,T]\times [R,T]\times \mathbb{R}^{m\times d}\times
\Omega\to \mathbb{R}^{m}$ is given.
$(\lambda(t,\cdot),\mu(t,\cdot))$ is $\mathbb{F}$-adapted for any
$t\in [R,T]$. Introduce the following assumption for $k$:
\begin{itemize}
\item[(H0)]
Let $R,S\in[0,T)$, and $k:[S,T]\times [R,T]\times
\mathbb{R}^{m\times d}\times \Omega\to \mathbb{R}^{m} $ be
$\mathfrak{B}([S,T]\times [R,T]\times \mathbb{R}^{m\times d})
\otimes \mathcal {F}_{T}$-measurable such that $k(t,\cdot,z)$ is
$\mathbb{F}$-progressively measurable for $(t,z)\in[S,T]\times
R^{m\times d}$, and
\begin{equation}\label{10}
E\Big(\int_{R}^T| k(t,s,0)| \,\mathrm{d}s\Big)^{p}<\infty,\quad ~~a.e.~~t\in
[S,T],
\end{equation}
for some $p>1$. Moreover, the following holds:
$$| k(t,s,z)-k(t,s,\bar{z})|\leq L_{z}(t,s)| z-\bar{z}|,~~(t,s)\in [S,T]\times[R,T],~
z, \bar{z}\in \mathbb{R}^{m\times d},a.s,$$ where
$L_{z}:[S,T]\times[R,T]\to [0,\infty)$ is a deterministic
function, such that for some $\varepsilon>0$,$$\sup_{t\in
[S,T]}\int_{R}^TL_{z}(t,s)^{2+\varepsilon}\,\mathrm{d}s<\infty.$$
\end{itemize}
Let $r=S\in[R,T)$ be fixed. Define
$$
\psi^{S}(t)=\lambda(t,S),~Z(t,s)=\mu(t,s),\quad t\in [R,S],\;s\in
[S,T].
$$
Then \eqref{9} is rewritten as stochastic Fredholm integral
equations (SFIEs):
\begin{equation}\label{14}
\psi^{S}(t)=\psi(t)+\int_{r}^Tk(t,s,Z(t,s))\,\mathrm{d}s
-\int_{r}^TZ(t,s)\,\mathrm{d}W(s),\quad t\in [S,T].
\end{equation}
We call $(\psi^{S}(\cdot),Z(\cdot,\cdot))\in L^{p}_{\mathcal
{F}_{S}}(R,S)\times L^{p}(R,S;L^2_{\mathbb{F}}(S,T))$ as an
adapted solution of \eqref{14}, if it satisfies \eqref{14} in the
sense of It\^{o}.

\begin{lemma}[\cite{zzoo}]\label{le3.1}
Let {\rm (H0)} hold. Then for any $\psi(\cdot)\in L_{\mathcal
{F}_{T}}^{p}(R,S)$, \eqref{14} admits a unique adapted solution
$(\psi^{S}(\cdot),Z(\cdot,\cdot))\in L_{\mathcal
{F}_{S}}^{p}(R,S)\times L^{p}(R,S;L^2_{\mathbb{F}}(S,T))$, and the
following estimate holds:
\begin{equation}\label{15}
  E\Big\{|\psi^{S}(t)|^{p}+\Big(\int_{S}^T|Z(t,s)|^2
\,\mathrm{d}s\Big)^{p/2}\Big\}
 \leq
CE\Big\{|\psi(t)|^{p}+\Big(\int_{S}^T| k(t,s,0)|
  \,\mathrm{d}s\Big)^{p}\Big\},
\end{equation}
for $t\in[R,S]$.
If $\bar{k}:[R,S]\times [S,T]\times \mathbb{R}^{m\times d}\times
\Omega\to \mathbb{R}^{m}$ satisfies (H0),
$\bar{\psi}(\cdot)\in L_{\mathcal {F}_{T}}^{p}(R,S)$, and
$(\bar{\psi}^{S}(\cdot), \bar{Z}(\cdot,\cdot))\in L_{\mathcal
{F}_{S}}^{p}(R,S)\times L^{p}(R,S;L^2_{\mathbb{F}}(S,T))$ is the
unique adapted solution of \eqref{14} in which $(k,\psi)$ is
replaced by $(\bar{k},\bar{\psi})$, then
\begin{equation}\label{16}
\begin{split}
  &E\Big\{| \psi^{S}(t)-\bar{\psi^{S}}(t)|^{p}+
\Big(\int_{S}^T| Z(t,s)-\bar{Z}(t,s)|^2\,\mathrm{d}s\Big)^{p/2}\Big\}\\
&\leq CE\Big\{|\psi(t)-\bar{\psi}(t)|^{p}+\Big(\int_{S}^T|
k(t,s,Z(t,s))-\bar{k}(t,s,Z(t,s))|
  \,\mathrm{d}s\Big)^{p}\Big\},
\end{split}
\end{equation}
for $t\in[R,S]$.
\end{lemma}
Let $S=R$, and define 
\begin{gather*}
Y(t)=\lambda(t,t), \quad t\in[S,T], \\
Z(t,s)=\mu(t,s), \quad (t,s)\in\Delta ^{c}[S,T].
\end{gather*}
 Then \eqref{9} can be rewritten as
\begin{equation}\label{18}
Y(t)=\psi(t)+\int_t^Tk(t,s,Z(t,s))\,\mathrm{d}s
-\int_t^TZ(t,s)\,\mathrm{d}W(s),\quad t\in [S,T].
\end{equation}


\begin{lemma}[\cite{zzoo}]\label{le3.2}
Let {\rm (H0)} hold. Then for any 
$\psi(\cdot)\in L_{\mathcal{F}_{T}}^{p}(R,S)$, \eqref{18} admits a unique 
adapted M-solution
$(Y(\cdot),Z(\cdot,\cdot))\in \mathcal {H}^{p}[S,T]$, and the
following estimate holds:
\begin{equation}\label{19}
  E\Big\{| Y(t)|^{p}+\Big(\int_{S}^T| Z(t,s)|^2\,\mathrm{d}s\Big)^{p/2}\Big\}
 \leq
CE\Big\{|\psi(t)|^{p}+\Big(\int_t^T| k(t,s,0)|
  \,\mathrm{d}s\Big)^{p}\Big\},
\end{equation}
for $t\in[S,T]$.
If $\bar{k}:[R,S]\times [S,T]\times \mathbb{R}^{m\times d}\times
\Omega\to \mathbb{R}^{m}$ also satisfies {\rm (H0)},
$\bar{\psi}(\cdot)\in L_{\mathcal {F}_{T}}^{p}(S,T)$,
 and  $(\bar{Y}(\cdot), \bar{Z}(\cdot,\cdot))\in \mathcal {H}^{p}[S,T] $ is the
unique adapted M-solution of BSVIE \eqref{18} in which $(k,\psi)$ is
 replaced by $(\bar{k},\bar{\psi})$, then
\begin{equation}\label{20}
\begin{split}
  &E\Big\{| Y(t)-\bar{Y}(t)|^{p}+
\Big(\int_{S}^T| Z(t,s)-\bar{Z}(t,s)|^2\,\mathrm{d}s\Big)^{p/2}\Big\} \\
&\leq CE\Big\{|\psi(t)-\bar{\psi}(t)|^{p}+\Big(\int_t^T|
k(t,s,Z(t,s))-\bar{k}(t,s,Z(t,s))|
  \,\mathrm{d}s\Big)^{p}\Big\},
\end{split}
\end{equation}
for $t\in[S,T]$.
\end{lemma}

The proof of Theorem \ref{main} is split into three steps, in which we find solutions
for the three BSVIE's: \eqref{21}, \eqref{44}, and \eqref{53}.


\subsection{Existence and uniqueness of M-solution for the  BSVIE}
\begin{equation}\label{21}
Y(t)=\psi(t)+\int_t^Tg(t,s,Y(s),Y(t),Z(t,s),Z(s,t))\,\mathrm{d}s
-\int_t^TZ(t,s)\,\mathrm{d}W(s),
\end{equation}
for $t\in [0,T]$.

\begin{theorem}\label{dingli2}
Let {\rm (H1)} hold. Then for any $\psi(\cdot)\in L_{\mathcal
{F}_{T}}^2(0,T)$, \eqref{21} admits a unique adapted M-solution
$(Y(\cdot),Z(\cdot,\cdot))\in \mathcal {H}^2[0,T]$. Moreover, the
following estimate holds:
\begin{equation}\label{25}
\begin{split}
&\| (Y(\cdot),Z(\cdot,\cdot))\| ^2_{\mathcal{H}^2[R,T]}\\
&\equiv
  E\Big\{\int_{R}^T| Y(t)|^2\,\mathrm{d}t+\int_{R}^T
\int_{R}^T| Z(t,s)|^2\,\mathrm{d}s\,\mathrm{d}t\Big\}\\
&\leq
C_{L}E\Big\{\int_{R}^T|\psi(t)|^2\,\mathrm{d}t+\int_{R}^T\Big(\int_t^T|
g(t,s,0,0,0,0)|
  \,\mathrm{d}s\Big)^2\,\mathrm{d}t\Big\},\quad R\in[0,T].
\end{split}
\end{equation}
If $\bar{g}$ also satisfies (H1), $\bar{\psi}(\cdot)\in L_{\mathcal
{F}_{T}}^2(0,T)$, and $(\bar{Y}(\cdot),\bar{Z}(\cdot,\cdot))\in
\mathcal {H}^2[0,T] $ is the adapted M-solution of \eqref{21} in
which $(g,\psi)$ is replaced by $(\bar{g},\bar{\psi})$, then
\begin{equation}\label{26}
\begin{split}
&E\Big\{\int_{R}^T| Y(t)-\bar{Y}(t)|^2\,\mathrm{d}t+
\int_{R}^T\int_{R}^T| Z(t,s)-\bar{Z}(t,s)|^2\,\mathrm{d}s\,\mathrm{d}t\Big\} \\
&\leq C_{L}E\Big\{\int_{R}^T|\psi(t)-\bar{\psi}(t)|^2\,\mathrm{d}t+\int_{R}^T\Big(\int_t^T|
g(t,s,Y(s),Y(t),Z(t,s),Z(s,t))\\
&\quad -\bar{g}(t,s,Y(s),Y(t),Z(t,s),Z(s,t))|
  \,\mathrm{d}s\Big)^2\,\mathrm{d}t\Big\},\quad R\in[0,T].
\end{split}
\end{equation}
\end{theorem}

\begin{proof}
We split the proof into three steps.
\smallskip

\noindent\textbf{Step 1:} 
Let $\mathcal {M}^2[S,T]$ be a subspace of $\mathcal
{H}^2[S,T]$, and for any $(y(\cdot),z(\cdot,\cdot))\in\mathcal
{M}^2[S,T]$ satisfies
\begin{equation}\label{27}
y(t)=E(y(t)| \mathcal {F}_{S})+\int_{S}^{t}z(t,s)\,\mathrm{d}W(s),\quad ~a.e.~
t\in [S,T],\text{a.s.},
\end{equation}
and define
$$
\| y(\cdot),z(\cdot,\cdot)\|_{\mathcal {M}^2[S,T]} \equiv  \Big\{
E\big[\int_{S}^T|
y(t)|^2\,\mathrm{d}t+\int_{S}^T \int_t^T|
z(t,s)|^2\,\mathrm{d}s\,\mathrm{d}t\big]\Big\}^{1/2}.
$$
 Clearly,  $\mathcal{M}^2[S,T]$ is a nontrivial closed subspace of
$\mathcal{H}^2[S,T]$.

For any $\psi(\cdot)\in L_{\mathcal {F}_{T}}^2(S,T)$,
$(y(\cdot),z(\cdot,\cdot))\in\mathcal {M}^2[S,T]$, consider the
 BSVIE:
$$
Y(t)=\psi(t)+\int_t^Tg(t,s,y(s),y(t),Z(t,s),z(s,t))\,\mathrm{d}s
-\int_t^TZ(t,s)\,\mathrm{d}W(s),\quad t\in [S,T].
$$
 Applying Lemma \ref{le3.2}, this BSVIE admits a unique adapted M-solution
$(Y(\cdot),Z(\cdot,\cdot))$ in $\mathcal {H}^2[S,T]$. On the other
hand, $(Y(\cdot),Z(\cdot,\cdot))$ satisfies \eqref{27}, hence
$(Y(\cdot),Z(\cdot,\cdot))\in \mathcal {M}^2[S,T]$.

Define map $\Lambda:\mathcal {M}^2[S,T]\to\mathcal {M}^2[S,T]$ by
\begin{equation}\label{30}
\Lambda(y(\cdot),z(\cdot,\cdot))=(Y(\cdot),Z(\cdot,\cdot)).
\end{equation}
If $(\bar{y}(\cdot),\bar{z}(\cdot,\cdot))\in\mathcal {M}^2[S,T]$,
and
$\Lambda(\bar{y}(\cdot),\bar{z}(\cdot,\cdot))=(\bar{Y}(\cdot),\bar{Z}(\cdot,\cdot))$,
then by \eqref{20} we  obtain
\begin{align*} %\label{31}
&E\Big[| Y(t)-\bar{Y}(t)|^2+
\int_{S}^T| Z(t,s)-\bar{Z}(t,s)|^2\,\mathrm{d}s\Big] \\
&\leq CE\Big[\int_t^T|
g(t,s,y(s),y(t),Z(t,s),z(s,t))-\bar{g}(t,s,\bar{y}(s),\bar{y}(t),Z(t,s),\bar{z}(s,t))|
\,\mathrm{d}s\Big]^2\\
&\leq CE\Big[\int_t^T\Big( L_{\zeta}(t,s)|
y(s)-\bar{y}(s)|+L_{\eta}(t,s)| y(t)-\bar{y}(t)|
+L_{\varsigma}(t,s)| z(s,t)\\
&\quad -\bar{z}(s,t)| \Big) \,\mathrm{d}s\Big]^2 \\
&\leq
9CA^{\frac{2}{2+\varepsilon}}(T-t)^{\frac{\varepsilon}{2+\varepsilon}}E\Big[\int_t^T\Big(|
y(s)-\bar{y}(s)|^2+| y(t)-\bar{y}(t)|^2+|
z(s,t)\\
&\quad -\bar{z}(s,t)|^2\Big)\,\mathrm{d}s\Big].
\end{align*}
Consequently, we  obtain that
\begin{align*} %\label{32}
&\|\Lambda(y(\cdot),z(\cdot,\cdot))-\Lambda(\bar{y}(\cdot),\bar{z}(\cdot,\cdot))
\|_{\mathcal {M}^2[S,T]}^2 \\
&\equiv  E\Big[\int_{S}^T|
Y(t)-\bar{Y}(t)|^2\,\mathrm{d}t+\int_{S}^T \int_t^T|
Z(t,s)-\bar{Z}(t,s)|^2\,\mathrm{d}s\,\mathrm{d}t\Big] \\
&\leq 9CA^{\frac{2}{2+\varepsilon}}(T-t)^{\frac{\varepsilon}{2+\varepsilon}}
\max\{1,2(T-S)\}E\Big\{\int_{S}^T|
y(t)-\bar{y}(t)|^2\,\mathrm{d}t\\
&\quad +\int_{S}^T \int_t^T|
z(s,t)-\bar{z}(s,t)|^2\,\mathrm{d}s\,\mathrm{d}t\Big\}\\
&\leq C_{3}(T-S)^{\frac{\varepsilon}{2+\varepsilon}}\|
(y(\cdot),z(\cdot,\cdot))-(\bar{y}(\cdot),\bar{z}(\cdot,\cdot))
\|_{\mathcal {M}^2[S,T]}^2,
\end{align*}
where $C_{3}=9CA^{\frac{2}{2+\varepsilon}}\max\{1,2(T-S)\}$. So
$\Lambda:\mathcal {M}^2[S,T]\to\mathcal {M}^2[S,T]$ is
contracting, if $T-S$ is sufficiently small. Then there exists a
unique fixed point in $\mathcal {M}^2[S,T]$. Hence \eqref{21}
admits a unique adapted M-solution $(Y(\cdot),Z(\cdot,\cdot))\in
\mathcal {M}^2[S,T]$.

Now \eqref{19} yields 
\begin{equation}\label{99}
\begin{split}
&E\Big[| Y(t)|^2+\int_t^T| Z(t,s)|^2\,\mathrm{d}s\Big]\\
&\leq CE\Big\{|\psi(t)|^2+\Big(\int_t^T|
g(t,s,Y(s),Y(t),0,Z(s,t))|
  \,\mathrm{d}s\Big)^2\Big\} \\
&\leq 4CE\Big\{|\psi(t)|^2+\Big(\int_t^T|
g(t,s,0,0,0,0)|\,\mathrm{d}s\Big)^2\\
&\quad +\Big(\int_t^TL_{\zeta}(t,s)^2\,\mathrm{d}s\Big)\Big(
 \int_t^T| Y(s)|^2\,\mathrm{d}s\Big)
+\Big(\int_t^TL_{\eta}(t,s)^2\,\mathrm{d}s\Big)\Big(\int_t^T|
Y(t)|^2\,\mathrm{d}s\Big)\\
&\quad +\Big(\int_t^TL_{\varsigma}(t,s)^2\,\mathrm{d}s\Big)\Big(\int_t^T|
Z(s,t)|^2\,\mathrm{d}s\Big)\Big\}.
\end{split}
\end{equation}
Consequently,
\begin{align*} %\label{100}
&\| (Y(\cdot),Z(\cdot,\cdot))\| ^2_{\mathcal {H}^2[S,T]}\\
&\equiv E\Big[\int_{S}^T|
Y(t)|^2\,\mathrm{d}t+\int_{S}^T
\int_{S}^T| Z(t,s)|^2\,\mathrm{d}s\,\mathrm{d}t\Big]\\
&\leq
4CE\Big\{\int_{S}^T|\psi(t)|^2\,\mathrm{d}t+\int_{S}^T\Big(\int_t^T|
g(t,s,0,0,0,0)|
\,\mathrm{d}s\Big)^2\,\mathrm{d}t\\
&\quad +2(T-S)^{^{\frac{\varepsilon}{2+\varepsilon}+1}}A^{\frac{2}{2+\varepsilon}}\int_{S}^T|
Y(s)|^2\,\mathrm{d}s\\
&\quad +
(T-S)^{^{\frac{\varepsilon}{2+\varepsilon}}}A^{\frac{2}{2+\varepsilon}}\int_{S}^T\int_{S}^T|
Z(s,t)|^2\,\mathrm{d}s\,\mathrm{d}t\Big\}.
\end{align*}
If $T-S$ is so small that
$4C(T-S)^{^{\frac{\varepsilon}{2+\varepsilon}}}A\max\{2(T-S),1\}<1/2$,
then \eqref{25} holds with $C_{L}=8C$.

Next, let us consider the stability estimate. Let
$(Y(\cdot),Z(\cdot,\cdot))$ and
$(\bar{Y}(\cdot),\bar{Z}(\cdot,\cdot))$ be adapted M-solutions of
\eqref{21} corresponding to $(g,\psi)$ and $(\bar{g},\bar{\psi})$,
respectively. Denote
\begin{gather*}
\hat{Y}(t)=Y(t)-\bar{Y}(t) ,\quad 
\hat{Z}(t,s)=Z(t,s)-\bar{Z}(t,s),\quad 
\hat{\psi}(t)=\psi(t)-\bar{\psi}(t)\\
\hat{g}(t,s)=g(t,s,Y(s),Y(t),Z(t,s),Z(s,t))-\bar{g}(t,s,Y(s),Y(t),Z(t,s),Z(s,t)).
\end{gather*} 
Then by the Hadamard formula, we obtain
\begin{align*}
 \hat{Y}(t)
&=\hat{\psi}(t)+\int_t^T\Big[\alpha_{1}(t,s)\hat{Y}(s)+\alpha_{2}(t,s)\hat{Y}(t)\\
&\quad +\sum_{i=1}^{d}
(\beta_{i}(t,s)\hat{Z}_{i}(t,s)+\bar{\beta}_{i}(t,s)\hat{Z}_{i}(s,t))+\hat{g}(t,s)\Big]\,\mathrm{d}s
-\int_t^T\hat{Z}(t,s)\,\mathrm{d}W(s).
\end{align*}
Applying \eqref{25}, we have the stability estimate \eqref{26} holds
with $C_{L}=8C$.

In this step we determine the unique solution $(Y(t),Z(t,s))$ to
\eqref{21} for $t,s\in [S,T]$.
\smallskip

\noindent\textbf{Step 2:} 
Since $E[Y(t)| \mathcal {F}_{S}]\in L^2(S,T; L^2_{\mathcal
{F}_{S}}(\Omega))$, by the Martingale Representation Theorem we
could find a unique $Z(\cdot,\cdot)\in L^2(S,T; L^2_{\mathcal
{F}_{S}}(2S-T,S))$ such that
\begin{equation}\label{33}
E[Y(t)| \mathcal {F}_{S}]=E[Y(t)| \mathcal
{F}_{2S-T}]+\int_{2S-T}^{S}Z(t,s)\,\mathrm{d}W(s),\quad t\in [S,T].
\end{equation}
By \eqref{33} we conclude that
$$
E\int_{2S-T}^{S}| Z(t,s)|^2\,\mathrm{d}s=E| Y(t)|^2-| EY(t)|^2,
\quad t\in [S,T].
$$
Furthermore,
\begin{align*}
&E\int_{S}^T\int_{2S-T}^{S}|Z(t,s)|^2\,\mathrm{d}s\,\mathrm{d}t\\
&\leq E\int_{S}^T| Y(t)|^2\,\mathrm{d}t \\
&\leq 8CE\Big\{\int_{S}^T|\psi(t)|^2\,\mathrm{d}t+\int_{S}^T\Big(\int_t^T|
g(t,s,0,0,0,0)|
  \,\mathrm{d}s\Big)^2\,\mathrm{d}t\Big\}.
\end{align*}
In this step we determine the unique solution $(Y(t),Z(t,s))$ of
\eqref{21} for $t\in [S,T],s\in[2S-T,S]$.
\smallskip

\noindent\textbf{Step 3:} 
Denote
\begin{gather*}
Y_{1}(t)=Y(t),\quad t\in [2S-T,S],\quad 
Y_{2}(t)=Y(t),\quad t\in [S,T],\\
Z_{11}(t,s)=Z(t,s),\quad (t,s)\in [2S-T,S]\times[S,T],\quad
Z_{12}(t,s)=Z(t,s),\quad (t,s)\in [S,T]\times[S,T],  \\
Z_{21}(t,s)=Z(t,s),\quad (t,s)\in [2S-T,S]\times[2S-T,S],\\
Z_{22}(t,s)=Z(t,s),\quad (t,s)\in [S,T]\times[2S-T,S].
\end{gather*}
Set $Y_{1}^{(0)}(t)=0$, and for $n=1,2,\dots$, define the Picard
iterations:
\begin{equation}\label{10000}
\begin{gathered}
  \begin{aligned}
\psi^{S}_{n}(t)
&=\psi(t)+\int_{S}^T
g(t,s,Y_{2}(s),Y_{1}^{(n-1)}(t),Z_{11}(t,s),Z_{22}(s,t))\,\mathrm{d}s\\
&\quad -\int_{S}^TZ_{11}(t,s)\,\mathrm{d}W(s),
\end{aligned} \\
\begin{aligned}
    Y_{1}^{(n)}(t)
&=\psi^{S}_{n}(t)+\int_t^{S}g(t,s,Y_{1}^{(n)}(s),Y_{1}
^{(n)}(t),Z_{21}^{(n)}(t,s),Z_{21}^{(n)}(s,t))\,\mathrm{d}s \\
&\quad -\int_t^{S}Z_{21}^{(n)}(t,s)\,\mathrm{d}W(s),
\end{aligned}
\end{gathered}
\end{equation}
for $t\in [2S-T,S]$. Obviously,
$(Y_{1}^{(n)}(t),Z_{21}^{(n)}(t,s))\in\mathcal {M}^2[2S-T,S]$.
Moreover, by stability estimate \eqref{26} we can obtain
\begin{align*} %\label{37}
&E\Big[\int_{2S-T}^{S}| Y_{1}^{(n)}(t)-Y_{1}^{(n-1)}(t)|^2\,\mathrm{d}t+
\int_{2S-T}^{S}\int_{2S-T}^{S}| Z_{21}^{(n)}(t,s)-Z_{21}^{(n-1)}(t,s)|^2
 \,\mathrm{d}s\,\mathrm{d}t\Big] \\
&\leq 8CE\Big\{\int_{2S-T}^{S}|
\psi^{S}_{n}(t)-\psi^{S}_{n-1}(t)|^2\,\mathrm{d}t\Big\}.
\end{align*}
By \eqref{24} and \eqref{16}, it is not difficult to see that
\begin{align*} %\label{38}
&E\Big[| \psi^{S}_{n}(t)-\psi^{S}_{n-1}(t)|^2+
\int_{S}^T|
Z_{11}^{(n)}(t,s)-Z_{11}^{(n-1)}(t,s)|^2\,\mathrm{d}s\Big]\\
&\leq CKE|Y_{1}^{(n-1)}(t)-Y_{1}^{(n-2)}(t)|^2,
\end{align*}
and
\begin{equation*}
    8CE\int_{2S-T}^{S}|\psi_{n}^{S}(t)-\psi_{n-1}^{S}(t)|^2\,\mathrm{d}t
\leq 8C^2KE\int_{2S-T}^{S}|
Y_{1}^{(n-1)}(t)-Y_{1}^{(n-2)}(t)|^2\,\mathrm{d}t.
\end{equation*}
Consequently,
\begin{align*}
&E\Big[\int_{2S-T}^{S}| Y_{1}^{(n)}(t)-Y_{1}^{(n-1)}(t)|^2\,\mathrm{d}t+
\int_{2S-T}^{S}\int_{2S-T}^{S}|
Z_{21}^{(n)}(t,s)-Z_{21}^{(n-1)}(t,s)|^2\,\mathrm{d}s\,\mathrm{d}t\Big] \\
&\leq 8C^2KE\int_{2S-T}^{S}|
Y_{1}^{(n-1)}(t)-Y_{1}^{(n-2)}(t)|^2\,\mathrm{d}t\\
&\leq \cdots \leq (8C^2K)^{n-1}E\int_{2S-T}^{S}|
Y_{1}^{(1)}(t)-Y_{1}^{(0)}(t)|^2\,\mathrm{d}t.
\end{align*}
 By \eqref{24} it is easy to see that $8C^2K<1$; hence
$(Y^{(n)}_{1}(\cdot),Z^{(n)}_{21}(\cdot,\cdot))$ and
$(\psi^{S}_{n}(\cdot),Z_{11}^{(n)}(\cdot,\cdot))$ are Cauchy
sequences on $ \mathcal {M}^2[2S-T,S]$ and $L_{\mathcal
{F}_{T}}^{p}(S,T)\times L^{p}(2S-T,S;L^2_{\mathbb{F}}(S,T))$,
respectively. Letting $n\to  \infty$ in \eqref{10000}, we obtain the unique
 adapted M-solution $(Y(t),Z(t,s))$ of \eqref{21}
for $t\in[2S-T,S]$, $s\in[2S-T,T]$.

Now, we give the estimate of the solution for
$(t,s)\in[2S-T,S]\times[2S-T,T]$. Since
\begin{equation*}\label{250}
\begin{split}
  &E\Big[\int_{2S-T}^{S}| Y_{1}(t)|^2\,\mathrm{d}t+\int_{2S-T}^{S}
\int_{2S-T}^{S}| Z_{21}(t,s)|^2\,\mathrm{d}s\,\mathrm{d}t\Big]\\
 &\leq
8CE\Big\{\int_{2S-T}^{S}|\psi^{S}(t)|^2\,\mathrm{d}t+\int_{2S-T}^{S}\Big(\int_t^{S}|
g(t,s,0,0,0,0)|
  \,\mathrm{d}s\Big)^2\,\mathrm{d}t\Big\},
\end{split}
\end{equation*}
and
\begin{align*}%\label{115}
  &E\Big[|\psi^{S}(t)|^2+\int_{S}^T| Z_{11}(t,s)|^2\,\mathrm{d}s\Big]\\
&\leq  CE\Big\{| \psi(t)|^2+\Big(\int_{S}^T|
g(t,s,Y_{2}(s),Y_{1}(t),0,Z_{22}(t,s))|
  \,\mathrm{d}s\Big)^2\Big\}\\
&\leq
CE\Big\{|\psi(t)|^2+4\Big(\int_{S}^T|
g(t,s,0,0,0,0)|
  \,\mathrm{d}s\Big)^2\Big\}\\
  &\quad +4C(T-S)^{\frac{\varepsilon}{2+\varepsilon}}A^{\frac{2}{2+\varepsilon}}E\Big[\int_{S}^T|
Y_{2}(s)|^2
  \,\mathrm{d}s\\
  &\quad +\int_{S}^T|
Z_{22}(s,t)|^2
  \,\mathrm{d}s\Big]+4C(T-S)^{\frac{\varepsilon}{2+\varepsilon}+1}A^{\frac{2}{2+\varepsilon}}E|
Y_{1}(t)|^2.
\end{align*}
So, if
$32(T-S)^{\frac{2\varepsilon}{2+\varepsilon}}A^{\frac{2}{2+\varepsilon}}C^2
\max\{1,T-S\}<\frac{1}{2}$,
we have
\begin{equation}\label{10001}
\begin{split}
  &E\Big[\int_{2S-T}^{S}| \psi^{S}(t)|^2\,\mathrm{d}t+\int_{2S-T}^{S}
  \int_{S}^T| Z_{11}(t,s)|^2\,\mathrm{d}s\,\mathrm{d}t\Big] \\
&\leq (8C+3)E\Big\{\int_{2S-T}^T|\psi(t)|^2\,\mathrm{d}t+\int_{2S-T}^T\Big(\int_t^T|
g(t,s,0,0,0,0)|
  \,\mathrm{d}s\Big)^2\,\mathrm{d}t\Big\},
\end{split}
\end{equation}
and 
\begin{equation}\label{10002}
\begin{split}
  &E\Big[\int_{2S-T}^{S}| Y_{1}(t)|^2\,\mathrm{d}t+\int_{2S-T}^{S}
\int_{2S-T}^{S}| Z_{21}(t,s)|^2\,\mathrm{d}s\,\mathrm{d}t\Big]\\
 &\leq (8C+4)E\Big\{\int_{2S-T}^T|\psi(t)|^2\,\mathrm{d}t+\int_{2S-T}^T\Big(\int_t^T|
g(t,s,0,0,0,0)|
  \,\mathrm{d}s\Big)^2\,\mathrm{d}t\Big\}.
\end{split}
\end{equation}
Combining \eqref{10001} and \eqref{10002}, we  show that
\begin{align*}
&E\Big\{\int_{2S-T}^T|
Y(t)|^2\,\mathrm{d}t+\int_{2S-T}^T \int_{2S-T}^T|
Z(t,s)|^2\,\mathrm{d}s\,\mathrm{d}t\Big\}\\
 &\leq
(8C+4)E\Big\{\int_{2S-T}^T|\psi(t)|^2\,\mathrm{d}t+
  \int_{2S-T}^T\Big(\int_t^T| g(t,s,0,0,0,0)|
  \,\mathrm{d}s\Big)^2\,\mathrm{d}t\Big\}.
\end{align*}
Similar to Step 1, the stability estimate \eqref{26} holds for $t\in
[2S-T,T]$. Then we can use the induction method to finish the proof of the theorem.
\end{proof}


\subsection{Existence and uniqueness of M-solution for the  BSVIE}
\begin{equation}\label{44}
\begin{aligned}
Y(t)&=F(t,Y(t))+\int_t^Tg(t,s,Y(s),Y(t),Z(t,s),Z(s,t))\,\mathrm{d}s\\
&\quad -\int_t^TZ(t,s)\,\mathrm{d}W(s),\quad t\in [0,T].
\end{aligned}\end{equation}

\begin{theorem} \label{thm3}
Assume that {\rm (H1)} and {\rm (H2)} hold for \eqref{44}. Then \eqref{44}
admits a unique adapted M-solution $(Y(\cdot),Z(\cdot,\cdot))\in \mathcal
{H}^2[0,T]$, and the following estimate holds:
\begin{equation}\label{47}
\begin{split}
&\| (Y(\cdot),Z(\cdot,\cdot))\| ^2_{\mathcal {H}^2[R,T]}\\
&\leq
C_{F}E\Big\{\int_{R}^T|
F(t,0)|^2\,\mathrm{d}t+\int_{R}^T\Big(\int_t^T|
g(t,s,0,0,0,0)|
  \,\mathrm{d}s\Big)^2\,\mathrm{d}t\Big\},\quad R\in[0,T],
\end{split}
\end{equation}
where $C_{F}=\frac{2C_{L}}{1-2C_{L}D}$. Furthermore, if $\bar{g}$
also satisfies (H1), $\bar{F}$ satisfies (H2), and
$(\bar{Y}(\cdot),\bar{Z}(\cdot,\cdot))\in \mathcal {H}^2[0,T] $
is the adapted M-solution of \eqref{44} in which $(g,F)$ is
replaced by $(\bar{g},\bar{F})$, then
\begin{equation}\label{484848}
\begin{split}
&E\Big[\int_{R}^T|Y(t)-\bar{Y}(t)|^2\,\mathrm{d}t+
\int_{R}^T\int_{R}^T|Z(t,s)-\bar{Z}(t,s)|^2\,\mathrm{d}s\,\mathrm{d}t\Big] \\
&\leq  C_{F}E\Big\{\int_{R}^T|
F(t,Y(t))-\bar{F}(t,Y(t))|^2\,\mathrm{d}t\\
&\quad +\int_{R}^T\Big(\int_t^T| g(t,s,Y(s),Y(t),Z(t,s),Z(s,t))\\
&\quad -\bar{g}(t,s,Y(s),Y(t),Z(t,s),Z(s,t))|
  \,\mathrm{d}s\Big)^2\,\mathrm{d}t\Big\},\quad R\in[0,T].
\end{split}
\end{equation}
\end{theorem}

\begin{proof}
Let $Y^{(0)}(t)=0$. Since $F(t,Y^{(0)}(t)) \in L_{\mathcal
{F}_{T}}^2(0,T)$, for $n=1,2,\cdots$ we can define the Picard
iterations:
\begin{equation}\label{51}
\begin{split}
  Y^{(n)}(t)
&=F(t,Y^{(n-1)}(t))+\int_t^Tg(t,s,Y^{(n)}(s),Y^{(n)}(t),Z^{(n)}(t,s),Z^{(n)}(s,t))\,\mathrm{d}s \\
&\quad -\int_t^TZ^{(n)}(t,s)\,\mathrm{d}W(s),\quad t\in [0,T].
\end{split}
\end{equation} 
In view of \eqref{26}, we have that
\begin{align*}%\label{52}
  &E\Big[\int_{0}^T| Y^{(n)}(t)-Y^{(n-1)}(t)|^2\,\mathrm{d}t+
\int_{0}^T\int_{0}^T| Z^{(n)}(t,s)-Z^{(n-1)}(t,s)|^2\,\mathrm{d}s\,\mathrm{d}t\Big] \\
&\leq  C_{L}D E\int_{0}^T|
Y^{(n-1)}(t)-Y^{(n-2)}(t)|^2 \,\mathrm{d}t \leq \cdots \\
&\leq (C_{L}D)^{n-1}E\int_{0}^T|
Y^{(1)}(t)-Y^{(0)}(t)|^2 \,\mathrm{d}t.
\end{align*}
By \eqref{46} we are sure that
$(Y^{(n)}(\cdot),Z^{(n)}(\cdot,\cdot))$ is a Cauchy sequence on 
$\mathcal {M}^2[0,T]$. Letting $n\to  \infty$ in \eqref{51}, we obtain
the unique solution of \eqref{44}.
From \eqref{25}, we have
\begin{equation}\label{101}
\begin{aligned}
&E\Big[\int_{0}^T|
Y(t)|^2\,\mathrm{d}t+\int_{0}^T
\int_{0}^T| Z(t,s)|^2\,\mathrm{d}s\,\mathrm{d}t\Big]\\
&\leq  C_{L}E\Big\{2\int_{0}^T|F(t,0)|^2\,\mathrm{d}t+2D\int_{0}^T|
Y(t)|^2\,\mathrm{d}t\\
&\quad +\int_{0}^T\Big(\int_t^T|
g(t,s,0,0,0,0)| \,\mathrm{d}s\Big)^2\,\mathrm{d}t\Big\}.
\end{aligned}
\end{equation}
So from \eqref{46} and \eqref{101} we conclude that
\begin{align*}
&E\Big[\int_{0}^T|
Y(t)|^2\,\mathrm{d}t+\int_{0}^T
\int_{0}^T| Z(t,s)|^2\,\mathrm{d}s\,\mathrm{d}t\Big]\\
&\leq  C_{F}E\Big\{\int_{0}^T| F(t,0)|^2\,\mathrm{d}t
+\int_{0}^T\Big(\int_t^T| g(t,s,0,0,0,0)|
\,\mathrm{d}s\Big)^2\,\mathrm{d}t\Big\},
\end{align*}
where $C_{F}=\frac{2C_{L}}{1-2C_{L}D}$. Similar to Step 1 of Theorem
\ref{dingli2}, the stability estimate holds. The proof is complete.
\end{proof}

\subsection{Existence and uniqueness of M-solution for \eqref{53}}

\begin{proof}
Let $(Y^{(0)}(t),Z^{(0)}(t,s))=(0,0)$, then for $n=1,2,\cdots$ we
can define the Picard iterations:
\begin{equation}\label{60}
\begin{split}
  Y^{(n)}(t)
&=F(t,Y^{(n-1)}(t))-\int_t^T\Big[h(t,s,Y^{(n-1)}(s),Y^{(n-1)}(t),Z^{(n-1)}(t,s))\\
&\quad -Z^{(n-1)}(t,s)\Big]  \,\mathrm{d}W(s)\\
&\quad +\int_t^Tg(t,s,Y^{(n)}(s),Y^{(n)}(t),Z^{(n)}(t,s),Z^{(n)}(s,t))\,\mathrm{d}s\\
&\quad -\int_t^TZ^{(n)}(t,s)\,\mathrm{d}W(s),\quad t\in [0,T].
\end{split}
\end{equation}
By \eqref{484848} and  It\^{o}'s isometry, we have
\begin{equation}\label{61}
\begin{split}
E&\Big[\int_{0}^T| Y^{(n)}(t)-Y^{(n-1)}(t)|^2\,\mathrm{d}t+
    \int_{0}^T\int_{0}^T| Z^{(n)}(t,s)-Z^{(n-1)}(t,s)|^2\,\mathrm{d}s\,\mathrm{d}t\Big] \\
&\leq
C_{F}E\Big\{\int_{0}^T\int_t^T|
h(t,s,Y^{(n-1)}(s),Y^{(n-1)}(t),Z^{(n-1)}(t,s))-Z^{(n-1)}(t,s)\\
&\quad -
[h(t,s,Y^{(n-2)}(s),Y^{(n-2)}(t),Z^{(n-2)}(t,s))-Z^{(n-2)}(t,s)]|^2\,\mathrm{d}s\,\mathrm{d}t\Big\}\\
&\leq
C_{F}L_{\xi}E\int_{0}^T\int_{0}^T|
Z^{(n-1)}(t,s)-Z^{(n-2)}(t,s)|^2\,\mathrm{d}s\,\mathrm{d}t\\
&\quad +C_{F}(L_{\zeta}+L_{\eta})TE\int_{0}^T|Y^{(n-1)}(s)-Y^{(n-2)}(s)|^2
\,\mathrm{d}s.
\end{split}
\end{equation}
By \eqref{54}, we see that
$\max\{C_{F}L_{\xi},C_{F}(L_{\zeta}+L_{\eta})T\}=M<1$, and then
\begin{align*}
&E\Big[\int_{0}^T|
Y^{(n)}(t)-Y^{(n-1)}(t)|^2\,\mathrm{d}t+
\int_{0}^T\int_{0}^T| Z^{(n)}(t,s)-Z^{(n-1)}(t,s)|^2\,\mathrm{d}s\,\mathrm{d}t\Big] \\
&\leq  ME\Big[\int_{0}^T|
Y^{(n-1)}(t)-Y^{(n-2)}(t)|^2\,\mathrm{d}t\\
&\quad +\int_{0}^T\int_{0}^T|Z^{(n-1)}(t,s)-Z^{(n-2)}(t,s)|^2\,\mathrm{d}s
\,\mathrm{d}t\Big]
\leq \cdots \\
&\leq M^{n-1}E\Big[\int_{0}^T|
Y^{(1)}(t)-Y^{(0)}(t)|^2\,\mathrm{d}t+
\int_{0}^T\int_{0}^T|
Z^{(1)}(t,s)-Z^{(0)}(t,s)|^2\,\mathrm{d}s\,\mathrm{d}t\Big]
\end{align*}
holds. So we see that $(Y^{(n)}(\cdot),Z^{(n)}(\cdot,\cdot))$ is a
Cauchy sequence on $ \mathcal {M}^2[0,T]$. Letting $n\to
 \infty$ in \eqref{60}, we obtain the unique adapted  M-solution of \eqref{53}.
From \eqref{47}, we have
\begin{align*} %\label{470}
  &E\Big[\int_{0}^T| Y(t)|^2\,\mathrm{d}t+\int_{0}^T
\int_{0}^T| Z(t,s)|^2\,\mathrm{d}s\,\mathrm{d}t\Big]\\
 &\leq
C_{F}E\Big\{\int_{0}^T|
F(t,0)-\int_t^T\Big[h(t,s,0,0,Z(t,s))-Z(t,s)\Big]\,\mathrm{d}W(s)|^2\,\mathrm{d}t\\
&\quad +\int_{0}^T\Big(\int_t^T| g(t,s,0,0,0,0)|
  \,\mathrm{d}s\Big)^2\,\mathrm{d}t\Big\}\\
&\leq  C_{F}E\Big\{\int_{0}^T|
F(t,0)|^2\,\mathrm{d}t+4L_{\xi}\int_{0}^T\int_{0}^T|
Z(t,s)|^2\,\mathrm{d}s\,\mathrm{d}t\\
&\quad +4\int_{0}^T\int_t^T|
h(t,s,0,0,0)|^2\,\mathrm{d}s\,\mathrm{d}t+\int_{0}^T\Big(\int_t^T|
g(t,s,0,0,0,0)|
  \,\mathrm{d}s\Big)^2\,\mathrm{d}t\Big\}.
\end{align*}
So
\begin{equation}\label{55b}
\begin{split}
  &E\Big[\int_{0}^T| Y(t)|^2\,\mathrm{d}t+\int_{0}^T
\int_{0}^T| Z(t,s)|^2\,\mathrm{d}s\,\mathrm{d}t\Big]\\
 &\leq C_{h}E\Big\{\int_{0}^T| F(t,0)|^2\,\mathrm{d}t+
\int_{0}^T\Big(\int_t^T| g(t,s,0,0,0,0)|
  \,\mathrm{d}s\Big)^2\,\mathrm{d}t\\
 &\quad +\int_{0}^T\int_t^T|
h(t,s,0,0,0)|^2\,\mathrm{d}s\,\mathrm{d}t\Big\},
\end{split}
\end{equation}
where $C_{h}=\frac{4C_{F}}{1-4L_{\xi}C_{F}}$. Similar to Step 1 of
Theorem \ref{dingli2}, applying the Hadamard formula it is not difficult to
see that \eqref{56} holds.
\end{proof}

\subsection*{Acknowledgements}
This research was supported by the NNSF of China 
(Nos. 11301112, 11171081 and 11171056), 
by the NNSF of Shandong Province (No. ZR2013AQ003),
by China Postdoctoral Science Foundation funded project 
(Nos. 2013M541352, 2014T70313),  by
HIT.IBRSEM.A.2014014 and by the Key Project of Science and Technology of Weihai 
(No. 2013DXGJ04).

\begin{thebibliography}{00}
\bibitem{kh} K. Bahlali;
\emph{Backward stochastic differential equations with locally Lipschitz
coefficient}, C. R. Acad. Sci. Paris, S\'er. I, 333 (2001) 481-486.

\bibitem {N.E} N. El Karoui, S. Peng, M. C. Quenez;
\emph{Backward stochastic differential equations in finance}, Mathematical Finance.
7 (1997) 1-71.

\bibitem{yi} Y. Hu, J. Ma;
\emph{Nonlinear Feynman-Kac formula and discrete-functional-type BSDEs with 
continuous coefficients}, Stochastic Process Appl. 112 (2004) 23-51.

\bibitem{maox} X. Mao;
\emph{Adapted solution of backward stochastic differential equation with 
non-Lipschitz coefficients}, Stochastic Process Appl. 58 (1995) 281-292.

\bibitem{peng} E. Pardoux, S. Peng;
\emph{Adapted solution of backward stochastic equations}, 
Systems Control Lett. 14 (1990) 55-61.

\bibitem{yong} J. Yong;
\emph{Completeness of security markets and solvability of linear backward stochastic
differential equations}, J. Math. Anal. Appl. 319 (2006) 333-356.

\bibitem{121}J. Yong,  X. Zhou;
\emph{Stochastic Controls: Hamiltonian Systems and HJB  Equations}, 
Springer, New York (1999).

\bibitem{yy} J. Yong;
\emph{Backward stochastic Volterra integral equations and some related problems}, 
Stochastic Process Appl. 116 (2006) 779-795.

\bibitem{122} J. Yong;
\emph{Continuous-time dynamic risk measures by
backward stochastic Volterra integral equations}, Applicable
Analysis. 86 (2007) 1429-1442.

\bibitem{zzoo} J. Yong;
\emph{Well-Posedness and regularity of backward stochastic Volterra integral 
equations}, Probability Theory and Related Fields, 142 (2008) 21-77.

\end{thebibliography}

\end{document}
