\documentclass[reqno]{amsart}
\usepackage{hyperref}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2011 (2011), No. 92, pp. 1--30.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu}
\thanks{\copyright 2011 Texas State University - San Marcos.}
\vspace{9mm}}

\begin{document}
\title[\hfilneg EJDE-2011/92\hfil Time-dependent domains]
{Time-dependent domains for nonlinear evolution operators and
partial differential equations}

\author[C.-Y. Lin\hfil EJDE-2011/92\hfilneg]
{Chin-Yuan Lin}

\dedicatory{Dedicated to Professor Jerome A. Goldstein
on his 70th birthday}

\address{Chin-Yuan Lin \newline
 Department of Mathematics \\
 National Central University \\
 Chung-Li 320, Taiwan}
\email{cylin@math.ncu.edu.tw}

\thanks{Submitted June 15, 2011. Published July 18, 2011.}
\subjclass[2000]{47B44, 47H20, 35J25, 35K20}
\keywords{Dissipative operators; evolution equations;
\hfill\break\indent
parabolic and elliptic equations}

\begin{abstract}
 This article concerns the nonlinear evolution equation
 \begin{gather*}
 \frac{du(t)}{dt} \in A(t)u(t), \quad 0 \leq s < t < T, \\
  u(s) = u_0
 \end{gather*}
 in a real Banach space $X$, where the nonlinear, time-dependent,
 and multi-valued  operator
 $ A(t) : D(A(t)) \subset X \to X$
  has a time-dependent domain $D(A(t))$.
 It will be shown that, under certain assumptions on $ A(t) $,
 the equation has a strong solution. Illustrations are
 given of solving quasi-linear partial differential equations
 of parabolic type with time-dependent boundary conditions.
 Those partial differential equations are studied to a
 large extent.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{remark}[theorem]{Remark}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{example}[theorem]{Example}
\allowdisplaybreaks

\section{Introduction} \label{S:A}

Let $ (X, \|\cdot\|) $ be a real Banach space
with the norm $ \|\cdot\|$, and let $ T > 0$, $\omega $
be two real constants. Consider the nonlinear evolution equation
\begin{equation} \label{E:A}
\begin{gathered}
\frac{du(t)}{dt} \in A(t)u(t), \quad 0 \le s < t < T, \\
u(s) = u_0,
\end{gathered}
\end{equation}
where
\[
 A(t) : D(A(t)) \subset X \to X
\]
is a nonlinear, time-dependent, and  multi-valued operator.
 To solve \eqref{E:A}, Crandall and Pazy \cite{Cran}
made the following hypotheses of (H1)--(H3) and the $ t $-dependence
hypothesis of either (H4) or (H5),
for each $ 0 \le t \le T $.
\begin{itemize}
\item[(H1)] $ A(t) $ is dissipative in the sense that
\[
 \|u - v\| \leq \|(u - v) - \lambda
(g - h)\|
\]
 for all $ u, v \in D(A(t)) $,
$ g \in (A(t) - \omega)u, h \in
(A(t) - \omega)v $,
and for all $ \lambda > 0 $.
Equivalently,
\[
\Re(\eta(g - h)) \leq 0
 \]
 for some
$ \eta \in G(u - v)
\equiv \{\xi \in X^{*} :
\|u - v\|^2 = \xi(u - v)
= \|\xi\|_{X^{*}}^2 \} $,
the duality map of $ (u - v) $ \cite{Mi}.
Here $ (X^{*}, \|\cdot\|_{X^{*}}) $ is
the dual space of $X$ and $ \Re(z) $ is the real part of a
complex number $ z $.

\item[(H2)] The range of $ (I - \lambda A(t)) $ contains
the closure $ \overline{D(A(t))} $ of $ D(A(t)) $ for small $ 0 <
\lambda < \lambda_0 $ with $ \lambda_0\omega < 1 $.

\item[(H3)]  $ \overline{D(A(t))} = \overline{D} $ is independent of
$ t$.

\item[(H4)]  There are a continuous function $ f : [0, T] \to X $
and a monotone increasing function $ L : [0, \infty) \to
[0, \infty) $, such that
\[
   \|J_{\lambda}(t)x - J_{\lambda}(\tau)x\| \leq \lambda \|f(t) -
f(\tau)\| L(\|x\|)
\]
for $ 0 < \lambda < \lambda_0,
 0 \leq t, \tau \leq T, $ and $ x \in
\overline{D} $. Here $ J_{\lambda}(t)x \equiv (I - \lambda A(t))^{-1}
$ exists for $ x \in \overline{D} $ by (H1) and (H2).

\item[(H5)] There is a continuous function $ f : [0, T] \to X $,
which is of bounded variation on $ [0, T] $, and there is
a monotone increasing
function $ L : [0, \infty) \to [0, \infty) $,  such that
\[
   \|J_{\lambda}(t)x - J_{\lambda}(\tau)x\| \leq \lambda \|f(t) -
f(\tau)\| L(\|x\|) (1 + |A(\tau)x|)
\]
for $ 0 < \lambda < \lambda_0,  0 \leq t, \tau \leq T, $ and $ x \in
 \overline{D} $. Here
 \[
 |A(\tau)x| \equiv \lim_{\lambda
 \to 0}\|\frac{(J_{\lambda}(\tau) -
I)x}{\lambda}\|
\]
 by (H1) and (H2), which can equal $ \infty $  \cite{Crand,Cran}.
\end{itemize}

    By defining the generalized domain
$ \hat{D}(A(t)) \equiv \{x \in \overline{D(A(t))} : |A(t)x| < \infty
 \} $ \cite{Crand,Wes},
they \cite{Cran} proved, among other things,
that the limit
\begin{equation} \label{E:DvpdesA}
 U(t, s)x \equiv
 \lim_{n \to \infty} \prod_{i=1}^{n}
J_{\frac{t - s}{n}}(s + i \frac{t - s}{n})x
\end{equation}
 exists for $ x \in \overline{D} $
and that $ U(t, s)u_0 $ is a unique solution, in a generalized sense,
 to  the equation \eqref{E:A}
for $ u_0 \in \overline{D} $.


Because of the restriction in  (H3) that
$\overline{D(A(t))} = \overline{D} $ is independent of $ t $,
the boundary condition in the example in \cite{Cran} does not
depend on time.
In this paper,
in order to enlarge the scope of applications,
we will consider
a different set of  hypotheses, the dissipativity
condition (H1), the range condition (H2'), and
the time-regulating condition  (HA) below.
Here a similar set of hypotheses was considered in \cite{Lin2} but the
results were not satisfactory.
\begin{itemize}
\item[(H2')] The range
of $ (I - \lambda A(t)) $, denoted by
$  E  $,  is independent of $ t $ and contains
$ \overline{D(A(t))} $  for all $ t \in [0, T] $ and
for small $ 0 < \lambda <
\lambda_0 $ with $ \lambda_0 \omega < 1 $.

\item[(HA)] There is a continuous function
 $ f : [0, T] \to \mathbb{R} $, of bounded variation,
 and  there is a nonnegative
function $ L $ on $ [ 0, \infty) $
with $ L(s) $ bounded
for bounded $ s $,  such that, for each $ 0 < \lambda < \lambda_0 $,
we have
\[
\{J_{\lambda}(t)x -
J_{\lambda}(\tau)y : 0 \leq t, \tau
\leq T, x, y \in
E \} = S_1(\lambda)
\cup S_2(\lambda).
\]
 Here
$ S_1(\lambda) $ denotes the set
\begin{align*}
&\big\{ J_{\lambda}(t)x - J_{\lambda}(\tau)y : 0
\leq t, \tau \leq T,
 x, y \in E, \\
&\quad \|J_{\lambda}(t)x -J_{\lambda}(\tau)y\|
\leq L(\|J_{\lambda}(\tau)y\|)|t - \tau| \big\},
\end{align*}
and
$ S_2(\lambda) $ denotes the set
\begin{align*}
&\big\{J_{\lambda}(t)x - J_{\lambda}(\tau)y :
 0 \leq t, \tau \leq T, x, y \in E,
 \|J_{\lambda}(t)x - J_{\lambda}(\tau)y\|\\
& \leq (1 - \lambda \omega)^{-1}[
\|x - y\| + \lambda |f(t) - f(\tau)|
L(\|J_{\lambda}(\tau)y\|)( 1 +
\frac{\|(J_{\lambda}(\tau) - I)y\|}{\lambda})] \big\}.
\end{align*}
\end{itemize}

We will show that the limit in \eqref{E:DvpdesA} for
$ x \in \overline{\hat{D}(A(s))}  =
\overline{D(A(s))} $ exists, and that this limit for $ x = u_0
\in \hat{D}(A(s)) $ is a strong solution to the equation \eqref{E:A},
if $ A(t) $ satisfies additionally an embedding property
in \cite{Li} of embeddedly quasi-demi-closedness.
We then apply the abstract theory to quasi-linear, parabolic partial
differential equations with boundary conditions depending on time $ t $.
We finally show that, in those applications, each quantity
\[
J_{\frac{t - s}{n}}(s + i\frac{t - s}{n})h = [I - \frac{t - s}{n}
A(s + i \frac{t - s}{n})]^{-1}h, \quad i = 1, 2, \ldots, n
\]
is  the limit of a
sequence where each term in the sequence is
an explicit function $ F(\phi) $
of the solution $ \phi = \pounds_0^{-1}(h, \varphi) $
to the elliptic equation
with $ \varphi \equiv 0 $:
\begin{equation} \label{E:TimeC}
\begin{gathered}
-\Delta v(y) = h, \quad y \in \Omega, \\
 \frac{\partial v}{\partial \nu} + v = \varphi, \quad
y \in \partial \Omega.
\end{gathered}
\end{equation}
Here for the dimension
of the space variable $ y $ equal to 2 or 3, the $ \phi =
\pounds_0^{-1}(h, 0) $ and the
solution $ \pounds_0^{-1}(h, \varphi) $ to \eqref{E:TimeC} can be
computed numerically and efficiently by the boundary element methods
\cite{Gau,Sch}.
See Sections \ref{S:D} and \ref{S:E} for more details of these,
including how $ F(\phi) $ depends on $ \phi $,
and for other aspects of the treated  partial differential equations.


There are many related works;
to cite a few, we
mention \cite{Ba,Br,Bre,Bro,Cr,Cra,Cran,En,Eng,Gol,Lie,Hi,Kat,Kato,Li,Lin0,Lin1,Lin2,Lin4,Mi,Oh,Pa,Paz,Ro,Roy,We}, especially \cite{Lin4} for the recent
development on nonlinear evolution equations where
the hypothesis (H2) is relaxed.

The rest of this article will be organized
as follows. Section \ref{S:VaryA} obtains some preliminary
estimates, and Section \ref{S:B} deals with
the main results, where the nonlinear operator  $ A(t) $ is equipped
with time-dependent domain $ D(A(t)) $.  The Appendix
in Section \ref{S:VaryB}
examines the difference equations theory
in our papers \cite{Lin0,Lin1,Lin4},
whose results, together with those in Section \ref{S:VaryA}, will be used to
prove the main results in Section \ref{S:B}.
Section
\ref{S:D} studies applications  to linear or nonlinear
partial differential equations of parabolic type, in  which
each corresponding elliptic solution
$ J_{\frac{t - s}{n}}(s + i \frac{t - s}{n})h $
will be derived theoretically.
Finally, Section \ref{S:E} follows Section \ref{S:D}
but derives each elliptic solution
$ J_{\frac{t - s}{n}}(s + i \frac{t - s}{n})h $
as the limit of a sequence where each term in the sequence is an
explicit function of the solution $ \phi $ to the elliptic equation
\eqref{E:TimeC} with $ \varphi \equiv 0 $.
In either Section \ref{S:D} or Section \ref{S:E},
other aspects of the treated partial differential equations are
considered.

\section{Some preliminary estimates} \label{S:VaryA}

    Within this section and Sections \ref{S:B} and
\ref{S:VaryB}, we can assume, without loss of generality,
 that $ \omega \ge 0 $
where $ \omega $ is the $ \omega $ in the
hypothesis (H1). This is because the case $ \omega < 0 $
is the same as the case $ \omega = 0 $.
This will be readily seen from the corresponding proofs.

To prove the main results Theorems \ref{T:XA} and
\ref{T:XB} in Section \ref{S:B},
we need to make two preparations.
One preparation is this section, and the other  is the Appendix in
Section \ref{S:VaryB}.

\begin{proposition} \label{P:A}
  Let $ A(t) $ satisfy the dissipativity condition (H1),
 the range condition (H2'), and
the time-regulating condition (HA), and let
 $ u_0 $ be in  $  D(A(s)) \subset E $
 where $ 0 \le s \le T $.
 Let $  0 < \epsilon  < \lambda_0 $ be so chosen
  that $ 0 <  \epsilon \omega < 1 $, and let
  $ 0 \le t_i = s + i \epsilon \le T $  where
 $ i \in  \mathbb{N} $.
Then
\begin{equation}  \label{E:VaryA}
\|u_i - u_0\| \le \eta^{i}L(\|u_0\|)(i\epsilon)
+ [\eta^{i - 1}b_1 + \eta^{i - 2}b_2 + \dots + \eta b_{i - 1} + b_i]
\end{equation}
and
\begin{equation} \label{E:VaryB}
\begin{split}
\|\frac{u_i - u_{i-1}}{\epsilon}\|
&\leq [(c_ic_{i - 1} \dots c_2)L(\|u_0\|) \quad
\text{or  $ (c_ic_{i - 1} \dots c_3) L(\|u_1\|)$ or $ \dots $}
\\
&\quad \text{or $ c_iL(\|u_{i - 2}\|) $ or $ L(\|u_{i - 1}\|) $}]
 +  [(c_ic_{i -1}\dots c_1)a_0 \\
&\quad  + (c_ic_{i - 1} \dots c_2)d_1 +
(c_ic_{i - 1}\dots c_3)d_2
+ \dots  + c_id_{i - 1} + d_i].
\end{split}
\end{equation}
Here
$u_i = \prod_{j = 1}^{i}J_{\epsilon}(t_{j})u_0$
exists uniquely by the hypotheses (H1) and (H2'); \\
$\eta = (1 - \epsilon \omega)^{-1} > 1$; \\
$b_i = \eta \epsilon \|v_0\| + \eta \epsilon |f(t_i) - f(s)|
L(\|u_0\|)(1 + \|v_0\|)$,
 where $ v_0 $ is any element in $ A(s)u_0 $; \\
$c_i= \eta [1 + L(\|u_{i - 1}\|)|f(t_i) -
f(t_{i-1})|]$;  \\
$d_i = \eta L(\|u_{i - 1}\|)|f(t_i)
- f(t_{i-1})|$;  \\
the right sides of \eqref{E:VaryB} are interpreted as
$[L(\|u_0\|)] + [c_1a_0 + d_1]$  for $ i = 1 $; \\
$[c_2L(\|u_0\|)$ or
$L(\|u_1\|)] + [c_2c_1a_0 +
c_2d_1 + d_2] $ for $ i = 2 $;
 \dots, and so on;
and
\[
a_0 = \|\frac{u_0 - u_{-1}}{\epsilon}\|,
\]
where $ u_{-1} $ is defined by
$u_0 - \epsilon v_0 = u_{-1}$,
with $ v_0 $ any element in $ A(s)u_0 $.
\end{proposition}

\begin{proof}
We will use the method of mathematical induction.
Two cases will be considered,
and for each case, we divide the proof
into two steps.

\subsection*{Case 1} Here \eqref{E:VaryA} is considered.

\textbf{Step 1.} Claim that \eqref{E:VaryA} is true for $ i = 1 $.
This will follow from the arguments below.
If $ (u_1 - u_0)\in S_1(\epsilon) $
(defined in Section \ref{S:A}), then
\[
 \|u_1 - u_0\|
 = \|J_{\epsilon}(t_1)u_0 -
   J_{\epsilon}(s)(I - \epsilon A(s))u_0\|
\leq L(\|u_0\|)|t_1 - s|
\leq L(\|u_0\|)\epsilon,
\]
 which is less than or equal to the right-hand side of \eqref{E:VaryA}
with $ i = 1 $.

On the other hand, if $ (u_1 - u_0)
\in S_2(\epsilon) $ (defined in Section \ref{S:A}), then
\[
\|u_1 - u_0\| \le \eta \|u_0 - u_0\|
+\eta \epsilon \|v_0\|
+\eta \epsilon |f(t_1) - f(s)|L(\|u_0\|)(1 + \|v_0\|),
\]
which is less than or equal to the right-hand side
of \eqref{E:VaryA} with $ i = 1 $.
Here $ v_0 $ is any element in
$ A(s)u_0 $.

\textbf{Step 2.} By assuming that \eqref{E:VaryA} is true for
$ i = i - 1 $, we shall show that it is also true for $ i = i $.
If $ (u_i - u_0) \in S_1(\epsilon) $, then
\[
\|u_i - u_0\| = \|J_{\epsilon}(t_i)u_{i - 1}
- J_{\epsilon}(s)(I - \epsilon A(s))u_0\|
\le L(\|u_0\|)|t_i - s|
= L(\|u_0\|)(i\epsilon),
\]
which is less than or equal to the right side of \eqref{E:VaryA}
with $ i = i $ because of $ \eta^{i} > 1 $.

On the other hand, if $ (u_i - u_0) \in S_2(\epsilon) $, then
\[
\|u_i - u_0\| \le \eta \|u_{i - 1} - u_0\| + b_i
\]
where $\eta = (1 - \epsilon \omega)^{-1}$ and
\[
b_i = \eta \epsilon \|v_0\| + \eta \epsilon|f(t_i) - f(s)|
L(\|u_0\|)(1 + \|v_0\|).
\]
 This recursive inequality, combined with the induction
assumption, readily gives
\begin{align*}
\|u_i - u_0\|
&\le \eta \{\eta^{i - 1}L(\|u_0\|)(i - 1)\epsilon
  + [\eta^{i - 2}b_1 + \eta^{i - 3}b_2 + \dots +
\eta b_{i - 2} + b_{i - 1}]\} + b_i \\
&= \eta^{i}L(\|u_0\|)(i - 1)\epsilon
+ [\eta^{i - 1}b_1 + \eta^{i - 2}b_2 + \dots
 + \eta b_{i - 1} + b_i],
\end{align*}
which is less than or equal to the right-hand side
of \eqref{E:VaryA} with $ i = i $
because of $ (i - 1)\epsilon \le i\epsilon $.


\subsection*{Case 2} Here \eqref{E:VaryB} is considered.

\textbf{Step 1.} Claim that \eqref{E:VaryB} is true for $i = 1 $.
This follows from Step 1 in Case 1, because there it
was shown that
\[
\|u_1 - u_0\| \le L(\|u_0\|)\epsilon \quad  \text{or $ b_1 $},
\]
which, when divided by
$ \epsilon $,  is less than or equal to
the right side of \eqref{E:VaryB} with $ i = 1 $.
Here $ a_0 = \|v_0\| $, in which $ a_0 = \|(u_0 - u_{-1})/\epsilon\|
$ and $ u_{-1} \equiv u_0 - \epsilon v_0 $.

\textbf{Step 2.}  By assuming that \eqref{E:VaryB}
is true for $ i = i - 1 $, we will show that it is also
true for $ i = i $.
If $ (u_i - u_{i - 1})\in S_1(\epsilon)$, then
\[
\|u_i - u_{i - 1}\| \le L(\|u_{i - 1}\|)
|t_i - t_{i - 1}| = L(\|u_{i - 1}\|) \epsilon.
\]
This, when divided by $ \epsilon $, has its right side less than
or equal to one on the right-hand
sides of \eqref{E:VaryB} with $ i = i $.

If $ (u_i - u_{i-1}) \in S_2(\epsilon) $, then
\begin{align*}
\|u_i - u_{i-1}\|
&\leq (1 - \epsilon \omega)^{-1}[\|u_{i-1} - u_{i-2}\| \\
&\quad + \epsilon |f(t_i) - f(t_{i-1})| L(\|u_{i-1}\|)(1
+ \frac{\|u_{i-1} - u_{i-2}\|}{\epsilon})].
\end{align*}
By letting
\begin{gather*}
 a_i = \frac{\|u_i - u_{i-1}\|}
 {\epsilon},  \\
c_i = (1 - \epsilon \omega)^{-1}[1 + L(\|u_{i - 1}\|)|f(t_i) -
f(t_{i-1})|], \quad  \text{and} \\
d_i = L(\|u_{i - 1}\|)(1 - \epsilon \omega)^{-1}|f(t_i)
- f(t_{i-1})|,
\end{gather*}
it follows that
$a_i \leq c_ia_{i - 1} + d_i$.
Here notice that
\[
u_0 - \epsilon v_0 = u_{-1}; \quad
a_0 = \|\frac{u_0 - u_{-1}}{\epsilon}\|
= \|v_0\|.
\]
The above inequality, combined with the induction assumption,
readily gives
\begin{align*}
a_i &\leq c_i\big\{[(c_{i - 1}c_{i - 2} \dots c_2)L(\|u_0\|) \quad \text{or
 $ (c_{i - 1}c_{i - 2} \dots c_3)
L(\|u_1\|)$ or $ \dots $} \\
&\quad \text{or $ c_{i - 1}L(\|u_{i - 3}\|) $ or $ L(\|u_{i - 2}\|) $}]
 +  [(c_{i - 1}c_{i -2}\dots c_1)a_0 \\
&\quad + (c_{i - 1}c_{i - 2} \dots c_2)d_1 +
(c_{i - 1}c_{i - 2}\dots c_3)d_2
+ \dots \\
&\quad  + c_{i - 1}d_{i - 2} + d_{i - 1}]\big\} + d_i \\
&\leq [(c_ic_{i - 1} \dots c_2)L(\|u_0\|) \quad
\text{or  $ (c_ic_{i - 1} \dots c_3) L(\|u_1\|)
$ or $ \dots $} \\
&\quad \text{or $ c_iL(\|u_{i - 2}\|) $}]
 +  [(c_ic_{i -1}\dots c_1)a_0 \\
&\quad  + (c_ic_{i - 1} \dots c_2)d_1 +
(c_ic_{i - 1}\dots c_3)d_2 + \dots
  + c_id_{i - 1} + d_i],
\end{align*}
each of which is less than or equal to one on the right
sides of \eqref{E:VaryB}
with $ i = i $.

The induction proof is now complete.
\end{proof}

\begin{proposition} \label{P:VaryA}
Under the assumptions of Proposition \ref{P:A}, the following
are true if $ u_0 $ is in
$ \hat{D}(A(s)) = \{y \in \overline{D(A(s))}:|A(s)y| < \infty\} $:
\begin{gather*}
\|u_i - u_0\| \le K_1(1 - \epsilon \omega)^{-i}(2i + 1)\epsilon
\le K_1e^{(T - s)\omega}(3)(T - s); \\
\|\frac{u_i - u_{i - 1}}{\epsilon}\| \le K_3;
\end{gather*}
where the constants $ K_1 $ and $ K_3 $  depend on the quantities:
\begin{gather*}
K_1 = K_1(L(\|u_0\|), (T - s), \omega, |A(s)u_0|, K_{B}); \\
K_2 = K_2(K_1, (T - s), \omega, \|u_0\|); \\
K_3 = K_3(L(K_2), (T - s), \omega, \|u_0\|, |A(s)u_0|, K_{B}); \\
K_{B}  \text{ is the total variation of $ f $ on $ [0, T] $}.
\end{gather*}
\end{proposition}

\begin{proof}
We divide the proof into two cases.

\subsection*{Case 1} Here $ u_0 \in D(A(s)) $.
 It follows immediately from Proposition \ref{P:A} that
\begin{gather*}
\|u_i - u_0\| \le N_1(1 - \epsilon \omega)^{-i}(2i + 1)\epsilon
\le N_1e^{(T - s)\omega}(3)(T - s); \\
\|\frac{u_i - u_{i - 1}}{\epsilon}\| \le N_3;
\end{gather*}
where the constants $ N_1 $ and $ N_3 $ depend on the quantities:
\begin{gather*}
N_1 = N_1(L(\|u_0\|), (T - s), \omega, \|v_0\|, K_{B}); \\
N_2 = N_2(N_1, (T - s), \omega, \|u_0\|); \\
N_3 = N_3(L(N_2), (T - s), \omega, \|u_0\|, \|v_0\|, K_{B}); \\
K_{B}  \text{ is the total variation of $ f $ on $ [0, T] $}.
\end{gather*}
We used here the estimate in \cite[Page 65]{Cran}
\[
c_i\dots  c_1
\leq e^{i\epsilon \omega}
e^{e_i + \dots + e_1},
\]
where $ e_i = L(\|u_{i - 1}\|)|f(t_i) - f(t_{i-1})| $.

\subsection*{Case 2} Here $ u_0 \in \hat{D}(A(s)) $.
 This involves two steps.

\textbf{Step 1.} Let $ u_0^{\mu} = (I - \mu A(s))
^{-1}u_0 $ where $ \mu > 0 $, and let
\[
u_i = \prod_{j = 1}^{i}J_{\epsilon}(t_{j})u_0; \quad
u_i^{\mu} = \prod_{j = 1}^{i}J_{\epsilon}(t_{j})u_0^{\mu}.
\]
As in \cite[Lemma 3.2, Page 9]{Paz}, we have, by letting
$ \mu \to 0 $,
\[
 u_0^{\mu} \to u_0;
 \]
here notice that $ D(A(s)) $ is dense in $ \hat{D}(A(s)) $.
Also it is readily seen that
\[
 u_i^{\mu} = \prod_{k=1}^{i}(I - \epsilon A(t_{k}))
^{-1}u_0^{\mu} \to u_i = \prod_{k=1}^{i}(I - \epsilon A(t_{k}))^{-1}
u_0
\]
 as $ \mu \to 0 $,
 since $ (A(t) - \omega) $
is dissipative  for each $ 0 \leq t \leq T $.

\textbf{Step 2.}
Since $ u_0^{\mu}
\in D(A(s)) $, Case 1 gives
\begin{equation} \label{E:TimeE}
\begin{gathered}
\|u_i^{\mu} - u_0^{\mu}\| \le
N_1(L(\|u_0^{\mu}\|), (T - s), \omega, \|v_0^{\mu}\|, K_{B})
(1 - \epsilon \omega)^{-i}(2i + 1)\epsilon   \\
\frac{\|u_i^{\mu} -
u_{i-1}^{\mu}\|}{\epsilon}
\leq N_3(L(N_2), (T - s), \omega, \|u_0^{\mu}\|, \|v_0^{\mu}\|,
K_{B}),
\end{gathered}
\end{equation}
where
\[
 N_2 = N_2(N_1, (T - s), \omega, \|u_0^{\mu}\|),
\]
and $ v_0^{\mu} $ is any element in
$ A(s)(I - \mu A(s))^{-1}u_0 $.
We can take
\[
 v_0^{\mu} = w_0^{\mu} \equiv  \frac{(J_{\mu}(s) - I)u_0}{\mu},
\]
 since $ w_0^{\mu}
\in A(s)(I - \mu A(s))^{-1}u_0 $.

On account of  $ u_0 \in \hat{D}(A(s)) $,
we have
\[
 \lim_{\mu \to 0}\|\frac{(J_{\mu}(s)
- I)u_0}{\mu}\| = |A(s)u_0| < \infty.
\]
Thus, by letting $ \mu \to
0 $ in \eqref{E:TimeE} and using Step 1, the results in the Proposition
\ref{P:VaryA} follow.
The proof is complete.
\end{proof}

\section{Main results} \label{S:B}

Using the estimates in Section \ref{S:VaryA},
together with the difference equations theory,
the following result will be shown
in Section \ref{S:VaryB}.

\begin{proposition} \label{P:VaryB}
Under the assumptions of Proposition \ref{P:XA}, the following
inequality is true
\[
a_{m, n} \le
\begin{cases}
L(K_2)|n\mu - m\lambda|, \quad &\text{if $ S_2(\mu) = \emptyset$}; \\
c_{m, n} + s_{m, n} + d_{m, n} + f_{m, n} + g_{m, n}, \quad
&\text{if $ S_1(\mu) = \emptyset $};
\end{cases}
\]
where $ a_{m, n}, c_{m, n}, s_{m, n}, d_{m, n}, f_{m, n}, g_{m, n} $
and $ L(K_2) $ are
defined in Proposition \ref{P:XA}.
\end{proposition}

In view of this and Proposition \ref{P:A}, we are led to the
following claim.

\begin{proposition} \label{P:XA}
    Let $ x \in \hat{D}(A(s)) $ where
    $ 0 \le s \le T $, and let $ \lambda , \mu > 0 $,
    $ n, m \in \mathbb{N}, $  be such that
$ 0 \le (s +m \lambda), (s + n \mu) \le
T $, and such that
 $ \lambda_0 > \lambda
\geq \mu > 0 $ for which  $ \mu \omega,
\lambda \omega < 1 $. If $ A(t) $ satisfies
the dissipativity condition (H1),
the range condition (H2'), and  the time-regulating
condition (HA), then   the inequality
is true:
\begin{equation} \label{E:VaryC}
a_{m, n} \le c_{m, n} + s_{m, n} + d_{m, n} + e_{m, n} + f_{m, n} + g_{m, n}.
\end{equation}
 Here
\begin{gather*}
a_{m, n} \equiv \|\prod_{i = 1}^{n}
J_{\mu}(s + i \mu)x - \prod_{i = 1}^{m}J_{\lambda}(s + i \lambda)x\|; \\
\gamma \equiv (1 - \mu \omega)^{-1} > 1;  \quad
\alpha \equiv \frac{\mu}{\lambda};  \quad
 \beta \equiv 1 - \alpha;  \\
c_{m, n} = 2K_1\gamma^{n}[(n\mu
- m \lambda) + \sqrt{(n \mu - m \lambda)^2 + (n \mu)(\lambda
- \mu)}]; \\
s_{m, n} = 2K_1\gamma^{n}(1 - \lambda \omega)^{-m}
\sqrt{(n \mu - m \lambda)^2 + (n \mu)(\lambda - \mu)}; \\
d_{m, n} = [K_4\rho(\delta)\gamma^{n}(m \lambda)] +
\{K_4\frac{\rho(T)}{\delta^2}\gamma^{n}[(m \lambda)(n \mu -
m \lambda)^2
+ (\lambda - \mu)\frac{m(m + 1)}{2}\lambda^2]\}; \\
e_{m, n} = L(K_2)\gamma^{n}\sqrt{(n \mu - m \lambda)^2
+ (n \mu)(\lambda - \mu)}; \\
f_{m, n} = K_1[\gamma^{n}\mu +
\gamma^{n}(1 - \lambda \omega)^{-m}\lambda]; \\
g_{m, n} = K_4\rho(|\lambda - \mu|)\gamma^{n}(m\lambda); \\
K_4 = \gamma L(K_2)(1 + K_3); \quad
\delta > 0 \quad \text{is arbitrary}; \\
\rho(r) \equiv \sup\{|f(t) - f(\tau)| : 0 \leq t,
\tau \leq T,
|t - \tau| \leq r \}
\end{gather*}
where $\rho(r)$ is the modulus of continuity
of $ f $ on $ [0, T] $;
and $ K_1, K_2 $, and $ K_3 $
are defined in Proposition \ref{P:VaryA}.
\end{proposition}

\begin{proof}
We will use the method of mathematical induction and divide
the proof into two steps. Step 2 will involve six cases.

\textbf{Step 1.} \eqref{E:VaryC} is clearly true by Proposition
\ref{P:VaryA}, if  $ (m, n)= (0, n) $ or
$ (m, n) = (m, 0) $.

\textbf{Step 2.} By assuming that \eqref{E:VaryC} is true for
$ (m, n) = (m - 1, n - 1) $ or $ (m, n) = (m, n - 1) $, we will show
that it is also true for $ (m, n) = (m, n) $. This is done by the
arguments below.

Using the nonlinear resolvent identity in
\cite{Cr}, we have
\begin{align*}
 a_{m, n} &= \|J_{\mu}(s + n \mu)\prod_{i = 1}
^{n - 1}J_{\mu}(s + i \mu)x\\
&\quad - J_{\mu}(s + m \lambda)
[\alpha \prod_{i = 1}^{m - 1}J_{\lambda}(
s + i \lambda)x + \beta \prod_{i = 1}
^{m}J_{\lambda}(s + i \lambda)x)]\|.
\end{align*}
Here $ \alpha = \mu/\lambda$ and $ \beta = (\lambda - \mu)/\lambda$.

Under the time-regulating condition (HA), it follows that,
if the element inside the
norm of the right side of the above equality
is in $ S_1(\mu) $, then, by Proposition \ref{P:VaryA}
with $ \epsilon = \mu $,
\begin{equation} \label{E:VaryMain2}
a_{m, n}
\le L(\|\prod_{i = 1}^{n}J_{\mu}
(s + i \mu)x\|)|m \lambda - n \mu|
\le L(K_2)|m \lambda - n \mu|,
\end{equation}
which is less than or equal to the right-hand side
of \eqref{E:VaryC} with $ (m, n) =
(m, n) $, where $ \gamma ^{n} > 1 $.

If that element
instead  lies in
$ S_2(\mu) $, then, by Proposition \ref{P:VaryA} with
$ \epsilon = \mu $,
\begin{equation} \label{E:VaryMain}
\begin{split}
a_{m, n}
&\le \gamma (\alpha a_{m - 1, n - 1}
+\beta a_{m, n - 1})  + \gamma \mu |f(s + m \lambda) - f(s + n \mu)|\\
&\quad\times
L(\|\prod_{i = 1}^{n}J_{\mu}(s + i \mu)x\|)[1 +
 \|\frac{\prod_{i = 1}^{n}J_{\mu}
(s + i \mu)x - \prod_{i = 1}^{n - 1}
J_{\mu}(s + i \mu)x}{\mu}\|] \\
&\le [\gamma \alpha a_{m - 1, n - 1} + \gamma \beta a_{m, n - 1}] +
K_4\mu \rho(|n \mu - m \lambda|),
\end{split}
\end{equation}
where $ K_4 = \gamma L(K_2)(1 + K_3) $ and $ \rho(r) $ is the
modulus of continuity of $ f $ on $ [0, T] $. From this, it follows
that it suffices to prove the following relations under the
induction assumption:
\begin{gather}
\gamma \alpha p_{m - 1, n - 1} + \gamma \beta p_{m, n - 1}
\le p_{m, n}; \label{E:VaryD} \\
\gamma \alpha q_{m - 1, n - 1} + \gamma \beta q_{m, n - 1}
+ K_4\mu \rho(|n \mu - m \lambda|) \le q_{m, n}; \label{E:VaryE}
\end{gather}
where $ q_{m, n} = d_{m, n} $, and $ p_{m, n} = c_{m, n} $ or
$ s_{m, n} $ or $  e_{m, n} $ or $ f_{m, n} $ or $ g_{m, n} $.

Now we consider six cases.

\textbf{Case 1.} Here $ p_{m, n} = c_{m, n} $.
Under this case, \eqref{E:VaryD}
is true because of the calculations,
where
\[
 b_{m, n} = \sqrt{(n\mu - m\lambda)^2 + (n\mu)(\lambda - \mu)}
\]
 was defined and the Schwarz inequality was used:
\begin{gather*}
\alpha[(n - 1)\mu - (m - 1)\lambda] + \beta[(n - 1)\mu - m\lambda]
= (n \mu - m \lambda);
\\
\begin{aligned}
 \alpha b_{m - 1, n - 1} + \beta b_{m, n - 1}
&= \sqrt{\alpha}\sqrt{\alpha}b_{m - 1, n - 1} + \sqrt{\beta}\sqrt{\beta}
b_{m, n - 1} \\
&\quad \le (\alpha + \beta)^{1/2}(\alpha b_{m - 1, n - 1}^2
+ \beta b_{m, n - 1}^2)^{1/2} \\
&\quad \le \{(\alpha + \beta)(n\mu - m\lambda)^2 +
2(n\mu - m \lambda)[\alpha(\lambda - \mu) - \beta \mu] \\
&\quad + [\alpha(\lambda - \mu)^2
 + \beta \mu^2] + (n - 1)\mu(\lambda -
\mu)\}^{1/2} \\
&= b_{m, n}.
\end{aligned}
\end{gather*}
Here
\[
\alpha + \beta = 1; \quad
\alpha(\lambda - \mu) - \beta \mu = 0; \quad
\alpha(\lambda - \mu)^2 + \beta \mu^2 = \mu(\lambda - \mu).
\]

\textbf{Case 2.} Here $ p_{m, n} = s_{m, n} $. Under this case,
\eqref{E:VaryD} is true, as in Case 1, by noting that
\[
(1 - \lambda \omega)^{-(m - 1)} \le (1 - \lambda \omega)^{-m}.
\]

\textbf{Case 3.} Here $ q_{m, n} = d_{m, n} $.
Under this case, \eqref{E:VaryE} is true because of the calculations:
\begin{align*}
&\gamma \alpha d_{m - 1, n - 1} + \gamma \beta d_{m, n - 1} + K_4
\mu \rho(|n \mu - m \lambda|) \\
&\le \{\gamma \alpha[K_4\rho(\delta)\gamma^{n - 1}(m - 1)\lambda]
+ \gamma \beta[K_4 \rho(\delta)\gamma^{n - 1}(m \lambda)]\} \\
& \quad + \gamma \alpha \{K_4\frac{\rho(T)}{\delta^2}
\gamma^{n - 1}[(m - 1)\lambda\left((n - 1)\mu - (m - 1)\lambda\right)^2
+ (\lambda - \mu)\frac{(m - 1)m}{2}\lambda^2]\} \\
&\quad + \gamma \beta\{K_4\frac{\rho(T)}{\delta^2}
\gamma^{n - 1}[(m \lambda)\left((n - 1)\mu - m\lambda\right)^2 +
(\lambda - \mu)\frac{m(m + 1)}{2}\lambda^2]\} \\
&\quad  + K_4\mu \rho(|n \mu - m \lambda|) \\
&= K_4\rho(\delta)\gamma^{n}[(\alpha + \beta)(m\lambda) -
\alpha \lambda] \\
&\quad + K_4\frac{\rho(T)}{\delta^2}
\gamma^{n}\{\alpha[(n\mu - m\lambda)^2
+ 2(n \mu - m \lambda)(\lambda - \mu) + (\lambda - \mu)^2](m
\lambda - \lambda) \\
&\quad + [\alpha (\lambda - \mu)\frac{m(m + 1)}{2}\lambda^2
- \alpha(\lambda - \mu)m\lambda^2] \\
&\quad + \beta[(n \mu - m \lambda)^2 -
2(n \mu - m \lambda)\mu + \mu^2](m \lambda) \\
&\quad
+[\beta (\lambda - \mu)\frac{m(m + 1)}{2}\lambda^2]\}  +
K_4\mu \rho(|n\mu - m\lambda|) \\
&\quad \le K_4\rho(\delta)\gamma^{n}[(m\lambda) - \mu]
+ K_4\mu \rho(|n \mu - m \lambda|) \\
&\quad +
K_4\frac{\rho(T)}{\delta^2}\gamma^{n}[(m\lambda)(n \mu - m \lambda)^2 +
(\lambda - \mu)\frac{m(m + 1)}{2}\lambda^2 - \mu(n \mu - m \lambda)^2] \\
& \equiv r_{m, n},
\end{align*}
where the negative terms $ [2(n\mu - m\lambda)(\lambda - \mu)
+ (\lambda - \mu)^2](-\lambda)$ were dropped,
\[
\alpha 2(n\mu - m\lambda)(\lambda - \mu) - \beta 2(n\mu - m\lambda)\mu
= 0,
\]
and
\[
[\alpha(\lambda - \mu)^2 + \beta \mu^2](m\lambda)
= (m\lambda)\mu(\lambda - \mu),
\]
which cancelled
\[
 -\alpha(\lambda - \mu)m\lambda^2 = - (m\lambda)\mu(\lambda - \mu);
\]
it follows that $ r_{m, n} \le d_{m, n} $, since
\begin{align*}
&K_4\mu \rho(|n\mu - m\lambda|) \\
& \le
\begin{cases}
K_4\mu \rho(\delta) \le K_4\mu\rho(\delta)\gamma^{n}, \quad &\text{if
$ |n\mu - m\lambda| \le \delta $}; \\[3pt]
K_4\mu\rho(T)\frac{(n\mu - m\lambda)^2}{\delta^2}
\le K_4\mu\rho(T)\gamma^{n}\frac{(n\mu - m\lambda)^2}{\delta^2},
\quad &\text{if $ |n\mu - m\lambda| > \delta $}.
\end{cases}
\end{align*}

\textbf{Case 4.} Here $ p_{m, n} = e_{m, n} $. Under this
case, \eqref{E:VaryD} is true, as in Case 1.

\textbf{Case 5.} Here $ p_{m, n} = f_{m, n} $. Under this
case, \eqref{E:VaryD} is true because of the calculations:
\begin{align*}
\gamma \alpha f_{m - 1, n - 1} + \gamma \beta f_{m, n - 1}
&= \gamma \alpha K_1[\gamma^{n - 1}\mu + \gamma^{n - 1}
(1 - \lambda \omega)^{-(m - 1)}\lambda] \\
& \quad + \gamma \beta
K_1[\gamma^{n - 1}
\mu + \gamma^{n - 1}(1 - \lambda \omega)^{-m}\lambda] \\
& \le K_1[(\alpha + \beta)\gamma^{n}\mu + (\alpha + \beta)\gamma^{n}(1 -
\lambda \omega)^{-m}\lambda], \\
&= f_{m, n}.
\end{align*}

\textbf{Case 6.} Here $ p_{m, n} = g_{m, n} $. Under this case,
\eqref{E:VaryD} is true because of the calculations:
\begin{align*}
\gamma \alpha g_{m - 1, n - 1} + \gamma \beta g_{m, n - 1}
&\le K_4\gamma^{n}\rho(|\lambda - \mu|)\alpha(m - 1)\lambda
 + K_4\gamma^{n}\rho(|\lambda - \mu|)\beta(m \lambda) \\
& \le K_4\gamma^{n}\rho(|\lambda - \mu|)(\alpha + \beta)(m\lambda) \\
&= g_{m, n}.
\end{align*}
Now the proof is complete.
\end{proof}

Here is one of our two main results:

\begin{theorem} \label{T:XA}
If the nonlinear operator $ A(t) $ satisfies the dissipativity
condition {\rm (H1)}, the range condition {\rm (H2')}, and  the
 time-regulating condition {\rm (HA)},
 then
\[
U(s + t, s)u_0
\equiv \lim_{n \to \infty}\prod_{i = 1}^{n}
J_{\frac{t}{n}}(s + i \frac{t}{n})u_0
\]
exists for $ u_0 \in \overline{\hat{D}(A(s))} =
\overline{D(A(s))} $ where
$ s,t \ge 0 $ and $ 0 \le (s + t) \le T $,
and is the so-called limit solution to
the equation \eqref{E:A}.
Furthermore, this limit
$ U(s + t, s)u_0 $   has the Lipschitz property
\[
\|U( s + t, s)u_0 - U(s + \tau, s)u_0\|
\le k |t - \tau|
\]
 for $ 0 \le s + t,  s + \tau \le T $ and
for $ u_0 \in \hat{D}(A(s)) $.
\end{theorem}

\begin{proof}
For $ x \in \hat{D}(A(s)) $,
it follows from Proposition \ref{P:XA},
by setting
$ \mu = \frac{t}{n}, \lambda = \frac{t}{m}, $ and  $
  \delta^2 = \sqrt{\lambda
 - \mu} $
that, as $ n,  m \to \infty $,
$ a_{m, n} $ converges to $ 0 $, uniformly for
$ 0 \le (s + t) \le T $.
Thus
\[
\lim_{n \to \infty}
\prod_{i = 1}^{n}J_{\frac{t}{n}}(s + i\frac{t}{n})x
\]
exists for $ x \in \hat{D}(A(s)) $. This limit
also exists for
$ x \in \overline{\hat{D}(A(s))} =
\overline{D(A(s))} $,  on following
the limiting arguments in Crandall-Pazy
\cite{Cran}.

On the other hand, setting $ \mu = \lambda = t/n$,
$m = [\frac{t}{\mu}]$ and setting
$ \delta^2 = \sqrt{\lambda - \mu} $,
it follows that
\begin{equation} \label{E:TimeF}
\lim_{n \to \infty}\prod_{i = 1}^{n}J_{\frac{t}{n}}(s + i \frac{t}{n})u_0
= \lim_{\mu \to 0}\prod_{i = 1}^{[\frac{t}{\mu}]}J_{\mu}(s + i\mu)u_0.
\end{equation}

   Now, to show the Lipschitz property, \eqref{E:TimeF} and
Crandall-Pazy   \cite[Page 71]{Cran} will be used.
From Proposition \ref{P:VaryA},
it is derived that
\begin{gather*}
\begin{aligned}
\|u_{n} - u_{m}\| &\le \|u_{n} - u_{n - 1}\|
+ \|u_{n - 1} - u_{n - 2}\| + \dots + \|u_{m + 1} - u_{m}\| \\
&\le K_3 \mu (n - m)
\quad \text{for }   x \in \hat{D}(A(s)),
\end{aligned}\\
u_{n} = \prod_{i = 1}^{n}J_{\mu}(s + i \mu)x, \quad
u_{m} = \prod_{i = 1}^{m}J_{\mu}(s + i \mu)x,
\end{gather*}
where $ n = [t/\mu]$, $m = [\tau/\mu]$,
$t > \tau $ and $ 0 < \mu < \lambda_0 $.
The proof is completed by making $ \mu \to 0 $ and using
\eqref{E:TimeF}.
\end{proof}


  Now discretize \eqref{E:A} as
\begin{equation} \label{E:TimeB}
\begin{gathered}
    u_i - \epsilon A(t_i)u_i \ni u_{i-1},  \\
u_i \in D(A(t_i)),
\end{gathered}
\end{equation}
where $ n \in \mathbb{N} $ is large, and $ \epsilon $
is such that  $ s \le t_i = s + i \epsilon \le T $
for each $ i = 1, 2, \ldots, n $.
Here notice that, for $ u_0 \in E $, $ u_i $ exists uniquely by
the hypotheses (H1) and (H2').

     Let $ u_0 \in \hat{D}(A(s)) $, and
construct the Rothe functions \cite{Ka,Ro}. Let
\begin{gather*}
 \chi^{n}(s) = u_0, \quad C^{n}(s) = A(s),  \\
 \chi^{n}(t) = u_i, \quad  C^{n}(t) =
A(t_i)  \quad \text{for } t \in (t_{i-1}, t_i],
\end{gather*}
and  let
\begin{gather*}
 u^{n}(s) = u_0, \\
  u^{n}(t) = u_{i-1} + (u_i - u_{i-1}) \frac{t - t_{i-1}}{\epsilon}
\quad \text{for } t \in (t_{i-1}, t_i] \subset [s, T].
\end{gather*}

Since $ \|\frac{u_i - u_{i-1}}{\epsilon}\| \leq K_3 $
for $ u_0 \in \hat {D}(A(s)) $ by
Proposition \ref{P:A}, it follows that, for
$ u_0 \in \hat{D}(A(s)) $,
\begin{equation} \label{E:B}
\begin{gathered}
   \lim_{n \to \infty}\sup_{t \in [s, T]}\|u^{n}(t) -
\chi^{n}(t)\| = 0,  \\
   \|u^{n}(t) - u^{n}(\tau)\| \leq K_3|t - \tau|,
\end{gathered}
\end{equation}
where $ t, \tau \in (t_{i-1}, t_i] $, and that, for $ u_0
\in \hat{D}(A(s)) $,
\begin{equation} \label{E:C}
\begin{gathered}
  \frac{du^{n}(t)}{dt} \in C^{n}(t)\chi^{n}(t),  \\
 u^{n}(s) = u_0,
\end{gathered}
\end{equation}
where $ t \in (t_{i-1}, t_i] $. Here the last equation has values in
$ B([s, T]; X) $, which is the real Banach space of all bounded
functions from $ [s, T] $ to $X$.

\begin{proposition} \label{P:B}
If $ A(t) $ satisfies the assumptions in
Theorem \ref{T:XA}, then
\[
\lim_{n \to \infty}u^{n}(t)
= \lim_{n \to \infty}\prod_{i = 1}^{n}
J_{\frac{t - s}{n}}(s + i \frac{t - s}{n})u_0
\]
 uniformly for finite
 $ 0 \le (s +t) \le T $ and
 for $ u_0 \in \hat{D}(A(s)) $.
\end{proposition}

\begin{proof}
The asserted uniform convergence will be proved by using
the Ascoli-Arzela Theorem \cite{Roy}.

    Pointwise convergence will be proved first.
For each $ t \in [s, T) $, we have $ t \in
[t_i, t_{i+1}) $ for some  $ i $, and so $ i =
[\frac{t - s}{\epsilon}] $, the greatest integer that is
less than or equal to $ \frac{t - s}{\epsilon} $.
That $ u_i $
converges is because, for each such $ t $,
\begin{equation} \label{E:TimeG}
\begin{aligned}
\lim_{\epsilon \to 0}u_i
&= \lim_{\epsilon \to 0}\prod_{k=1}^{i}(I -
\epsilon A(t_{k}))^{-1}u_0 \\
&= \lim_{n \to \infty}
 \prod_{k=1}^{n}[I - \frac{t - s}{n}A(s + k\frac{t - s}{n})]^{-1}u_0
\end{aligned}
 \end{equation}
by \eqref{E:TimeF},
which has the  right side convergent by Theorem \ref{T:XA}. Since
\[
\|\frac{u_i - u_{i - 1}}{\epsilon}\|
\le K_3
\]
for $ u_0 \in \hat{D}(A(s)) $,
we see from the definition of
$ u^{n}(t) $ that
\[
\lim_{n \to \infty}u^{n}(t) =
\lim_{\epsilon \to 0}u_i =
\lim_{n \to \infty}\prod_{i = 1}^{n}
J_{\frac{t - s}{n}}(s + i\frac{t - s}{n})u_0
\]
 for each $ t $.

On the other hand, due to
\[
\|\frac{u_i - u_{i - 1}}{\epsilon}\|
\le K_3
\]
again, we see that  $ u^{n}(t) $ is equi-continuous in
$ C([s, T]; X) $,
the real Banach space of all continuous
functions from $ [s, T] $ to $X$.
Thus it follows from the Ascoli-Arzela theorem \cite{Roy}
that, for $ u_0 \in \hat{D}(A(s)) $,
some subsequence of $ u^{n}(t) $
(and then itself) converges uniformly
to some
\[
u(t) = \lim_{n \to \infty}\prod_{i = 1}^{n}
J_{\frac{t - s}{n}}(s + i \frac{t - s}{n})u_0
\in C([s, T];
X).
\]
This completes the proof.
\end{proof}


Now consider a strong solution.
Let $ (Y, \|\cdot\|_{Y}) $ be a real Banach space, into which
the real Banach space $ (X, \|\cdot\|) $ is
continuously embedded.  Assume additionally that $ A(t) $
satisfies the
embedding property of embeddedly quasi-demi-closedness:
\begin{itemize}
\item[(HB)]
If $ t_{n} \in [0, T] \to t $, if
 $ x_{n} \in D(A(t_{n})) \to x $,  and if $
\|y_{n}\| \leq k $ for some $ y_{n} \in
A(t_{n})x_{n} $,
then $ \eta(A(t)x) $
exists and
\[
        |\eta(y_{n_{l}}) - z| \to 0
\]
 for some subsequence $ y_{n_{l}} $
of $ y_{n} $,  for some $ z \in \eta(A(t)x) $,
and for each $ \eta
\in Y^{*} \subset X^{*} $,
the real dual space of $ Y $.
\end{itemize}

Here is the other main result.

\begin{theorem} \label{T:XB}
Let $ A(t) $ satisfy the dissipativity condition {\rm (H1)},
the range condition {\rm (H2')},
 the time-regulating condition {\rm (HA)},  and the embedding
property {\rm (HB)}.
Then  equation \eqref{E:A},  for $ u_0 \in \hat{D}(A(s)) $,  has a
strong solution
\[
u(t) = \lim_{n \to \infty}
\prod_{i = 1}^{n}J_{\frac{t - s}{n}}(s + i \frac{t - s}{n})u_0
\]
in $ Y $, in the sense that
\begin{gather*}
\frac{d}{dt}u(t) \in A(t)u(t) \quad \text{in $ Y $ for almost
every $ t \in (0, T) $}; \\
u(s) = u_0.
\end{gather*}
The solution is unique if $ Y \equiv X $.
Furthermore,
\[
\|u(t) - u(\tau)\|_{X} \le K_3|t - \tau|
\]
for $ 0 \le s \le t$, $\tau \le T $,
a result from Theorem \ref{T:XA}.
\end{theorem}

The results in the above theorem follow from
Theorem \ref{T:XA} and the proof in
\cite[page 364]{Li}, \cite[pages 262-263]{Lin2}.

\begin{remark} \label{T:XC} \rm
 The results in Sections \ref{S:VaryA} and \ref{S:B}
are still true if the range condition (H2') is replaced by the
weaker condition (H2'') below, provided that the initial conditions
$ u_0 \in \hat{D}(A(s)) (\supset D(A(s)) ) $ and $ u_0 \in
\overline{\hat{D}(A(s))} = \overline{D(A(s))}
(\supset D(A(s)) )$ are changed to the condition
$ u_0 \in D(A(s)) $. This is readily seen from the corresponding
proofs. Here
\begin{itemize}
\item[(H2'')] The range of $ (I - \lambda A(t)) $,
denoted by $ E $,
is independent of $ t $ and contains $ D(A(t)) $ for all
$ t \in [0, T] $ and for small
$ 0 < \lambda < \lambda_0 $ with $ \lambda_0\omega < 1 $.
\end{itemize}
\end{remark}


\section{Applications to partial differential equations (I)}
 \label{S:D}

Within this section, $ K $ will denote a constant that can vary
with different occasions.
Now we make the following assumptions:

\begin{itemize}
\item[(A1)] $ \Omega $ is a bounded smooth domain in $ \mathbb{R}^{n}, n \geq 2, $ and $ \partial \Omega $ is the boundary of $ \Omega $.

\item[(A2)] $ \nu(x) $ is the unit outer normal to $ x \in \partial \Omega $, and $ \mu $ is a real number such that $ 0 < \mu < 1 $.

\item[(A3)] $ \alpha(x, t, p) \in C^2(\overline \Omega \times \mathbb{R}^{n}) $
is true for each $ t \in [0, T] $, and is continuous in all its
arguments. Furthermore, $ \alpha(x, t, p)
\geq \delta_0 > 0 $ is true
  for all $ x, p $, and all $ t \in [0, T] $,
 and for some constant  $ \delta_0 > 0 $.

\item[(A4)] $ g(x, t, z, p) \in C^2(\overline \Omega
\times \mathbb{R} \times \mathbb{R}^{n}) $  is true  for each $ t \in [0, T] $,
is continuous in all its arguments, and
is monotone non-increasing in $ z $ for each $ t , x $, and $ p $.

\item[(A5)] $ \frac{g(x,t, z, p)}{\alpha(x, t, p)} $ is of at most
linear growth in $ p $, that is,
\[
 | \frac{g(x, t, z, p)}{\alpha(x, t, p)} | \leq M(x, t, z)(1 + |p|)
\]
 for some continuous function $ M $ and for all
$ t \in [0, T] $ when $ |p| $ is large enough.

\item[(A6)] $ \beta(x, t, z) \in C^{3}(\Omega \times \mathbb{R}) $
is true for each $ t \in [0, T] $, is continuous in all its
arguments, and is strictly monotone increasing in
$ z $ so that $ \beta_{z} \geq
\delta_0 > 0 $  for the constant
$ \delta_0 > 0 $ in (A3).

\item[(A7)]
\begin{gather*}
 |\alpha(x, t, p) - \alpha(x, \tau, p)| \leq |\zeta(t) - \zeta(\tau)|
N_1(x, |p|), \\
|g(x, t, z, p) - g(x, \tau, z, p)|
\leq |\zeta(t) -
\zeta(\tau)|N_2(x, |z|, |p|), \\
 |\beta(x, t, z) - \beta(x, \tau, z)|
\leq |t - \tau|N_3(x, |z|)
\end{gather*}
 are true
for some continuous positive functions
$ N_1, N_2, N_3 $ and for some
continuous function $ \zeta $ of bounded variation.
\end{itemize}

    Define the $t$-dependent nonlinear operator
$ A(t) : D(A(t)) \subset
C(\overline \Omega) \to
C(\overline \Omega) $ by
\begin{gather*}
    D(A(t)) = \{ u \in C^{2 + \mu}(\overline \Omega) :
\frac{\partial u}{\partial
\nu} + \beta(x, t, u) = 0 \quad \text{on $
 \partial \Omega $} \} \quad \text{and} \\
  A(t)u = \alpha(x, t, Du) \Delta u + g(x, t, u, Du) \quad
\text{for $ u \in D(A(t)) $}.
\end{gather*}


\begin{example} \label{T:XD} \rm
Consider the equation
\begin{equation} \label{E:XA}
\begin{gathered}
   \frac{\partial}{\partial t}u(x, t) = \alpha(x, t, Du)\Delta u +
g(x, t, u, Du), \quad (x, t)
\in \Omega \times (0, T),  \\
\frac{\partial}{\partial \nu}u + \beta(x, t, u)
= 0 , \quad x \in \partial \Omega, \\
  u(x, 0) = u_0,
\end{gathered}
\end{equation}
for $ u_0 \in D(A(0)) $.
The above equation has a strong solution
\[
u(t) = \lim_{n \to \infty}\prod_{i = 1}^{n}
J_{\frac{t}{n}}(i \frac{t}{n})u_0
\]
in $ L^2(\Omega) $ with
\[
\frac{\partial}{\partial \nu}u(t) + \beta(x, t, u(t)) = 0, \quad
x \in \partial \Omega,
\]
and the solution $ u(t) $ satisfies
the property
\[
\sup_{t \in [0, T]}\|u(t)\|_{C^{1 + \mu}
(\overline{\Omega})} \le K
\]
for some constant $ K $.
\end{example}

\begin{proof}
It was shown in \cite[Pages 264-268]{Lin2} that
$ A(t) $ satisfies the dissipativity condition (H1), the
range condition (H2'')
with  $ E = C^{\mu}(\overline{\Omega}) $
 for any $ 0 < \mu < 1 $, and satisfies
 the time-regulating condition  (HA)  and the embedding
property (HB).
Here the third line
on  \cite[Page 268]{Lin2}:
\[
\times [\|N_2(z, \|v\|_{\infty},
\|Dv\|_{\infty})\|_{\infty} +
\frac{\|N_1(z,
\|Dv\|_{\infty})\|_{\infty}}{\delta_1}
\|A(\tau)v\|_{\infty}]
\]
should have $ \|A(\tau)v\|_{\infty} $
replaced by
\[
[\|A(\tau)v\|_{\infty} + \|g(z, \tau,
v, Dv)\|_{\infty}].
\]
Hence Remark  \ref{T:XC} and Theorems
 \ref{T:XA} and \ref{T:XB} are applicable.

It remains to prove that $ u(t) $ satisfies the mentioned property
and the middle equation in \eqref{E:XA} in $ C(\overline{\Omega}) $.
This basically follows from \cite[pages 264-268]{Lin2}.
To this end,
 the $ u_i $ in \eqref{E:TimeB} will be used.

Since $ A(t) $ satisfies (H1), (H2''), and (HA),
it follows from Proposition \ref{P:VaryA} and Remark \ref{T:XC} that
\[
\|\frac{u_i - u_{i - 1}}{\epsilon}\|
= \|A(t_i)u_i\|_{\infty} \le K_3 \quad \text{and}
\quad \|u_i\|_{\infty} \leq K_2.
\]
 Thus, from linear $ L^{p} $
elliptic theory \cite{Tr,Gi2}, it follows that
$\|u_i\|_{W^{2, p}} \le K$
for some constant $ K $, whence
\begin{equation} \label{E:TimeLA}
 \|u_i\|_{C^{1 + \eta}} \le K
\end{equation}
 for any
$ 0 < \eta < 1 $ by the Sobolev embedding
theorem \cite{Gi2}.  This, together with the interpolation inequality
\cite{Gi2}
and the Ascoli-Arzela theorem \cite{Gi2, Roy}, implies that a
convergent subsequence of $ u_i $
converges in $ C^{1 + \mu}(\overline{\Omega}) $ for any
$ 0 < \mu < \eta < 1 $.
 Therefore, on account of \eqref{E:TimeG} and Proposition \ref{P:B},
\[
\sup_{t \in [0, T]}\|u(t)\|_{C^{1 + \mu}} \le K
\]
results for $ u_0 \in D(A(0)) $, and $ u(t) $ satisfies the middle
equation in \eqref{E:XA} in $ C(\overline{\Omega}) $.
The proof is complete.
\end{proof}


Consider the linear equation
\begin{equation} \label{E:ZA}
\begin{gathered}
\frac{\partial u(x,t)}{\partial t} =
\sum_{i, j = 1}^{n}a_{ij}(x, t)D_{ij}u(x,t)
+ \sum_{i = 1}^{n}b_i(x, t)D_iu(x, t)
+ c(x, t)u(x, t) \\
 \quad \text{for $ (x, t) \in \Omega \times (0, T) $}, \\
\frac{\partial}{\partial \nu}u + \beta(x, t)
u = 0, \quad x \in \partial \Omega,  \\
u(x, 0) = u_0,
\end{gathered}
\end{equation}
in which the following are assumed.
Let $ a_{ij}(x, t) = a_{ji}(x, t) $, and let
\[
\lambda_{\rm min} |\xi|^2 \leq
\sum_{i, j = 1}^{n}a_{ij}(x, t)\xi_i\xi_{j}
\le \lambda_{\rm max} |\xi|^2
\]
for some positive constants $ \lambda_{\rm min}$,
$\lambda_{\rm max} $,
for all $ \xi \in \mathbb{R}^{n} $,
and for all $ x, t $. Let
\[
a_{ij}(x, t),\; b_i(x, t),\; c(x, t)  \in
C^{\mu}(\overline{\Omega})
\]
 uniformly for all $ t $,
be continuous in all
their arguments, and be of bounded
variation in $ t $ uniformly for $ x $. Let
$c(x, t) \le 0$
for all $ x, t $,
\[
\beta(x, t) \in C^{1 + \mu}(\overline{\Omega}), \quad 0 < \mu < 1
\]
for all $ t $, and
$ \beta(x, t) \ge \delta > 0 $
for some constant $ \delta > 0 $. Finally,  let
  $ \beta(x, t) $ and $ c(x, t) $
be continuous in
all their arguments, and let
$ \beta(x, t) $ be Lipschitz
continuous in $ t $ uniformly for $ x $.


\begin{example} \label{T:ZA} \rm
If $ \sum_{i, j}a_{ij}(x, t)D_{ij}u(x, t)
= a_0(x, t)\Delta u(x, t) $ for some
$ a_0(x, t) $, then the equation \eqref{E:ZA},
for $ u_0 \in D(A(0)) $,  has a strong
solution
\[
u(t) = \lim_{n \to \infty}\prod_{i = 1}^{n}
J_{\frac{t}{n}}(i \frac{t}{n})u_0
\]
 in $ L^2(\Omega) $ with
\[
\frac{\partial}{\partial \nu}u(t) + \beta(x, t)u(t) = 0, \quad
x \in \partial \Omega,
\]
and $ u(t) $ satisfies
the property
\[
\sup_{t \in [0, T]}\|u(t)\|_{C^{1 + \mu}(\overline{\Omega})} \le K.
\]
\end{example}

\begin{proof}
Linear elliptic equation theory \cite[Pages 128-130]{Gi2}
shows that the corresponding operator $ A(t) $
satisfies the range condition (H2'') with $ E =
C^{\mu}(\overline{\Omega}) $.
The arguments in \cite[Pages 267-268]{Lin2}
show that
$ A(t) $ satisfies the dissipativity condition (H1),
the time-regulating condition (HA),
and the embedding property (HB).
The proof is complete, after applying
Remark \ref{T:XC}, Theorems  \ref{T:XA} and \ref{T:XB}, and
the proof of
Example \ref{T:XD}.
\end{proof}

\begin{example} \label{T:ZB} \rm
Suppose that
\[
a_{ij}(x),\; b_i(x),\; c(x) \in C^{1 + \mu}(\overline{\Omega}),\;
  \beta(x) \in C^{2 + \mu}(\overline{\Omega})
\]
are independent of $ t $, where $ 0 < \mu < 1 $. Then equation
\eqref{E:ZA}
 has a unique classical solution
\[
u(t) = \lim_{n \to \infty}\prod_{i = 1}^{n}
J_{\frac{t}{n}}(i \frac{t}{n})u_0  =
\lim_{n \to \infty}(I - \frac{t}{n}A)^{-n}u_0
\]
for $u_0 \in D(A) $ with $ Au_0 \in D(A) $, and the solution
has the properties
that $ \frac{du(t)}{dt} $ is Lipschitz
 continuous in $ t $, and that
\[
\|\frac{du}{dt}\|_{C^{1 + \mu}(\overline{\Omega})}
\le K.
\]
 Furthermore,
 $ \frac{d}{dt}u $ is
 differentiable in $ t $  and $ \frac{d^2}{dt^2}u(t) $ is
Lipschitz continuous in $ t $, if  $ u_0  $ is in $
D(A^{3}) $ such that  $ A^{3}u_0
\in D(A) $. More regularity
of $ \frac{du}{dt} $ in $ t $
can be obtained iteratively.
\end{example}

\begin{remark} \label{rmk2} \rm  In order for $ u_0 $ to be in $  D(A^2) $,
more smoothness
assumptions should be imposed on the coefficient functions
$ a_{ij}(x), b_i(x), c(x) $ and $ \beta(x) $.
\end{remark}


\begin{proof}
Here observe that the operator $ A $ is not closed, and
so  \cite[Theorem 1 Page 363]{Li}
does not apply directly.

The $ u_i $ in \eqref{E:TimeB} will be used, and
 $ u_0 \in D(A) $ with $
Au_0 \in D(A) $ will be assumed
for a moment. It follows that
\[
Au_i = \frac{u_i - u_{i - 1}}{\epsilon} =
(I - \epsilon A)^{-i}(Au_0),
\]
and hence, by \eqref{E:TimeLA} which is for the proof of
Theorem \ref{T:XD},
\[
\|Au_i\|_{C^{1 + \eta}(\overline{\Omega})} =
\|(I - \epsilon A)^{-i}(Au_0)\|_{C^{1 +
\eta}(\overline{\Omega})} \le K
\]
for $ Au_0 \in D(A) $ and
for any $ 0 < \eta < 1 $.
This implies
\[
\|u_i\|_{C^{3 + \eta}(\overline{\Omega})}
\le K
\]
by the Schauder global estimate with more smoothness in the
linear elliptic theory \cite{Gi2}. Consequently,
on using the interpolation
inequality \cite{Gi2} and the Ascoli-Arzela theorem \cite{Gi2,Roy},
we have
\[
Au_i \to  Au(t) = U(t)(Au_0)
\]
through some subsequence
with respect to
the topology in
$ C^{1 + \lambda}(\overline{\Omega}) $ for any $ 0 < \lambda < \eta < 1 $.
Here
\[
U(t)u_0 \equiv \lim_{n \to \infty}
(I - \frac{t}{n}A)^{-n}u_0.
\]
The rest
follows from  \cite[Page 363]{Li}, where the Lipschitz property in
Theorem \ref{T:XA} and  Remark \ref{T:XC} will be used.
\end{proof}

Now consider the linear equation with
the space dimension  $ 1 $:
\begin{equation} \label{E:MA}
\begin{gathered}
\frac{\partial u}{\partial t} =  a(x, t)u_{xx} + b(x, t)u_{x}
+ c(x, t)u, \quad (x, t) \in (0, 1) \times (0, T),  \\
u'(j, t) = (-1)^{j}\beta_{j}(j, t)u(j, t), \quad  j = 0, 1,  \\
u(x, 0) = u_0(x).
\end{gathered}
\end{equation}

Here we assume that
 $ a, b, c  $ are jointly continuous in $ x
\in [0, 1]$, $t \in [0, T] $,
and are of bounded variation in $ t $ uniformly for all
$ x $, that
$ c(x, t) \le 0 $ and $ a(x, t)
\ge \delta_0 $ for some
constant $ \delta_0 > 0 $, and finally that
 $ \beta_{j} \ge \delta_0 > 0, j = 0, 1 $
 are jointly continuous  in $ x, t $,
and are Lipschitz continuous in $ t $,  uniformly over $ x $.

Let $ A(t) $:
$ D(A(t)) \subset C[0, 1] \to C[0, 1] $ be the operator defined by
\begin{gather*}
A(t)u \equiv a(x, t)u'' + b(x, t)u' + c(x, t)u  \quad
\text{for $ u \in D(A(t)) $ where} \\
D(A(t)) \equiv \{v\in C^2[0, 1]: v'(j) = (-1)^{j}
\beta_{j}(j, t)v(j), j = 0, 1 \}.
\end{gather*}

Following \cite{Li}  and the proof for the previous
case of higher space dimensions, and applying
linear ordinary differential equation theory \cite{Co, LinA}
and Theorem \ref{T:XB},
the next example is readily proven. Here the range condition
(H2') is satisfied with $ E = C[0, 1] \supset
\overline{D(A(t))} $ for all $ t $.

\begin{example} \label{T:MA} \rm
Equation \eqref{E:MA} has a strong solution
\[
u(t) = \lim_{n \to \infty}
(I - \frac{t}{n}A)^{-n}u_0
\]
 in $ L^2(0, 1) $ for $ u_0 \in \hat{D}(A(0)) $,  and
$ u(t) $ satisfies the middle equation in
\eqref{E:MA} and the Lipschitz property
\[
\|u(t)  -u(\tau)\|_{\infty} \le k|t - \tau|
\]
 for $ u_0 \in \hat{D}(A(0)) $
and for $ 0 \le t, \tau \le T $.
\end{example}

In the case that $ a, b, c, \beta_{j}$, for $j = 0, 1$,
are independent of $ t $, Theorem 1
in \cite[Page 363]{Li}, together with the Lipschitz property
in Theorem \ref{T:XA} in this paper, will readily deliver
the following example. Here it is to be observed that
the corresponding  operator $ A $ is closed.

\begin{example} \label{T:MB} \rm
If the coefficient functions $ a, b, c, \beta_{j}, j = 0, 1 $
are independent of $ t $, then the equation \eqref{E:MA}
has a unique classical solution
\[
u(t) = \lim_{n \to \infty}(I - \frac{t}{n}A)^{-n}u_0
\]
for $ u_0 \in D(A) $ with $ Au_0 \in \overline{D(A)} $.
This $ u(t) $ has the property that the function
$ \frac{du}{dt} $
is continuous in $ t $.

Furthermore, $ u(t) $ is Lipschitz continuous in $ t $ for
$ u_0 \in \hat{D}(A) $, and  $ \frac{du}{dt} $
is Lipschitz continuous in $ t $
for $ u_0 \in D(A) $ with $ Au_0 \in \hat{D}(A) $,
and is differentiable in $ t $ for
$ u_0 \in D(A^2) $ with $ A^2u_0 \in \overline{D(A)}
$. More regularity of $ \frac{du}{dt} $ can be obtained
iteratively.
\end{example}

\begin{remark}\label{rmk3} \rm
 In order for $ u_0 $ to be in $ D(A^2) $,
more smoothness assumptions should be imposed on the coefficient
functions $ a(x), b(x), c(x) $, and $ \beta_{j}, j = 0, 1 $.
\end{remark}

\section{Applications to partial differential equations (II)}
\label{S:E}

In this section, it will be further shown that,  for each concrete
$ A(t) $ in Section \ref{S:D}, the corresponding quantity
\[
 J_{\frac{t}{n}}(i\frac{t}{n})h  =
[I - \frac{t}{n}A(i\frac{t}{n})]^{-1}h, \quad i = 1, 2, \ldots, n
\]
is the limit of a sequence where each term in the sequence is
an explicit function of the solution $ \phi $
to the elliptic equation \eqref{E:TimeC} with $ \varphi \equiv 0 $.

We start with the case of linear $ A(t) $ and
consider the parabolic equation \eqref{E:ZA}.

\begin{proposition} \label{T:NA}
For $ h \in C^{\mu}(\overline{\Omega}) $, the solution $ u $ to
the equation
\begin{equation} \label{E:TimeXA}
[I - \epsilon A(t)]u = h
\end{equation}
where  $ 0 \le t \le T $ and
$ \epsilon > 0 $, is the limit of a sequence where each term
in the sequence is
an explicit function of the solution
$ \phi $ to the elliptic equation \eqref{E:TimeC}
with $ \varphi \equiv 0 $. Here $ A(t) $
is the linear
operator corresponding to the parabolic equation \eqref{E:ZA}.
\end{proposition}

\begin{proof}
The linear operator $ A(t): D(A(t))
\subset C(\overline{\Omega}) \to
C(\overline{\Omega}) $ is defined by
\begin{gather*}
A(t)u \equiv \sum_{i, j}a_{ij}(x, t)D_{ij}
u + \sum_ib_i(x, t)D_iu
+ c(x, t)u \\
\text{for $ u \in D(A(t)) \equiv
\{u \in C^{2 + \mu}(\overline{\Omega}):
\frac{\partial u}{\partial \nu} + \beta(x, t)u =
0 \quad $ on $ \partial \Omega
\} $}.
\end{gather*}

Solvability of  \eqref{E:TimeXA} follows from \cite[Pages
128-130]{Gi2}, where the method of continuity \cite[Page 75]{Gi2}
is used. By writing out fully how the method of continuity is used,
it will be seen that the solution $ u $ is the limit of a sequence
where each term in the sequence is an explicit
function of  the solution $ \phi $ to the elliptic equation
\eqref{E:TimeC} with $ \varphi \equiv 0 $.
To this end,  set
\begin{gather*}
U_1 = C^{2 + \mu}(\overline{\Omega}),
\quad U_2 = C^{\mu}(\overline{\Omega}) \times
C^{1 + \mu}(\partial \Omega), \\
L_{\tau}u = \tau [u - \epsilon
A(t)u] + (1 - \tau)(- \Delta
u)  \quad \text{in} \quad \Omega, \\
N_{\tau}u =
\tau[\frac{\partial u}{\partial \nu}
+ \beta(x, t)u] + (1 - \tau)
(\frac{\partial u}{\partial \nu} +
u)  \quad \text{on} \quad \partial \Omega,
\end{gather*}
where $ 0 \le \tau \le 1 $.
Define the linear operator  $
\pounds_{\tau}: U_1 \to U_2  $ by
\[
\pounds_{\tau}u = (L_{\tau}u, N_{\tau}u)
\]
for $ u \in U_1 $, and assume that $ \pounds_{s} $ is onto
for some $ s \in [0, 1] $.

 It follows from \cite[Pages
128-130]{Gi2} that
\begin{align} \label{E:PA}
\|u\|_{U_1} \le C \|\pounds_{\tau}u\|_{U_2},
\end{align}
 where the constant $ C $ is independent
of $ \tau $.
This implies that  $ \pounds_{s} $ is one to one,
and so $ \pounds_{s}^{-1} $ exists.
By making use of $ \pounds_{s}^{-1} $, the equation, for $ w_0
\in U_2 $ given,
\[
\pounds_{\tau}u = w_0 %(h, 0)
\]
is equivalent to the equation
\[
u = \pounds_{s}^{-1}w_0 + (\tau - s)\pounds_{s}^{-1}(
\pounds_0 - \pounds_1)u,
\]
from which a linear map
$S: U_1 \to U_1$,
\[
Su = S_{s}u \equiv \pounds_{s}^{-1}w_0
+ (\tau - s)\pounds_{s}^{-1}(
\pounds_0 - \pounds_1)u
\]
is defined. The unique fixed point $ u $ of $ S = S_{s}$
will be related to
the solution of \eqref{E:TimeXA}.

By choosing $ \tau \in [0, 1] $ such that
\begin{align} \label{E:PB}
|s - \tau| < \delta \equiv [C(
\|\pounds_0\|_{U_1 \to U_2}
+ \|\pounds_1\|_{U_1 \to U_2})]^{-1},
\end{align}
it follows that $ S = S_{s} $ is a strict contraction map.
Therefore $ S $ has a unique fixed point $ w $, and the $ w $ can be
represented by
\[
\lim_{n \to \infty}S^{n}0 = \lim_{n \to \infty}(S_{s})^{n}0
\]
because of  $ 0 \in U_1 $.
Thus $ \pounds_{\tau} $ is onto for
$ |\tau - s| < \delta $.

 It follows that, by dividing
$ [0, 1] $ into subintervals of length
less than $ \delta $ and repeating the above arguments in a finite
number of times,
$ \pounds_{\tau} $ becomes  onto for all
$ \tau \in [0, 1] $, provided that it is onto for some $ \tau \in [0, 1]
$.
Since $ \pounds_0 $ is onto by the potential
theory \cite[Page 130]{Gi2}, we have that
$ \pounds_1 $ is also onto.  Therefore, for $ w_0 = (h, 0) $,
the equation
\[
\pounds_1u = w_0
\]
has a unique solution $ u $,  and the $ u $ is the sought solution
to \eqref{E:TimeXA}.
Here it is to be observed that
$ \phi \equiv \pounds_0^{-1}(h, 0) $ is the unique solution
$ \pounds_0^{-1}(h, \varphi)  $
to the elliptic equation \eqref{E:TimeC} with $ \varphi \equiv 0 $:
\begin{gather*}
-\Delta v = h, \quad x \in \Omega, \\
\frac{\partial v}{\partial \nu} + v(x) = 0 \quad \text{on} \quad
\partial \Omega,
\end{gather*}
and that
\begin{gather*}
S0 = S_00 = \pounds_0^{-1}(h, 0), \\
S^20 = (S_0)^20 =
\pounds_0^{-1}(h, 0) + \pounds_0^{-1}[(\tau - 0)
(\pounds_0 - \pounds_1)\pounds_0^{-1}(h, 0)], \\
\dots.
\end{gather*}
The proof is complete.
\end{proof}

\begin{remark} \label{rmk4}\rm
$\bullet$
The solution
$ u $ is eventually represented by %this integral formula
\[
u(x) = \pounds_0^{-1}H((h, 0)), %\int_{\Omega}G(x, y)h(y)(y) \, dy
\]
where $ H((h, 0)) $ is a convergent series in which each term is basically
obtained by, repeatedly, applying the linear operator
$ (\pounds_0 - \pounds_1)\pounds_0^{-1}  $ to $ (h, 0) $ for a
certain number of times.

$\bullet$ The quantity $ \pounds_0^{-1}(h, \varphi)
$, for each $ (h, \varphi) \in U_2 $ given, can be computed
numerically and efficiently by the boundary element methods
\cite{Gau,Sch}, if the dimension of the space variable
$ x $ equals $ 2 $ or $ 3 $.

$\bullet$ The constant
$ C $ above in \eqref{E:PA} and \eqref{E:PB}   depends on
$ n, \mu, \lambda_{\rm min}, \Omega $, and on the coefficient
functions $ a_{ij}(x, t), b_i(x, t), c(x, t), \beta(x, t) $, and
is not known explicitly \cite{Gi2}.  % \cite[First Edition, Page 134]{Gi2}),
Therefore, the corresponding
$ \delta $  cannot
be determined in advance, and so, when dealing with the elliptic
equation \eqref{E:TimeXA} in Proposition \ref{T:NA} numerically,
it is more likely, by choosing $ \tau
\in [0, 1] $ such that $ |s - \tau| $ is smaller,
 that the sequence
$ S^{n}0 $ will converge, which occurs when
$|s - \tau| < \delta$.
\end{remark}

Next, we extend the above techniques
to the case of nonlinear $ A(t) $, and consider the nonlinear
parabolic equation \eqref{E:XA}; more work is required in this case.

\begin{proposition} \label{T:OA}
For $ h \in C^{\mu}(\overline{\Omega}) $, the solution $ u $ to
the equation \eqref{E:TimeXA}
\[
[I - \epsilon A(t)]u = h
\]
where  $ 0 \le t \le T $ and
$ \epsilon > 0 $, is the limit of a sequence where each term in the sequence
 is
an explicit function of the solution
$ \phi $ to the elliptic equation \eqref{E:TimeC}
with $ \varphi \equiv 0 $. Here $ A(t) $
is the nonlinear
operator corresponding to the parabolic equation \eqref{E:XA},
and $ \beta(x, t, 0) \equiv 0 $ is assumed additionally.
\end{proposition}

\begin{proof}
The nonlinear operator $ A(t): D(A(t))
\subset C(\overline{\Omega}) \to
C(\overline{\Omega}) $ is defined by
\begin{gather*}
D(A(t)) = \{u \in C^{2 + \mu}(\overline{\Omega}):
\frac{\partial u}{\partial \nu} + \beta(x, t, u)
= 0 \quad \text{on} \quad \partial \Omega\},  \\
A(t)u = \alpha(x, t, Du)\Delta u + g(x, t, u,
Du), \quad u \in D(A(t)).
\end{gather*}

Equation  \eqref{E:TimeXA} with
the nonlinear $ A(t) $ has been solved in \cite{Lin2}, but here
the proof will be based on the contraction mapping theorem as in the proof
of Proposition \ref{T:NA}.
To  this end,
set
\begin{gather*}
U_1 = C^{2 + \mu}(\overline{\Omega}), \\
U_2 = C^{\mu}(\overline{\Omega}) \times C^{1 + \mu}(\partial \Omega), \\
L_{\tau}u = \tau[u - \epsilon A(t)u]
+ (1 - \tau)(u - \Delta u),  \quad x \in \Omega, \\
N_{\tau}u = \tau[\frac{\partial u}{\partial \nu}
+ \beta(x, t, u)] + (1 - \tau)(
\frac{\partial u}{\partial \nu} + u)  \quad \text{on } \partial \Omega,
\end{gather*}
where $ 0 \le \tau \le 1 $.
Define the nonlinear operator
$ \pounds_{\tau}: U_1 \to U_2 $
by
\[
\pounds_{\tau}u = (L_{\tau}u, N_{\tau}u)
\]
for $ u \in U_1 $, and
assume that $ \pounds_{s} $ is onto for some $ s \in [0, 1]$.

As in proving that $ A(t) $ satisfies the dissipativity
 (H1)  where the maximum principle was used,
$ \pounds_{s} $ is one to one,
and so $ \pounds_{s}^{-1} $
exists.
 By making use of $ \pounds_{s}^{-1} $, the equation, for
$ w_0 \in U_2 $ given,
$\pounds_{\tau}u = w_0$
is equivalent to the equation
\[
u = \pounds_{s}^{-1}[w_0 +
(\tau - s)(\pounds_0 - \pounds_1)u],
\]
from which a nonlinear map
\begin{gather*}
S: U_1 \to U_1,  \\
Su = S_{s}u \equiv
 \pounds_{s}^{-1}[w_0 + (\tau - s)(\pounds_0 - \pounds_1)u]
\quad \text{for $ u \in U_1 $}
\end{gather*}
is defined. The unique fixed point of $ S = S_{s} $
will be related to the solution
of \eqref{E:TimeXA} with nonlinear $ A(t) $.


By restricting  $ S = S_{s} $ to the closed ball of the Banach space
$ U_1 $,
\[
B_{s, r, w_0} \equiv \{u \in U_1:
\|u - \pounds_{s}^{-1}w_0\|_{C^{2 + \mu}} \le r > 0\},
\]
 and choosing small enough $ |\tau - s| $,
we will show that $ S = S_{s} $
leaves $ B_{s, r, w_0}  $
invariant. This will be done by the following
steps 1 to 4.

\textbf{Step 1.}  It follows as in \cite[Pages 265-266]{Lin2}  that,
for $ \pounds_{\tau} v = (f, \chi) $,
\begin{equation}  \label{E:Time1A}
\begin{gathered}
\|v\|_{\infty} \le
k_{\{\|f\|_{\infty}, \|\chi\|_{C(\partial \Omega)}\}}, \\
\|Dv\|_{C^{\mu}} \le k_{\{\|v\|_{\infty}\}}\|Dv\|_{\infty}
+ k_{\{\|v\|_{\infty}, \|f\|_{\infty}, \|\chi\|_{C(\partial \Omega)}\}}, \\
\|v\|_{C^{1 + \mu}} \le k_{\{\|\chi\|_{C(\partial \Omega)}, \|f\|_{\infty}\}}, \\
\|v\|_{C^{2 + \mu}} \le K \|\pounds_{\tau}v\|_{U_2}
= K \|\pounds_{\tau}v\|_{C^{\mu}(\overline{\Omega})
\times C^{1 + \mu}(\partial \Omega)}
\end{gathered}
\end{equation}
where $ k_{\{\|f\|_{\infty}\}} $
is a constant depending on $ \|f\|_{\infty} $, and similar meaning
is defined for other constants $ k $'s; further, $ K $ is independent
of $ \tau $, but depends on
$ n, \delta_0, \mu, \Omega $, and on the coefficient
functions $ \alpha(x, t, Dv), g(x, t, v, Dv), \beta(x, t, v) $,
which have incorporated the dependence of
$ v, Dv $ into $ \|\pounds_{\tau}v\|_{U_2} $.

\textbf{Step 2.} It is readily seen that, for
$ v \in C^{2 + \mu}(\overline{\Omega})  $
with $ \|v\|_{C^{2 + \mu}} \le R > 0 $,  we have
\begin{equation} \label{E:Time2A}
\|\pounds_{\tau}v\|_{U_2}
\le k_{\{R\}}\|v\|_{C^{2 + \mu}},
\end{equation}
where $ k_{\{R\}} $ is independent of $ \tau $.

\textbf{Step 3.}
It will be shown that, if
\[
\|u\|_{C^{2 + \mu}} \le R, \quad \|v\|_{C^{2 + \mu}} \le R > 0,
\]
then
\begin{equation}  \label{E:Time3A}
\|\pounds_{\tau}u - \pounds_{\tau}v\|_{U_2}
\le k_{\{R\}}\|u -v\|_{C^{2 + \mu}}.
\end{equation}
It will be also shown that, if
\[
\pounds_{\tau}u = (f, \chi_1), \quad \pounds_{\tau}v = (w, \chi_2),
\]
then
\begin{equation} \label{E:Time3B}
\begin{aligned}
\|u - v\|_{C^{2 + \mu}}
&\le k_{\{\|\pounds_{\tau}u\|_{U_2},
\|\pounds_{\tau}v\|_{U_2}\}}
[\|f - w\|_{C^{\mu}} + \|\chi_1 - \chi_2\|_{C^{1 + \mu}}] \\
&= k_{\{\|\pounds_{\tau}u\|_{U_2},
\|\pounds_{\tau}v\|_{U_2}\}}\|\pounds_{\tau}u - \pounds_{\tau}v\|_{U_2}.
\end{aligned}
\end{equation}
Here $ k_{\{R\}} $ and $ k_{\{\|\pounds_{\tau}u\|_{U_2},
\|\pounds_{\tau}v\|_{U_2}\}} $ are independent of $ \tau $.

Using the mean value theorem, we have that
\begin{gather*}
\begin{aligned}
f - w
&= L_{\tau}u - L_{\tau}v  \\
&= (u - v) - (1 - \tau)\Delta (u - v) -
\tau \epsilon [\alpha \Delta(u - v) \\
&\quad + \alpha_{p}(x,t,p_1)(Du - Dv)\Delta v
+ g_{p}(x, t, u, p_2)(Du - Dv) \\
&\quad +  g_{z}(x, t, z_1, Dv)(u - v)], \quad x \in \Omega,
\end{aligned} \\
\frac{\partial (u - v)}{\partial \nu} + [\beta(x, t, u) -
\beta(x, t, v)] = \chi_1 - \chi_2 \quad \text{on } \partial \Omega ,
\end{gather*}
where $ p_1, p_2 $ are some functions between
$ Du $ and $ Dv $, and $ z_1 $
is some function between $ u $
and $ v $.

It follows as in \eqref{E:Time2A} that
\[
\|\pounds_{\tau}u - \pounds_{\tau}v\|_{U_2}
\le k_{\{R\}}\|u - v\|_{C^{2 + \mu}},
\]
which is the desired estimate.

On the other hand, the maximum principle yields
\[
\|u - v\|_{\infty} \le k_{\{\|f - w\|_{\infty},
\|\chi_1 - \chi_2\|_{\infty}\}}
\]
and \eqref{E:Time1A}  yields
\[
\|u\|_{C^{2 + \mu}} \le K \|\pounds_{\tau}u\|_{U_2}, \quad
\|v\|_{C^{2 + \mu}} \le K \|\pounds_{\tau}v\|_{U_2}.
\]
Thus, it follows from the Schauder global
estimate \cite{Gi2} that
\[
\|u - v\|_{C^{2 + \mu}} \le
k_{\{\|\pounds_{\tau}u\|_{U_2}, \|\pounds_{\tau}v\|_{U_2}\}}
\|\pounds_{\tau}u - \pounds_{\tau}v\|_{U_2},
\]
which is the other desired estimate.

\textbf{Step 4.}
 Consequently, for $ u \in B_{s, r, w_0} $, we have that,
by \eqref{E:Time1A},
\begin{equation} \label{E:Time4A}
\|u\|_{C^{2 + \mu}}
\le r + \|\pounds_{s}^{-1}w_0\|_{C^{2 + \mu}}
\le r + K\|w_0\|_{U_2}
\equiv R_{\{r, \|w_0\|_{U_2}\}},
\end{equation}
and  that
\begin{align*}
& \|Su - \pounds_{s}^{-1}w_0\|_{C^{2 + \mu}} \\
&\le k_{\{\|w_0\|_{U_2}, \|w_0 + (\tau - s)
(\pounds_0 - \pounds_1)u\|_{U_2}\}}
\|(\tau - s)(\pounds_0 - \pounds_1)u\|_{U_2}
\quad \text{by \eqref{E:Time3B}} \\
&\le |\tau - s|k_{\{\|w_0\|_{U_2}, R_{\{r, \|w_0\|_{U_2}\}}\}}
\quad \text{by \eqref{E:Time2A} and \eqref{E:Time4A}}.
\end{align*}
Here the constant $ k_{\{\|w_0\|_{U_2}, R_{\{r, \|w_0\|_{U_2}\}}\}} $,
when $ w_0 $ is given and $ r $ is chosen,
is independent of $ \tau $  and $ s $.
Hence, by choosing some sufficiently small
$ \delta_1 > 0 $,   there results
\[
S = S_{s}: B_{s, r, w_0} \subset U_1 \to
B_{s, r, w_0} \subset U_1
\]
 for $ |\tau -s| < \delta_1 $; that is, $ B_{s, r, w_0} $ is left
invariant by $ S = S_{s}$.

Next, it will be shown that, for small $ |\tau - s| $,
$ S = S_{s} $ is a strict contraction on
$ B_{s, r, w_0} $, from which $ S = S_{s} $
has a unique fixed point. Because, for
$ u, v \in B_{s, r, w_0} $,
\[
\|u\|_{C^{2 + \mu}} \le R_{\{r, \|w_0\|_{U_2}\}}, \quad
 \|v\|_{C^{2 + \mu}} \le R_{\{r, \|w_0\|_{U_2}\}}
 \quad \text{by \eqref{E:Time4A}},
\]
it follows  that, by \eqref{E:Time2A},
\begin{equation} \label{E:Time5A}
\begin{gathered}
\|w_0 + (\tau - s)(\pounds_0 - \pounds_1)u\|_{U_2}
\le  k_{\{\|w_0\|_{U_2}, R_{\{r, \|w_0\|_{U_2}\}}\}}, \\
\|w_0 + (\tau - s)(\pounds_0 - \pounds_1)v\|_{U_2}
\le  k_{\{\|w_0\|_{U_2}, R_{\{r, \|w_0\|_{U_2}\}}\}},
\end{gathered}
\end{equation}
and that, by \eqref{E:Time3A},
\begin{equation} \label{E:Time5B}
 \|(\tau - s)[(\pounds_0 - \pounds_1)u -
(\pounds_0 - \pounds_1)v]\|_{U_2}
\le |\tau - s|k_{\{R_{\{r, \|w_0\|_{U_2}\}}\}}\|u - v\|_{C^{2
+ \mu}}.
\end{equation}
Therefore, on account of \eqref{E:Time3B}, \eqref{E:Time5A},
and \eqref{E:Time5B}, we obtain
\[
\|Su - Sv\|_{C^{2 + \mu}} \\
 \le |\tau - s|k_{\{R_{\{r, \|w_0\|_{U_2}\}},
\|w_0\|_{U_2}\}} k_{\{ R_{\{r, \|w_0\|_{U_2}\}}\}}
\|u - v\|_{C^{2 + \mu}}.
\]
Here the constant $ k_{\{R_{\{r, \|w_0\|_{U_2}\}},
\|w_0\|_{U_2}\}} k_{\{R_{\{r, \|w_0\|_{U_2}\}}\}} $
when $ w_0 $ given and $ r $ chosen, is independent of $ \tau $ and
$ s $.
Hence, by choosing some sufficiently small $ \delta_2 > 0 $,
it follows that
\[
S = S_{s}: B_{s, r, w_0} \to B_{s, r, w_0}
\]
is a strict contraction for
\[
|\tau - s| < \delta_2 \le \delta_1.
\]
Furthermore, the unique fixed point $ w $
of  $ S = S_{s} $ can be represented by
\[
\lim_{n \to \infty}S^{n}0 = \lim_{n \to \infty}(S_{s})^{n}0
\]
if $ \beta(x, t, 0) \equiv 0 $ and if
 $ r= r_{\{K\|w_0\|_{U_2}\}} $ is chosen such that
\begin{equation} \label{E:TimeQA}
r = r_{\{K\|w_0\|_{U_2}\}} \ge K\|w_0\|_{U_2}
\ge \|\pounds_{s}^{-1}w_0\|_{C^{2 + \mu}}
\end{equation}
(by \eqref{E:Time4A});
this is because $ 0 $ belongs to $ B_{s, r, w_0} $
in this case. Thus $ \pounds_{\tau} $ is onto for
$ |\tau - s|< \delta_2$.


It follows that, by dividing
$ [0, 1] $ into subintervals of length
less than $ \delta_2 $ and repeating the above arguments a finite
number of times,
$ \pounds_{\tau} $ becomes  onto for all
$ \tau \in [0, 1] $, provided that it is onto for some
$ \tau \in [0, 1]$.
Since $ \pounds_0 $ is onto by linear elliptic theory
\cite{Gi2}, we have that
$ \pounds_1 $ is also onto.  Therefore, the equation, for
$ w_0 = (h, 0) $,
\[
\pounds_1u = w_0
\]
has a unique solution $ u $,  and the $ u $ is the  sought solution
to \eqref{E:TimeXA}.

Here it is to be observed that
$ \psi \equiv \pounds_0^{-1}(h, 0) $ is the unique solution
to the elliptic equation
\begin{gather*}
v -\Delta v = h, \quad x \in \Omega, \\
\frac{\partial v}{\partial \nu} + v(x) = 0 \quad \text{on} \quad
\partial \Omega,
\end{gather*}
 and that, by Proposition \ref{T:NA},
 $ \psi $ is the limit of a sequence
 where each term in the sequence is an explicit function of
the solution $ \phi $
to the elliptic equation \eqref{E:TimeC} with $
\varphi \equiv 0 $.

It is also to be
observed that
\begin{gather*}
S0 = S_00 = \pounds_0^{-1}(h, 0), \\
S^20 = (S_0)^20 =
\pounds_0^{-1}[(h, 0) + |\tau -0|
(\pounds_0 - \pounds_1)\pounds_0^{-1}(h, 0)], \\
\dots,
\end{gather*}
where $ (\pounds_0 - \pounds_1)\pounds_0^{-1} $ is a nonlinear
operator.
The proof is complete.
\end{proof}

\begin{remark} \label{rmk5} \rm
The constants
$ k_{\{R_{\{r, \|w_0\|_{U_2}\}}\}} $ and
 $ k_{\{R_{\{r, \|w_0\|_{U_2}\}},
\|w_0\|_{U_2}\}} k_{\{ R_{\{r, \|w_0\|_{U_2}\}}\}} $,
when $ w_0 $ is given and when $ r $ is chosen and conditioned
by  \eqref{E:TimeQA}, are not known explicitly, and so the corresponding
$ \delta_2 $ cannot be determined in advance. Hence, when dealing with
the elliptic equation \eqref{E:TimeXA} in Proposition \ref{T:OA}
numerically, it is more likely,
by choosing $ \tau \in [0, 1]  $ such that $ |\tau - s| $ is smaller,
 that the sequence $ S^{n}0 $ will converge,
since then the condition
$|\tau - s| < \delta_2 \le \delta_1$
is more easily satisfied.
\end{remark}

Finally, what  will be considered is the linear equation \eqref{E:MA} of space
dimension $1$.

\begin{proposition} \label{T:PA}
For $ h \in C[0, 1] $,
the solution $ u $ to
the equation \eqref{E:TimeXA}
\[
[I - \epsilon A(t)]u = h
\]
where  $ 0 \le t \le T $ and
$ \epsilon > 0 $, is the limit of a sequence where each term
in the sequence  is
an explicit function of the solution
$ \phi $ to the ordinary differential equation
\begin{equation} \label{E:TimeYA}
\begin{gathered}
v - v'' = h, \quad x \in (0, 1), \\
v'(j) = (-1)^{j}v(j), \quad j = 0, 1.
\end{gathered}
\end{equation}
 Here $ A(t) $ is the linear
operator corresponding to the parabolic equation \eqref{E:MA}.
\end{proposition}

\begin{proof}
The linear operator $ A(t): D(A(t)) \subset C[0, 1] \to
C[0, 1] $  is defined by
\begin{gather*}
A(t)u \equiv a(x, t)u'' + b(x, t)u' + c(x, t)u  \quad
\text{for $ u \in D(A(t)) $ where} \\
D(A(t)) \equiv \{v\in C^2[0, 1]: v'(j) = (-1)^{j}
\beta_{j}(j, t)v(j), \quad j = 0, 1 \}.
\end{gather*}

The contraction mapping theorem in the proof of
Proposition \ref{T:NA} will
be used in order to solve the equation \eqref{E:TimeXA}.
To this end,
 set, for $ 0 \le \tau \le 1 $,
\begin{gather*}
U_1 = C^2[0, 1], \quad U_2 = C[0, 1] \times {\mathbb R}^2, \\
L_{\tau}u = \tau[u - \epsilon A(t)u]
+ (1 - \tau)(u - u''),  \\
\begin{aligned}
N_{\tau}u &= \Big(\tau [u'(0) - \beta_0(0, t)u(0)] +
(1 - \tau)[u'(0) - u(0)],  \\
&\quad \tau [u'(1) + \beta_1(1, t)u(1)]
+ (1 - \tau)[u'(1) + u(1)]\Big).
\end{aligned}
\end{gather*}
Define the linear operator $\pounds_{\tau} : U_1 \to U_2 $
by
\[
\pounds_{\tau}u = (L_{\tau}u, N_{\tau}u)
\]
for $ u \in U_1 $, and assume that $ \pounds_{s} $ is onto
for some $ s \in [0, 1] $.

The following will be readily derived.

$\bullet$  For $ u \in C^2[0, 1] $, we have
\begin{equation} \label{E:TimeWB}
 \|\pounds_{\tau}u\|_{U_2} = \|\pounds_{\tau}u\|_{C[0, 1]
\times {\mathbb R}^2}
\le k_{\{a, b, c, \beta_0, \beta_1\}}\|u\|_{C^2},
\end{equation}
where $ k_{\{a, b, c, \beta_0, \beta_1\}} $ is
independent of $ \tau $, and can be computed, depending
on the given $ a(x, t), b(x, t), c(x, t), \beta_0(0, t) $,
and $ \beta_1(1, t) $.

$\bullet$ For $ \pounds_{\tau}u = (h, (r, s)) $, the maximum
principle shows
\[
 \|u\|_{\infty} \le \|h\|_{\infty} + |\frac{r}{\beta_0(0, t)}|
+  |\frac{s}{\beta_1(1, t)}|.
\]
This, together with the known interpolation inequality
\cite[Page 65]{Gol} or \cite[Pages 7-8]{Mi}
\[
\|u'\|_{\infty} \le \frac{2}{\lambda}
\|u\|_{\infty} + \frac{\lambda}{2}\|u''\|_{\infty}
\]
for any $ \lambda > 0 $,
 applied to $
\pounds_{\tau}u = (h, (r, s)) $, it follows that,
by choosing small enough $ \lambda = \lambda_1 $,
\begin{equation} \label{E:TimeWA}
 \|u\|_{C^2} \le k_{\{\lambda_1, a, b, c,
\beta_0, \beta_1\}}(\|h\|_{\infty} + |r| + |s|)
= k_{\{\lambda_1, a, b, c, \beta_0, \beta_1\}}\|\pounds_{\tau}
u\|_{U_2},
\end{equation}
where $ k_{\{\lambda_1, a, b, c, \beta_0, \beta_1\}} $
is independent of
$ \tau $ and can be computed explicitly.
\medskip

On account of the estimate \eqref{E:TimeWA},
$ \pounds_{s} $ is one to one, and so $ \pounds_{s}^{-1} $ exists.
Thus, making use of $ \pounds_{s}^{-1} $, the equation, for
$ w_0 \in U_2 $ given,
$\pounds_{\tau}u = w_0 $
is equivalent to the equation
\[
u = \pounds_{s}^{-1}w_0 + (\tau - s)\pounds_{s}^{-1}(
\pounds_0 - \pounds_1)u,
\]
from which a linear map
\begin{gather*}
 S: U_1 = C^2[0, 1] \to U_1 = C^2[0, 1], \\
Su = S_{s}u \equiv \pounds_{s}^{-1}w_0
+ (\tau - s)\pounds_{s}^{-1}(\pounds_0 - \pounds_1)u, \quad
u \in U_1
\end{gather*}
is defined. Because of \eqref{E:TimeWA} and \eqref{E:TimeWB},
it follows that this $ S $ is a strict contraction if
\[
|\tau - s| < \delta = [k_{\{\lambda_1, a, b, c,
\beta_0, \beta_1\}}2k_{\{a, b, c, \beta_0,
\beta_1\}}]^{-1}.
\]
The rest of the proof will be the same as that
for Proposition \ref{T:NA}, in which the equation, for $ w_0
= (h, (0, 0)) $,
\[
\pounds_1u = w_0
\]
has a unique solution $ u $, and the $ u $ is the sought solution.
\end{proof}

\begin{remark} \label{rmk6} \rm
$\bullet$ The $ \delta = [k_{\{\lambda_1, a, b, c,
\beta_0, \beta_1\}}2k_{\{a, b, c, \beta_0,
\beta_1\}}]^{-1}  $
in the above proof of Proposition
\ref{T:PA} can be computed
explicitly.

$\bullet$  The quantity $ \pounds_0^{-1}(h, (0, 0)) $ is represented by
the integral
\[
\pounds_0^{-1}(h, (0, 0)) = \int_0^{1}g_0(x, y)h(y)
\, dy,
\]
where $ g_0(x, y) $ is the Green
function associated with the boundary value problem
\begin{gather*}
u - u'' = h  \quad \text{in} \quad
(0, 1), \\
u'(j) = (-1)^{j}u(j), \quad
j = 0, 1.
\end{gather*}
This $ g_0(x, y) $ is known explicitly by a
standard formula.

$\bullet$ As before, we have
\begin{gather*}
S0 = S_00 = \pounds_0^{-1}(h, (0, 0)), \\
S^20 = S_0^20 =
\pounds_0^{-1}(h, (0, 0)) + \pounds_0^{-1}[|\tau - 0|(
\pounds_0 -
\pounds_1)\pounds_0^{-1}(h, (0, 0))], \\
\dots.
\end{gather*}
\end{remark}


\section{Appendix} \label{S:VaryB}

In this section, the Proposition \ref{P:VaryB} in Section
\ref{S:B} will be proved, using the theory of difference
equations. We now introduce its basic theory  \cite{Mic}.
Let
\[
 \{b_{n}\} = \{b_{n}\}_{n \in \{0\}\cup {\mathbb N}}
= \{ b_{n} \}_{n = 0}^{\infty}
\]
be  a sequence of real numbers.
For such a sequence
$ \{b_{n}\} $, we  further extend it by defining
\[
b_{n} = 0 \quad \text{if $ n = -1, -2, \dots $}.
\]
The set of all
such sequences $ \{b_{n}\} $'s will be denoted by $ S $.
Thus, if $ \{a_{n}\} \in S $, then
$0 = a_{-1} = a_{-2} = \dots$.

  Define a right shift operator
$ E : S \to S $ by
\[
    E\{b_{n}\} = \{b_{n + 1}\} \quad \text{for $ \{b_{n}\} \in S $}.
\]
For $ c \in {\mathbb R} $ and $ c \ne 0 $,
define the  operator $ (E - c)^{*} : S \to S $
 by
\[
     (E - c)^{*}\{b_{n}\} = \{c^{n}\sum_{i = 0}^{n - 1}
\frac{b_i}{c^{i + 1}}\}
\]
for $ \{b_{n}\} \in S $.
Here the first term on the right side of the equality,
corresponding to $ n = 0 $, is zero.

 Define, for $ \{b_{n}\} \in S $,
\begin{gather*}
 (E - c)^{i *}\{b_{n}\} = [(E - c)^{*}]^{i}\{b_{n}\},
\quad i = 1, 2, \ldots; \\
 (E - c)^{0}\{b_{n}\} = \{b_{n}\}.
\end{gather*}
It follows that $ (E - c)^{*} $ acts approximately
as the inverse of $ (E - c) $, in the sense that
\[
    (E - c)^{*}(E - c)\{b_{n}\} =  \{b_{n} - c^{n}b_0\}.
\]

Next we extend the above definitions to doubly indexed sequences.
For a doubly indexed sequence $ \{\rho_{m, n}\}
= \{\rho_{m, n}\}_{m, n = 0}^{\infty} $ of real numbers, let
\[
   E_1\{\rho_{m, n} \} = \{\rho_{m + 1, n} \}; \quad
   E_2\{\rho_{m, n} \} = \{\rho_{m, n + 1} \}.
\]
Thus, $ E_1 $ and $ E_2 $ are the right shift operators, which
act on the first index and the second index, respectively.
It is easy to see that
\[
 E_1E_2 \{\rho_{m, n}\}
= E_2E_1 \{\rho_{m, n}\}\,.
\]
Before we prove the Proposition \ref{P:VaryB},
we need the following four lemmas, which are proved
in \cite{Lin1,Lin0,Lin1,Lin4}, respectively.

\begin{lemma} \label{L:VaryA}
If \eqref{E:VaryMain} is true, then
\begin{equation} \label{E:VaryMain3}
\begin{aligned}
      \{a_{m, n}\} &\leq (\alpha \gamma (E_2 - \beta \gamma)^{*})^{m}
\{a_{0, n}\}
+ \sum_{i = 0}^{m - 1}(\gamma \alpha(E_2 - \gamma \beta)^{*})^{i}
\{(\gamma \beta)^{n}a_{m - i, 0}\} \\
&\quad  + \sum_{j = 1}^{m}(\gamma \alpha)^{j - 1}((E_2 -
\gamma \beta)^{*})^{j}
\{r_{m + 1 - j, n + 1}\},
\end{aligned}
\end{equation}
where $ r_{m, n} = K_4\mu \rho(|n\mu - m\lambda|) $.
\end{lemma}

\begin{lemma} \label{L:VaryB}
The following equality holds:
\[
((E_2 - \beta \gamma)^{*})^{m}\{n \gamma^{n}\}
= \{\frac{n \gamma^{n}}{\alpha^{m}}
\frac{1}{\gamma^{m}}
 - \frac{m \gamma^{n}}{\alpha^{m + 1}}\frac{1}{\gamma^{m}}
 + \Big(\sum_{i = 0}^{m - 1}
\binom{n}{i}\frac{\beta^{n - i}}{\alpha^{m + 1 - i}}
(m - i)\frac{1}{\gamma^{m}}\Big)
\gamma^{n}\}.
\]
Here $ \gamma, \alpha $ and $ \beta $ are defined in Proposition
\ref{P:XA}.
\end{lemma}

\begin{lemma} \label{L:VaryC}
The following equality holds:
\[
 ((E - \beta \gamma)^{*})^{j}\{\gamma^{n} \}
= \big\{\Big(\frac{1}{\alpha^{j}} -
 \frac{1}{\alpha^{j}}\sum_{i = 0}^{j - 1}\binom{n}{i}
 \beta^{n - i}\alpha^{i}\Big)\gamma^{n - j}\big\}
= \big\{\Big(\frac{1}{\alpha^{j}}
 \sum_{i = j}^{n}\beta^{n - i}\alpha^{i}\Big)\gamma^{n - j}\big\}
\]
for $ j \in \mathbb{N} $. Here $ \gamma,  \alpha $ and $  \beta $ are
defined in Proposition \ref{P:XA}.
\end{lemma}


\begin{lemma} \label{L:VaryD}
The following equality holds:
\begin{align*}
    (E - \beta \gamma)^{m *}\{n^2\gamma^{n}\}
 &= \gamma^{n - m}\{\frac{n^2}{\alpha^{m}} -
\frac{(2m)n}{\alpha^{m + 1}} + (\frac{m(m - 1)}{\alpha^{m + 2}} +
\frac{m(1 + \beta)}{\alpha^{m + 2}})  \\
&\quad  - \sum_{j = 0}^{m - 1}\big(\frac{(m - j)(m - j - 1)}
{\alpha^{m - j + 2}} +
\frac{(m - j)(1 + \beta)}{\alpha^{m - j + 2}}\big)\binom{n}{j}
\beta^{n - j}\}.
\end{align*}
Here $ \gamma, \alpha $, and $ \beta $ are defined in
Proposition \ref{P:XA}.
\end{lemma}

\begin{proof}[Proof of Proposition \ref{P:VaryB}]
If $ S_2(\mu) = \emptyset $, then \eqref{E:VaryMain2} is true,
and so
\[
a_{m, n} \le L(K_2)|n\mu - m\lambda|.
\]
If $ S_1(\mu) = \emptyset $, then \eqref{E:VaryMain} is true,
and so the inequality \eqref{E:VaryMain3} follows by
Lemma \ref{L:VaryA}. Since, by Proposition \ref{P:VaryA},
\begin{gather*}
a_{0, n} \le K_1\gamma^{n}(2n + 1)\mu; \\
a_{m - i, 0} \le K_1(1 - \lambda \omega)^{-m}[2(m - i) + 1]\lambda;
\end{gather*}
it follows from Lemma \ref{L:VaryC} and from Proposition 3
and its proof in
\cite[Pages 115-116]{Lin0} that  the first two terms of the right side
of the inequality \eqref{E:VaryMain3} are less than or equal to
\[
c_{m, n} + s_{m, n} + f_{m, n}.
\]

We finally estimate the third term,
denoted by $ \{t_{m, n}\} $, of the right-hand side
of  \eqref{E:VaryMain3}.
Observe that, using the subadditivity of $ \rho $, we have
\begin{align*}
    \{t_{m, n}\}
&\leq \sum_{j = 1}^{m}(\gamma \alpha)^{j - 1}
(E_2 - \gamma \beta)^{j *}K_4
\mu \{\rho(|\lambda - \mu|) + \rho(|n \mu -m \lambda + j \lambda|)\} \\
&\leq \sum_{j = 1}^{m}(\gamma \alpha)^{j - 1}(E_2 - \gamma \beta)^{
j *}K_4 \mu \{\gamma^{n}\rho(|\lambda - \mu|) +
\gamma^{n}\rho(|n \mu - (m - j)\lambda|)\} \\
&\equiv \{u_{m, n}\} + \{v_{m, n}\},
\end{align*}
where $ \gamma = ( 1 - \mu \omega)^{-1} > 1 $.
It follows from Lemma \ref{L:VaryC} that
\begin{align*}
\{u_{m, n}\}
&\le \{K_4\mu\gamma^{n}\rho(|\lambda - \mu|)\sum_{j = 1}^{m}
\alpha^{j - 1}\frac{1}{\alpha^{j}}\sum_{i = 1}^{n}\binom{n}{i}\beta^{n - i}
\alpha^{i}\} \\
&\le \{K_4\gamma^{n}\rho(|\lambda - \mu|)\mu \frac{1}{\alpha}m\}
= \{K_4\rho(|\lambda - \mu|)\gamma^{n}(m\lambda)\}.
\end{align*}

To estimate $ \{v_{m, n}\} $,
as in Crandall-Pazy \cite[page 68]{Cran}, let $ \delta > 0 $ be
given and write
\[
 \{v_{m, n}\} =
 \{I^{(1)}_{m, n}\} + \{I^{(2)}_{m, n}\},
\]
where  $ \{I^{(1)}_{m, n}\} $
is the sum over indices with $ |n \mu - (m - j)\lambda|  < \delta $,
and $ \{I^{(2)}_{m, n}\} $ is the sum over indices with
$ |n \mu - (m - j)\lambda| \geq \delta $.
As a consequence of  Lemma \ref{L:VaryC},
we have
\begin{align*}
\{I^{(1)}_{m, n}\}
&\leq \{K_4 \mu \gamma^{n}\rho(\delta)\sum_{j = 1}
^{m}\alpha^{j - 1}\frac{1}{\alpha^{j}}\sum_{i = j}^{n}\binom{n}{i}
\beta^{n - i}\alpha^{i}\} \\
&\leq \{K_4 \rho(\delta) \mu \gamma^{n}m \frac{1}{\alpha}\} = \{K_4
\rho(\delta)\gamma^{n}m \lambda \}.
\end{align*}
On the other hand,
\begin{align*}
  \{I^{(2)}_{m, n}\}
&\leq K_4\mu \rho(T)\sum_{j = 1}^{m}(\gamma \alpha)^{j - 1}
 (E_2 - \gamma \beta)^{j *}\{\gamma^{n}\} \\
&\leq K_4 \mu \rho(T)\sum_{j = 1}^{m}(\gamma \alpha)^{j - 1}
 (E_2 - \gamma \beta)^{j *}
\{\gamma^{n}\frac{[n \mu - (m - j)\lambda]^2}{\delta^2}\},
\end{align*}
which will be less than or equal to
\[
\{K_4\frac{\rho(T)}{\delta^2}\gamma^{n}[(m\lambda)
(n\mu -m\lambda)^2
+ (\lambda - \mu)\frac{m(m + 1)}{2}\lambda^2]\}
\]
and so the proof is complete. This is because of the calculations,
where Lemmas \ref{L:VaryB}, \ref{L:VaryC}, and \ref{L:VaryD}
were used:
\begin{gather*}
[n\mu - (m - j)\lambda]^2 = n^2\mu^2 - 2(n\mu)(m - j)\lambda
+ (m - j)^2\lambda^2; \\
\begin{aligned}
&\sum_{j = 1}^{m}(\gamma \alpha)^{j - 1}
(E_2 - \gamma \beta)^{j *}\{\gamma^{n}n^2\}\mu^2 \\
& = \gamma^{n - 1}\sum_{j = 1}^{m}\alpha^{j - 1}
\{\frac{n^2}{\alpha^{j}} - \frac{2jn}{\alpha^{j + 1}}
+[\frac{j(j - 1)}{\alpha^{j + 2}} + \frac{j(1 + \beta)}{\alpha^{j + 2}}] \\
&\quad - \sum_{i = 0}^{j - 1}[\frac{(j - i)(j - i - 1)}{
\alpha^{j - i + 2}} + \frac{(j - i)(1 + \beta)}{\alpha^{j - i + 2}}]
\binom{n}{i}\beta^{n - i}\}\mu^2  \\
& \le \gamma^{n}\sum_{j = 1}^{m}\{\frac{n^2}{\alpha}
- \frac{2jn}{\alpha^2} + [\frac{j(j - 1)}{\alpha^{3}}
+ \frac{j(1 + \beta)}{\alpha^{3}}]\}\mu^2,
\end{aligned}
\end{gather*}
where  the negative
terms associated with $ \sum_{i = 0}^{j - 1} $ were dropped;
\begin{align*}
&\sum_{j = 1}^{m}(\gamma \alpha)^{j - 1}
(E_2 - \gamma \beta)^{j *}\{\gamma^{n}n\}[2\mu(m - j)\lambda](-1)  \\
&= \sum_{j = 1}^{m}(\gamma \alpha)^{j - 1}\{\gamma^{n - j}[
\frac{n}{\alpha^{j}} - \frac{j}{\alpha^{j + 1}} \\
&\quad + \sum_{i = 0}^{j - 1}\binom{n}{i}
\beta^{n - i}\alpha^{i - j - 1}(j - i)]\}[2\mu(m - j)\lambda](-1) \\
&\le \sum_{j = 1}^{m}\gamma^{n}\{\frac{n}{\alpha} -
\frac{j}{\alpha^2}\}[2\mu(m - j)\lambda](-1), \\
& = \sum_{j = 1}^{m}\gamma^{n}\alpha^{-1}
\{- 2(n\mu)(m\lambda) + j[2n\mu \lambda + \frac{2\mu}{\alpha}(m\lambda)]
- j^2(\frac{2\mu \lambda}{\alpha})\};
\end{align*}
where the negative terms associated with $ \sum_{i = 0}^{j - 1}$
were dropped;
\begin{align*}
&\sum_{j = 1}^{m}(\gamma \alpha)^{j - 1}
(E_2 - \gamma \beta)^{j *}\{\gamma^{n}\}(m - j)^2\lambda^2 \\
&=  \sum_{j = 1}^{m}(\gamma \alpha)^{j - 1}
\{\gamma^{n - j}[\frac{1}{\alpha^{j}} - \frac{1}{\alpha^{j}}
\sum_{i = 0}^{j - 1}\binom{n}{i}\beta^{n - i}\alpha^{i}]\}
(m - j)^2\lambda^2 \\
& \le \sum_{j = 1}^{m}\gamma^{n}\alpha^{-1}(m^2
- 2mj + j^2)\lambda^2,
\end{align*}
where the negative terms
associated with $ \sum_{i = 0}^{j - 1} $ were dropped.

Adding up the right sides of the above three inequalities
and grouping them as a polynomial in $ j $ of
degree two, we have the
following:
The term involving $ j^{0} = 1 $ has the factor
\[
\mu \frac{1}{\alpha}\sum_{j = 1}^{m}[n^2\mu^2
- 2 (n\mu)(m\lambda)+(m\lambda)^2]
= (m\lambda)(n\mu - m\lambda)^2;
\]
the term involving $ j^2 $ has the factor
\[
\frac{\mu^2}{\alpha^{3}} - \frac{2\mu \lambda}{\alpha^2}
+ \frac{\lambda^2}{\alpha} = 0;
\]
the term involving $ j $ has two parts, one of which has
the factor
\[
\frac{2n\mu \lambda}{\alpha} + \frac{2\mu m \lambda}{\alpha^2}
- \frac{2m\lambda^2}{\alpha} - \frac{2n\mu^2}{\alpha^2}
= 0,
\]
and the other of which has the factor
\[
\mu \sum_{j = 1}^{m}(\frac{1 + \beta}{\alpha^{3}}
- \frac{1}{\alpha^{3}})j\mu^2 = (\lambda - \mu)\frac{m(m + 1)}{2}
\lambda^2.
\]
The proof is complete.
\end{proof}

\begin{remark} \label{rmk7} \rm
The results in Proposition \ref{P:VaryB}
are true for $ n, m \ge 0 $, but a similar result in the
\cite[Proposition 4, page 236]{Lin1} has the restriction
$ n\mu - m\lambda \ge 0 $ which is not suitable for a mathematical
induction proof.
\end{remark}

\subsection*{Acknowledgments}
The author wishes to thank very much Professor
Jerome A. Goldstein at University of Memphis,
for his teaching and training, which helps the author in many ways.

\begin{thebibliography}{00}
\bibitem{Ba} V. Barbu;
 \emph{Semigroups and Differential Equations in Banach Spaces}, Leyden :
Noordhoff, 1976.

\bibitem{Br} H. Brezis;
 \emph{On a problem of T. Kato}, Comm. Pure Appl. Math., \textbf{
24} (1971), 1-6.

\bibitem{Bre} H. Brezis;
 \emph{Semi-groupes non Lineaires et applications}, Symposium
sur les problemes d'evolution, Instituto Nazionale di Alta Mathematica,
Rome, May 1970.

\bibitem{Bro} F. E. Browder;
\emph{Nonlinear equations of evolution and nonlinear
accretive operators in Banach spaces}, Bull. Amer. Math. Soc., \textbf{73}
(1967), 867-874.

\bibitem{Co} E. A. Coddington and N. Levinson;
 \emph{Theory of Ordinary Differential
Equations}, McGraw-Hill Book Company Inc., New York, 1955.

\bibitem{Cr} M. G. Crandall and T. M. Liggett;
\emph{Generation of semigroups of
nonlinear transformations on general Banach spaces}, Amer. J. Math., \textbf{93} (1971), 256-298.

\bibitem{Crand} M. G. Crandall;
\emph{A generalized domain for semigroup Generators},
Proceedings of the AMS, \textbf{2}, (1973), 435-440.

\bibitem{Cra} M. G. Crandall and A. Pazy;
\emph{Semi-groups of nonlinear contractions
and dissipative sets}, J. Functional Analysis, \textbf{3} (1969), 376-418.

\bibitem{Cran} M. G. Crandall and A. Pazy;
\emph{Nonlinear evolution equations in Banach
spaces}, Israel J. Math., \textbf{11} (1972), 57-94.

\bibitem{En} K. Engel and R. Nagel;
\emph{One-Parameter Semigroups for Linear
Evolution Equations}, Springer-Verlag, New York, 1999.

\bibitem{Eng} K. Engel and R. Nagel;
\emph{A Short Course on Operator semigroups},
Springer-Verlag, New York, 2006.

\bibitem{Ka} J. Kacur;
\emph{Method of Rothe in Evolution Equations},
Teubner Texte zur Mathematik, Band 80, BSB B. G.
Teubner Verlagsgessellschaft, Leipzig, 1985.

\bibitem{Gau} L. Gaul, M. Kogl, and M. Wagner;
\emph{Boundary Element Methods for
Engineers And Scientists: An Introductory Course with Advanced Topics},
Springer-Verlag, New York, 2003.

\bibitem{Gi2} D. Gilbarg and N. S. Trudinger;
\emph{Elliptic Partial Differential
Equations of Second Order}, Second Edition, Springer-Verlag, New York, 1983.

\bibitem{Gol} J. A. Goldstein;
\emph{Semigroups of Linear Operators and Applications},
Oxford University Press, New York, 1985.

\bibitem{Hi} E. Hille and R. S. Phillips;
\emph{Functional Analysis and Semi-groups},
Amer. Math. Soc. Coll. Publ., Vol. 31, Providence, R. I., 1957.

\bibitem{Kat} T. Kato;
\emph{Nonlinear semi-groups and evolution equations}, J. Math.
Soc. Japan, \textbf{19} (1967), 508-520.

\bibitem{Kato} T. Kato;
\emph{Accretive operators and nonlinear evolution equations in
Banach spaces}, Proc. Symp. in Pure Math. \textbf{18}, Part I, Amer. Math.
Soc., Providence, R. I., 138-161.

\bibitem{Lie} G. M. Lieberman;
\emph{Second Order Parabolic Differential Equations},
World Scientific, Singapore, 1996.

\bibitem{Li} C. -Y. Lin;
 \emph{Cauchy problems and applications}, Topological Methods in
Nonlinear Analysis, \textbf{15}(2000), 359-368.

\bibitem{Lin2} C. -Y. Lin;
\emph{Time-dependent nonlinear
evolution equations}, Diff. and Int. Equations, \textbf{
15} (2002), 257-270.

\bibitem{Lin0} C. -Y. Lin;
 \emph{On generation of $ C_0
$ semigroups and nonlinear operator semigroups},
Semigroup Forum, \textbf{66} (2003), 110-120.

\bibitem{Lin1} C. -Y. Lin;
 \emph{On generation of nonlinear operator semigroups and nonlinear
evolution operators}, Semigroup Forum,
\textbf{67} (2003), 226-246.

\bibitem{Lin4} C. -Y. Lin;
 \emph{Nonlinear evolution equations}, Electronic
Journal of Differential Equations, Vol. 2005(2005), No. 42, pp. 1-42.

\bibitem{LinA} C. -Y. Lin;
 \emph{Theory and Examples of Ordinary Differential Equations},
 published by World Scientific, Singapore, 2011.

\bibitem{Mic} R. E. Mickens;
 \emph{Difference Equations, Theory and Applications}, Second
Edition, Van Nostrand Reinhold, New York, 1990.

\bibitem{Mi} I. Miyadera;
 \emph{Nonlinear Semigroups}, Translations of Mathematical
Monographs, vol. 109, American Mathematical Society, 1992.

\bibitem{Miy} I. Miyadera;
 \emph{Some remarks on semigroups of nonlinear operators},
Tohoku Math. J., \textbf{23} (1971), 245-258.

\bibitem{Oh} S. Oharu;
 \emph{On the generation of semigroups of nonlinear
contractions}, J. Math. Soc. Japan, \textbf{22} (1970), 526-550.

\bibitem{Pa} A. Pazy;
 \emph{Semigroups of nonlinear operators in Hilbert spaces},
Problems in Non-linear Analysis, C. I. M. E. session \textbf{4} (1970),
Edizioni Cremonese, Rome, 343-430.

\bibitem{Paz} A. Pazy;
 \emph{Semigroups of Linear Operators and Applications in Partial
Differential Equations}, Springer-Verlag, New York, 1983.

\bibitem{Ro} E. Rothe;
 \emph{Zweidimensionale parabolische Randwertaufgaben als Grenzfall
 eindimensionaler Randwertaufgaben}, Math. Ann.,
\textbf{102} (1930), 650-670.

\bibitem{Roy} H. L. Royden;
\emph{Real Analysis}, Macmillan Publishing Company, New York, 1989.

\bibitem{Sch}
A. Schatz, V. Thomee, and W. Wendland;
\emph{Mathematical Theory of Finite
and Boundary Element Methods}, Birkhauser, Basel, Boston, 1990.

\bibitem{Tr} G. M. Troianiello;
 \emph{Elliptic Differential Equations and Obstacle
Problems}, Plenum Press, New York, 1987.

\bibitem{We} G. F. Webb;
 \emph{Nonlinear evolution equations and product stable
operators in Banach spaces}, Trans. Amer. Math. Soc., \textbf{155} (1971),
409-426.

\bibitem{Wes} U. Westphal;
\emph{Sur la saturation pour des semi-groupes non lin\'eaires},
C. R. Acad. Sc. Paris \textbf{274} (1972), 1351-1353.

\end{thebibliography}

\end{document}
