\documentclass[reqno]{amsart}
\usepackage{hyperref}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2015 (2015), No. 201, pp. 1--18.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu}
\thanks{\copyright 2015 Texas State University - San Marcos.}
\vspace{9mm}}

\begin{document}
\title[\hfilneg EJDE-2015/201\hfil Analytic solutions of nonlinear PDEs]
{Analytic solutions of a class of nonlinear partial differential equations}


\author[E. N. Petropoulou \hfil EJDE-2015/201\hfilneg]
{Eugenia N. Petropoulou}

\address{Eugenia N. Petropoulou \newline
 Department of Civil Engineering, 
 University of Patras, 26500 Patras, Greece}
\email{jenpetr@upatras.gr}

\thanks{Submitted April 29, 2015. Published August 4, 2015.}
\subjclass[2010]{35A01, 35A02, 35B99, 35C10, 35J60, 35L70}
\keywords{Analytic solution; series solution; bounded solution;  wave-type PDE; 
\hfill\break\indent Laplace-type PDE; PDE with mixed derivatives; sine-Gordon;
 Klein-Gordon}

\begin{abstract}
 We study a class of nonlinear partial differential equations,
 which can be connected with wave-type equations and Laplace-type equations,
 by using a functional-analytic technique.
 We establish primarily the existence and uniqueness of bounded
 solutions in the two-dimensional Hardy-Lebesgue space of analytic functions
 with independent variables lying in the open unit disc. However these results
 can be modified to expand the domain of definition.
 The proofs have a constructive character enabling the determination of concrete
 and easily verifiable conditions, and the determination of the  coefficients
 appearing in the power series solution.
 Illustrative examples are given related to the sine-Gordon equation,
 the Klein-Gordon equation, and to equations with nonlinear terms of algebraic,
 exponential and logistic type.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{remark}[theorem]{Remark}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{result}[theorem]{Result}
\allowdisplaybreaks


 \section{Introduction} \label{Intro}

 Recently in \cite{PSAIMS}, a functional-analytic technique was employed for 
the study of bounded, analytic or entire, complex solutions of the 
Benjamin-Bona-Mahony equation \cite{BBM1972}
 \begin{equation}
 u_t+u_x+uu_x-u_{xxt}=0,\quad u=u(x,t)\label{Intro_BBM}
 \end{equation}
as well as the associated  linear equation
 \begin{equation}
 u_t+u_x-u_{xxt}=0, \quad u=u(x,t). \label{Intro_LinBBM}
 \end{equation}
 This technique was used for the first time in \cite{PS2009}, 
for finding a necessary and sufficient condition for the existence of 
polynomial solutions of a class of linear partial differential equations (PDEs). 
Its main idea, is the transformation of the PDE 
into an equivalent operator equation in an abstract Hilbert or Banach space. 
Moreover, this technique is an extension of another functional-analytic 
technique for the study of analytic solutions of initial value problems 
of ordinary differential equations (ODEs), introduced by
Ifantis \cite{I1971} and systemized in \cite{I1978,I1987a}.

 In the present study, the analytic solutions of the general class of 
nonlinear PDEs
 \begin{equation}
 u_{xt}+au_{x}+bu_{t}+cu=g(x,t)+G(u(x,t)), \quad u=u(x,t)
 \label{Intro_GeneralPDE}
 \end{equation}
where $ G(u(x,t))=\sum_{n=2}^{\infty}c_n [u(x,t)]^n$ will be studied, 
extending in this way the method of \cite{PSAIMS} to other kinds of nonlinear terms.
 It should be noted that the nonlinear term $G(u(x,t))$ appearing in
 \eqref{Intro_GeneralPDE} is quite general, since it includes all kinds of
 nonlinear terms having a Taylor expansion.
 
 The problem of the analytic solutions of PDEs is an old and interesting 
problem on its own. From the various papers regarding several results on 
analytic solutions of PDEs,  \cite{ES1997,K1972,Z1980,Z1999} are indicatively 
mentioned, as well as the more recent \cite{CN2012,CGS2012,HP2012}.

 The main result of the present paper (Theorem \ref{MR_MainTheorem}) 
is stated in \S\ref{MR} and is of Cauchy-Kowalewski type establishing a 
unique bounded solution of \eqref{Intro_GeneralPDE} in the Banach space
\begin{align*}
  H_1(\Delta^{2})=\Big\{&f:\Delta^{2}\to\mathbb{C} ,
 \text{ where } f(x,t)=\sum_{i=1}^{\infty}\sum_{j=1}^{\infty}\overline
 {f}_{ij}x^{i-1}t^{j-1}\in H_2(\Delta^2),\\
  & \text{for which }\sum_{i=1}^{\infty}\sum_{j=1}^{\infty}| f_{ij}|
 <+\infty\Big\},
 \end{align*}
where $\Delta^2=\Delta\times\Delta$, $\Delta=\{z\in\mathbb{C}:| z|<1\}$,
with norm $\| f(x,t)\|_{H_1(\Delta^{2})}=\sum_{i=1}^{\infty}
 \sum_{j=1}^{\infty}| f_{ij}|$.
The space $H_2(\Delta^2)$ appearing in the previous definition, is the Hilbert space
\begin{align*}
  H_2(\Delta^{2})=\Big\{&f:\Delta^{2}\to\mathbb{C} ,
 \text{ where }  f(x,t)=\sum_{i=1}^{\infty}\sum_{j=1}^{\infty}\overline
 {f}_{ij}x^{i-1}t^{j-1}
 \text{ is analytic in } \Delta^{2}\\
 &\text{with } \sum_{i=1}^{\infty}\sum_{j=1}^{\infty}| f_{ij}|^{2}
 <+\infty\Big\},
 \end{align*}
 with inner product defined by
 $$
\left(f_1(x,t),f_2(x,t)\right)_{H_2(\Delta^{2})}=\sum_{i=1}^{\infty}
 \sum_{j=1}^{\infty}\overline{\alpha}_{ij}b_{ij},
$$
 where
\[
f_1(x,t)=\sum_{i=1}^{\infty}\sum_{j=1}^{\infty}\overline{\alpha}_{ij}x^{i-1}t^{j-1},
\quad
f_2(x,t)=\sum_{i=1}^{\infty}\sum_{j=1}^{\infty}\overline{b}_{ij}x^{i-1}t^{j-1}
\]
are elements of $H_2(\Delta^{2})$. (The one-dimensional spaces $H_2(\Delta)$
and $H_1(\Delta)$ are analogously defined with only one series involved in
their definitions.)
 
 For the proof of the main result, which is also given in \S\ref{MR}, 
the technique presented in \S\ref{Abstract} is utilized. This technique 
reduces the problem of $H_1(\Delta^{2})$ solutions of \eqref{Intro_GeneralPDE},
to an equivalent problem for the solutions of an operator equation in an 
abstract Banach space. One important advantage of this approach is that 
the conditions accompanying \eqref{Intro_GeneralPDE} are incorporated 
in the equivalent operator equation. Another equally important advantage 
of this technique, which is a consequence of the spaces $H_2(\Delta^{2})$ 
and $H_1(\Delta^{2})$, is that the established solution is by definition 
analytic in the form of a power series and thus, there is no need to prove 
convergence using for example the commonly used method of majorants.

 The reasons for studying PDEs in $H_1(\Delta^2)$ and $H_2(\Delta^2)$, 
apart from the fact that these spaces are included in the important class 
of analytic functions, are that they are quite useful in applications 
and their elements are represented by one function and not by a class 
of equivalent functions, as in the case of $L_2(\Delta^{2})$. 
Moreover, they are suitable for studying polynomial solutions of PDEs.  
Also, by establishing a solution of a PDE in $H_2(\Delta^{2})$ or 
$H_1(\Delta^{2})$, this solution is a convergent power series, the coefficients 
of which can be uniquely determined in many cases, thus obtaining an ``exact'' 
solution. Finally, these spaces appear naturally in problems of quantum mechanics. 
For more details, see \cite{PSAIMS} and the references therein.
 
 For the main result of \eqref{Intro_GeneralPDE}, the independent variables 
$x$ and $t$ are both assumed in the open unit disc $\Delta$. 
However, this is not restrictive since one may choose instead equation
 \begin{equation}
 \tilde{u}_{\tilde{x}\tilde{t}}+\tilde{a}\tilde{u}_{\tilde{x}}
+\tilde{b}\tilde{u}_{\tilde{t}}+\tilde{c}\tilde{u}
=\tilde{g}(\tilde{x},\tilde{t})+\tilde{G}(\tilde{u}(\tilde{x},\tilde{t}))
 \label{Intro_GeneralPDEXT}
 \end{equation}
where $\tilde{u}=\tilde{u}(\tilde{x},\tilde{t})$ with 
$|\tilde{x}|<X$ and $|\tilde{t}|<T$, $X, T$,
 positive finite numbers. By using the simple transformations
 \begin{equation}
 \tilde{x}=x\cdot X,\quad  \tilde{t}=t\cdot T,
 \label{Intro_Trans2}
 \end{equation}
equation \eqref{Intro_GeneralPDEXT} reduces to an equation of the 
form \eqref{Intro_GeneralPDE} for the function  
$u(x,t)=\tilde{u}(xX,tT)=\tilde{u}(\tilde{x},\tilde{t})$ 
and the results for equation \eqref{Intro_GeneralPDE} can be carried to
 equation \eqref{Intro_GeneralPDEXT} (see Corollary \ref{MR_Cor1}).
 
 Apart from the fact that studying equation \eqref{Intro_GeneralPDE} 
is quite interesting on its own, another strong motivation is the 
connection of \eqref{Intro_GeneralPDE} with wave-type or Laplace-type equations.
 More precisely, equation \eqref{Intro_GeneralPDE} can be connected with 
the wave-type equation
 \begin{equation}
 \hat{u}_{\xi\xi}-\kappa^2\hat{u}_{\eta\eta}+\hat{a}\hat{u}_{\xi}
+\hat{b}\hat{u}_{\eta}+\hat{c}\hat{u}
=\hat{g}(\xi,\eta)+\hat{G}(\hat{u}(\xi,\eta)),\quad \hat{u}
=\hat{u}(\xi,\eta),\quad \kappa\neq0
 \label{Intro_WavePDE}
 \end{equation}
 using the classic transformations
 \begin{equation}
 x=\eta+\kappa\xi,\quad t=\eta-\kappa\xi
 \label{Intro_Trans1}
 \end{equation}
used also by d'Alembert. In this way, \eqref{Intro_WavePDE} is reduced 
to an equation of the form \eqref{Intro_GeneralPDE} for the function  
$u(x,t)=\hat{u}(\frac{x-t}{2\kappa},\frac{x+t}{2})=\hat{u}(\xi,\eta)$. 
Similarly, the Laplace-type equation
 \begin{equation}
 \hat{u}_{\xi\xi}+k^2\hat{u}_{\eta\eta}+\hat{a}\hat{u}_{\xi}
+\hat{b}\hat{u}_{\eta}+\hat{c}\hat{u}=\hat{g}(\xi,\eta)+\hat{G}(\hat{u}(\xi,\eta)),
\quad \hat{u}=\hat{u}(\xi,\eta),\quad k\neq0,
 \label{Intro_LaplacePDE}
 \end{equation}
using transformations \eqref{Intro_Trans1} but now for $\kappa=-ik$ 
is reduced to an equation of the form \eqref{Intro_GeneralPDE} for the function  
$u(x,t)=\hat{u}(\frac{t-x}{2ik},\frac{x+t}{2})=\hat{u}(\xi,\eta)$. 
In this way, the results of the present paper can provide useful information 
for the solutions of \eqref{Intro_WavePDE} and \eqref{Intro_LaplacePDE}. 
These results are presented in \S\ref{Connection}.

 The importance of equations of the form \eqref{Intro_WavePDE} 
or \eqref{Intro_LaplacePDE} is well-known in applications and can be found 
in various classic textbooks and a huge number of research papers. 
Most of the classical results regarding the existence and/or uniqueness 
of solutions of equations of the form \eqref{Intro_WavePDE} or 
\eqref{Intro_LaplacePDE} can be found for example in \cite{E2010} 
or \cite{Tc2011}.

 Summarizing, this paper is organized as follows: 
In \S\ref{Abstract}, the abstract setting of the method used is described. 
In \S\ref{MR} the main result is stated and proved. 
It is worth mentioning that its proof has a constructive character, 
giving rise to two easily verifiable conditions for the existence 
and uniqueness of solutions of \eqref{Intro_GeneralPDE} in $H_1(\Delta^2)$. 
In \S\ref{Connection}, equation \eqref{Intro_GeneralPDE} is connected with 
equations \eqref{Intro_WavePDE} and \eqref{Intro_LaplacePDE} and the 
main result of \S\ref{MR} is ``translated'' in terms of these two equations. 
Finally, various illustrative examples are given in \S\ref{Examples}. 
Most of these examples arise in various applications and concern the sine-Gordon 
equation, the Klein-Gordon equation, as well as equations involving nonlinear 
terms of exponential, algebraic and logistic type. For one of these examples, 
the coefficients of the predicted power series solution are also explicitly 
computed in order to illustrate the procedure and the established solution 
agrees with the solution already found in \cite{EB2014}.

 \section{Abstract setting}\label{Abstract}
 
 Denote by $H$ an abstract separable Hilbert space over the complex field 
$\mathbb{C}$ with orthonormal base $\{e_{i,j}\}_{i,j=1}^{\infty}$. 
The inner product and the induced norm will be denoted as usual by 
$(\cdot,\cdot)$, $\|\cdot\|$. Define also the shift operators
$V_1$, $V_2$ on $H$ as follows:
 $$
V_1e_{i,j}=e_{i+1,j},\quad i,j=1,2,\dots ,\quad
 V_2e_{i,j}=e_{i,j+1}, \quad i,j=1,2,\dots 
$$
 and their adjoint operators $V_1^{*}$, $V_2^{*}$ as:
\begin{gather*}
V_1^{*}e_{i,j}=e_{i-1,j},\quad  i=2,3,\dots , j=1,2,\dots
 \quad  V_1^{*}e_{1,j}=0, \quad  j=1,2,\dots;\\
V_2^{*}e_{i,j}=e_{i,j-1},\quad  i=1,2,\dots , j=2,3,\dots \quad
 V_2^{*}e_{i,1}=0, \quad  i=1,2,\dots .
\end{gather*}
The operators $V_{i}$, $V_{j}^{*}$, $i,j=1,2$ commute as long as the 
indices are different. For example, it is true that $V_1 V_2  = V_2 V_1$ 
or $V_1 V_2^*  = V_2^* V_1$. Moreover,
 \begin{equation}
 V_1^* V_1  = I,\quad  V_2^* V_2  = I, \quad  
\| V_1\|=\| V_2\|=\| V_1^{*}\|=\| V_2^{*}\|=1
 \label{Method_V*V}
 \end{equation}
where $I$ is the identity operator. The following two propositions are 
very important for the method employed in the present study.
 
\begin{proposition}[{\cite[Proposition 1]{PS2009}}]   \label{Method_PropEigenvalues}
 Every point $xt$, with $x,t\in\Delta=\{z\in\mathbb{C}:| z|<1\}$,
belongs to the point spectrum of $V_1^{*}V_2^{*}$ and the set of the eigenelements:
 \begin{equation}
 f_{xt}=\sum_{i=1}^{\infty}\sum_{j=1}^{\infty}x^{i-1}t^{j-1}e_{i,j},
 \quad  
f_{0t}=\sum_{j=1}^{\infty}t^{j-1}e_{1,j}, \quad
f_{x0}=\sum_{i=1}^{\infty} x^{i-1}e_{i,1},\quad  
f_{00}=e_{1,1} \label{Method_Eigenelements}
\end{equation}
 forms a complete system in $H$ i.e., if $f$ is orthogonal to $f_{xt}$ for all
 $x,t\in\Delta$, then $f=0$.
 \end{proposition}

 \begin{proposition}[{\cite[\S3.2]{PS2009}}]  \label{Method_Representation}
 The mapping  $\phi :H \to H_2(\Delta^{2})$ with
 \begin{equation}
 \phi (f)=(f_{xt},f)=f(x,t),  \label{Method_Rep}
 \end{equation}
 is a one-to-one mapping from $H$ onto $H_2(\Delta^{2})$, which preserves the norm.
 \end{proposition}

 Actually, for every $f(x,t)=\sum_{i=1}^{\infty}\sum_{j=1}^{\infty}\overline
 {f}_{ij}x^{i-1}t^{j-1}\in H_2(\Delta^{2})$, there exists the element
 $f=\sum_{i=1}^{\infty}\sum_{j=1}^{\infty}f_{ij}e_{i,j}\in H$ such that
 $\phi (f)=f(x,t)$, which is called the \emph{abstract form} of $f(x,t)$.
Conversely, if $f=\sum_{i=1}^{\infty}\sum_{j=1}^{\infty}(f,e_{i,j})e_{i,j}$, 
then due to \eqref{Method_Rep}, 
$f(x,t)=\sum_{i=1}^{\infty}\sum_{j=1}^{\infty}\overline{(f,e_{i,j})}x^{i-1}t^{j-1}$.
 
 Consider now the linear manifold of all the elements of 
$H_2(\Delta^2)$, $f(x,t)=\sum_{i=1}^{\infty}\sum_{j=1}^{\infty}
\overline {f}_{ij}x^{i-1}t^{j-1}$ which satisfy the condition 
$\sum_{i=1}^{\infty}\sum_{j=1}^{\infty}| f_{ij}|
 <+\infty$. This linear manifold equipped with the norm 
$\| f_1(x,t)\|_{H_1(\Delta^{2})}
=\sum_{i=1}^{\infty}\sum_{j=1}^{\infty}| f_{ij}|$,
becomes the well-known Banach space $H_1(\Delta^{2})$ and it ``carries'' 
the inner product of $H_2(\Delta^{2})$. The corresponding to $H_1(\Delta^{2})$ 
by the mapping \eqref{Method_Rep}, Banach abstract space will be denoted by 
$H_1$ and its norm by $\|\cdot\|_1$. For a discussion on why the space 
$H_1(\Delta^{2})$ is chosen, see \cite{I1987a} or \cite{PSAIMS}.

 As in \cite{I1987a}, the following statements are true:
 \begin{itemize}
\item  $H_1$ is invariant under the shift operators $V_i$, $V_{i}^{*}$, $i=1,2$ 
and their powers. Moreover, $\| V_1\|_1=\| V_2\|_1=\| V_1^{*}\|_1=\| V_2^{*}\|_1=1$.
 
\item $H_1$ is invariant under every bounded diagonal operator 
$De_{i,j}=d(i,j)e_{i,j}$, $i,j=1,2,\ldots$  on $H$. Moreover, 
$\| D\|_1=\| D\|=\sup_{i,j}| d(i,j)|$.

\item  The null spaces of $(V_1^{*})^k$ and $(V_2^{*})^k$ in $H$ belong to $H_1$.
 \end{itemize}
 
For the implementation of the method, the abstract forms of all the appearing 
terms in \eqref{Intro_GeneralPDE}, are needed. For all the linear terms, 
the corresponding abstract forms have been found in \cite[Proposition 2]{PS2009} 
and the following hold:
 \begin{equation}
 \begin{gathered}
  \frac{\partial f(x,t)}{\partial  x}=(f_{xt},C_1^{(0)}V_1^{*}f),\\
  \frac{\partial f(x,t)}{\partial  t}=(f_{xt},C_2^{(0)}V_2^{*}f), \\
  \frac{\partial^2 f(x,t)}{\partial x\partial t}=  (f_{xt},C_1^{(0)}V_1^{*}C_2^{(0)}V_2^{*}f)
 \end{gathered}  \label{Method_AbFormPartDer}
 \end{equation}
 where $C_1^{(0)}$, $C_2^{(0)}$ are the diagonal operators defined on
 $H$ as follows:
 $$
C_1^{(0)}e_{i,j}=ie_{i,j},\quad C_2^{(0)}e_{i,j}=je_{i,j}, \quad i,j=1,2,\ldots.
$$
 These operators have the following properties \cite[Remark 3]{PS2009}:

 (i) They have a self-adjoint extension with discrete spectrum,
 i.e. the definition domain of $C_1^{(0)}$, $C_2^{(0)}$ can
 be extended to the range of the bounded operators $B_1^{(0)}$,
 $B_2^{(0)}$, respectively, defined by:
 $B_1^{(0)}e_{i,j}=\frac{1}{i}e_{i,j}$, $B_2^{(0)}e_{i,j}=\frac{1}{j}e_{i,j}$, $i,j=1,2,\ldots$.

 (ii) The definition domains of the operators $(C_1^{(0)})^{p}$,
 $(C_2^{(0)})^{p}$ are extended to the range of the
 bounded operators $(B_1^{(0)})^{p}$, $(B_2^{(0)})^{p}$, $p=2,3,\ldots,k$,
 respectively.

 (iii) The range of $(B_1^{(0)})^{p}$ ($(B_2^{(0)})^{p}$) in $H$,
 $p=1,2,\ldots,k$, i.e. the definition domain of $(C_1^{(0)})^{p}$
 ($(C_2^{(0)})^{p}$) is isomorphic to the linear manifold in
 $H_2(\Delta^{2})$ which consists of functions with derivatives with
 respect to $x$ ($t$) up to order $p$ in $H_2(\Delta^{2})$.

 For the determination of the abstract form of the nonlinear term 
$ G(f(x,t))=\sum_{n=2}^{\infty}c_n [f(x,t)]^n$ appearing in 
\eqref{Intro_GeneralPDE}, a good starting point
 are the Propositions 3 and 4 of \cite{PSAIMS}, where it was 
found that the abstract form of the term $[f(x,t)]^2$ is the nonlinear
 Fr\'echet differentiable operator
 \begin{equation}
 f(V_1,V_2)f=\sum_{i=1}^{\infty}\sum_{j=1}^{\infty}(f,e_{i,j})V_1^{i-1}V_2^{j-1}f
 \label{Method_OperatorN}
 \end{equation}
 defined on all $H_1$ for $f\in H_1$.
 
 In the proof of \cite[Proposition 3]{PSAIMS},
 the following useful relation was contained, although not explicitly stated:
 \begin{equation}
 f^{*}(V_1,V_2)f_{xt}=f(x,t)f_{xt},  \label{Method_EigenvalueTypeRelation}
 \end{equation}
 where $f^{*}(V_1,V_2)$ is the adjoint of $f(V_1,V_2)$.
 
 By using \eqref{Method_EigenvalueTypeRelation} and mathematical induction, 
the following can be proved:

 \begin{proposition}  \label{Method_AbFormPowers}
 The abstract form of $[f(x,t)]^n$ is the element $ [f(V_1,V_2)]^{n-1}f$, 
where $n=2,3,\ldots$ and is defined on all $H_1$ for $f\in H_1$.
 \end{proposition}

 \begin{proof}
 As already mentioned, this is true for $n=2$. For $n=3$, the element 
$ [f(V_1,V_2)]^{2}f$ is defined on all $H_1$ for $f\in H_1$, since
 $$
\| [f(V_1,V_2)]^{2}f\|_1\leq\| f(V_1,V_2)\|_1^{2}\cdot\| f\|_1
\leq\| f\|_1^{3}<\infty.
$$
 Moreover,
\begin{align*}
(f_{xt},[f(V_1,V_2)]^2f)&=(f_{xt},f(V_1,V_2)f(V_1,V_2)f)\\
&=(f^{*}(V_1,V_2)f_{xt},f(V_1,V_2)f)\\
&\stackrel{\eqref{Method_EigenvalueTypeRelation}}=
(f(x,t)f_{xt},f(V_1,V_2)f)\\
&=f(x,t)(f_{xt},f(V_1,V_2)f)=f(x,t)(f^{*}(V_1,V_2)f_{xt},f)\\
&\stackrel{\eqref{Method_EigenvalueTypeRelation}}=
[f(x,t)]^2(f_{xt},f)\\
&\stackrel{\eqref{Method_Rep}} = [f(x,t)]^3.
\end{align*}
Now suppose that for $n=p$, the abstract form of $[f(x,t)]^p$ is 
$ [f(V_1,V_2)]^{p-1}f$. Then, as before it can be proved that the 
abstract form of $[f(x,t)]^{p+1}$ is the element $ [f(V_1,V_2)]^{p}f$, 
which is defined on all $H_1$ for $f\in H_1$. Thus, the proposition 
is true by use of mathematical induction.
 \end{proof}

 \begin{proposition}  \label{Method_AbFormSeries}
 Suppose that the analytic function $ G(w)=\sum_{n=2}^{\infty}c_n w^n$ 
has a radius of convergence $R_1>0$. Then, the nonlinear operator
 \begin{equation}
 N(f)=\sum_{n=2}^{\infty}\overline{c}_n[f(V_1,V_2)]^{n-1}f
 \label{Method_OperatorSeries}
 \end{equation}
 is the abstract form of $ G(f(x,t))=\sum_{n=2}^{\infty}c_n [f(x,t)]^n$ and is defined in the open sphere $S(0,R_1)\subset H_1$.
\end{proposition}

 \begin{proof}
 The operator $N(f)$ is well defined for $f\in S(0,R_1)$, since
 $$
\| N(f)\|_1\leq\sum_{n=2}^{\infty}| c_n|\cdot\| [f(V_1,V_2)]^{n-1}f\|_1
\leq\sum_{n=2}^{\infty}| c_n|\cdot\| f\|_1^n
\leq\sum_{n=2}^{\infty}| c_n| R^n<\infty,
$$
 for $\| f\|\leq R<R_1$. Moreover, $N(f)$ is the abstract form of $G(f(x,t))$, since
\begin{align*}
(f_{xt},N(f))&=\Big(f_{xt},\sum_{n=2}^{\infty}
\overline{c}_n[f(V_1,V_2)]^{n-1}f\Big)\\
&=\sum_{n=2}^{\infty}c_n \left(f_{xt},[f(V_1,V_2)]^{n-1}f\right)=G(f(x,t)),
\end{align*}
due to Proposition \ref{Method_AbFormPowers}.
 \end{proof}

 \begin{remark} \label{rmk2.5} \rm
 If $G(w)$ is an entire function of $w$, then $N(f)$ is defined on all $H_1$.
 \end{remark}

Operator $N(f)$ defined by \eqref{Method_OperatorSeries}, 
is Fr\'echet differentiable under specific assumptions and the proof 
of this fact is similar to the Weierstrass proof for the existence of the 
derivative of an analytic function. Also, it follows closely a proof 
given in \cite[Theorem 4.4]{I1987a}, for the Fr\'echet differentiability 
of a nonlinear operator similar to $N(f)$, used in the study of analytic 
solutions of nonlinear ODEs. However, the proof will be included here 
in order to keep the paper self-contained.

 \begin{proposition}  \label{Method_FrechetDifferentiability}
 Suppose that the analytic function $ G_1(w)=\sum_{n=2}^{\infty}c_n w^{n-1}$ 
has a radius of convergence $R_1>0$. Then, the nonlinear operator $N(f)$ 
defined by \eqref{Method_OperatorSeries}, is Fr\'echet differentiable at 
every point $f_0\in S(0,R_1)$ and its derivative is given by
 \begin{equation}
 N'(f_0)f=\sum_{n=2}^{\infty}\overline{c}_n(n-1)[f_0(V_1,V_2)]^{n-2}f.
 \label{Method_FrechetDerivative}
 \end{equation}
\end{proposition}

\begin{proof}
 Since formally
\begin{align*}
(f_{xt},N'(f_0)f)
&=\Big(f_{xt},\sum_{n=2}^{\infty}\overline{c}_n(n-1)[f_0(V_1,V_2)]^{n-2}f\Big)\\
&=\sum_{n=2}^{\infty}c_n (n-1)\left(f_{xt},[f_0(V_1,V_2)]^{n-2}f\right) \\
&=\sum_{n=2}^{\infty}c_n (n-1)\left(f_{0}^{*}(V_1,V_2)f_{xt},[f_0(V_1,V_2)]^{n-3}
 f\right)\\
& \stackrel{\eqref{Method_EigenvalueTypeRelation}}=
\sum_{n=2}^{\infty}c_n (n-1)f_0(x,t)\left(f_{xt},[f_0(V_1,V_2)]^{n-3}f\right)
\end{align*}
which implies
\[
(f_{xt},N'(f_0)f)
=\sum_{n=2}^{\infty}c_n (n-1)[f_0(x,t)]^{n-2}f(x,t)=G_2(f(x,t)),
\]
it suffices to show that $G_2(f(x,t))$ is the Fr\'echet derivative 
of 
\[
 G_1(f(x,t))=\sum_{n=2}^{\infty}c_n[f(x,t)]^{n-1}
\]
 at the point $f_0(x,t)\in S(0,R_1)\subset H_1(\Delta^2)$.

 Obviously, $G_2(f(x,t))$ is a linear operator of $f(x,t)$ for which
 $$
\| G_2(f(x,t))\|_{H_1(\Delta^2)}
\leq\sum_{n=2}^{\infty}| c_n|(n-1)R^{n-2}\| f(x,t)\|_{H_1(\Delta^2)}
<\sum_{n=2}^{\infty}| c_n|(n-1)R^{n-1},
$$
 which converges for $f_0(x,t)\in S(0,R_1)
\Rightarrow\| f_0(x,t)\|_{H_1(\Delta^2)}\leq R<R_1$ due to the analyticity 
of $G_1(w)$. Thus, $G_2(f(x,t))$ is well defined for $f(x,t)\in S(0,R_1)$.
 
Moreover, for $\| f_0(x,t)+h(x,t)\|_{H_1(\Delta^2)}\leq R<R_1$ it is
\begin{align*}
&G_1(f_0(x,t)+h(x,t))-G_1(f_0(x,t))\\
&=\sum_{n=2}^{\infty}c_n\left[(f_0(x,t)+h(x,t))^{n-1}-(f_0(x,t))^{n-1}\right] \\
&=\sum_{n=2}^{\infty}c_n h(x,t)\Big[(f_0(x,t))^{n-2}+(f_0(x,t))^{n-3}(f_0(x,t)
 +h(x,t)) \\
&\quad +\ldots+(f_0(x,t)+h(x,t))^{n-2}\Big] \\
&=h(x,t)\sum_{n=2}^{\infty}c_n\Big[(n-1)(f_0(x,t))^{n-2}
 +(f_0(x,t))^{n-3}(f_0(x,t)+h(x,t)-f_0(x,t)) \\
&\quad +\ldots+(f_0(x,t)+h(x,t))^{n-2}-(f_0(x,t))^{n-2}\Big]
\end{align*}
and as a consequence
\begin{align*}
&G_1(f_0(x,t)+h(x,t))-G_1(f_0(x,t))-G_2(h(x,t))\\
&=h(x,t)\sum_{n=3}^{\infty}c_n\Big[(f_0(x,t))^{n-3}h(x,t)
 +(f_0(x,t))^{n-4}h(x,t)(f_0(x,t)+h(x,t) \\
&\quad +f_0(x,t))+\ldots+h(x,t)(f_0(x,t)+h(x,t))^{n-3}+\ldots+(f_0(x,t))^{n-3}\Big] \\
&=(h(x,t))^2\sum_{n=3}^{\infty}c_n\Big[(f_0(x,t))^{n-3}+(f_0(x,t))^{n-4}(f_0(x,t)
 +h(x,t)+f_0(x,t)) \\
&\quad +\ldots+(f_0(x,t)+h(x,t))^{n-3}+\ldots+(f_0(x,t))^{n-3}\Big]
\end{align*}
which implies
\begin{align*}
&\| G_1(f_0(x,t)+h(x,t))-G_1(f_0(x,t))-G_2(h(x,t))\|_{H_1(\Delta^2)}\\
&\leq\| h(x,t)\|^{2}_{H_1(\Delta^2)}\sum_{n=3}^{\infty}
 | c_n|(R^{n-3}+2R^{n-3}+\ldots+(n-2)R^{n-3})\\
&=\frac{\| h(x,t)\|^{2}_{H_1(\Delta^2)}}{2}\sum_{n=3}^{\infty}
 | c_n|(n-1)(n-2)R^{n-3}
\end{align*}
which implies
\begin{align*}
&\frac{\| G_1(f_0(x,t)+h(x,t))-G_1(f_0(x,t))-G_2(h(x,t))
 \|_{H_1(\Delta^2)}}{\| h(x,t)\|_{H_1(\Delta^2)}}\\
&\leq\frac{\| h(x,t)\|_{H_1(\Delta^2)}}{2}
 \sum_{n=3}^{\infty}| c_n|(n-1)(n-2)R^{n-3}\to0,
\end{align*}
for $\| h(x,t)\|_{H_1(\Delta^2)}\to0$, since the series 
$\sum_{n=3}^{\infty}| c_n|(n-1)(n-2)R^{n-3}$ converges, due to
the analyticity of $G_1(w)$.
 \end{proof}

 \section{Main result}\label{MR}

 Consider the problem consisting of equation \eqref{Intro_GeneralPDE}, i.e.
 \begin{equation}
 u_{xt}+au_{x}+bu_{t}+cu=g(x,t)+\sum_{n=2}^{\infty}c_n [u(x,t)]^n, \quad u=u(x,t)
 \label{MR_GeneralPDE}
 \end{equation}
 and the conditions
 \begin{equation}
 u(x,0)=\phi_1(x), \quad u(0,t)=\phi_2(t).  \label{MR_IC}
 \end{equation}

 \begin{theorem}  \label{MR_MainTheorem}
 Assume that $g(x,t)\in H_1(\Delta^2)$, $u(x,0),u(0,t)\in H_1(\Delta)$. 
Suppose also that the series $\sum_{n=2}^{\infty}c_n w^n$ is an analytic 
function which converges for $| w|<R_1$, $R_1>0$, sufficiently large.
Then, if
 \begin{equation}
 | a|+| b|+| c|<1,  \label{MR_Con1}
 \end{equation}
 there exist $R_0>0$ and $P_0>0$ such that if
 \begin{equation}
 \| g(x,t)\|_{H_1(\Delta^2)}+\left(1+| b|\right)
\| u(x,0)\|_{H_1(\Delta)}+\left(1+| a|\right)
\| u(0,t)\|_{H_1(\Delta)}-| u(0,0)|<P_0,
 \label{MR_Con2}
 \end{equation}
 problem \eqref{MR_GeneralPDE}-\eqref{MR_IC} has a unique solution in 
$H_1(\Delta^2)$ bounded by $R_0$.
 \end{theorem}

 \begin{remark} \label{rmk3.2} \rm
 The previous result is not a purely local result, in the sense that the constants 
$R_0>0$ and $P_0>0$ can be explicitly determined. More precisely, as it will be 
made clear in the proof of Theorem \ref{MR_MainTheorem}, the constant $R_0$ 
is the point at which the function
$$
P(R)=\frac{R}{L}-\sum_{n=2}^{\infty}| c_n| R^n, \quad \text{with }
 L=\frac{1}{1-| a|-| b|-| c|}$$
 attains its maximum and $P_0=P(R_0)$.
 \end{remark}

 \begin{remark} \label{rmk3.3} \rm
 Even if the quantities $R_0$ and $P_0$ cannot be explicitly determined in 
some cases, they can be approximately determined by truncating the power series 
appearing in $P(R)$. In this way $P(R)$, becomes a polynomial of which the  
maximum can be found, at least numerically.
 \end{remark}

The following corollary is an immediate consequence of Theorem \ref{MR_MainTheorem} 
and extends the previous result for independent variables lying in a disc with 
radius not equal to 1.

 \begin{corollary}  \label{MR_Cor1}
 Consider the equation
 \begin{equation}
 \tilde{u}_{\tilde{x}\tilde{t}}+\tilde{a}\tilde{u}_{\tilde{x}}
+\tilde{b}\tilde{u}_{\tilde{t}}+\tilde{c}\tilde{u}
=\tilde{g}_1(\tilde{x},\tilde{t})+\sum_{n=2}^{\infty}\tilde{c}_n 
[\tilde{u}(\tilde{x},\tilde{t})]^n, \quad\tilde{u}=\tilde{u}(\tilde{x},\tilde{t})
 \label{MR_GeneralPDEXT}
 \end{equation}
 with $|\tilde{x}|<X$ and $|\tilde{t}|<T$, $X, T$,
positive finite numbers, which after using transformations \eqref{Intro_Trans2} becomes:
 \begin{equation}
 u_{xt}+au_{x}+bu_{t}+cu=g(x,t)+\sum_{n=2}^{\infty}c_n [u(x,t)]^n,
 \label{MR_GeneralPDEXTu}
 \end{equation}
 where $u(x,t)=\tilde{u}(xX,tT)=\tilde{u}(\tilde{x},\tilde{t})$, 
$g(x,t)=XT\tilde{g}_1(xX,tT)$, $a=\tilde{a}T$, $b=\tilde{b}X$, 
$c=\tilde{c}XT$ and $c_n=\tilde{c}_n XT$. 
Assume that $g(x,t)\in H_1(\Delta^2)$, $u(x,0),u(0,t)\in H_1(\Delta)$, 
the series $\sum_{n=2}^{\infty}\tilde{c}_n w^n$ is an analytic function 
which converges for $| w|<R_1$, $R_1>0$, sufficiently large and
 \begin{equation}
 T|\tilde{a}|+X|\tilde{b}|+XT|\tilde{c}|<1.
 \label{MR_Con11}
 \end{equation}
 Then there exist $R_0>0$ and $P_0>0$ such that if
 \begin{equation}
 \begin{aligned}
&\| g(x,t)\|_{H_1(\Delta^2)}+\Big(1+X|\tilde{b}|\Big)
\| u(x,0)\|_{H_1(\Delta)}\\
&+  \left(1+T|\tilde{a}|\right)\| u(0,t)\|_{H_1(\Delta)}
-| u(0,0)|<P_0,
 \end{aligned} \label{MR_Con21}
 \end{equation}
then \eqref{MR_GeneralPDEXT} has a unique solution bounded by $R_0$,
of the form
\[
 \tilde{u}(\tilde{x},\tilde{t})
=\sum_{i=1}^{\infty}\sum_{j=1}^{\infty}\overline{u}_{ij}
\big(\frac{\tilde{x}}{X}\big)^{i-1}\big(\frac{\tilde{t}}{T}\big)^{j-1},
\]
which converges absolutely for $|\tilde{x}|<X$, $|\tilde{t}|<T$.
 \end{corollary}

 \begin{proof}[Proof of Theorem \ref{MR_MainTheorem}]
 According to \S\ref{Abstract}, equation \eqref{MR_GeneralPDE} is written as
\begin{align*}
&\big(f_{xt},C_1^{(0)}V_1^{*}C_2^{(0)}V_2^{*}u\big)
+ a\big(f_{xt},C_1^{(0)}V_1^{*}u\big)
+b\big(f_{xt},C_2^{(0)}V_2^{*}u\big)
+c \big(f_{xt},u\big)\\
&=\big(f_{xt},g\big)+\big(f_{xt},N(u)\big),
\end{align*}
where $g$ is the abstract form of $g(x,t)$ and $N(u)$ the operator defined 
by \eqref{Method_OperatorSeries}, or since $f_{xt}$ form a complete system of 
$H$,
 \begin{equation}
 C_1^{(0)}V_1^{*}C_2^{(0)}V_2^{*}u+
 \overline{a}C_1^{(0)}V_1^{*}u+\overline{b}C_2^{(0)}V_2^{*}u+\overline{c}
 u=g+N(u)\label{Proofs_AbstractForm}
\end{equation}
which is the equivalent to \eqref{MR_GeneralPDE} abstract operator equation in $H$.

 By using the inverse $B_1^{(0)}$ of $C_1^{(0)}$ and the properties of 
$V_1^{*}$, equation \eqref{Proofs_AbstractForm} becomes
 $$
V_1^{*}C_2^{(0)}V_2^{*}u+
 \overline{a}V_1^{*}u+\overline{b}B_1^{(0)}C_2^{(0)}V_2^{*}u+\overline{c}B_1^{(0)}
 u=B_1^{(0)}g+B_1^{(0)}N(u)
$$
which implies
 \begin{equation}
 \begin{aligned}
 &C_2^{(0)}V_2^{*}u+  \overline{a}u+\overline{b}V_1B_1^{(0)}C_2^{(0)}V_2^{*}u
+\overline{c}V_1B_1^{(0)}u \\
&=V_1B_1^{(0)}g+V_1B_1^{(0)}N(u)+\sum_{j=1}^{\infty}A_j e_{1,j},
 \end{aligned}\label{Proofs_Aj}
\end{equation}
where the coefficients $A_j$ are uniquely determined by the coefficients of 
$\phi_2(t)$, by taking the inner product of \eqref{Proofs_Aj} with $e_{1,j}$ 
as follows:
 $$
\big(C_2^{(0)}V_2^{*}u,e_{1,j}\big)+\overline{a}(u,e_{1,j})=A_j \Rightarrow
 A_j=j(u,e_{1,j+1})+\overline{a}(u,e_{1,j}).
$$
 But since  
$u(x,t)=\sum_{i=1}^{\infty}\sum_{j=1}^{\infty} \overline{(u,e_{i,j})}x^{i-1}t^{j-1}$, 
it is $ \phi_2(t)=\sum_{j=1}^{\infty} \overline{(u,e_{1,j})}t^{j-1}$. 
Thus, the coefficients $A_j$ are determined via the coefficients of the power 
series in $t$ of $\phi_2(t)$. Proceeding in the same way, equation 
\eqref{Proofs_Aj} is rewritten as
\begin{align*}
&V_2^{*}u+\overline{a}B_2^{(0)}u+\overline{b}V_1B_1^{(0)}V_2^{*}u+
 \overline{c}B_2^{(0)}V_1B_1^{(0)}u \\
&=B_2^{(0)}V_1B_1^{(0)}g  +B_2^{(0)}V_1B_1^{(0)}N(u)
+\sum_{j=1}^{\infty}\frac{A_j}{j}e_{1,j}
\end{align*}
which implies
 \begin{equation}
 \begin{aligned}
& u+\overline{a}V_2B_2^{(0)}u+\overline{b}V_1B_1^{(0)}u+
 \overline{c}V_2B_2^{(0)}V_1B_1^{(0)}u\\
&=V_2B_2^{(0)}V_1B_1^{(0)}g
 +V_2B_2^{(0)}V_1B_1^{(0)}N(u)+\sum_{j=1}^{\infty}\frac{A_j}{j}e_{1,j+1}+
 \sum_{i=1}^{\infty}B_i e_{i,1}.
 \end{aligned} \label{Proofs_Bi}
 \end{equation}
The coefficients $B_i$ are again uniquely determined by the coefficients 
of $\phi_1(x)$, by taking the inner product of \eqref{Proofs_Bi} with $e_{i,1}$. 
More precisely, they are given by
 $$
B_1=(u,e_{1,1}), \quad 
B_i=(u,e_{i,1})+\frac{\overline{b}}{i-1}(u,e_{i-1,1}),\quad \forall\, i\neq1
$$
 and since $ \phi_1(x)=\sum_{i=1}^{\infty} \overline{(u,e_{i,1})}x^{i-1}$, 
it is obvious that $B_i$ are determined via the coefficients of the power series 
in $x$ of $\phi_1(x)$.

 For reasons of simplicity, \eqref{Proofs_Bi} is written as
 \begin{equation}
 (I+K)u=h+V_2B_2^{(0)}V_1B_1^{(0)}g+V_2B_2^{(0)}V_1B_1^{(0)}N(u), \label{Proofs_P1_K}
 \end{equation}
where 
\[
K=\overline{a}V_2B_2^{(0)}+\overline{b}V_1B_1^{(0)}+
 \overline{c}V_2B_2^{(0)}V_1B_1^{(0)},\quad  
h=\sum_{j=1}^{\infty}\frac{A_j}{j}e_{1,j+1}+  \sum_{i=1}^{\infty}B_i e_{i,1}.
\]
 According to a classical inversion theorem: 
``If $T$ is a linear bounded operator of a Hilbert space $H$, with 
$\| T\|<1$, then $I-T$ is invertible, defined on all $H$ and 
$\| (I-T)^{-1}\|\leq\frac{1}{1-\| T\|}$." 
Thus, since \eqref{MR_Con1} holds, the operator $I+K$ is invertible and 
its inverse is bounded by $ L=\frac{1}{1-| a|-| b|-| c|}$.
Then, \eqref{Proofs_P1_K} can be rewritten as:
 \begin{equation}
 u=(I+K)^{-1}\left[h+V_2B_2^{(0)}V_1B_1^{(0)}g+V_2B_2^{(0)}V_1B_1^{(0)}N(u)\right]
=g(u).
 \label{Proofs_FixedPointEquation}
 \end{equation}
 At this point the following fixed point theorem of Earle and Hamilton \cite{EH70} 
will be applied:
 ``If $f:X\rightarrow X$ is holomorphic, i.e. its Fr\'echet derivative exists, 
and $f(X)$  lies strictly inside $X$, then $f$ has a unique fixed point in $X$,
 where $X$ is a bounded,  connected and open subset of a Banach space $E$. 
(By saying that a subset  $X'$ of $X$ lies strictly inside $X$ it is meant that 
there exists an $\epsilon_1>0$ such that $\| x'-y\|>\epsilon_1$ for all 
$x'\in X'$ and $y\in E-X$.)"
 
 Returning to \eqref{Proofs_FixedPointEquation}, suppose that $u\in B(0,R)$, 
$R<R_1$. Then, $\| u\|_1<R<R_1$ and
 $$
\| g(u)\|_1\leq L(\| h\|_1+\| g\|_1+\| N(u)\|_1)
\leq L (\| h\|_1+\| g\|_1)+L\sum_{n=2}^{\infty}| c_n|\cdot\| u\|_1^{n}
$$
which implies 
 \begin{equation}
\| g(u)\|_1\leq L\left(\| h\|_1+\| g\|_1\right)+L\sum_{n=2}^{\infty}| c_n| R^{n}.
 \label{Proofs_Bounds}
 \end{equation}
Set $ M(R)=\sum_{n=2}^{\infty}| c_n| R^{n-2}$. By hypothesis, $R_1$ 
is sufficiently large and as a consequence there exists an $R_2\in [0, R_1)$ 
such that $LR_2M(R_2)>1$. Then, for the function
 $$
M_1(R)=1-LRM(R)
$$
 it is $M_1(0)=1>0$ and $M_1(R_2)<0$, which by the intermediate value theorem 
implies that there exists an $R_3\in(0,R_2)$ such that $M_1(R_3)=0$.

Consider now the continuous function
$$
P(R)=L^{-1}RM_1(R).
$$
Then, $P(0)=0=P(R_3)$ and $P'(0)>0$, whereas $P'(R_3)<0$. 
Thus, there exists an $R_0\in (0,R_3)$ where $P(R)$ attains its maximum.
 
 Now for every $\epsilon>0$ and $R=R_0$, if
 \begin{equation}
 \| h\|_1+\| g\|_1\leq P(R_0)-\frac{\epsilon}{L},
 \label{Proofs_Condition}
 \end{equation}
relation \eqref{Proofs_Bounds} gives
 $$
\| g(u)\|_1\leq LP(R_0)-\epsilon+LR_{0}^{2}M(R_0)
=LP(R_0)-\epsilon+R_0-R_{0}M_1(R_0)
$$
which implies $\| g(u)\|_1\leq R_0-\epsilon<R_0$.
 Moreover, $g(u)$ is Fr\'echet differentiable and thus according to the 
theorem of Earle and Hamilton, equation \eqref{Proofs_FixedPointEquation} 
has a unique solution in $H_1(\Delta^2)$, bounded by $R_0$.

Rewriting the left-hand side of inequality \eqref{Proofs_Condition} in 
terms of the original functions gives
\begin{align*}
\| h\|_1+\| g\|_1
&=\| g\|_1+\| \sum_{j=1}^{\infty}\frac{A_j}{j}e_{1,j+1}+
 \sum_{i=1}^{\infty}B_i e_{i,1}\|_1 \\
&\leq\| g\|_1+\sum_{j=1}^{\infty}|(u,e_{1,j+1})|
 +| a|\sum_{j=1}^{\infty}|(u,e_{1,j})| \\
&\quad  + \sum_{i=1}^{\infty}|(u,e_{i,1})|+| b|\sum_{i=2}^{\infty}|(u,e_{i-1,1})| \\
&=\| g(x,t)\|_{H_1(\Delta^2)}+\| u(0,t)\|_{H_1(\Delta)}-| u(0,0)|
 +| a|\cdot\| u(0,t)\|_{H_1(\Delta)} \\
&\quad +\| u(x,0)\|_{H_1(\Delta)}+| b|\cdot\| u(x,0)\|_{H_1(\Delta)}.
\end{align*}
 Thus, if \eqref{MR_Con2} holds, the problem \eqref{MR_GeneralPDE}, \eqref{MR_IC}
 has a unique solution in $H_1(\Delta^2)$, bounded by $R_0$.
 \end{proof}

 \begin{remark} \label{rmk3.5} \rm
 Following a procedure similar to the one employed in \cite{PSAIMS}, 
one may prove by use of the Fredholm alternative, that operator $I+K$ is invertible 
without restriction \eqref{MR_Con1}.
Then, Theorem \ref{MR_MainTheorem} remains valid without condition \eqref{MR_Con1}, 
but the bound $L$ is undetermined. Hence, the result now has a purely local character, 
since $R_0$ and $P_0$ cannot be explicitly determined.
 \end{remark}

 \section{Connections with wave-type and Laplace-type equations}
 \label{Connection}
 
 As already mentioned in \S\ref{Intro}, equation \eqref{Intro_GeneralPDE} 
can be connected with wave-type and Laplace-type equations. 
Indeed, consider the wave-type equation
 \begin{equation}
 \hat{u}_{\xi\xi}-\kappa^2\hat{u}_{\eta\eta}+\hat{a}\hat{u}_{\xi}
+\hat{b}\hat{u}_{\eta}+\hat{c}\hat{u}=\hat{g}_2(\xi,\eta)
+\sum_{n=2}^{\infty}\hat{c}_n [\hat{u}(\xi,\eta)]^n,\quad 
\hat{u}=\hat{u}(\xi, \eta),  \label{Appl_WavePDE}
 \end{equation}
where $\kappa$ is a nonzero real number. By using transformations 
\eqref{Intro_Trans1}, i.e.
 \begin{equation}
 x=\eta+\kappa\xi,\quad t=\eta-\kappa\xi  \label{Appl_Trans}
 \end{equation}
the previous equation becomes
 \begin{equation}
 u_{xt}+au_{x}+bu_{t}+cu=g(x,t)+\sum_{n=2}^{\infty}c_n [u(x,t)]^n,
 \label{Appl_WavePDEu}
 \end{equation}
 where $u(x,t)=\hat{u}(\frac{x-t}{2\kappa},\frac{x+t}{2})=\hat{u}(\xi,\eta)$, 
$g(x,t)=-\frac{1}{4\kappa^2}\hat{g}_2(\frac{x-t}{2\kappa},\frac{x+t}{2})$, 
$a=-\frac{\hat{b}+\hat{a}\kappa}{4\kappa^2}$, 
$b=-\frac{\hat{b}-\hat{a}\kappa}{4\kappa^2}$, 
$c=-\frac{\hat{c}}{4\kappa^2}$ and 
$c_n=-\frac{\hat{c}_n}{4\kappa^2}$. Then according to Theorem \ref{MR_MainTheorem} 
the following holds.

 \begin{corollary}
Assume that $g(x,t)\in H_1(\Delta^2)$, $u(x,0),u(0,t)\in H_1(\Delta)$, ($x,t$ 
given by \eqref{Appl_Trans}), the series $\sum_{n=2}^{\infty}\hat{c}_n w^n$ 
is an analytic function which converges for $| w|<R_1$, $R_1>0$, sufficiently 
large and
 \begin{equation}
 |\hat{b}+\hat{a}\kappa|+ |\hat{b}-\hat{a}\kappa|+|\hat{c}|<4\kappa^2.
 \label{Appl_WaveCon1}
 \end{equation}
Then there exist $R_0>0$ and $P_0>0$ such that if
 \begin{equation}
 \begin{aligned}
&\| g(x,t)\|_{H_1(\Delta^2)}+\Big(1+\frac{|\hat{b}
 -\hat{a}\kappa|}{4\kappa^2}\Big)\| u(x,0)\|_{H_1(\Delta)}\\
&+\Big(1+\frac{|\hat{b}+\hat{a}\kappa|}{4\kappa^2}\Big)
\| u(0,t)\|_{H_1(\Delta)}-| u(0,0)|<P_0,
 \end{aligned}  \label{Appl_WaveCon2}
 \end{equation}
then  \eqref{Appl_WavePDE} has a unique solution bounded by $R_0$, of the form 
\[
 \hat{u}(\xi,\eta)=\sum_{i=1}^{\infty}
\sum_{j=1}^{\infty}\overline{u}_{ij}(\eta+\kappa\xi)^{i-1}(\eta-\kappa\xi)^{j-1}, 
\]
which converges absolutely for $|\eta\pm\kappa\xi|<1$.
 \label{Appl_CorWave}
 \end{corollary}

In a similar way, consider the Laplace-type equation
 \begin{equation}
 \hat{u}_{\xi\xi}+k^2\hat{u}_{\eta\eta}+\hat{a}\hat{u}_{\xi}
+\hat{b}\hat{u}_{\eta}+\hat{c}\hat{u}=\hat{g}_2(\xi,\eta)
+\sum_{n=2}^{\infty}\hat{c}_n [\hat{u}(\xi,\eta)]^n,\quad 
\hat{u}=\hat{u}(\xi,\eta),
 \label{Appl_LaplacePDE}
 \end{equation}
where $k$ is a nonzero real number. By using transformations 
\eqref{Appl_Trans} for $\kappa=-ik$, the previous equation becomes
\begin{equation}
 u_{xt}+au_{x}+bu_{t}+cu=g(x,t)+\sum_{n=2}^{\infty}c_n [u(x,t)]^n,\label{Appl_LaplacePDEu}
\end{equation}
where $u(x,t)=\hat{u}(\frac{t-x}{2ik},\frac{x+t}{2})=\hat{u}(\xi,\eta)$, 
$g(x,t)=\frac{1}{4k^2}\hat{g}_2(\frac{t-x}{2ik},\frac{x+t}{2})$, 
$a=\frac{\hat{b}-i\hat{a}k}{4k^2}$, $b=\frac{\hat{b}+i\hat{a}k}{4k^2}$, 
$c=\frac{\hat{c}}{4k^2}$ and $c_n=\frac{\hat{c}_n}{4k^2}$. 
Then according to Theorem \ref{MR_MainTheorem} the following holds.

 \begin{corollary}
Assume that $g(x,t)\in H_1(\Delta^2)$, $u(x,0),u(0,t)\in H_1(\Delta)$, 
($x,t$ given by \eqref{Appl_Trans} for $\kappa=-ik$), the series 
$\sum_{n=2}^{\infty}\hat{c}_n w^n$ is an analytic function which converges 
for $| w|<R_1$, $R_1>0$, sufficiently large and
 \begin{equation}
 |\hat{b}-i\hat{a}k|+ |\hat{b}+i\hat{a}k|+|\hat{c}|<4k^2.
 \label{Appl_LaplaceCon1}
 \end{equation}
Then there exist $R_0>0$ and $P_0>0$ such that if
 \begin{equation}
 \begin{aligned}
&\| g(x,t)\|_{H_1(\Delta^2)}+\Big(1+\frac{|\hat{b}
 +i\hat{a}k|}{4k^2}\Big)\| u(x,0)\|_{H_1(\Delta)}\\
&+  \Big(1+\frac{|\hat{b}-i\hat{a}k|}{4k^2}\Big)\| u(0,t)\|_{H_1(\Delta)}
-| u(0,0)|<P_0,
 \end{aligned}  \label{Appl_LaplaceCon2}
 \end{equation}
then  \eqref{Appl_LaplacePDE} has a unique solution bounded by $R_0$,
 of the form 
\[
 \hat{u}(\xi,\eta)=\sum_{i=1}^{\infty}
\sum_{j=1}^{\infty}\overline{u}_{ij}(\eta-ik\xi)^{i-1}(\eta+ik\xi)^{j-1}, 
\]
which converges absolutely for $|\eta\pm ik\xi|<1$.
 \label{Appl_CorLaplace}
 \end{corollary}

 \section{Examples} \label{Examples}
 
To show the usefulness of Theorem \ref{MR_MainTheorem}, several examples will 
be given in this section, most of which arise in various applications. 
For the first example, the coefficients of the predicted power series solution 
will be explicitly computed in order to demonstrate the procedure. 
Of course this can be done for all the other examples, once the initial conditions 
are specified.

 \subsection{Equations with algebraic nonlinear terms}
 
In this first example, equations of the form \eqref{Appl_WavePDE} or 
\eqref{Appl_LaplacePDE} with a nonlinear term of the form $[\hat{u}(\xi,\eta)]^k$, 
$k\in\mathbb{N}$, $k\geq2$ will be considered. Equations of this kind have been 
studied for example in \cite{EB2014}, \cite{GR2010} and \cite{W2006}. 
More precisely in \cite{GR2010}, it was proved that there exist some quasi-periodic 
solutions with frequencies of the form $\omega=\lambda\omega^{*}$, $\lambda\sim1$, 
$\lambda\in\mathbb{R}$, $\omega^{*}$ a fixed Diophantine frequency, for the 
one-dimensional nonlinear wave equation
 $$
\hat{u}_{\xi\xi}-\hat{u}_{\eta\eta}+m\hat{u}+\hat{u}^3=0,\quad 
\hat{u}=\hat{u}(\xi,\eta),
$$
 subject to Dirichlet boundary conditions.

 In \cite{EB2014}, exact solutions of the Klein-Gordon equation
 \begin{equation}
 \hat{u}_{\xi\xi}+\alpha\hat{u}_{\eta\eta}+\beta\hat{u}
+\gamma\hat{u}^k=f(\xi,\eta),\quad \hat{u}=\hat{u}(\xi,\eta),
 \label{Exa_Alg_PaperPDE}
 \end{equation}
were found for various values of $k$ and various functions $f(\xi,\eta)$ 
by using a modification of the homotopy perturbation method under initial 
conditions on $\hat{u}(\eta,0)$ and $\hat{u}_{\xi}(\eta,0)$. 
Some of the examples treated in \cite{EB2014} were also studied in \cite{W2006} 
by use of a modified decomposition method.

 Starting with this motivation, the PDE
 \begin{equation}
 u_{xt}+au_{x}+bu_{t}+cu=g(x,t)+u^k, \quad u=u(x,t),\quad 
k\in\mathbb{N},\quad k\geq2  \label{Exa_Alg_PDE}
 \end{equation}
is considered. The function $P(R)$ in this case is
 $$
P(R)=\frac{R}{L}-R^k,\quad L=\frac{1}{1-| a|-| b|-| c|},
$$
which attains its maximum at $R_0=(\frac{1}{kL})^{\frac{1}{k-1}}$. 
Thus, according to Theorem \ref{MR_MainTheorem} the following holds:
 
\begin{result}   \label{Exa_Alg_ResultGeneral} \rm
 Assume that $g(x,t)\in H_1(\Delta^2)$, $u(x,0),u(0,t)\in H_1(\Delta)$,
 \begin{equation}
 | a|+| b|+| c|<1
 \label{Exa_Alg_GenCon1}
 \end{equation}
 and
 \begin{equation}
 \begin{aligned}
 &\| g(x,t)\|_{H_1(\Delta^2)}+\left(1+| b|\right)\| u(x,0)\|_{H_1(\Delta)}\\
 &+\left(1+| a|\right)\| u(0,t)\|_{H_1(\Delta)}-| u(0,0)|\\
&<(k-1)\big(\frac{1}{kL}\big)^{\frac{k}{k-1}}.
 \end{aligned} \label{Exa_Alg_GenCon2}
 \end{equation}
 Then equation \eqref{Exa_Alg_PDE} has a unique solution in $H_1(\Delta^2)$ 
bounded by $R_0$.
\end{result}

One of the equations studied in \cite{EB2014} was
 \begin{equation}
 \hat{u}_{\xi\xi}+\hat{u}_{\eta\eta}+\hat{u}+\hat{u}^3=2\eta+\eta\xi^2+\eta^3\xi^6, \quad \hat{u}=\hat{u}(\xi,\eta),
 \label{Exa_Alg_PDESpecificPaper}
 \end{equation}
for which it was found that it has the exact solution $\hat{u}(\xi,\eta)=\eta\xi^2$.

 By successively using the transformations
 \begin{gather}
 \tilde{x}=\eta-i\xi,\quad \tilde{t}=\eta+i\xi,\quad 
\hat{u}(\xi,\eta)=\tilde{u}(\tilde{x},\tilde{t})  \label{Exa_Alg_Trans1}
\\
 \tilde{x}=xX,\quad \tilde{t}=tT,\quad \tilde{u}(\tilde{x},\tilde{t})=u(x,t),\quad 
X,T>0
 \label{Exa_Alg_Trans2}
\end{gather}
equation \eqref{Exa_Alg_PDESpecificPaper} is reduced to
 \begin{equation}
 u_{xt}+\frac{XT}{4}u=h(x,t)-\frac{XT}{4}u^3, \quad u=u(x,t),
 \label{Exa_Alg_PDESpecific}
 \end{equation}
 which is of the form \eqref{Exa_Alg_PDE}, with
\begin{align*}
h(x,t)
&=-\frac{t^9 T^{10} X}{2048}+\frac{3t^8 T^9 x X^2}{2048}
 -\frac{t^6T^7 x^3 X^4}{256}+\frac{3 t^5 T^6 x^4X^5}{1024} \\
&\quad +\frac{3 t^4 T^5 x^5 X^6}{1024}-\frac{t^3 T^4 x^6 X^7}{256}
 -\frac{t^3 T^4 X}{32}+\frac{t^2 T^3 x X^2}{32}+\frac{3 t T^2 x^8 X^9}{2048} \\
&\quad +\frac{t T^2 x^2 X^3}{32}+\frac{t T^2 X}{4}-\frac{T x^9 X^{10}}{2048}
 -\frac{T x^3 X^4}{32}+\frac{T x X^2}{4}.
\end{align*}
For reasons of simplicity only the real solutions of \eqref{Exa_Alg_PDESpecific} 
will be considered.

If \eqref{Exa_Alg_PDESpecific} is complemented by the initial conditions
 \begin{equation}
 u(x,0)=-\frac{X^3}{8}x^3,\quad u(0,t)=-\frac{T^3}{8}t^3,
 \label{Exa_Alg_IC}
 \end{equation}
then Result \ref{Exa_Alg_ResultGeneral} becomes

 \begin{result}  \label{Exa_Alg_ResultSpecific} \rm
 If
 \begin{equation}
 XT<4  \label{Exa_Alg_SpCon1}
 \end{equation}
 and
 \begin{equation}
 \begin{aligned}
&\frac{T^{10} X}{2048}+\frac{3T^9X^2}{2048}+\frac{T^7 X^4}{256}
 +\frac{3T^6X^5}{1024}+\frac{3T^5X^6}{1024}+\frac{T^4X^7}{256}+\frac{T^4 X}{32}
 +\frac{T^3 X^2}{32}\\
&+\frac{3T^2 X^9}{2048}+\frac{T^2 X^3}{32}+\frac{T^2 X}{4}+\frac{T X^{10}}{2048}
+\frac{T X^4}{32}+\frac{T X^2}{4} +\frac{X^3}{8}+\frac{T^3}{8}\\
&<2\big(\frac{4-XT}{12}\big)^{3/2},
 \end{aligned}  \label{Exa_Alg_SpCon2}
\end{equation}
the initial value problem \eqref{Exa_Alg_PDESpecific}, \eqref{Exa_Alg_IC} 
has a unique solution in $H_1(\Delta^2)$ bounded by
 $(\frac{4-XT}{12})^{1/2}$.
\end{result}

 Moreover, this solution can be determined by computing the coefficients 
$(u,e_{i,j})$ of the real solution 
$u(x,t)=\sum_{i=1}^{\infty}\sum_{j=1}^{\infty}(u,e_{i,j})x^{i-1}t^{j-1}$ 
in the following way:

The abstract operator equation equivalent to \eqref{Exa_Alg_PDESpecific}, 
\eqref{Exa_Alg_IC}, according to \eqref{Proofs_P1_K}, is
 \begin{equation}
 \Big(I+\frac{XT}{4}V_2B_2^{(0)}V_1B_1^{(0)}\Big)u=h+V_2B_2^{(0)}V_1B_1^{(0)}g
+V_2B_2^{(0)}V_1B_1^{(0)}N(u),  \label{Exa_Alg_AbstractForm}
 \end{equation}
where
\begin{gather*}
N(u)=-\frac{XT}{4}[u(V_1,V_2)]^2u,\quad 
h=-\frac{T^3}{8}e_{1,4}-\frac{X^3}{8}e_{4,1},\\
\begin{aligned}
g&=\frac{T^2 X}{4}e_{1,2}-\frac{T^4 X}{32}e_{1,4}-\frac{T^{10} X}{2048}e_{1,10}
 +\frac{T X^2}{4}e_{2,1}+\frac{T^3 X^2}{32}e_{2,3}+\frac{3T^9 X^2}{2048}e_{2,9}\\
&\quad +\frac{T^2 X^3}{32}e_{3,2}-\frac{T X^4}{32}e_{4,1}-\frac{T^7 X^4}{256}e_{4,7}
+\frac{3 T^6 X^5}{1024}e_{5,6}+\frac{3 T^5 X^6}{1024}e_{6,5} \\
&\quad -\frac{T^4 X^7}{256}e_{7,4}+\frac{3T^2 X^9}{2048}e_{9,2}
 -\frac{T X^{10}}{2048}e_{10,1}.
\end{aligned}
\end{gather*}
By the second of the initial conditions \eqref{Exa_Alg_IC} it is deduced that
 $$
(u,e_{1,j})=0,\quad\forall  j\neq4\quad \text{and}\quad 
(u,e_{1,4})=-\frac{T^3}{8}.
$$
By taking the inner product of \eqref{Exa_Alg_AbstractForm} with 
$e_{2,j}$ and using the orthonormality of $\{e_{i,j}\}$ one obtains:
\begin{gather*}
(u,e_{2,1})=0,\\
\begin{aligned}
(u,e_{2,j})&=-\frac{XT}{4(j-1)}(u,e_{1,j-1})+\frac{1}{j-1}(g,e_{1,j-1}) \\
&\quad -\frac{XT}{4(j-1)}\sum_{\ell=1}^{j-1}\sum_{p=1}^{j-\ell}(u,e_{1,\ell})
(u,e_{1,p})(u,e_{1,j-\ell-p+1}),
\end{aligned}
\end{gather*}
from where it is deduced that
 $$
(u,e_{2,j})=0,\quad\forall  j\neq3\quad \text{and}\quad 
(u,e_{2,3})=\frac{XT^2}{8}.
$$
 Similarly, by taking the inner product of \eqref{Exa_Alg_AbstractForm} 
with $e_{3,j}$ it is deduced that
 $$
(u,e_{3,j})=0,\quad\forall  j\neq2\quad \text{and}\quad 
(u,e_{3,2})=\frac{TX^2}{8}
$$
 and by taking the inner product of \eqref{Exa_Alg_AbstractForm} with $e_{4,j}$ 
it is obtained that
 $$
(u,e_{4,j})=0,\quad\forall   j\neq1\quad \text{and}\quad 
(u,e_{4,1})=-\frac{X^3}{8}.
$$
Continuing in the same way and after some tedious manipulations, which can 
be performed also by use of a symbolic computation package such 
as \emph{Mathematica}, it is found that for $i=5,\ldots,11$ it is $(u,e_{i,j})=0$,
for all $j$ and by use of mathematical induction it is finally proved that 
$(u,e_{i,j})=0$, $\forall$ $j$ and $\forall$ $i\geq12$. Thus, the unique solution 
of \eqref{Exa_Alg_PDESpecific}, \eqref{Exa_Alg_IC} in $H_1(\Delta^2)$ is
 $$
u(x,t)=\sum_{i=1}^{\infty}\sum_{j=1}^{\infty}(u,e_{i,j})x^{i-1}t^{j-1}
=-\frac{T^3}{8}t^3+\frac{XT^2}{8}xt^2+\frac{TX^2}{8}x^2t-\frac{X^3}{8}x^3,
$$
 for $X$, $T$ satisfying \eqref{Exa_Alg_SpCon1} and \eqref{Exa_Alg_SpCon2}. 
Notice that by using \eqref{Exa_Alg_Trans2} and \eqref{Exa_Alg_Trans1}, 
$u(x,t)$ is rewritten as $\hat{u}(\xi,\eta)=\eta\xi^2$.

 \subsection{Equations with logistic type nonlinear terms}

 In \cite{AKB2002}, the traveling waves of
\begin{equation}
 u_{tt}=v^2 u_{xx}+ku(1-u),\quad u=u(x,t)  \label{Exa2_PDEPaper}
\end{equation}
were studied. Equations of this kind appear in chemical and population dynamics. 
Thus, in this example the PDE
 \begin{equation}
 u_{xt}+au_{x}+bu_{t}+cu=k u(1-u), \quad u=u(x,t),  \label{Exa2_PDE}
 \end{equation}
will be considered. The function $P(R)$ in this case is
$$
P(R)=R\left(1-| a|-| b|-| c-k|\right)-| k| R^2,
$$
 which attains its maximum at $R_0=\frac{1-| a|-| b|-| c-k|}{2| k|}$. 
Thus, according to Theorem \ref{MR_MainTheorem} the following holds.

 \begin{result}   \label{Exa2_Result} \rm
 Assume that $u(x,0),u(0,t)\in H_1(\Delta)$,
 $| a|+| b|+| c-k|<1$
 and
 $$
| a|+| b|+2<\frac{1-| a|-| b|-| c-k|}{4| k|}.
$$
 Then equation \eqref{Exa2_PDE} has a unique solution in $H_1(\Delta^2)$ 
bounded by $R_0$.
 \end{result}

 \subsection{The sine-Gordon equation}

Consider now the well-known sine-Gordon equation
 \begin{equation}
 \hat{u}_{\xi\xi}-\omega^2\hat{u}_{\eta\eta}+d\sin\hat{u}=0,
 \label{Appl_sineGordon}
 \end{equation}
where $\omega$ is a nonzero real number. This equation arises in various problems 
such as differential geometry, oscillations, optics, fluid mechanics, 
elementary particle physics and biology. (For more information see \cite{D2005} 
and the references therein). Equation \eqref{Appl_sineGordon} can be rewritten 
in the form
 \begin{equation}
 \hat{u}_{\xi\xi}-\omega^2\hat{u}_{\eta\eta}+d\hat{u}=
 -d\sum_{s=1}^{\infty}\frac{(-1)^{s}}{(2s+1)!}\hat{u}^{2s+1}, \label{Appl_sineGordonSeries}
 \end{equation}
or after using \eqref{Appl_Trans} for $\kappa=\omega$ in the form
 \begin{equation}
 u_{xt}-\frac{d}{4\omega^2}u=\frac{d}{4\omega^2}
\sum_{s=1}^{\infty}\frac{(-1)^{s}}{(2s+1)!}u^{2s+1},
 \label{Appl_sineGordonu}
 \end{equation}
where $u(x,t)=\hat{u}(\frac{x-t}{2\omega},\frac{x+t}{2})=\hat{u}(\xi,\eta)$. 
The function $P(R)$ in this case is
\begin{align*}
P(R)&=\frac{(4\omega^2-| d|)R}{4\omega^2}
 -\frac{| d|}{4\omega^2}\sum_{s=1}^{\infty}\frac{1}{(2s+1)!}R^{2s+1}\\
&=\frac{(4\omega^2-| d|)R}{4\omega^2}-\frac{| d|}{4\omega^2}(\sinh R-R)\\
&= R-\frac{| d|}{4\omega^2}\sinh R,
\end{align*}
which attains its maximum at 
$R_0=\cosh^{-1}\big(\frac{4\omega^2}{| d|}\big)$.
 Then a direct application of Corollary \ref{Appl_CorWave} gives

 \begin{result}  \label{Appl_CorsineGordon} \rm
Assume that $u(x,0),u(0,t)\in H_1(\Delta)$,
 \begin{gather}
 | d|<4\omega^2 ,  \label{Appl_sineGordonCon1} \\
 \| u(x,0)\|_{H_1(\Delta)}+\| u(0,t)\|_{H_1(\Delta)}-| u(0,0)|<P(R_0). \label{Appl_sineGordonCon2}
 \end{gather}
Then equation \eqref{Appl_sineGordonu} has a unique solution in $H_1(\Delta^2)$ 
bounded by $R_0$.
\end{result}

 \subsection{Equations with exponential nonlinear terms}
 Consider the PDE
 \begin{equation}
 u_{xt}+au_{x}+bu_{t}+cu=e^u, \quad u=u(x,t),  \label{Exa1_PDE}
 \end{equation}
 which can be rewritten as
 $$
u_{xt}+au_{x}+bu_{t}+(c-1)u=1+\sum_{n=2}^{\infty}\frac{1}{n!}u^{n}, \quad u=u(x,t).
$$
 Then, the function $P(R)$ becomes
\begin{align*}
 P(R)&=R\left(1-| a|-| b|-| c-1|\right)-\sum_{n=2}^{\infty}\frac{1}{n!} R^n\\
&=1+R\left(2-| a|-| b|-| c-1|\right)-e^R,
\end{align*}
which attains its maximum at $R_0=\ln\left(2-| a|-| b|-| c-1|\right)$. 
Thus, according to Theorem \ref{MR_MainTheorem} the following holds.

\begin{result}  \label{Exa_Exp_Result} \rm
Assume that $u(x,0),u(0,t)\in H_1(\Delta)$,  $| a|+| b|+| c-1|<1$
 and
\begin{align*}
&(1+| b|)\| u(x,0)\|_{H_1(\Delta)}+(1+| a|)\| u(0,t)\|_{H_1(\Delta)}-| u(0,0)|\\
&<\big(2-| a|-| b|-| c-1|\big)(\ln\left(2-| a|-| b|-| c-1|\right)-1).
\end{align*}
 Then  \eqref{Exa1_PDE} has a unique solution in $H_1(\Delta^2)$ bounded by $R_0$.
\end{result}

 \begin{thebibliography}{99}

\bibitem{AKB2002}  G. Abramson, V. M. Kenkre, A. R. Bishop;
 Analytic solutions for nonlinear waves in coupled reacting systems, 
\emph{Phys. A} \textbf{305} (2002), 427--436.

\bibitem{BBM1972}  T. B. Benjamin, J. L. Bona, J. J. Mahony;
 Model equations for long waves in nonlinear dispersive systems, 
\emph{Philos. Trans. Roy. Soc. London Ser. A}, \textbf{272} (1972), 47--78.

\bibitem{CN2012}  G. Caciotta, F. Nicol\'o;
 Local and global analytic solutions for a class of characteristic problems of 
the Einstein vacuum equations in the ``double null foliation gauge",
 \emph{Ann. Henri Poincar\'e}, \textbf{13} (2012), 1167--1230.

\bibitem{CGS2012}  G. M. Coclite, F. Gargano, V. Sciacca;
Analytic solutions and singularity formation for the Peakon $b$-family equations, 
\emph{Acta Appl. Math.}, \textbf{122} (2012), 419--434.

\bibitem{D2005}  L. Debnath;
 \emph{Nonlinear partial differential equations for scientists and engineers}, 
2nd edition, Birkh\"auser, 2005.

\bibitem{EH70}  C. J. Earle, R. S. Hamilton;
A fixed point theorem for holomorphic mappings in 
\emph{Global Analysis (Proc. Sympos. Pure Math., Vol.XVI, Berkeley, 
California 1968)}, Amer. Math. Soc., Providence R.I., (1970), 61--65.

\bibitem{ES1997}
 P. Ebenfelt, H. S. Shapiro;
 A quasi-maximum principle for holomorphic solutions of partial differential 
equations in $\mathbb{C}^n$, \emph{J. Funct. Anal.} \textbf{146}  (1997), 27--61.

\bibitem{EB2014}  M. Eslami, J. Biazar;
 Analytical solution of the Klein--Gordon equation by a new homotopy perturbation 
method, \emph{Comput. Math. Model.} \textbf{25} (2014), 124--134.
 
\bibitem{E2010} L. C. Evans;
 \emph{Partial differential equations}, 2nd edition. Graduate Studies in Mathematics, 
19. American Mathematical Society, Providence, RI, 2010.

\bibitem{GR2010} J. Geng, X. Ren;
Lower dimensional invariant tori with prescribed frequency for nonlinear 
wave equation, \emph{J. Differential Equations} \textbf{249} (2010), 2796--2821.

\bibitem{HP2012}  A. A. Himonas, G. Petronilho;
Analytic well-posedness of periodic gKdV, \emph{J. Differential Equations},
 \textbf{253} (2012), 3101--3112.

\bibitem{I1971}  E. K. Ifantis;
 Solution of the Schr\"{o}dinger equation in the Hardy-Lebesgue space,
\emph{J. Math. Phys.}, \textbf{12} (1971), 1961--1965.

\bibitem{I1978}  E. K. Ifantis;
 An existence theory for functional-differential equations and 
functional-differential systems, 
\emph{J. Differential Equations}, \textbf{29} (1978), 86--104.

\bibitem{I1987a}  E. K. Ifantis;
Analytic solutions for nonlinear differential equations, 
\emph{J. Math. Anal. Appl.}, \textbf{124} (1987), 339--380.

\bibitem{K1972}  J. Kajiwara;
 Holomorphic solutions of a partial differential equation of mixed type, 
\emph{Math. Balkanica}, \textbf{2} (1972), 76--83.

\bibitem{PS2009}  E. N. Petropoulou, P. D. Siafarikas;
Polynomial solutions of linear partial differential equations,
 \emph{Commun. Pure Appl. Anal.} \textbf{8} (no. 3) (2009), 1053--1065.

\bibitem{PSAIMS}  E. N. Petropoulou, P. D. Siafarikas;
 A functional-analytic technique for the study of analytic solutions of PDEs, 
\emph{Proceedings of the 10th AIMS International Conference (Madrid, Spain, 2014)} 
(accepted for publication).

\bibitem{Tc2011}  M. E. Taylor;
\emph{Partial differential equations III. Nonlinear equations}, 
2nd edition. Applied Mathematical Sciences, 117. Springer, New York, 2011.

\bibitem{W2006}  A.-M. Wazwaz;
The modified decomposition method for analytic treatment of differential equations,
 \emph{Appl. Math. Comput.} \textbf{173} (2006), 165--176.

\bibitem{Z1980}  G. Zampieri;
A sufficient condition for existence of real analytic solutions of 
PDE with constant coefficients, in open sets of $\mathbb{R}^{2}$, 
\emph{Rend. Sem. Mat. Univ. Padova} \textbf{63} (1980), 83--87.

 \bibitem{Z1999}  G. Zampieri;
 Analytic solutions of P.D.E.'s.,
 \emph{Ann. Univ. Ferrara--Sez. VII--Sc. Mat.} \textbf{XLV} (1999), 365--372.

 \end{thebibliography}

 \end{document}
