\documentclass[reqno]{amsart}
\usepackage{hyperref}
\usepackage{graphicx}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2010(2010), No. 125, pp. 1--47.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu}
\thanks{\copyright 2010 Texas State University - San Marcos.}
\vspace{9mm}}

\begin{document}
\title[\hfilneg EJDE-2010/125\hfil Approximate solutions]
{$C^1$-approximate solutions of  second-order singular ordinary
differential equations}

\author[G. L. Karakostas\hfil EJDE-2010/125\hfilneg]
{George L. Karakostas}

\address{George L. Karakostas \newline
 Department of Mathematics, University of Ioannina,
 451 10 Ioannina, Greece}
\email{gkarako@uoi.gr}

\thanks{Submitted March 21, 2010. Published September 7, 2010.}
\subjclass[2000]{34A45, 34A12, 34A25, 34B99}
\keywords{One-parameter second order ordinary differential equation; 
\hfill\break\indent
growth index of a function; Approximate solutions; Initial value problems; 
\hfill\break\indent boundary value problems}

\begin{abstract}
 In this work a new method is developed to obtain $C^1$-approximate
 solutions of initial and boundary-value problems generated from a
 one-parameter second order singular ordinary differential equation.
 Information about the order of approximation is also given by
 introducing the so-called \emph{growth index} of a function.
 Conditions are given for the existence of such approximations
 for initial and boundary-value problems of several kinds.
 Examples associated with the corresponding graphs of the
 approximate solutions, for some values of the parameter,
 are also given.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{notation}[theorem]{Notation}
\newtheorem{condition}[theorem]{Condition}

\tableofcontents

\section{Introduction}

A one-parameter perturbation singular problem associated with a
second order ordinary differential equation is a problem whose
solutions behave non-uniformly near the initial (or the boundary)
values, as the parameter approaches extreme levels.
In this work we develop a new method to obtain approximate solutions
of some  problems of this kind.  It is well known that under such a
limiting process  two situations may occur:

 (i) The limiting position of the system exists, thus one can talk about
the continuous or discontinuous  dependence  of the solutions on the
parameter.

Consider, for instance,  the following one-parameter scalar
autonomous Cauchy problem
$$
x''+f(x,p)=0,\quad x(0)=\alpha, \quad x'(0)=\beta,
$$
when the parameter $p$ takes large values (and tends to $+\infty$).
Under the assumption that $f$ satisfies some monotonicity conditions
and it approaches a certain function $g$ as the parameter $p$
tends to $+\infty$, a geometric argument is used  in the literature
(see, e.g., Elias and Gingold \cite{eg}) to show, among others,
that if the initial values lie in a suitable domain on the plane,
then  the solution approximates (in the $C^1$-sense) the corresponding
solution of the limiting equation. The same behavior is exhibited by
the periods (in case of periodic solutions) and the escape times
(in case of non-periodic solutions). Donal O'Regan in his informative
book  \cite[p. 14]{O'r}  presents a  problem involving a second-order
differential equation, when the boundary conditions are of the
form $y(0)=a$ (fixed) and $y(1)=\frac{a}{n}$ and $n$  is large enough.
It is shown that for a delay equation of the form
$$
\varepsilon \dot{x}(t)+x(t)=f(x(t-1)),
$$
when $f$ satisfies some rather mild conditions, there exists a
periodic  solution which is close to the square wave corresponding
to the limiting (as $\varepsilon\to 0^+$) difference equation:
$$
x(t)=f(x(t-1)).
$$

 Similarly, as it is shown in the book of Ferdinand Verhulst
\cite[Ch. 10]{ver}, the equation
\begin{equation}\label{eee}
{x}''+x=\varepsilon f(x,{x}', \varepsilon), \quad (x,{x'})\in
D\subseteq{\mathbb{R}^2}
\end{equation}
($\varepsilon>0$ and small) associated with the initial values
$$
x(0)=a(\varepsilon), \quad {x'}(0)=0,
$$
under some  conditions on $f$, has a periodic solution $x(t;\varepsilon)$
satisfying
$$
\lim_{\varepsilon\to 0^+}x(t;\varepsilon)=a(0)\cos{t}.
$$
Notice that the limiting value $a(0)\cos{t}$ is the solution
of \eqref{eee} when $\varepsilon=0$.

(ii) There exist some coefficients of the system, or of some
initial-boundary values, which vanish, or tend to infinity, as the
parameter approaches a limiting value.
In this case we cannot formulate a limiting equation; however
we have an asymptotic approximate system for values of the parameter
which are close to the limiting value. The advantage of this situation
is that in many circumstances it is possible to have information
on the solutions of the limiting systems and, moreover, to compute
(in closed form) the so-called approximate solutions.

 A simple prototype of this situation is, for instance, the differential
equation
$$
\varepsilon\frac{d^2u}{dt^2}+2\frac{du}{dt}+u=0, \quad t>0,
$$
associated with the  initial values
\begin{equation}\label{e00}
u(0)=a,\quad \frac{du}{dt}(0)=b+\frac{\gamma}{\varepsilon},
\end{equation}
which was discussed in the literature and especially in the classic detailed
book due to Donald R. Smith \cite[p. 134]{sm}.
Here the parameter $\varepsilon$ is small enough and it approaches zero.

  A more general situation, which we will discuss later, in Section 5,
is an equation of the form
\begin{equation}\label{e34}
x''+[a_1(t)+a_2(t)p^{\nu}]x'+[b_1(t)+b_2(t)p^{\mu}]x+a_0p^{m}x\sin(x)=0,
\quad t>0
\end{equation}
associated with the initial values
\begin{equation}\label{e35}
x(0;p)=\delta_1+\delta_2 p^{\sigma}, \quad x'(0;p)=\eta_1+\eta_2p^{\tau}.
\end{equation}
The entities $\mu, \nu, m, \sigma$ and $\tau$  are  real  numbers
 and $p$ is a large parameter.

 The previous two problems have the general form
\begin{equation}\label{e36}
x''(t)+a(t;p)x'(t)+b(t;p)x(t)+f(t,x(t);p)=0, \quad t>0,
\end{equation}
where the parameter $p$ is large enough, while the initial values
are of the form
\begin{equation} \label{e54}
x(0;p)=x_{0}(p),\quad x'(0;p)=\bar{x}_{0}(p).
\end{equation}

 It is well known that the Krylov-Bogoliubov method was developed
in the 1930's to handle situations described by second order
ordinary differential equations of the form \eqref{e36} motivated
by problems in mechanics of the type generated by the Einstein
equation for Mercury. This approach, which was applied to various
problems presented in \cite{sm}, is based on the so-called
O'Malley \cite{OM1, OM2} and Hoppensteadt \cite{Ho} method.
 According to this method (in case $f$ does not depend on $x$)
we seek an additive decomposition of the solution $x$ of \eqref{e36}
in the form
$$
x(t;p)\sim U(t;p)+U^*(\tau;p),
$$
where $\tau:=tp$ is the large variable and $U, U^*$ are suitable
functions, which are to be obtained in the form of asymptotic
expansions, as
$$
U(t;p)=\sum_{k=0}^{\infty}U_k(t)p^{-k}
$$
and
$$
U^*(t;p)=\sum_{k=0}^{\infty}U^*_k(t)p^{-k}.
$$
After the coefficients $U_k$ and $U^*_k$ are determined, we define
the remainder
$$
R_N:=R_N(t;p)
$$
by the relation
$$
x(t;p)=\sum_{k=0}^{\infty}[U_k(t)+U^*_k(t)]p^{-k}+R_N(t;p)
$$
and then obtain suitable  $C^1$ estimates of $R_N$
(see, \cite[p. 146]{sm}). This method is applied when the solutions
admit initial values as in \eqref{e00}. For the general
O'Malley-Hoppensteadt construction an analogous approach is
followed elsewhere, see \cite[p. 117]{sm}.
In the book due to O'Malley \cite{om} an extended exposition of
the subject is given.  The central point of the method is to
obtain approximation of the solution, when the system depends
on a small parameter tending to zero (or equivalently, on a
large parameter tending to $+\infty$). The small parameter $\varepsilon$
is used in some of these cases and the functions involved are
smooth enough to guarantee the existence and uniqueness of solutions.

In the literature one can find a great number of  works dealing
with singular boundary-value problems, performing a set of different
methods. Here we do not mean singularities with respect to the independent variable, such as the problems studied in the book due to R. P. Agarwal and D. O'Regan \cite{agar2}, or the papers \cite{agar1, swk}, but rather singularities with respect to some parameter. For instance, the work due to Kadalbajoo and Patidar
\cite{kada1} presents a (good background and a very rich list
of references on the subject, as well as a) deep survey of numerical
techniques used in many circumstances to solve singularly perturbed
ordinary differential equations depending on a small parameter.
Also, in \cite{stoja} a problem of the form
$$
-\varepsilon u''(t)+p(t)u'(t)+q(t)u(t)=f(t),\quad
u(a)=\alpha_0, \quad u(b)=\alpha_1,
$$
is discussed,  by using  splines fitted with delta sequences as
numerical strategies for the solution. See, also, \cite{stoja1}.
A similar problem is discussed in \cite{herc},  where the authors
apply a  fourth-order finite-difference method.  In \cite{kada}
a problem of the form
$$
\varepsilon y''(x)+[p(y(x))]'+q(x, y(x))=r(x),\quad y(a)=\alpha,
\quad y(b)=\beta,
$$
is investigated by reducing it into an equivalent first order
initial-value problem and then by applying an appropriate non-linear
one-step explicit scheme.
In \cite{ramos}, where a problem of the form
$$
\varepsilon y''(x)=f(x, y, y'),\quad  y(a)=y_a, \quad y(b)=y_b,
$$
is discussed,  a smooth locally-analytical method is suggested.
According to this method the author considers nonoverlapping
intervals and then linearizes the ordinary differential equation
around a fixed point of each interval. The method applies by imposing
some continuity conditions of the solution at the two end points of
each interval and of its first-order derivative at the common end
point of two adjacent intervals.

 A similar problem as above, but with boundary conditions of the
form
$$
y'(0)-ay(0)=A, \quad y'(1)+by(1)=B,
$$
is presented in \cite{cohen}, where  a constructive iteration procedure
is provided yielding an alternating sequence which gives pointwise upper
and lower bounds of the solution. \par The so-called method of
\emph{small intervals} is used in \cite{wang}, where the same problem
as above is discussed but having impulses. In some other works,
as e.g. \cite{dgz, cui} (see also the references therein)
two-point boundary-value problems  concerning third order differential
equations are investigated, when the conditions depend on the (small)
parameter  $\varepsilon$. The methods applied in these problems
are mainly computational.

In this work our contribution to the subject is to give
(assumptions and) information on the existence of the solution $x(t;p)$ of  the ordinary
differential equation \eqref{e36} and the existence and the form of
a $C^1$-approximate solution $\tilde{x}(t;p)$ of it, when the parameter $p$
tends to $+\infty$, by following an approach which differs from the known ones:
We suggest a smooth transformation of the time through
which the equation \eqref{e36} looks like a perturbation of an
equation of the same order and with constant coefficients.
The latter is used to get the approximate solution of the original
equation without using the Sturm transformation.
Furthermore, these arguments permit us to provide estimates of the quantities
 $$
x(t;p)-\tilde{x}(t;p)\quad\text{and}\quad
\frac{d}{dt}\Big(x(t;p)-\tilde{x}(t;p)\Big)
$$
as $p$ tends to $+\infty$, uniformly for $t$ in compact intervals.
To handle the ``size'' of the approximation we introduce and use a kind
of measure of boundedness of a function, which we term
\emph{the growth index}.

Our approach differs from that one used (recently) in \cite{db}
for the equation of the form
\begin{equation}\label{e054}
x''+(p^2q_1(t)+q_2(t))x=0,\end{equation}
when $p$ approaches $+\infty$. In \cite{db} the authors suggest
a method to approximate the solutions of \eqref{e054} satisfying
the boundary conditions of the form
\begin{equation}\label{bvc1}
   x(0)=x_0, \quad x(1)=m x(\xi).
\end{equation}
To do that they provide an approximation of the equation, and then
(they claim that) as the parameter $p$ tends to $+\infty$,
the solution of the original equation approaches the solution of the new one.
And this fact is an implication of the following claim:
\begin{quote} If a function $\delta(p)$, $p\geq 0$ satisfies
$\delta(p)=o(p^{-2})$, as $p\to+\infty$, then the solution of
the equation
$$
v''(z;p)+v(z;p)=\delta(p)v(z;p),
$$
approaches the solution of the equation $v''(z;p)+v(z;p)=0$.
\end{quote}

However, as one can easily see, this is true only when
$v(z;p)=O(p^{r})$, as $p\to+\infty$, uniformly for all $z$,
for some $r\in(0,2)$. Therefore, in order to handle such cases,
more information on the solutions is needed.

This work is organized as follows:
In Section 2 we introduce the meaning of the growth index of a
function and some useful characteristic properties of it.
The basic assumptions of our problem and the auxiliary transformation
of the original equation \eqref{e36} is presented in Section 3,
while in Sections 4  and 6 we give results on the existence of
$C^1$-approximate solutions of the initial-value problem
\eqref{e36}-\eqref{e54}. In Section 4 we consider equation \eqref{e36}
when the coefficient $b(t;p)$ takes (only) positive values and in
Section 6 we discuss the case when $b(t;p)$ takes (only) negative values.
 Illustrative examples are given in Sections 5 and 7. Section 8 of
the work is devoted to the approximate solutions of the
boundary-value problem
\begin{equation}\label{e361}
x''(t)+a(t;p)x'(t)+b(t;p)x(t)+f(t,x(t);p)=0, \quad t\in(0,1),
\end{equation}
associated with the boundary conditions of Dirichlet type
\begin{equation}\label{e362}
x(0;p)=x_0(p), \quad x(1;p)={x}_1(p),
\end{equation}
where the boundary values depend  on the parameter $p$, as well.
Here we use the Nonlinear Alternative (a fixed point theorem) to
show the existence of solutions and then we present the approximate
solutions. Some applications of these results are given in Section 9.
In Section 10  we investigate the existence of $C^1$-approximate
solutions of equation \eqref{e361} associated with the boundary
conditions \eqref{bvc1}. Again, the Nonlinear Alternative is
used for the existence of solutions and then $C^1$-approximate
solutions are given. An application of this result is given in the
last section, Section 11. The work closes with a discussion and some comments on the results.

\section{The growth index of a function}

Before proceeding to the investigation of the main problem it is convenient
to present some auxiliary facts concerning the growth of a real valued
function $f$  defined in a neighborhood of $+\infty$. For such a
function we introduce an index which, in a certain sense, denotes
the critical exponent at which the function remains bounded
as the parameter tends to $+\infty$, relative to a positive and
unbounded function $E(\cdot)$. This notion, which we term
\emph{the growth index} of $f$, will help us to calculate and better
understand the approximation results. More facts about the
growth index of functions will be published in a subsequent work.

All the (approximation) results of this work are considered with
respect to a basic  positive function $E(p)$, $p\geq 0$, as, e.g.,
$E(p):=\exp(p)$, or in general $E(p):=\exp^{(n)}(p)$, for all
integers $n$. Here $\exp^{(0)}(p):=p$, and
$\exp^{(-k)}(p):=\log^{(k)}(p)$, for all positive integers $k$.
Actually, the function $E(p)$ denotes the level of convergence
to $+\infty$ of a function $h$ satisfying  a relation of the form $h(p)=O((E(p))^{\mu})$,
as $p\to+\infty$. The latter stands for the well known big-O symbol.

  From now on we shall keep fixed such a function $E(p)$. To this
item there corresponds the set
$$
\mathcal{A}_E:=\{h:[0,+\infty)\to{\mathbb{R}}: \exists b\in\mathbb{R}:
\limsup_{p\to+\infty}(E(p))^b|h(p)|<+\infty\}.
$$
Then, for any $h\in\mathcal{A}_E$ we define the set
$$
\mathcal{N}_E(h):=\{b\in\mathbb{R}:
\limsup_{p\to+\infty}(E(p))^b|h(p)|<+\infty\}.
$$
 It is obvious that the set $\mathcal{N}_E(h)$ is a connected
interval of the real line of the form $(-\infty, \hat{b})$. In this case a very characteristic
property of the function $h\in\mathcal{A}_E$ is  the quantity
$$
{\mathcal{G}}_E(h):=\sup\mathcal{N}_E(h),
$$
which we call \emph{the growth index of  $h$ with respect to $E$.}
To save space in the sequel the  expression \emph{with respect to
$E$} will not be used.

 The simplest  case for the growth index
can be met in case of the logarithm of the absolute value of an
entire complex valued function of finite order. Indeed, if $F$
is such a function, its order is defined as the least of all reals
$\alpha$ such that
$$
|F(z)|\leq\exp({|z|^{\alpha}}),
$$
for all
complex numbers $z$. Now, the function $f(p):=\log{|F(p+i0)|}$
satisfies
$$
\limsup_{p\to+\infty}(E(p))^b|f(p)|<+\infty
$$
for all $b\leq-\alpha$, with respect to the level $E(p):=p$. Thus we have
$  {\mathcal{G}}_E(f)\geq-\alpha$. \par More generally, the growth
index of a function $h$ such that $h(p)=O(p^k)$, as $p\to+\infty$,
for some $k\in\mathbb{R}$, satisfies $  {\mathcal{G}}_E(h)\geq
-k$. Also, we observe that, if it holds $ {\mathcal{G}}_E(h)>b$,
then the function $h$ satisfies
$$
h(p)=O\Big([E(p)]^{-b}\Big),\quad \text{as } p\to+\infty,
$$
or equivalently,
 $$
|h(p)|\leq K(E(p))^{-b},
$$
  for all $p$ large enough and for some $K>0$, not depending on $p$.

We present a list of characteristic properties of the growth
index; some of them will be useful in the sequel.

 \begin{proposition} \label{prop2.1}
 If $h_1, h_2$ are elements of the class ${\mathcal{A}}_E$,
then their product $h_1h_2$ also is an element of  the class
${\mathcal{A}}_E$ and moreover it holds
  $$
{\mathcal{G}}_E(h_1h_2)\geq  {\mathcal{G}}_E(h_1)
+  {\mathcal{G}}_E(h_2).
$$
  \end{proposition}

\begin{proof}
  Given $h_1, h_2\in {\mathcal{A}}_E$, take any $b_1, b_2$ such that
$b_j<  {\mathcal{G}}_E(h_j)$, $j=1,2$. Thus we have
$$
\limsup_{p\to+\infty}(E(p))^{b_1}|h_1(p)|<+\infty,\quad
\limsup_{p\to+\infty}(E(p))^{b_2}|h_2(p)|<+\infty
$$
and therefore
\begin{align*}
& \limsup_{p\to+\infty}(E(p))^{b_1+b_2}|h_1(p)h_2(p)|\\
&\leq\limsup_{p\to+\infty}(E(p))^{b_1}|h_1(p)|
\limsup_{p\to+\infty}(E(p))^{b_2}|h_2(p)|<+\infty.
\end{align*}
This shows, first, that $h_1h_2\in {\mathcal{A}}_E$ and, second,
that
  $  {\mathcal{G}}_E(h_1h_2)\geq b_1+b_2$.
  The latter implies that
  \[
{\mathcal{G}}_E(h_1h_2)\geq   {\mathcal{G}}_E(h_1)
 +  {\mathcal{G}}_E(h_2).
\]
  \end{proof}

\begin{lemma}\label{l1}
Consider the functions $h_1, h_2, \dots, h_n$ in $\mathcal{A}_E$.
Then, for all real numbers $a_j>0$, the function
$\sum_{j=1}^na_jh_j$ belongs to $\mathcal{A}_E$ and 
it satisfies
\begin{equation}\label{e37}
  {\mathcal{G}}_E\Big(\sum_{j=1}^na_jh_j\Big)
=\min\{  {\mathcal{G}}_E(h_j): j=1,2,\dots,n\}.
\end{equation}
\end{lemma}

\begin{proof}
The fact that $\sum_{j=1}^na_jh_j$ is an element of $\mathcal{A}_E$
is obvious. To show the equality in \eqref{e37}, we assume that
the left side of \eqref{e37} is smaller than the right side.
Then there is a real number $N$ such that
$$
{\mathcal{G}}_E\Big(\sum_{j=1}^n\alpha_jh_j\Big)<N
<\min\{  {\mathcal{G}}_E(h_j): j=1,2,\dots,n\}.
$$
Thus, on one hand we have
\begin{equation}\label{e38}
\limsup_{p\to+\infty} \sum_{j=1}^na_j (E(p))^{N}|h_j(p)|
=\limsup_{p\to+\infty} (E(p))^{N}\Big(\sum_{j=1}^na_j|h_j(p)|\Big)
=+\infty
\end{equation}
 and on the other hand it holds
$$
\limsup_{p\to+\infty} (E(p))^{N}|h_j(p)|<+\infty, \quad
j=1,2,\dots ,n.
$$
The latter implies that
$$
\limsup_{p\to+\infty}
\sum_{j=1}^na_j (E(p))^{N}|h_j(p)|\leq
\sum_{j=1}^na_j\limsup_{p\to+\infty}
(E(p))^{N}|h_j(p)|<+\infty,
$$
contrary to \eqref{e38}.

If the right side of \eqref{e37} is smaller than the left one,
there is a real number $N$ such that
$$
{\mathcal{G}}_E\Big(\sum_{j=1}^na_jh_j\Big)>N
>\min\{  {\mathcal{G}}_E(h_j): j=1,2,\dots,n\}.
$$
Thus, on one hand we have
\begin{equation}\label{e39}
\limsup_{p\to+\infty}(E(p))^{N}\sum_{j=1}^na_j|h_j(p)|<+\infty
\end{equation}
 and on the other hand it holds
$$
\limsup_{p\to+\infty}  (E(p))^{N}|h_{j_0}(p)|=+\infty,
$$
for some $j_0\in\{1,2,\dots ,n\}$.
The latter implies
$$
\limsup_{p\to+\infty}(E(p))^{N} \sum_{j=1}^na_j|h_j(p)|
\geq\limsup_{p\to+\infty}a_{j_0}(E(p))^{N}|h_{j_0}(p)|=+\infty,
$$
contrary to \eqref{e39}.
\end{proof}

The growth index of a function describes the rate of convergence
of the function to zero at infinity. Indeed, we have the following
result.

\begin{proposition} \label{pr1}
For a given function $h: [r_0,+\infty)\to\mathbb{R}$ it holds
$$ {\mathcal{G}}_E(h)=\sup\{r\in\mathbb{R}: \limsup_{p\to+\infty}
(E(p))^{r}|h(p)|=0\}.
$$
\end{proposition}

\begin{proof}
If $b>  {\mathcal{G}}_E(h)$, then
$$
\limsup_{p\to+\infty}(E(p))^b|h(p)|=+\infty.
$$
Thus, it clearly suffices to show that for any real $b$ with
$b<  {\mathcal{G}}_E(h)$ it holds
$$
\limsup_{p\to+\infty}(E(p))^b|h(p)|=0.
$$
To this end consider arbitrary real number $b< {\mathcal{G}}_E(h)$ and then take any $b_1\in(b, {\mathcal{G}}_E(h))$.
Then we have
$$
\limsup_{p\to+\infty}(E(p))^{b_1}|h(p)|=:K<+\infty
$$
and therefore
\begin{align*}
\limsup_{p\to+\infty}(E(p))^b|h(p)|
&\leq\limsup_{p\to+\infty}(E(p))^{(b-b_1)}\limsup_{p\to+\infty}(E(p))^{b_1}|h(p)|\\
&=\limsup_{p\to+\infty}(E(p))^{(b-b_1)}K=0.
\end{align*}
\end{proof}

In the sequel the choice of a variable $t$ uniformly in compact
subsets of a set $U$ will be denoted by $t\in Co(U)$. Especially
we make the following:
\begin{notation} \label{not1} \rm
 Let $H(t;p)$ be a function defined for $t\in{S}\subseteq{\mathbb{R}}$
and $p$ large enough. In the sequel, in case we write
$$
H(t;p)\simeq 0,\quad\text{as }  p\to+\infty,\; t\in Co(S),
$$
we shall mean  that, given any compact set $I\subseteq{S}$ and any
$\varepsilon>0,$ there is some $p_0>0$ such that
$$
|H(t;p)|\leq \varepsilon,
$$
for all $t\in I$ and $p\geq p_{0}$.
\end{notation}

Also, keeping in mind Proposition \ref{pr1} we make the following:

\begin{notation}\label{not2} \rm
Again, let $h(t;p)$ be a function defined for
$t\in{S}\subseteq{\mathbb{R}}$ and $p$ large enough. Writing
$$
{\mathcal{G}}_E(h(t;\cdot))\geq b,\quad t\in Co(S),
$$
we shall mean that, for any $m<b$, it holds
$$
(E(p))^{m}h(t;p)\simeq 0,\quad\text{as } p\to+\infty,\; t\in Co(S).
$$
\end{notation}

\section{Transforming equation \eqref{e36}}

In this section our purpose is to present a transformation of the
one-parameter family of differential equations of the form \eqref{e36},
to a second order ordinary differential equation having constant
coefficients.\par
  Let $T_0>0$ be fixed and define $I:=[0,T_0)$.
Assume that the functions $a, b, f$ satisfy the following:

\begin{condition}\label{ec1} \rm
For all large $p$ the following statements are true:
\begin{itemize}
\item[(1)] The function $f(\cdot,\cdot;p)$ is continuous,
\item[(2)]  $a(\cdot;p)\in C^1(I)$,
\item[(3)]  There exists some $\theta>0$ such that
$|b(t;p)|\geq\theta$, for all $t$ and all $p$ large. Also assume
that $b(\cdot;p)\in C^2(I)$ and  $\operatorname{sign}[b(t;p)]=:c$,
a constant, for all $t\in{I}$.
\end{itemize}
\end{condition}

 The standard existence theory ensures that, if Condition \ref{ec1}
 holds,
then  \eqref{e36} admits at least one solution defined on
a (nontrivial) maximal interval of the form
$[0,T)\subseteq[0,T_0)$.

 To proceed, fix any $\hat{t}\in (0,T)$
and, for a moment, consider a strictly increasing one-parameter
$C^2$-mapping
$$
v:=v(t;p):  [0,\hat{t}]\longrightarrow [0,v(\hat{t};p)]=:J
$$
with $v(0;p)=0$. Let  $\phi(\cdot;p)$  be the inverse of $v(\cdot;p)$.
These functions will be defined later. Now for any large $p$ define the transformation
$$S_p: f\longrightarrow \frac{f(\phi(\cdot;p))}{Y(\phi(\cdot;p);p)}:\enskip C([0,\hat{t}],\mathbb{R})\longrightarrow C([0,v(\hat{t};p)],\mathbb{R}).$$
Here $Y(\cdot;p)$, which will be specified later, is  a certain
$C^2$-function, depending on the parameter $p$. 

If $x(t;p)$,
$t\in[0,\hat{t}]$ is a solution of \eqref{e36},  define the
function
\begin{equation} \label{e55}
y(v;p):=(S_{p}x(\cdot;p))(v)=\frac{x(t;p)}{Y(t;p)}
=\frac{x(\phi(v;p);p)}{Y(\phi(v;p);p)},\quad v\in{J}.
\end{equation}
 We observe that it holds
$$
x'(t;p)=Y'(t;p)y(v(t;p);p)+Y(t;p)v'(t;p)y'(v(t;p);p), \quad t\in [0,\hat{t}]
$$
and
\begin{align*}
x''(t;p)&=Y''(t;p)y(v(t;p);p)+2Y'(t;p)v'(t;p)y'(v(t;p);p)\\&\quad
+Y(t;p)v''(t;p)y'(v(t;p);p)\\
&\quad +Y(t;p)(v'(t;p))^2y''(v(t;p);p),\quad t\in
[0,\hat{t}].
\end{align*}
 Then, equation \eqref{e36} is
transformed into the equation
\begin{equation}\label{e40}
y''(v;p)+A(t; p)y'(v;p)+B(t;p)y(v;p)+g(t;p)=0,\quad v\in{J},
\end{equation}
where the one-parameter functions $A, B$ and $g$ are defined as
follows:
\begin{gather*}
A(t;p): =\frac{2Y'(t;p)v'(t;p)+Y(t;p)v''(t;p)+a(t;p)Y(t;p)v'(t;p)}{Y(t;p)(v'(t;p))^2},\\
B(t;p): =\frac{Y''(t;p)+a(t;p)Y'(t;p)+b(t;p)Y(t;p)}{Y(t;p)(v'(t;p))^2},\\
g(t; p): =\frac{f(t,Y(t;p)y(v;p);p)}{Y(t;p)(v'(t;p))^2}.
\end{gather*}

 We will specify the new functions $v$ and $Y$. To get the specific
form of the function $v(\cdot;p)$ we set
\begin{equation}\label{e42}
v'(t;p)=\sqrt{cb(t;p)},\quad t\in {I},
\end{equation}
where, recall that,
$c=\operatorname{sign}[b(t;p)]$, $t\in{I}$.
To have  $v(t;p)\geq v(0;p)=0$, it is sufficient to
get
\begin{equation}\label{e43}
v(t;p)=\int_0^t\sqrt{cb(s;p)}ds, \quad t\in[0,\hat{t}].
\end{equation}
Setting the coefficient $A(t;p)$ in \eqref{e40} equal to zero, we
obtain
\begin{equation}\label{e44}
2Y'(t;p)v'(t;p)+Y(t;p)v''(t;p)+a(t;p)Y(t;p)v'(t;p)=0,\quad t\in
[0,\hat{t}],
\end{equation}
 which, due to \eqref{e42}, implies
\begin{equation}\label{e45}
Y'(t;p)+\Big(\frac{b'(t;p)}{4b(t;p)}+\frac{a(t;p)}{2}\Big)Y(t;p)=0,
\quad t\in [0,\hat{t}].
\end{equation}
We solve this equation, by integration and  obtain
$$
Y(t;p)=Y(0;p)\exp\Big(\int_0^t{[-\frac{b'(s;p)}{4b(s;p)}
-\frac{a(s;p)}{2}]ds}\Big),
$$
namely,
\begin{equation}\label{e46}
Y(t;p)=\Big(\frac{b(0;p)}{b(t;p)}\Big)^{1/4}
\exp\Big(-\frac{1}{2}\int_0^t{a(s;p)ds}\Big),\quad t\in [0,\hat{t}],
\end{equation}
where, without loss of generality, we have set $Y(0;p)=1$.

  From \eqref{e45} it follows that
\begin{equation}\label{e47}
\frac{Y'(t;p)}{Y(t;p)}=-\frac{b'(t;p)}{4b(t;p)}-\frac{a(t;p)}{2},
\end{equation}
 from which we have
\begin{equation}\label{e48}
\begin{aligned}
Y''(t;p)=&-Y'(t;p)\Big(\frac{b'(t;p)}{4b(t;p)}+
\frac{a(t;p)}{2}\Big)\\
&-Y(t;p)\Big(\frac{b(t;p)b''(t;p)-
[b'(t;p)]^2}{4[b(t;p)]^2}+
\frac{a'(t;p)}{2}\Big).
\end{aligned}
\end{equation}
Then, from relations \eqref{e45}, \eqref{e47} and \eqref{e48} we obtain
\begin{align*}
&Y''(t;p)+a(t;p)Y'(t;p)+b(t;p)Y(t;p)\\
 &=-Y'(t;p)\Big(\frac{b'(t;p)}{4b(t;p)}-\frac{a(t;p)}{2}\Big)\\
 &\quad -Y(t;p)\Big(\frac{b(t;p)b''(t;p)-[b'(t;p)]^2}{4[b(t;p)]^2}
+\frac{a'(t;p)}{2}-b(t;p)\Big)\\
&=Y(t;p)\Big[\Big(\frac{b'(t;p)}{4b(t;p)}
 +\frac{a(t;p)}{2}\Big)\Big(\frac{b'(t;p)}{4b(t;p)}
 -\frac{a(t;p)}{2}\Big)\\
&\quad-\frac{b(t;p)b''(t;p)-(b'(t;p))^2}{4(b(t;p))^2}
 -\frac{a'(t;p)}{2}+b(t;p)\Big].
\end{align*}
 Hence, the expression of the function $B$ appeared in \eqref{e40}
takes the form
\begin{align*}
B(t;p)&=-\frac{1}{(v'(t;p))^2}\Big[\Big(\frac{b'(t;p)}{4b(t;p)}+\frac{a(t;p)}{2}\Big)\Big(\frac{b'(t;p)}{4b(t;p)}-\frac{a(t;p)}{2}\Big)\\
&\quad-\frac{b(t;p)b''(t;p)-[b'(t;p)]^2}{4[b(t;p)]^2}-\frac{a'(t;p)}{2}+b(t;p)\Big]\\
&=\frac{1}{cb(t;p)}\Big[\Big(\frac{[b'(t;p)]^2}{16[b(t;p)]^2}-\frac{[a(t;p)]^2}{4}\Big)\\
&\quad-\frac{b(t;p)b''(t;p)-[b'(t;p)]^2}{4[b(t;p)]^2}
-\frac{a'(t;p)}{2}+b(t;p)\Big]\\
&=\frac{5}{16c}\frac{[b'(t;p)]^2}{(b(t;p))^3}
-\frac{1}{4c}\frac{[a(t;p)]^2}{b(t;p)}
 -\frac{1}{4c}\frac{b''(t;p)}{[b(t;p)]^2}
 -\frac{a'(t;p)}{2cb(t;p)}+\frac{1}{c}.
\end{align*}
Therefore, equation \eqref{e40} becomes
\begin{equation}\label{e49}
y''(v;p)+cy(v;p)=C(t,y(v;p);p)y(v;p),\quad v\in J,\end{equation}
 where
\[
C(t,u;p):=-\frac{5c}{16}\frac{[b'(t;p)]^2}{[b(t;p)]^3}
+c\frac{[a(t;p)]^2}{4b(t;p)}
 +\frac{c}{4}\frac{b''(t;p)}{[b(t;p)]^2}+c\frac{a'(t;p)}{2b(t;p)}
-c\frac{f(t,Y(t;p)u;p)}{b(t;p)Y(t;p)u}.
\]
(Recall that $c=\pm1$, thus $c^2=1$.) The expression of the
function $C(t,u;p)$ might assume a certain kind of singularity for
$u=0$, but, as we shall see later, due to condition \eqref{e52},
such  a case is impossible.

 Therefore we have proved the \emph{if} part of the following
theorem.

\begin{theorem}\label{th1}
Consider the differential equation \eqref{e36} and assume that
Condition \ref{ec1}  is satisfied. Then, a function
 $y(v;p)$, $v\in{J}$ is a solution of the differential
equation \eqref{e49}, if and only if, the function
$$
x(t;p)=(S_p^{-1}y(\cdot;p))(t)=Y(t;p)y(v(t;p);p), \quad t\in[0,{\hat{t}}]
$$
is a solution of \eqref{e36}. The quantities  $Y$ and $v$ are functions
defined in \eqref{e46} and \eqref{e43} respectively.
\end{theorem}

\begin{proof}
It is sufficient to prove the \emph{only if} part.  From the
expression of $x(t;p)$ we obtain
$$
x'(t;p)=Y'(t;p)y(v(t;p);p)+Y(t;p)v'(t;p)y'(v(t;p);p)
$$
and
\begin{align*}
x''(t;p)
&=Y''(t;p)y(v(t;p);p)+2Y'(t;p)v'(t;p)y'(v(t;p);p)\\
&\quad
+Y(t;p)v''(t;p)y'(v(t;p);p)+Y(t;p)(v'(t;p))^{2}y''(v(t;p);p).
\end{align*}
 Then, by using \eqref{e44}, \eqref{e40} and the expression of
the quantity $B(t;p)$, we obtain
\begin{align*}
&x''(t)+a(t;p)x'(t)+b(t;p)x(t)+f(t,x(t);p)\\
&=Y(t;p)(v'(t;p))^{2}\Big[y''(v(t;p);p)+B(t;p)y(v(t;p);p)
+g(t;p)\Big]=0.
\end{align*}
\end{proof}
To proceed we state the following condition.

\begin{condition}\label{ec2} \rm
For each $j=1,2,\dots ,5$, there is a nonnegative function
$\Phi_j\in\mathcal{A}_E$, such that, for all $t\in[0,T)$,
$z\in{\mathbb{R}}$ and  large $p$, the following inequalities
hold:
\begin{gather}
\label{e50}
 |b'(t;p)|^2\leq \Phi_1(p)|b(t;p)|^3, \quad
 |b''(t;p)|\leq \Phi_2(p) |b(t;p)|^2,\\
\label{e51}
|a(t;p)|^2\leq \Phi_3(p)|b(t;p)|, \quad
|a'(t;p)|\leq \Phi_4(p) |b(t;p)|,\\
\label{e52}
|f(t,z;p)|\leq \Phi_5(p) |zb(t;p)|.
\end{gather}
\end{condition}

If Condition {\em\ref{ec2}} is true, then we have the relation
\begin{equation}\label{e66}
\Big|\frac{b'(0;p)}{b(0;p)}\Big|\leq \sqrt{\Phi_1(p)b(0;p)},
\end{equation}
as well as the estimate
\begin{equation}\label{e53}
|C(t,u;p)|
\leq \frac{5}{16}\Phi_1(p)+\frac{1}{4}\big(\Phi_2(p)+\Phi_3(p)\big)+\frac{1}{2}\Phi_4(p)+\Phi_5(p)
=:P(p),
\end{equation}
for all $t\in[0,T)$ and $p$ large enough.

\section{Approximate solutions of the initial-value
problem \eqref{e36}-\eqref{e54} when $c=+1$}

The previous facts will now help us to provide useful information
on the  asymptotic properties of the solutions of equation \eqref{e36}
having initial values which depend on the large parameter $p$,
and are of the form \eqref{e54}.

In this section we assume that $c=+1$, thus the last
requirement in Condition {\em\ref{ec1}} is satisfied with
$b(t;p)>0$, for all $t\geq 0$ and $p$ large enough.

As we have shown above, given a solution $x(t;p)$,
$t\in[0,\hat{t}]$ of \eqref{e36} the function $y(v;p)$, $ v\in J$
defined in \eqref{e55} solves equation \eqref{e49} on the interval
$J$. (Recall that $J$ is the interval $[0, v({\hat{t}};p)].$) We
shall find the images of the initial values \eqref{e54} under this
transformation.

 First we note that
\begin{equation}\label{e56}
y(0;p)=:y_0(p)=\frac{x(0;p)}{Y(0;p)}=x(0;p)=x_0(p).
\end{equation}
Also, from the fact that
$$
x'(0;p)=Y'(0;p)y(0;p)+Y(0;p)v'(0;p)y'(0;p)
$$
and relation \eqref{e45} we obtain
\begin{equation}\label{e57}
y'(0;p)=:\hat{y}_0(p)=\frac{1}{\sqrt{b(0;p)}}
\Big[\bar{x}_0(p)+\Big(\frac{b'(0;p)}{4b(0;p)}
+\frac{a(0;p)}{2}\Big)x_0(p)\Big].
\end{equation}
Consider the solution $w(v;p)$ of the homogeneous equation
\begin{equation}\label{e58}
w''+w=0\end{equation}
 having the same initial values \eqref{e56}-\eqref{e57} as
the function $y(\cdot;p)$. This requirement implies
that the function $w(v;p)$ has the form
$$
w(v;p)=c_1(p)\cos{v}+c_2(p)\sin{v},\quad v\in{\mathbb{R}},
$$
for some real numbers $c_1(p), c_2(p)$, which are uniquely
determined by the initial values of $y(\cdot;p)$,
namely $c_1(p)=y_0(p)$ and $c_2(p)=\hat{y}_0(p)$.
Then the difference function
\begin{equation}\label{e59}
R(v;p): =y(v;p)-w(v;p),
\end{equation}
satisfies
$$
 R(0;p)=R'(0;p)=0,
$$
and moreover
 \begin{equation}\label{e059}
 R''(v;p)+R(v;p)=C(t,y(v;p);p)R(v;p)
 +C(t,y(v;p);p)w(v;p), \quad v\in J.
\end{equation}
  Since the general solution of \eqref{e58} having zero
initial values is the zero function, applying the variation-of-constants
formula in \eqref{e059} we obtain
\begin{equation}\label{e60}
\begin{aligned}
R(v;p)&=\int_0^{v}K(v,s) C(s,y(s;p);p) w(s;p)ds\\
&\quad +\int_0^{v}K(v,s) C(s,y(s;p);p) R(s;p)ds,
\end{aligned}
\end{equation}
 where
 $K(v,s)=\sin(v-s)$.
Observe that
$$
\int_0^v|\sin(v-s)w(s;p)|ds\leq(|c_{1}(p)|+|c_{2}(p)|)v
=:\gamma(p)v, \quad v\in{J}
$$
and therefore,
$$
|R(v;p)|\leq P(p)\gamma(p)v+P(p)\int_0^v|R(s;p)|ds.
$$
Applying Gronwall's inequality we obtain
 \begin{equation}\label{e61}
|R(v;p)|\leq \gamma({p})(e^{P(p)v}-1).
\end{equation}

 Differentiating $R(v;p)$ (with respect to $v$) in  \eqref{e60}
and using \eqref{e61}, we see that the quantity $|R'(v;p)| $ has
the same upper bound as $|R(v;p)|$ namely, we obtain
\begin{equation}\label{e62}
\max\{|R(v;p)|, |R'(v;p)|\}\leq \gamma(p)(e^{P(p)v}-1),\quad v\in{J}.
\end{equation}
By using the  transformation $S_p$ and relation \eqref{e62} we get
the following theorem.

\begin{theorem}\label{th2}
Consider the ordinary differential equation \eqref{e36}
associated with the initial  values \eqref{e54}, where assume
that $T_0=+\infty$ and Condition \ref{ec1} holds with $c = +1$.
Assume also that there exist functions $\Phi_j$, $j=1,2,\dots ,5$,
satisfying Condition \ref{ec2}. If $x(t;p)$, $t\in [0,T)$ is a
maximally defined solution of the problem \eqref{e36}-\eqref{e54},
then it holds
\begin{equation}\label{e108}
T=+\infty,
\end{equation}
and
 \begin{equation}\label{e63}
\begin{aligned}
 &|x(t;p)-Y(t;p)w(v(t;p);p)|\\
 &\leq\Big(\frac{b(0;p)}{b(t;p)}\Big)^{1/4}\exp\big({-\frac{1}{2}\int_0^ta(s;p)ds}\big)\\
 &\quad\times\Big(|\bar{x}_0(p)|+\frac{1}{\sqrt{b(0;p)}}\Big[|\bar{x}_0(p)|+|x_0(p)|\Big|\frac{b'(0;p)}{4b(0;p)}+\frac{a(0;p)}{2}\Big|\Big]\Big)\\
 &\quad \times\Big[\exp\Big(P(p)\int_0^t\sqrt{b(s;p)}ds\Big)-1\Big]=:{\mathcal{M}}(t;p),\end{aligned}\end{equation}
as well as
 \begin{equation}\label{e64}\begin{aligned}
&\Big| \frac{d}{dt}[x(t;p)-Y(t;p)w(v(t;p);p)]\Big|\\
&\leq Y(t;p)\gamma(p)\big(e^{P(p)v(t;p)}-1\big)
\Big[\frac{\sqrt{\Phi_1(p)b(t;p)}}{4}+\frac{|a(t;p)|}{2}
+\sqrt{b(t;p)}\Big],
\end{aligned}\end{equation}
for all $t>0$ and  $p$ large enough. Here we have set
\[
w(v;p):=x_{0}(p)\cos(v)
+\frac{1}{\sqrt{b(0;p)}}\Big(\bar{x}_{0}(p)
+\Big(\frac{b'(0;p)}{4b(0;p)}+\frac{a(0;p)}{2}
\Big)x_{0}(p)\Big)\sin(v),
\]
 and $P(p)$ is the quantity defined  in \eqref{e53}.
\end{theorem}

\begin{proof}
Inequality \eqref{e63} is easily implied from \eqref{e62} and
the relation
$$
x(t;p)=Y(t;p)y(v(t;p);p).
$$

Then property \eqref{e108} follows from \eqref{e63} and the fact
that the solution is noncontinuable (see, e.g., \cite[p. 90]{lp}).

 To show \eqref{e64} observe that
\[
\Big| \frac{d}{dt}[x(t;p)-Y(t;p)w(v(t;p);p)]\Big|
 =\Big|
\frac{d}{dt}Y(t;p)[y(v(t;p);p)-w(v(t;p);p)]\Big|
\]
and therefore,
\begin{align*}
&\Big| \frac{d}{dt}[x(t;p)-Y(t;p)w(v(t;p);p)]\Big|\\
&\leq\Big| [y(v(t;p);p)-w(v(t;p);p)]\frac{d}{dt}Y(t;p)\Big|\\
&\quad +\Big|Y(t;p)\frac{d}{dt}[y(v(t;p);p)-w(v(t;p);p)]\Big|\\
&\leq\Big| R(v(t;p);p)\frac{d}{dt}Y(t;p)\Big|
+\Big|Y(t;p)\frac{d}{dv}R(v(t;p);p)\frac{d}{dt}v(t;p)\Big|\\
&\leq Y(t;p)\gamma(p)\big(e^{P(p)v(t;p)}-1\big)
\Big[\frac{\sqrt{\Phi_1(p)b(t;p)}}{4}+\frac{|a(t;p)|}{2}
+\sqrt{b(t;p)}\Big].
\end{align*}
We have used relations \eqref{e47},  \eqref{e66} and \eqref{e61}.
\end{proof}
Now we present the main results concerning the existence of
approximate solutions of the initial-value problem
\eqref{e36}-\eqref{e54}.

 The function defined by
\begin{equation}\label{appr}\begin{aligned}
{\tilde{x}}(t;p)&:=Y(t;p)w(v(t;p);p)\\
&=Y(t;p)[y_0(p)\cos{\int_0^t\sqrt{b(s;p)}ds}+{\hat{y}}_0(p)\sin{\int_0^t\sqrt{b(s;p)}ds}]\\
&=\Big(\frac{b(0;p)}{b(t;p)}\Big)^{1/4}\exp\Big(-\frac{1}{2}\int_0^ta(s;p)ds\Big)\Big\{x_0(p)\cos(v(t;p))\\
&\quad+\frac{1}{\sqrt{b(0;p)}}\Big[\bar{x}_0(p)+\Big(\frac{b'(0;p)}{4b(0;p)}+\frac{a(0;p)}{2}\Big)x_0(p)\Big]\sin(v(t;p))\Big\}
\end{aligned}
\end{equation}
is the so called \emph{approximate solution} of the problem, since,
 as we shall see in the sequel, this function approaches the exact
solution, as the parameter tends to $+\infty$. Moreover, since this function approaches the solution $x$ in the $C^1$ sense,
namely in a sense given in the next theorem, we shall
refer to it as \emph{a $C^1$ approximate solution.}
To make the notation short consider the \emph{error} function
\begin{equation}\label{e660}
{\mathcal{E}}(t;p):=x(t;p)- {\tilde{x}}(t;p).
\end{equation}
Then, from \eqref{e63} and  \eqref{e64}, we obtain
\begin{equation}\label{e67}|
{\mathcal{E}}(t;p)|\leq {\mathcal{M}}(t;p)\end{equation}
 and
\begin{equation}\label{e68}\begin{aligned}
&|\frac{d}{dt}{\mathcal{E}}(t;p)|\\
&\leq Y(t;p)\gamma(p)\big(e^{P(p)v(t;p)}-1\big)
\Big[\frac{\sqrt{\Phi_1(p)b(t;p)}}{4}+\frac{|a(t;p)|}{2}+\sqrt{b(t;p)}\Big],\end{aligned}\end{equation}
respectively.

\begin{theorem} \label{th3}
Consider the initial-value problem \eqref{e36}-\eqref{e54}, where
the conditions of Theorem \ref{th2} are satisfied and
\begin{equation}\label{cc01}
\min\{\mathcal{G}_E(\Phi_j), j=1,\dots, 5\}>0.
\end{equation}
 Moreover, we assume that
\begin{gather}\label{cc1}
x_0, \; x_1\in\mathcal{A}_E, \\
\label{c1}a(\cdot;p)\geq 0,\quad\text{for all large}\quad p,\\
\label{ccc1} a(t;\cdot), b(t;\cdot)\in\mathcal{A}_E,\quad
t\in C_o(\mathbb{R}^+).
\end{gather}
If ${\mathcal{E}}(t;p)$ is the error function defined in
\eqref{e660} and
\begin{equation}\label{ea69}
\begin{aligned}
&\min\{{\mathcal{G}}_E(\Phi_j),j=1,\dots, 5\}\\
&+\Big[\frac{3}{4}  {\mathcal{G}}_E(b(t;\cdot))
+\min\big\{  {\mathcal{G}}_E(\bar{x}_0),
 {\mathcal{G}}_E(x_0)+ \frac{1}{2}  {\mathcal{G}}_E(b(t;\cdot)),
 {\mathcal{G}}_E(x_0)+ {\mathcal{G}}_E(a(t;\cdot))\big\}
\Big]\\
&=:N_0>0,\quad t\in C_o(\mathbb{R}^+),
\end{aligned}
\end{equation}
 then we have
\begin{equation}\label{e70}
{\mathcal{E}}(t;p)\simeq 0,\quad p\to +\infty, \;
 t\in C_o(\mathbb{R}^+)
\end{equation}
 and the growth index of the error function satisfies
 \begin{equation}\label{e71}
   {\mathcal{G}}_E({\mathcal{E}}(t;\cdot))\geq N_0,\quad
t\in C_o(\mathbb{R}^+).
 \end{equation}
 In addition to the assumptions above for the functions
$x_0, {\bar{x}}_0, a, b$  assume the condition
\begin{equation}\label{ea720}
\begin{aligned}
&\min\{ {\mathcal{G}}_E(\Phi_j), j=1,\dots, 5\}
+\Big[\frac{3}{4}  {\mathcal{G}}_E(b(t;\cdot))
+\min\big \{  {\mathcal{G}}_E(\bar{x}_0)+  {\mathcal{G}}_E(a(t;\cdot)),\\
& \frac{1}{2}  {\mathcal{G}}_E(b(t;\cdot))+  {\mathcal{G}}_E(\bar{x}_0),   {\mathcal{G}}_E(x_0)+  {\mathcal{G}}_E(b(t;\cdot)),\\
&  {\mathcal{G}}_E(x_0)+2  {\mathcal{G}}_E(a(t;\cdot)),
\frac{1}{2}  {\mathcal{G}}_E(b(t;\cdot))+   {\mathcal{G}}_E(x_0)
+   {\mathcal{G}}_E(a(t;\cdot)) \big\}\Big]\\
&=:N_1>0,\quad t\in C_o(\mathbb{R}^+),
\end{aligned}
\end{equation}
instead of \eqref{ea69}.
Then we have
\begin{equation}\label{e73}
\frac{d}{dt}{\mathcal{E}}(t;p)\simeq 0,\quad p\to +\infty, \;
 t\in C_o(\mathbb{R}^+),
\end{equation}
 and the growth index  of the derivative of the error function satisfies 
  \begin{equation}\label{e74}
   {\mathcal{G}}_E\big(\frac{d}{dt}{\mathcal{E}}(t;\cdot)\big)\geq N_1,
\quad t\in C_o(\mathbb{R}^+).
 \end{equation}
 \end{theorem}

\begin{proof}
  Due to our assumptions given $\varepsilon>0$ small enough, we can find  real numbers $\sigma, \tau$, and $\mu, \nu$, close to the quantities
  $-{\mathcal{G}}_E(x_0)$, $-{\mathcal{G}}_E(\bar{x}_0)$,
$-{\mathcal{G}}_E(a(t;\cdot))$
  and $-{\mathcal{G}}_E(b(t;\cdot))$ respectively, such that,
 as $p\to+\infty$,
\begin{gather}\label{e682}
x_0(p)=O((E(p))^{\sigma}), \quad \bar{x}_0(p)=O((E(p))^{\tau}),\\
\label{e680}
a(t;p)=O((E(p))^{\nu}),\quad\text{as }  p\to+\infty, \;
 t\in C_o(\mathbb{R}^+),\\
\label{e681}
b(t;p)=O((E(p))^{\mu}),\quad\text{as }  p\to+\infty,\;
t\in C_o(\mathbb{R}^+),
\end{gather}
as well as  the relation
\begin{equation}\label{e69}
\min\{  {\mathcal{G}}_E(\Phi_j), j=1,\dots,5\}
-\Big[\frac{3\mu}{4}+\max\{\tau,\sigma+ \frac{\mu}{2},
\sigma+\nu\}\Big]=:{N}_0-\varepsilon>0.
\end{equation}

Assume  that \eqref{c1} holds. We start with the proof of \eqref{e70}.
Fix any $\hat{t}>0$ and take any  $N\in(0,N_0-\varepsilon)$.
Then, due to \eqref{e69}, we can let $\zeta>0$ such that
\begin{equation}\nonumber
\min\{ {\mathcal{G}}_E(\Phi_j), j=1,\dots,5 \}
>\zeta>N+\Big[\frac{3\mu}{4}+\max\{\tau,\sigma
+ \frac{\mu}{2}, \sigma+\nu\}\Big].
\end{equation}
Therefore,
\begin{equation}\label{e81}
\max\{\frac{3\mu}{4}+\tau,\; \frac{5\mu}{4}+\sigma,\;
 \frac{3\mu}{4}+\sigma+\nu\}-\zeta<-N,
\end{equation}
and, due to  Lemma \ref{l1}, it holds
\begin{equation}\label{e82}
  {\mathcal{G}}_E(P)>\zeta, \quad
{\mathcal{G}}_E(\Phi_1)>\zeta.
\end{equation}
 The latter implies that  there exist $K>0$ and $p_0>1$ such that
 \begin{equation}\label{e83}\begin{gathered}
 0<P(p)\leq K (E(p))^{-\zeta}, \\
 0<\Phi_1(p)\leq K (E(p))^{-\zeta},
\end{gathered}
\end{equation}
 for all $p\geq p_0$.

  From relations  \eqref{e680}, \eqref{e681} and \eqref{e682}
it follows that there are positive real numbers $K_j, j=1,2,3,4$
such that
\begin{gather}\label{e84}
|b(t;p)|\leq K_1(E(p))^{\mu},\\
\label{e85}
|\bar{x}_0(p)|\leq K_2(E(p))^{\tau}, \quad
|x_0(p)|\leq K_3(E(p))^{\sigma},\\
\label{e86}
 0\leq a(t;p)\leq K_4(E(p))^{\nu},
\end{gather}
 for all $t\geq 0$ and $p\geq p_1$, where $p_1\geq p_0$.

 Also keep in mind that from Condition \ref{ec1} we have
\begin{equation}\label{e87}
b(t;p)\geq \theta,
\end{equation}
for all $t\geq 0$ and $p$ large enough.

In the sequel, for simplicity, we shall denote by $q$
the quantity $E(p)$.

Consider the function ${\mathcal{M}}(t;p)$ defined in \eqref{e63}.
Then, due to \eqref{e83}, \eqref{e66} and \eqref{e84}-\eqref{e87},
for all $t\in[0,\hat{t}]$ and $p$ with $q=E(p)\geq p_{1}$, we have
\begin{equation} \label{e88}
\begin{aligned}
{\mathcal{M}}(t;p)
&\leq K_1^{1/4}\theta^{-\frac{1}{4}}q^{\frac{\mu}{4}}
\Big[K_2q^{\tau}+\theta^{-1/2}\Big(K_2q^{\tau}
 +K_3q^{\sigma}\big[\frac{1}{4}(K_1K)^{1/2}
 q^{\frac{-\zeta+\mu}{2}}\\
&\quad  +\frac{1}{2}K_4q^{\nu}\big]\Big)\Big]
\Big(\sum_{n=1}^{+\infty}\frac{1}{n!}K^{n}
q^{-n\zeta}t^n(K_1)^{\frac{n}{2}}q^{\frac{n\mu}{2}}\Big).
\end{aligned}
\end{equation}
 Since the series
$$
1+\sum_{n=1}^{+\infty}\frac{1}{(n+1)!}(tK)^{n}q^{-n\zeta}
(K_1)^{\frac{n}{2}}q^{\frac{n\mu}{2}}
$$
converges uniformly for $t$ in compact sets, it admits an upper
bound $K_5(\hat{t})>0$, say, on $[0,\hat{t}]$.
Therefore, for all $t\in[0,\hat{t}]$ and $q:=E(p)\geq p_1$, it holds
$$
\sum_{n=1}^{+\infty}\frac{1}{n!}K^{n}q^{-n\zeta}t^n
(K_1)^{\frac{n}{2}}q^{\frac{n\mu}{2}}\leq K_5(\hat{t})
\hat{t}K q^{-\zeta}(K_1)^{1/2}q^{\frac{\mu}{2}}.
$$
So, from \eqref{e81} and \eqref{e88} we obtain
\begin{equation} \label{e89}
\begin{aligned}
{\mathcal{M}}(t;p)
&\leq K_1^{1/4}\theta^{-\frac{1}{4}}q^{\frac{\mu}{4}}
\times\Big[(1+\theta^{-1/2})K_2q^{\tau}
 +K_3\theta^{-1/2}q^{\sigma}\frac{1}{4}(K_1K)^{1/2}
 q^{\frac{-\zeta+\mu}{2}}\\
&\quad +K_3\theta^{-1/2}q^{\sigma}\frac{1}{2}K_4q^{\nu}\Big]
\times K_5(\hat{t})\hat{t}K q^{-\zeta}(K_1)^{1/2}
 q^{\frac{\mu}{2}}\\
&=K_6q^{\frac{\mu}{4}+\tau-\zeta+\frac{\mu}{2}}
 +K_7q^{\frac{\mu}{4}+\sigma+\frac{-\zeta+\mu}{2}-\zeta
 +\frac{\mu}{2}}+K_8q^{\frac{\mu}{4}+\sigma+\nu-\zeta+\frac{\mu}{2}}\\
&\leq K_6q^{-N}+K_7q^{-N-\frac{\zeta}{2}}+K_8q^{-N}<K_9q^{-N},
\end{aligned}
\end{equation}
for some positive constants $K_j, j=6,7,8,9$. Recall that
$$
q=E(p)\geq p_{1}\geq p_{0}>1.
$$
This and \eqref{e67} complete the proof of \eqref{e70}.

Now, from the  previous arguments, it follows that given any
$\Lambda\in(0,N)$  it holds
$$
{\mathcal{M}}(t;p)q^{\Lambda}\leq K_9q^{-N+\Lambda}\to 0,
\quad\text{as } p\to+\infty,
$$
where the constant $K_9$ is uniformly chosen for $t$ in the
compact interval $[0,\hat{t}]$. Then from \eqref{e67}  we get
$$
{\mathcal{E}}(t;p)q^{\Lambda}\to 0, \quad\text{as }
 p\to+\infty,
$$
which implies that the growth index  of the error
function satisfies
$$
{\mathcal{G}}_E({\mathcal{E}}(t;p))\geq \Lambda.
$$
  From here we get
${\mathcal{G}}_E({\mathcal{E}}(t;p))\geq N$.
Since $N$ is arbitrary in the interval $(0,N_0-\varepsilon)$
and $\varepsilon$ is any small positive number, we
obtain \eqref{e71}.

 We proceed to the proof of \eqref{e73}.
 Again, from our assumptions and \eqref{ea720}, for any small enough
$\varepsilon>0$, we can choose real numbers $\sigma, \tau$,
and $\mu, \nu$, as above, satisfying
\eqref{e84}, \eqref{e85}, \eqref{e86}, as well as
 \begin{equation}\label{ea72}
\begin{aligned}
&\min\{ {\mathcal{G}}_E(\Phi_j):  j=1,\dots,5\}\\
&-\Big[\frac{3\mu}{4}+\max\{\tau+\nu,
\frac{\mu}{2}+\tau, \sigma+\mu,\sigma+2\nu,
\frac{\mu}{2}+\sigma+\nu\}\Big]
=:N_1-\varepsilon>0.
\end{aligned}
\end{equation}
Take any $N\in(0,N_1-\varepsilon)$. Then, because of \eqref{ea72},
we can choose $\zeta>0$ such that
\begin{align*}
&\min\{ {\mathcal{G}}_E(\Phi_j):  j=1,\dots,5\}\\
&>\zeta>N+\Big[\frac{3\mu}{4}+\max\{\tau+\nu,\frac{\mu}{2}
 +\tau, \sigma+\mu,\sigma+2\nu,
\frac{\mu}{2}+\sigma+\nu\}\Big].
\end{align*}
 From this relation it follows that
\begin{align*}
\min\{ {\mathcal{G}}_E(\Phi_j):  j=1,\dots,5\}
&> N+\Big[\frac{3\mu}{4}+\max\{\frac{\mu}{2}+\tau, \sigma+\mu,
 \frac{\mu}{2}+\sigma+\nu\}\Big]\\
&=(N+\frac{\mu}{2})+\Big[\frac{3\mu}{4}+\max\{\tau, \sigma+\frac{\mu}{2}, \sigma+\nu\}\Big]
\end{align*}
and
\begin{align*}
\min\{ {\mathcal{G}}_E(\Phi_j):  j=1,\dots,5\}
&> N+\Big[\frac{3\mu}{4}+\max\{\tau+\nu,\sigma+2\nu,
 \frac{\mu}{2}+\sigma+\nu\}\Big]\\
&=(N+\nu)+\Big[\frac{3\mu}{4}+\max\{\tau, \sigma+\nu,
  \sigma+\frac{\mu}{2}\}\Big].
\end{align*}
These inequalities with a double use  of \eqref{e89}, with $N$
being replaced with
$N+\frac{\mu}{2}$ and $N+\nu$  respectively imply
$$
{\mathcal{M}}(t;p)<K_9q^{-N-\frac{\mu}{2}}\quad\text{and}\quad
{\mathcal{M}}(t;p)<K_9q^{-N-\nu}.
$$
Then, from \eqref{e68}, \eqref{e83} and conditions \eqref{e84},
\eqref{e86}  it follows that there are constants
$K_{10}, K_{11}, K_{12}$ such that
\begin{equation}\label{e106}
\begin{aligned}
|\frac{d}{dt}{\mathcal{E}}(t;p)|
&\leq {\mathcal{M}}(t;p)\Big[\frac{\sqrt{\Phi_1(p)b(t;p)}}{4}
 +\frac{|a(t;p)|}{2}+\sqrt{b(t;p)}\Big]\\
&\leq {\mathcal{M}}(t;p)[K_{10}q^{-\frac{\zeta}{2}}q^{\frac{\mu}{2}}
 +K_{11}q^{\nu}+K_{12}q^{\frac{\mu}{2}}]\\
&\leq K_{10}K_9q^{-N-\frac{\mu}{2}}q^{-\frac{\zeta}{2}}
q^{\frac{\mu}{2}}+K_{11}K_9q^{-N-\nu}q^{\nu}
+K_{12}K_9q^{-N-\frac{\mu}{2}}q^{\frac{\mu}{2}}\\
&= K_{10}K_9q^{-N-\frac{\zeta}{2}}+K_{11}K_9q^{-N}+K_{12}K_9q^{-N}\\
&\leq (K_{10}+K_{11}+K_{12})K_9q^{-N}.
\end{aligned}
\end{equation}
Since $N$ is arbitrary,  this relation completes the proof
of \eqref{e73}.
Relation \eqref{e74} follows from \eqref{e106}, exactly in the
same way as \eqref{e71} follows from \eqref{e89}.
\end{proof}

\begin{theorem} \label{th4}
Consider the initial-value problem \eqref{e36}-\eqref{e54},
where  the conditions of Theorem \ref{th2} and conditions
\eqref{cc1}, \eqref{c1}, \eqref{ccc1} are satisfied.
Moreover, assume that there is a measurable function
$\omega:[0,+\infty)\to[0,+\infty)$ such that
 \begin{equation}\label{e1030}
 |a(t;p)|\leq\omega(t)\log(E(p)),\quad t\geq 0
\end{equation}
for $p$ large enough.  If  ${\mathcal{E}}(t;p)$ is the error
function defined in \eqref{e660} and the relation
\begin{equation}\label{e75}
\begin{aligned}
&\min\{ {\mathcal{G}}_E(\Phi_j):  j=1,\dots,5\}+\Big[\frac{3}{4}
{\mathcal{G}}_E(b(t;\cdot))\\
&+\min\{  {\mathcal{G}}_E(\bar{x}_0),  {\mathcal{G}}_E(x_0)
+ \frac{1}{2}  {\mathcal{G}}_E(b(t;\cdot)),   {\mathcal{G}}_E(x_0)\}\Big]
=:M_0>0,
\end{aligned}
\end{equation}
 holds, then
\begin{equation}\label{e76}
{\mathcal{E}}(t;p)\simeq 0, \quad p\to +\infty, \;
 t\in C_o([0, T(M_0))),
\end{equation}
where, for any $M>0$ we have set
 \begin{equation}\label{e706}
 T(M):=\sup\{t>0:  \Omega(t):=\int_0^t\omega(s)ds<2M\} .
\end{equation}
In this case the growth index  of the error function satisfies
 \begin{equation}\label{e77}
   {\mathcal{G}}_E({\mathcal{E}}(t;\cdot))\geq M_0,  \quad
t\in C_o([0, T(M_0))).
 \end{equation}
Also, if \eqref{e1030}  is satisfied and the condition
\begin{equation}\label{e78}
\begin{aligned}
&\min\{ {\mathcal{G}}_E(\Phi_j):  j=1,\dots,5\}
+\Big[\frac{3}{4}  {\mathcal{G}}_E(b(t;\cdot))
+\min\{  {\mathcal{G}}_E(\bar{x}_0),\frac{1}{2}
{\mathcal{G}}_E(b(t;\cdot))\\
&+  {\mathcal{G}}_E(\bar{x}_0),   {\mathcal{G}}_E(x_0)
+  {\mathcal{G}}_E(b(t;\cdot)),  {\mathcal{G}}_E(x_0),
 \frac{1}{2}  {\mathcal{G}}_E(b(t;\cdot))
+  {\mathcal{G}}_E(x_0)\}\Big]\\
&=:M_1>0
\end{aligned}
\end{equation}
is satisfied, then
\begin{equation}\label{e79}
\frac{d}{dt}{\mathcal{E}}(t;p)\simeq 0,\quad p\to +\infty,
\quad t\in C_o([0, T(M_1)))
\end{equation}
 and the growth index of the first derivative of the error function is such that
  \begin{equation}\label{e80}
   {\mathcal{G}}_E\big(\frac{d}{dt}{\mathcal{E}}(t;\cdot)\big)
\geq M_1, \quad t\in C_o([0, T(M_1))).
 \end{equation}
   \end{theorem}

\begin{proof}
 Let  $\hat{t}\in(0,T(M_0))$ be fixed. Then from \eqref{e75}
we can choose numbers $\mu, \sigma, \tau$ satisfying \eqref{e84}
and \eqref{e85} and such that $-\mu, -\sigma, -\tau$ are close to
$  {\mathcal{G}}_E(b(t;\cdot))$, $  {\mathcal{G}}_E(x_0)$ and
$  {\mathcal{G}}_E(\bar{x}_0)$, respectively and moreover
$$
\Big[\frac{3\mu}{4}+\max\{\tau,\sigma+ \frac{\mu}{2},
\sigma\}\Big]+\frac{1}{2}\Omega(\hat{t})<
\min\{ {\mathcal{G}}_E(\Phi_j):  j=1,\dots,5\}.
$$
Take  $\zeta, \nu, N$ (strictly) positive such that
\begin{equation}\label{e101}
\begin{aligned}
&\Big[\frac{3\mu}{4}+\max\{\tau,\sigma+ \frac{\mu}{2},
\sigma+\nu\}\Big]+\frac{1}{2}\Omega(\hat{t})+N\\
&<\zeta<
\min\{ {\mathcal{G}}_E(\Phi_j):  j=1,\dots,5\}.
\end{aligned}
\end{equation}
Let $p_0>1$ be chosen so that $\log(p)\leq p^{\nu}$ and
\eqref{e1030} holds, for all $p\geq p_0$.
Then, due to \eqref{e1030}, we have \begin{equation}\label{e102}
|a(0;p)|\leq \omega(0)q^{\nu}, \end{equation}
for all $p\geq p_0$. Recall that $q:=E(p)$.

 Now we proceed as in Theorem \ref{th3}, where, due to
 \eqref{e1030} and \eqref{e102}, relation \eqref{e89} becomes
\begin{equation} \label{e104}
\begin{aligned}
{\mathcal{M}}(t;p)
&\leq K_1^{1/4}\theta^{-\frac{1}{4}}q^{\frac{\mu}{4}}
 e^{\frac{1}{2}\Omega(\hat{t})\log(q)}\\
&\quad \times\Big[(1+\theta^{-1/2})K_2q^{\tau}
 +K_3\theta^{-1/2}q^{\sigma}\frac{1}{4}(K_1K)^{1/2}q^{\frac{-\zeta+\mu}{2}}\\
&\quad +K_3\theta^{-1/2}q^{\sigma}\frac{1}{2}\omega(0)
 \log(q)\Big]\times K_5(\hat{t})\hat{t}K q^{-\zeta}(K_1)^{1/2}
 q^{\frac{\mu}{2}}\\
&\leq K_6q^{\frac{\mu}{4}+\tau-\zeta+\frac{\mu}{2}
 +\frac{1}{2}\Omega(\hat{t})}
 +K_7q^{\frac{\mu}{4}+\sigma+\frac{-\zeta+\mu}{2}-\zeta
 +\frac{\mu}{2}+\frac{1}{2}\Omega(\hat{t})}\\
&\quad +K_8q^{\frac{\mu}{4}+\sigma+\nu-\zeta
+\frac{\mu}{2}+\frac{1}{2}\Omega(\hat{t})}.
\end{aligned}
\end{equation}
Notice that \eqref{e104} holds for all $q:=E(p)$ with $p\geq p_0>1$.
 From this inequality and \eqref{e101} we obtain the estimate
\begin{equation}\label{e105}
{\mathcal{M}}(t;p)\leq (K_6+K_7+K_8)q^{-N},
\end{equation}
which implies the approximation \eqref{e76}.
Inequality \eqref{e77} follows as the corresponding one in
Theorem \ref{th3}. Finally, as in Theorem \ref{th3}, we can
use the above procedure and  relation \eqref{e105} in order to get
a relation similar to \eqref{e106}, from which \eqref{e79}
and \eqref{e80} follow.
\end{proof}

\section{Application to the initial-value problem \eqref{e34}-\eqref{e35}}

Consider the initial-value problem \eqref{e34}-\eqref{e35},
where assume the following conditions:
\begin{itemize}
\item[(i)]  The function $b_1\in C^2([0,+\infty),[0,+\infty))$
 is bounded and has bounded derivatives.

\item[(ii)] The functions $a_1, a_2\in C^1([0,+\infty),[0,+\infty))$
 are bounded with bounded derivatives.

\item[(iii)] The function $b_2$ is a nonzero positive constant and,
 as we said previously, the exponents $\mu, \nu, m, \sigma, \tau$
 of the model are real numbers.
\end{itemize}

 Observe that Condition \ref{ec2} is satisfied by choosing
the following functions:
\begin{gather*}
\Phi_1(p)=l_1p^{-3\mu}, \quad
\Phi_2(p)=l_2p^{-2\mu},\quad
\Phi_3(p)=l_3p^{2\nu-\mu},\\
\Phi_4(p)=l_4p^{\nu-\mu},\quad
\Phi_5(p)=l_5p^{m-\mu},
\end{gather*}
for some positive constants $l_j$, $j=1,2,\dots, 5$.
It is not hard to show that the growth indices of these functions,
with respect to the function $E(p):=p$, are
\begin{gather*}
{\mathcal{G}}_E(\Phi_1)=3\mu, \quad
{\mathcal{G}}_E(\Phi_2)=2\mu,\quad
{\mathcal{G}}_E(\Phi_3)=-2\nu+\mu,\\
{\mathcal{G}}_E(\Phi_4)=-\nu+\mu,\quad
{\mathcal{G}}_E(\Phi_5)=-m+\mu.
\end{gather*}
In this case the results \eqref{e70}-\eqref{e71} and
\eqref{e73}-\eqref{e74} are satisfied with $N_0$ and $N_1$
being defined as
$$
N_{0}:=\min\{\frac{5\mu}{4},\frac{\mu}{4}-2\nu,
\frac{\mu}{4}-m\}-\max\{\tau, \frac{\mu}{2}+\sigma,\sigma+\nu\}
$$
and
$$
N_{1}=\min\{\frac{5\mu}{4},\frac{\mu}{4}-2\nu, \frac{\mu}{4}-m\}
-\max\{\tau+\nu, \mu+\sigma, \frac{\mu}{2}+\tau, \sigma+2\nu,
 \frac{\mu}{2}+\sigma+\nu\},
$$
respectively, provided that they are positive.

To give a specific application let us assume that the functions
$a_1, a_2, b_1$ are constants.
Then we can obtain the approximate solution of the
initial-value problem \eqref{e34}-\eqref{e35} by finding the error function.
Indeed, via \eqref{appr}, we can see that a $C^1$-approximate
solution of problem \eqref{e34}-\eqref{e35}  is  the function
defined by
\begin{align*}
{\tilde{x}}(t;p)
&:=\exp[-\frac{1}{2}t(a_1+a_2p^{\nu})]
 \Big[(\delta_1+\delta_2p^{\sigma})\cos[t\sqrt{b_1+b_2p^{\mu}}]
+(b_1+b_2p^{\mu})^{-1/2}\\
&\times \Big(\eta_1+\eta_2p^{\tau}+\frac{1}{2}
(\delta_1+\delta_2p^{\sigma})(a_1+a_2p^{\nu})\Big)
\sin[t\sqrt{b_1+b_2p^{\mu}}]\Big], \quad t\geq 0.
\end{align*}
This approximation is uniform for $t$ in compact intervals of
the positive real axis.
For instance, for the values
\begin{equation}\label{e92}\begin{gathered}
a_1=2,\quad a_2=\delta_2=0,\quad
\delta_1=b_1=b_2=\eta_1=\eta_2=1,\\
\mu=\frac{9}{10},\quad \nu=\frac{1}{10},\quad m<0, \quad
\tau=-\frac{9}{20}, \quad \sigma=-1,
\end{gathered}
 \end{equation}
we can find that the growth index of the error
function ${\mathcal{E}}(t;\cdot)$ satisfies
 $$
{\mathcal{G}}_E({\mathcal{E}}(t;\cdot))\geq \frac{19}{40},\quad
{\mathcal{G}}_E(\frac{d}{dt}{\mathcal{E}}(t;\cdot))\geq \frac{1}{40}.
$$
 In Figure~\ref{fig1} the approximate solutions for the values $p=50$, $p=150$
and $p=250$ are shown.

\begin{figure}[ht]
\begin{center}
 \includegraphics[width=0.32\textwidth]{fig1a} 
 \includegraphics[width=0.32\textwidth]{fig1b} 
 \includegraphics[width=0.32\textwidth]{fig1c} 
\end{center}
\caption{Approximate solutions of the problem
 \eqref{e34}-\eqref{e35}, with the values \eqref{e92} and when $p=50$,
$p=150$ and $p=250$, respectively}
\label{fig1}
\end{figure}

\section{Approximate solutions of the initial-value problem
\eqref{e36}-\eqref{e54} when $c=-1$}

 In this section we shall discuss the IVP  \eqref{e36}-\eqref{e54},
when $c=-1$, thus we assume that $b(t;p)<0$, for all $t$ and large $p$.
We shall assume throughout this section that Condition \ref{ec2}
(given in the end of Section 3) is satisfied.
 Here the function $y$ defined in \eqref{e55} takes initial values
$y_0(p)$ and $\hat{y}_0(p)$ as in
\eqref{e56} and \eqref{e57}. We wish to proceed as in Section 4
and consider a fixed point $\hat{t}>0$, as well as the solution
$$
w(v;p):=c_{1}(p)e^v+c_{2}(p)e^{-v}, \quad v\in[0,\hat{v}]
$$
of the equation
\begin{equation}\label{e93}
w''-w=0,
\end{equation}
associated with the same initial values as $y$.
We have set $\hat{v}:=v(\hat{t};p)$.
Thus,  for $j=1,2$  we obtain
\[
c_j(p)=\frac{1}{2}\Big[x_0(p)-\frac{(-1)^j}{\sqrt{-b(0;p)}}
\Big(\bar{x}_0(p)+x_0(p)\big[\frac{b'(0;p)}{4b(0;p)}
+\frac{a(0;p)}{2}\big]\Big)\Big]
\]
and therefore it holds
\begin{equation}\label{e1060}
|c_j(p)|\leq\frac{1}{2}\Big[|x_0(p)|+\frac{1}{\sqrt{-b(0;p)}}
\Big|\bar{x}_0(p)
 +x_0(p)\big[\frac{b'(0;p)}{4b(0;p)}+\frac{a(0;p)}{2}\big]\Big|\Big]
=:\kappa(p).
\end{equation}
Also, the difference function $R$ defined in \eqref{e59} satisfies
\eqref{e60} where, now, we have $K(v,s)=\sinh(v-s)$.
Observe that
\begin{equation}\nonumber\begin{aligned}
&\int_0^v\sinh(v-s)|w(s;p)|ds\\
&\leq\frac{|c_{1}(p)|}{2}\int_0^v(e^{v-s}-e^{-v+s})e^sds
+\frac{|c_{2}(p)|}{2}\int_0^v(e^{v-s}-e^{-v+s})e^{-s}ds\\
&\leq\frac{|c_{1}(p)|}{2}(ve^v-\sinh(v))
+\frac{|c_{2}(p)|}{2}(\sinh(v)-ve^{-v})\\
&\leq \kappa(p)v\sinh(v)
\end{aligned}
\end{equation}
and therefore, for any $v\in[0,\hat{v}]$, it holds
\begin{align*}
|R(v;p)|&\leq P(p) \kappa(p)v\sinh(v)+P(p)\int_0^v\sinh(v-s)|R(s;p)|ds\\
&\leq P(p) \kappa(p)v\sinh(v)+P(p)\sinh(v)\int_0^v|R(s;p)|ds.
\end{align*}

Here we apply the method of proving Gronwall's inequality,
but we follow a different procedure.  Indeed, we set
$$
F(v):=\int_0^v|R(s;p)|ds.
$$
Then
$$
F'(v)=|R(v;p)|\leq P(p) \kappa(p)v\sinh(v)+P(p)\sinh(v)F(v)
$$
and therefore
$$
F'(v)-P(p)\sinh(v)F(v)\leq P(p) \kappa(p)v\sinh(v).
$$
Multiply both sides with the factor $\exp\big(-P(p)\cosh(v)\big)$
and integrate from 0 to $v$. Then  we obtain
\begin{align*}
F(v)e^{-P(p)\cosh(v)}
&\leq P(p)\kappa(p)\int_0^vs\sinh(s)e^{-P(p)\cosh(s)}ds\\
&=\kappa(p)\Big(-ve^{-P(p)\cosh(v)}+\int_0^ve^{-P(p)\cosh(s)}ds\Big)\\
&\leq \kappa(p)v(1-e^{-P(p)\cosh(v)}).
\end{align*}
Therefore,
\begin{equation} \label{e94}
\begin{aligned}
|R(v;p)|&\leq P(p) \kappa(p)v\sinh(v)
+P(p)\kappa(p)v\sinh(v)(e^{P(p)\cosh(v)}-1)\\
&=P(p)\kappa(p)v\sinh(v)e^{P(p)\cosh(v)}.
\end{aligned}
\end{equation}
Next we observe that
\begin{align*}
\int_0^v\cosh(v-s)|w(s;p)|ds
&\leq\frac{|c_{1}(p)|}{2}(ve^v+\sinh(v))
+\frac{|c_{2}(p)|}{2}(\sinh(v)+ve^{-v})\\
&\leq \kappa(p)(v\cosh(v)+\sinh(v))
\end{align*}
and therefore, for any $v\in[0,\hat{v}]$, it holds
\begin{align*}
|R'(v;p)|
&\leq P(p)\kappa(p)(v\cosh(v)+\sinh(v))
+P(p)\int_0^v\cosh(v-s)|R(s;p)|ds\\
&\leq P(p)\kappa(p)(v\cosh(v)
+\sinh(v))+P(p)\cosh(v)\int_0^v|R(s;p)|ds.
\end{align*}
Using this inequality and \eqref{e94} we obtain
\begin{equation}\label{e95}
\begin{aligned}
|R'(v;p)|&\leq P(p)\kappa(p)(v\cosh(v)+\sinh(v))\\
&\quad +P(p)\kappa(p)e^{P(p)}v\cosh(v)(e^{P(p)(\cosh(v)-1)}-1).
\end{aligned}
\end{equation}
The proof of the next theorem follows as the proof of
Theorem \ref{th2}, by using \eqref{e94}, \eqref{e95}
and the expression of the functions $v$ and $Y$
from \eqref{e43} and  \eqref{e46} respectively. So we omit it.

\begin{theorem}\label{th5}
Consider the ordinary differential equation \eqref{e36} associated
with the initial values \eqref{e54}, where we assume that
Condition \ref{ec1} holds with $c = -1$.
Assume also that there exist functions $\Phi_j$, $j=1,2,\dots ,5$,
satisfying \eqref{e50}, \eqref{e51}, \eqref{e52}. If $x(t;p)$,
$t\in [0,T)$ is a maximally defined solution of the problem
\eqref{e36}-\eqref{e54},  then it holds
$T=+\infty$,
 and if we set
\[
w(v;p):=\frac{1}{2}\sum_{j=1}^{2}e^{-(-1)^jv}\Big[x_0(p)
-\frac{(-1)^j}{\sqrt{-b(0;p)}}\Big(\bar{x}_0(p)
+x_0(p)\big[\frac{b'(0;p)}{4b(0;p)}
+\frac{a(0;p)}{2}\big]\Big)\Big]
\]
  and
\[
{\mathcal{E}}(t;p): =x(t;p)-Y(t;p)w(v(t;p);p),
\]
then
 \begin{equation}\label{e96}
\begin{aligned}
 |{\mathcal{E}}(t;p)|
&\leq P(p)\kappa(p)\Big(\frac{b(0;p)}{b(t;p)}
 \Big)^{1/4}\exp\big({-\frac{1}{2}\int_0^ta(s;p)ds}\big)\\
 &\quad \times\int_0^t\sqrt{-b(s;p)}ds\sinh\Big[\int_0^t
 \sqrt{-b(s;p)}ds\Big]\\
 &\quad \times\exp\big(P(p)\cosh(\int_0^t\sqrt{-b(s;p)}ds)\big)
=:{\mathcal{L}}(t;p),
\end{aligned}
\end{equation}
 as well as
\begin{equation}\label{e97}
\begin{aligned}
&\Big| \frac{d}{dt}{\mathcal{E}}(t;p)\Big|\\
&\leq {\mathcal{L}}(t;p)\Big[\frac{\sqrt{\Phi_1(p)|b(t;p)|}}{4}
 +\frac{|a(t;p)|}{2}\Big]\\
&\quad +\Big(\frac{b(0;p)}{b(t;p)}\Big)^{1/4}
 \exp\big({-\frac{1}{2}\int_0^ta(s;p)ds}\big)\sqrt{-b(t;p)}\\
&\quad \times P(p)\kappa(p)\Big[\big(\int_0^t\sqrt{-b(s;p)}ds\big)
 \cosh(\int_0^t\sqrt{-b(s;p)}ds)\\
&\quad +\sinh(\int_0^t\sqrt{-b(s;p)}ds)
+e^{P(p)}\int_0^t\sqrt{-b(s;p)}ds\cosh(\int_0^t\sqrt{-b(s;p)}ds)\\
&\quad \times\Big(e^{P(p)(\cosh(\int_0^t\sqrt{-b(s;p)}ds)-1)}-1\Big)\Big],
\end{aligned}
\end{equation}
for all $t\in I$ and  $p$. Here $P$ is defined in \eqref{e53} and
$\kappa$ in \eqref{e1060}.
\end{theorem}
Now we give the main results of this section.

\begin{theorem} \label{th6}
Consider the initial-value problem \eqref{e36}-\eqref{e54},
where  the conditions of Theorem \ref{th5} are satisfied. Moreover,
assume that $a(\cdot;p)\geq 0$, for all large $p$, as well as the
following properties:
\begin{itemize}
\item[(i)]   It holds  $\sup_{t>0}b(t;p)<0$,
 for all  large $p$.

\item[(ii)] It holds $ {\mathcal{G}}_E(\Phi_j)>0$, for all $j=1,2,\dots ,5$.

\item[(iii)] It holds $x_0, x_1\in{\mathcal{A}}_E$.

\end{itemize}
Define the function
\begin{equation} \label{appr2}
\begin{aligned}
{\tilde{x}}(t;p)
&:=\Big(\frac{b(0;p)}{b(t;p)}\Big)^{1/4}
\exp\Big({-\frac{1}{2}\int_0^ta(s;p)ds}\Big)
\frac{1}{2}\sum_{j=1}^{2}e^{-(-1)^jv}\Big[x_0(p)\\
&\quad -\frac{(-1)^j}{\sqrt{-b(0;p)}}\Big(\bar{x}_0(p)
 +x_0(p)\big[\frac{b'(0;p)}{4b(0;p)}+\frac{a(0;p)}{2}\big]\Big)\Big].
\end{aligned}
\end{equation}
Let  $x$ be a solution of the problem and we let ${\mathcal{E}}(t;p)$
 be the error function defined
by
$$
 {\mathcal{E}}(t;p):=x(t;p)-{\tilde{x}}(t;p).
$$

(a) If  $a(t;\cdot)\in{\mathcal{A}}_E$, $t\in Co(\mathbb{R}^+)$
holds and there is a measurable function $z(t)$, $t\geq 0$ such that
\begin{equation}\label{e110}
|b(t;p)|\leq z(t)\big[\log(\log(E(p)))\big]^2,
\end{equation}
for all $t\geq 0$ and $p$ large enough, then we have
\begin{equation}\label{e113}
{\mathcal{E}}(t;p)\simeq 0,\quad p\to +\infty,\; t\in Co(\mathbb{R}^+),
\end{equation}
provided that the quantities above satisfy the relation
\begin{equation}\label{e111}\begin{aligned}
&\min\{ {\mathcal{G}}_E(\Phi_j):  j=1,\dots,5\}
+\min\{  {\mathcal{G}}_E(\bar{x}_0),  {\mathcal{G}}_E(x_0),
 {\mathcal{G}}_E(x_0)
+{\mathcal{G}}_E(a(t;\cdot))\}\\
&=:L_0>0,\quad t\in Co(\mathbb{R}^+).
\end{aligned}
\end{equation}
The growth index of the error function satisfies
 \begin{equation}\label{e112}
   {\mathcal{G}}_E({\mathcal{E}}(t;\cdot))\geq L_0,\quad
t\in Co(\mathbb{R}^+).
 \end{equation}

(b) Assume that \eqref{e110} holds and $z(t)$, $t\geq 0$ is a constant,
 $z(t)=\eta$, say. If the condition
\begin{equation}\label{e720}
\begin{aligned}
&\min\{\mathcal{G}_E(\Phi_j):j=1,\dots,5\}-1
+\min\{{\mathcal{G}}_E(\bar{x}_0),\;
{\mathcal{G}}_E(x_0),\\
&{\mathcal{G}}_E(x_0)+ {\mathcal{G}}_E(a(t;\cdot)),
{\mathcal{G}}_E(a(t;\cdot))+ {\mathcal{G}}_E(\bar{x}_0),
{\mathcal{G}}_E(x_0)+2{\mathcal{G}}_E(a(t;\cdot))\}\\
&=:L_1>0,\quad t\in Co(\mathbb{R}^+)
\end{aligned}
\end{equation}
holds, then
\begin{gather}\label{e730}
\frac{d}{dt}{\mathcal{E}}(t;p)\simeq 0,\quad p\to +\infty,\;
 t\in Co(\mathbb{R}^+), \\
\label{e740}
   {\mathcal{G}}_E\big(\frac{d}{dt}{\mathcal{E}}(t;\cdot)\big)
\geq L_1,\quad t\in Co(\mathbb{R}^+).
\end{gather}
\end{theorem}

\begin{proof}
(a) We start with the proof of \eqref{e113}. Due to \eqref{e111},
given any small $\varepsilon>0$ and $N\in(0,L_0-\varepsilon)$
we  take  real numbers $\zeta>0$ and $\tau, \sigma, \nu$ close to
 $-{\mathcal{G}}_E(\bar{x}_0)$, $-{\mathcal{G}}_E(x_0)$,
$-{\mathcal{G}}_E(a(t;\cdot))$ respectively, such that
\begin{equation}\nonumber
\min\{ {\mathcal{G}}_E(\Phi_j):  j=1,\dots,5\}>\zeta>N+\max\{\tau,\sigma,
\sigma+\nu\}.
\end{equation}
 Hence \eqref{e85} and \eqref{e86} are
satisfied. These arguments and Lemma \ref{l1} imply that inequalities 
\eqref{e83} hold, for some $K>0$ and $p\geq p_0\geq
1$. Notice, also, that
\begin{equation}\label{e118}
\max\{\tau,\;\sigma,\; \sigma+\nu\}-\zeta<-N.
\end{equation}
Because of \eqref{e118} we can obtain some
$\delta>0$ and $p_1\geq p_0$ such that
\begin{equation}\label{e119}
\frac{5\delta}{2}+\frac{K}{2}q^{-\zeta}+\max\{\tau,\;\sigma+{\delta},
\;\sigma+\nu\}-\zeta<-N, \quad p\geq p_1.
\end{equation}
Keep in mind assumption (i) of the theorem, relations \eqref{e85}
and \eqref{e86}, for some positive constants $K_2, K_3, K_4$ and,
 moreover,
 \begin{equation}\label{e1190}
 b(t;p)\leq -\theta,
\end{equation}
for all $t$ and $p$ large.
Fix any $\hat{t}>0$ and define
$$
\lambda:=\int_0^{\hat{t}}\sqrt{z(s)}\,ds.
$$
 Obviously there is a $p_2\geq p_1$ such that  for all $q\geq p_2$,
we have
\begin{gather}\label{e121}
Kq^{-\zeta}\leq 1, \quad q\geq p_2,\\
\label{e120}
\log(\log(u))\leq\log(u)\leq u^{\delta}, \quad u\geq p_2.
\end{gather}\par
Now, consider the function ${\mathcal{L}}(t;p)$ defined in \eqref{e96}.
Then, due to \eqref{e83}, \eqref{e66}, \eqref{e84},
\eqref{e85}, \eqref{e1190},  \eqref{e110} and \eqref{e120},
for all $t\in[0,\hat{t}]$ and $q\geq p_{2}$, we have
\begin{align*}
{\mathcal{L}}(t;p)
&\leq P(p)\kappa(p)\Big(\frac{z(0)}{\theta}\Big)^{1/4}
{\mathcal{G}}_E\Big[\log(\log(q))\Big]^{3/2}\sinh[  {\mathcal{G}}_E\log(\log(q))]\\
&\quad \times\exp\big[P(p)\cosh[\log(\log(q))]\big]\\
&\leq   {\mathcal{G}}_E P(p)\kappa(p)
\Big(\frac{z(0)}
{\theta}\Big)^{1/4} q^{3\delta/2}
\frac{1}{2}q^{\delta}q^{\frac{P(p)}{2}}\exp(\frac{  {\mathcal{G}}_E P(p)}{2\log(q)})\\
&\leq   {\mathcal{G}}_E Kq^{-\zeta}\frac{1}{2}
\Big[K_3q^{\sigma}+\frac{K_2}{\sqrt{\theta}}q^{\tau}
+\frac{K_3}{\sqrt{\theta}}q^{\sigma}\Big(\frac{\sqrt{K}}{4}q^{-\frac{\zeta}{2}}q^{\delta}\sqrt{z(0)}+\frac{K_4}{2}q^{\nu}\Big)\Big]\\
&\quad \times
\Big(\frac{z(0)}
{\theta}\Big)^{1/4}
q^{3\delta/2}
\frac{1}{2}q^{\delta}q^{\frac{P(p)}{2}}
\exp(\frac{  {\mathcal{G}}_E P(p)}{2\log(q)})e^{1/\lambda}.
\end{align*}
Therefore,
\begin{equation} \label{e123}
\begin{aligned}
{\mathcal{L}}(t;p)
&\leq  \Lambda_1q^{-\zeta+\sigma+\frac{3\delta}{2}+\delta
 +\frac{K}{2}q^{-\zeta}}+\Lambda_2q^{-\zeta+\tau+\frac{3\delta}{2}
 +\delta+\frac{K}{2}q^{-\zeta}}\\
&\quad +\Lambda_3q^{-\zeta+\sigma-\frac{\zeta}{2}+{\delta}
 +\frac{3\delta}{2}+\delta+\frac{K}{2}q^{-\zeta}}
 +\Lambda_4q^{-\zeta+\sigma+\nu+\frac{3\delta}{2}+\delta
 +\frac{K}{2}q^{-\zeta}},
\end{aligned}
\end{equation}
for some constants $\Lambda_j, j=1,2,3,4$.  From \eqref{e119} and \eqref{e123} we obtain
\begin{equation}\label{e124}
{\mathcal{L}}(t;p)\leq \Lambda_0 q^{-N}, \quad
t\in[0,\hat{t}]
\end{equation}
for some $\Lambda_0>0$.
 This and \eqref{e96} complete the proof of \eqref{e113}.

Now,  from the  previous arguments it follows that given
any $L\in(0,N)$  it holds
$$
{\mathcal{L}}(t;p)q^{L}\leq \Lambda_0q^{-N+L}\to 0, \quad\text{as }
p\to+\infty,
$$
where, notice that, the constant $\Lambda_0$ is uniformly chosen
for $t$ in the interval $[0,\hat{t}]$ and $p$ with $E(p)\geq p_2$.
This gives
$$
{\mathcal{E}}(t;p)q^{L}\to 0, \quad\text{as } p\to+\infty,\;
 t\in Co(\mathbb{R}^+).
$$
 Hence the growth index of the error function $\mathcal{E}$ satisfies
 $  {\mathcal{G}}_E({\mathcal{E}}(t;p))\geq L$ and so we get
$$
{\mathcal{G}}_E({\mathcal{E}}(t;p))\geq N \quad\text{as }
 p\to+\infty.
$$
Since $N$ is arbitrary in the interval $(0,L_0-\varepsilon)$ and
$\varepsilon$ is small, we get \eqref{e112}.

(b) Fix any $\hat{t}>0$ and take any small $\varepsilon>0$ and
$N\in(0, L_1-\varepsilon)$. Also from  \eqref{e720}  we can
get $\zeta>0$, $\delta>0$ and reals $\sigma, \nu, \tau$ as above,
such that
\begin{equation}\label{e125}
\begin{aligned}
&\max\Big\{\frac{5\delta}{2}+1+\max\{\delta+\sigma+\nu, \; \delta+\tau,\nu+\tau,\;
 2\delta+\sigma, 2\nu+\sigma\},\\
&2\delta+\hat{t}\sqrt{\eta}\delta+1+\max\{\tau, \delta+\sigma, \sigma
+\nu\}\Big\}+N\\
&<\zeta<\min\{ {\mathcal{G}}_E(\Phi_j):  j=1,\dots,5\}.
\end{aligned}
\end{equation}
Such a $\delta$ may be chosen in such way that
${\hat{t}}\sqrt{\eta}\delta<1$.


By using  inequality \eqref{e97} and relation \eqref{e47}, we obtain
\begin{align*}
|\frac{d}{dt}{\mathcal{E}}(t;p)|
&\leq {\mathcal{L}}(t;p)\Big[\frac{\sqrt{\Phi_1(p)|b(t;p)|}}{4}
 +\frac{|a(t;p)|}{2}\Big]\\
&\quad +\Big(\frac{b(0;p)}{b(t;p)}\Big)^{1/4}
 \exp\big(-\frac{1}{2}\int_0^{\hat{t}}a(s)ds\big)\sqrt{-b(t;p)}\\
&\times\Big[\int_0^t\sqrt{-b(s;p)}ds\cosh(\int_0^t\sqrt{-b(s;p)}ds)
+\sinh(\int_0^t\sqrt{-b(s;p)}ds)\\
&\quad +e^{P(p)}\int_0^t\sqrt{-b(s;p)}ds\cosh(\int_0^t\sqrt{-b(s;p)}ds)\\
&\quad \times\Big(\exp\Big(P(p)(\cosh(\int_0^t\sqrt{-b(s;p)}ds)-1)\Big)-1\Big)\Big],
\end{align*}
namely
\begin{align*}
|\frac{d}{dt}{\mathcal{E}}(t;p)|
&\leq{\mathcal{L}}(t;p)\Big[\frac{1}{4}K^{1/2}p^{\frac{-\zeta}{2}}\sqrt{\eta}\log(\log(q))+\frac{K_4q^{\nu}}{2}\Big]\\
&\quad +\Big(\frac{\eta}{\theta}\Big)^{1/4}\big(\log(\log(q))
\big)^{1/2}Kq^{-\zeta}
\frac{1}{2}\Big[K_3q^{\sigma}+\frac{1}{\sqrt{\eta}}\Big(K_2q^{\tau}\\
&\quad
 +K_3q^{\sigma}\Big[\frac{1}{4}K^{1/2}
q^{\frac{-\zeta}{2}}\sqrt{\eta}\log(\log(q))+\frac{K_4q^{\nu}}{2}\Big]
\\
&\times\Big[{\hat{t}}\sqrt{\eta}(\log(\log(q)))\cosh({\hat{t}}
 \sqrt{\eta}\log(\log(q)))
 +\sinh({\hat{t}}\sqrt{\eta}\log(\log(q)))\\
&\quad +e^{Kq^{-\zeta}}{\hat{t}}\sqrt{\eta}\log(\log(q))\cosh({\hat{t}}\sqrt{\eta}\log(\log(q)))\\
&\quad \times\Big(\exp\Big(Kq^{-\zeta}(\cosh({\hat{t}}
 \sqrt{\eta}\log(\log(q)))-1)\Big)-1\Big)\Big]
 \lambda{q}^{\frac{\delta}{2}}.
\end{align*}
Letting for any  $p$ with $q:=E(p)\geq p_0>e$, and $p_0$ being
such that
$$
q\geq p_0\Longrightarrow\log(q)\leq q^{\delta}
$$
and using the fact that
$$
x>0\Longrightarrow \cosh(x)\leq e^x \quad\text{and}\quad
\sinh(x)\leq \frac{1}{2}e^x,
$$
from the previous estimate, we obtain
\begin{align*}
|\frac{d}{dt}{\mathcal{E}}(t;p)|
&\leq\Big[\Lambda_1q^{-\zeta+\sigma+\frac{3\delta}{2}+\delta
 +\frac{K}{2}q^{-\zeta}}+\Lambda_2q^{-\zeta+\tau+\frac{3\delta}{2}
 +\delta+\frac{K}{2}q^{-\zeta}}\\
&\quad +\Lambda_3q^{-\zeta+\sigma-\frac{\zeta}{2}+{\delta}
 +\frac{3\delta}{2}+\delta+\frac{K}{2}q^{-\zeta}}+\Lambda_4q^{-\zeta+\sigma+\nu+\frac{3\delta}{2}+\delta+\frac{K}{2}q^{-\zeta}}\Big]\\
&\quad\times\Big[\frac{1}{4}K^{1/2}q^{\frac{-\zeta}{2}}
 \sqrt{\eta}q^{\delta}+\frac{K_4q^{\nu}}{2}\Big]
 +\Big(\frac{\eta}{\theta}\Big)^{1/4}q^{\frac{\delta}{2}}Kq^{-\zeta}\\
&\quad \times\frac{1}{2}\Big[K_3q^{\sigma}
 +\frac{1}{\sqrt{\eta}}\Big(K_2q^{\tau}
 +K_3q^{\sigma}\Big[\frac{1}{4}K^{1/2}q^{\frac{-\zeta}{2}}
 \sqrt{\eta}q^{\delta}+\frac{K_4q^{\nu}}{2}\Big]\\
&\quad \times\Big[{\hat{t}}\sqrt{\eta}q^{\delta}q^{{\hat{t}}
 \sqrt{\eta}\delta}+\frac{1}{2}q^{{\hat{t}}\sqrt{\eta}\delta}
 +e^{Kq^{-\zeta}}{\hat{t}}\sqrt{\eta}q^{\delta}q^{{\hat{t}}
 \sqrt{\eta}\delta}\\
&\quad \times\Big(\exp\Big(Kq^{-\zeta}((\log(q))^{{\hat{t}}
 \sqrt{\eta}\delta})\Big)-1\Big)\Big]\lambda{q}^{\frac{\delta}{2}}.
\end{align*}
Therefore it follows that
\begin{equation}\begin{aligned}\label{e126}
|\frac{d}{dt}{\mathcal{E}}(t;p)|\leq \sum_{j=1}^{20}\Gamma_jq^{r_j},
\end{aligned}\end{equation}
for some positive constants $\Gamma_j$, $j=1,2,\dots,20$ and
\begin{gather*}
r_1:=-\zeta-\frac{\zeta}{2}+\sigma+\frac{7\delta}{2}+1, \quad
r_2:=-\zeta+\sigma+\frac{5\delta}{2}+1+\nu,\\
r_3:=-\zeta-\frac{\zeta}{2}+\tau+\frac{7\delta}{2}+1, \quad
r_4:=-\zeta+\tau+\frac{5\delta}{2}+1+\nu,\\
r_5:=-2\zeta+\sigma+\frac{9\delta}{2}+1, \quad
r_6=r_7:=-\zeta-\frac{\zeta}{2}+\sigma+\frac{7\delta}{2}+1+\nu,\\
r_8:=-\zeta+\sigma+\frac{5\delta}{2}+1+2\nu,\quad
r_{9}:=2\delta-\zeta+\sigma+{\hat{t}}\sqrt{\eta}\delta,\\
r_{10}:=\delta-\zeta+\sigma+{\hat{t}}\sqrt{\eta}\delta,\quad
r_{11}:=2\delta-\zeta+\sigma+{\hat{t}}\sqrt{\eta}\delta+1,\\
r_{12}:=2\delta-\zeta+\tau+{\hat{t}}\sqrt{\eta}\delta, \quad
r_{13}:= \delta-\zeta+\tau+{\hat{t}}\sqrt{\eta}\delta,\\
r_{14}:=2\delta-\zeta+\tau+{\hat{t}}\sqrt{\eta}\delta+1, \quad
r_{15}:=3\delta-\zeta-\frac{\zeta}{2}+\sigma+{\hat{t}}\sqrt{\eta}\delta,\\
r_{16}:=2\delta-\zeta+\sigma-\frac{\zeta}{2}+{\hat{t}}\sqrt{\eta}\delta,\quad
r_{17}:=3\delta-\zeta+\sigma-\frac{\zeta}{2}+{\hat{t}}\sqrt{\eta}\delta+1,\\
r_{18}:=2\delta-\zeta+\sigma+\nu+{\hat{t}}\sqrt{\eta}\delta, \quad
r_{19}:=\delta-\zeta+\sigma+\nu+{\hat{t}}\sqrt{\eta}\delta,\\
r_{20}:=2\delta-\zeta+\sigma+\nu+{\hat{t}}\sqrt{\eta}\delta+1.
\end{gather*}
     Due to \eqref{e125} all the previous constants are smaller
than $-N$. Then, for the quantity $\Gamma_0:=\max_j \Gamma_j$,
inequality \eqref{e126}  gives
\begin{equation}\label{e1100}
  |\frac{d}{dt}{\mathcal{E}}(t;p)|\leq \Gamma_0q^{-N},\quad q\geq p_0,
\end{equation}
   which leads to \eqref{e730}, since the constant $N$ is arbitrary.

 The proof of the claim \eqref{e740} follows from \eqref{e1100}
in the same way as \eqref{e71} follows from \eqref{e89}.
  \end{proof}

 \begin{theorem} \label{th7}
Consider the initial-value problem \eqref{e36}-\eqref{e54}, where
the conditions of
 Theorem \ref{th5} and (i), (ii), (iii) of Theorem \ref{th6}
are satisfied. Assume, also, that \eqref{e1030} and
\eqref{e110} keep in force.

(a) If relation \eqref{e111} is true, then
$$
{\mathcal{E}}(t;p)\simeq 0, \quad p\to +\infty, \; t\in Co([0,T(L_0))).
$$
 Moreover, the growth index  of the error function satisfies
$$
   {\mathcal{G}}_E({\mathcal{E}}(t;\cdot))\geq L_0,\quad
t\in Co([0,T(L_0))).
$$

(b) If \eqref{e720}  is satisfied, then
\begin{gather*}
\frac{d}{dt}{\mathcal{E}}(t;p)\simeq 0, \quad p\to +\infty, \;
 t\in Co([0,T(L_1))),\\
{\mathcal{G}}_E(\frac{d}{dt}{\mathcal{E}}(t;p))\geq L_1, \quad
t\in Co([0,T(L_1))).
\end{gather*}
\end{theorem}

\begin{proof}
First of all we can see that for a fixed ${\hat{t}}\in(0,T(L_0))$,
due to \eqref{e1030} and \eqref{e706} we can find reals
$\tau, \sigma, \nu$ close to 
 $-{\mathcal{G}}_E(\bar{x}_0)$, $-{\mathcal{G}}_E(x_0)$,
$-{\mathcal{G}}_E(a(t;\cdot))$, respectively, such that
$$
\exp\Big(-\frac{1}{2}\int_0^{\hat{t}}a(s;p)ds\Big)
\leq p^{\Omega(\hat{t})/2}.
$$
Taking into account this fact and relation  \eqref{e111}, we can see
that
$$
\max\{\tau, \sigma, \sigma+\nu\}+\frac{1}{2}\Omega(\hat{t})
<\min\{ {\mathcal{G}}_E(\Phi_j):  j=1,\dots,5\}.
$$
Now, we proceed as in the proof of Theorem \ref{th6},
where it is enough to observe that the right hand side of
relation \eqref{e123} is multiplied by the factor
$$
\exp(-\frac{1}{2}\int_0^{\hat{t}}a(s;p)ds).
$$
A similar procedure is followed for the proof of part (b) of the
theorem.
\end{proof}

 \section{A specific case of the initial-value problem
\eqref{e34}-\eqref{e35}}
 We shall apply the results of Theorem \ref{th6} to a specific case
of the problem \eqref{e34}-\eqref{e35}, namely to the problem
 \begin{equation}\label{e130}
 x''+2ap^{\nu}x'-a^2p^{2\mu}x+p^mx\sin(x)=0,\end{equation}
  associated with the initial conditions
 \begin{equation}\label{e131}
 x(0;p)=ap^{\sigma}, \quad x'(0;p)=ap^{\tau},
\end{equation}
  where, for simplicity, we have set
 $$
a:=\frac{1}{10}, \quad \mu:=2, \quad
\nu:=\frac{1}{9}, \quad \tau=\sigma:=\frac{1}{2}, \quad
m\leq \frac{2}{9}.
$$
Using these quantities we can see that all assumptions of
Theorem \ref{th6} hold, with $E(p)=p$,
 $$
L_0=\frac{19}{6}, \quad L_1=\frac{7}{6}.
$$
 Then an approximate solution of the problem is given by
$$
{\tilde{x}}(t;p):=\frac{1}{10}e^{-{\frac{t}{10}p^{\frac{1}{9}}}}
p^{1/2}\cosh(\frac{p^2t}{10})+(10p^{-\frac{3}{2}}
+p^{\frac{11}{18}})\sinh(\frac{p^2t}{10}),\quad t\geq 0.
$$
In Figure \ref{fig2}  the approximate solution for the values
$p$=1, 3.45, 5.90, 8.38, 10.80, 13.25, 15.70, 18.15 is shown.

\begin{figure}[th]
\begin{center}
 \includegraphics[width=0.2\textwidth]{fig2a} \hskip .1 in
 \includegraphics[width=0.2\textwidth]{fig2b} \hskip .1 in
 \includegraphics[width=0.2\textwidth]{fig2c} \hskip .1 in
 \includegraphics[width=0.2\textwidth]{fig2d} \vskip .1 in
 \includegraphics[width=0.2\textwidth]{fig2e} \hskip .1 in
 \includegraphics[width=0.2\textwidth]{fig2f} \hskip .1 in
 \includegraphics[width=0.2\textwidth]{fig2g} \hskip .1 in
 \includegraphics[width=0.2\textwidth]{fig2h}
\end{center}
\caption{Approximate solutions of
 \eqref{e130}-\eqref{e131}, when $p$=1, 3.45, 5.90, 
 8.38, 10.80, 13.25, 15.70, 18.15 respectively}
 \label{fig2}
\end{figure}

\section{Approximate solutions of the boundary-value problem
\eqref{e361}-\eqref{e362}}

In this section we consider  \eqref{e361} associated with the
boundary conditions \eqref{e362}. Our purpose is to use the results
of section 3 in order to approximate the solutions of the
boundary-value problem, when the parameter $p$ approaches the
critical value $+\infty$.

To begin with, define $\tau:=v(1;p)$ and from now on the letter
$J_p$ will denote the interval $[0,\tau]$. Also, in order to unify
our results, we make the following convention:
We shall denote by
\begin{gather*}
S_c(v)=\begin{cases} \sin(v), &\text{if } c=+1\\
\sinh(v), &\text{if } c=-1,
\end{cases}\\
C_c(v)=\begin{cases} \cos(v), &\text{if } c=+1\\
\cosh(v), &\text{if } c=-1.
\end{cases}
\end{gather*}
Our basic hypothesis which will be assumed in all the sequel
without any mention is the following:

\begin{condition}\label{ec3} \rm
In case $c=+1$ let
\begin{equation}\label{e134}
\tau:=v(1;p)=\int_0^1\sqrt{b(s;p)}ds<\pi,
\end{equation}
for all $p$ large enough.
\end{condition}

Suppose that  the problem \eqref{e361}-\eqref{e362}  admits a
solution $x(t;p)$, $t\in[0,1]$. Then Theorem \ref{th1} implies,
and conversely, that $y(v;p)=(S_px(\cdot;p))(v)$ is a solution of
 \eqref{e49} having boundary conditions
\begin{equation}\label{e132}\begin{gathered}
y(0;p)=x_0(p)=:y_0(p)\\
\begin{aligned}
y(\tau;p)&=y(v(1;p);p)=\frac{x(1;p)}{Y(1;p)}\\
&={x}_1(p)\Big(\frac{b(1;p)}{b(0;p)}\Big)^{1/4}
e^{\frac{1}{2}\int_0^1a(s;p)ds}=:y_{\tau}(p).
\end{aligned}
\end{gathered}
\end{equation}

Before we seek for approximate solutions of  \eqref{e361}-\eqref{e362},
we shall give conditions for the existence of solutions.
To do that we need the following classical fixed point theorem.


\begin{theorem}[Nonlinear alternative \cite{DG}]\label{thNA}
 Let $D$ be a convex subset of a Banach space $X$, let $U$ be an
open subset of $D$, and let $A: \bar{U}\to D$ be a
completely continuous mapping. If $q \in U$ is a fixed element,
then either $A$ has
a fixed point in $\bar{U}$, or there is a point $u\in\partial{ U}$ and
 $\lambda\in(0,1)$, such that $u   =\lambda Au +(1-\lambda)q$.
\end{theorem}

To proceed we shall formulate the integral form of the problem and
then we shall apply Theorem \ref{thNA}. To this end we let $w$ be
the solution of the homogeneous equation
$$
w''+cw=0,
$$
with boundary conditions $w(0;p)=y_0(p)$ and
$w(\tau;p)=y_{\tau}(p)$. This means that $w$ is defined as
\begin{equation}\label{e133}
w(v;p)=\frac{1}{S_c(\tau)}\big(y_0(p)S_c(\tau-v)+y_{\tau}(p)S_c(v)\big).
\end{equation}
(Notice that because of \eqref{e134} in case $c=+1$ the factor
$S_c(\tau)$ is positive for all $\tau$.) Hence we see
that
\[
|w(v;p)|\leq q_c(|y_0|+|y_{\tau}|),
\]
where
\[
q_c:=\begin{cases}
\frac{1}{\min\{\sin(\sqrt{\theta}), \sin({\tau})\}}, & c=+1\\[4pt]
\frac{\sinh({\tau})}{\sinh(\sqrt{\theta})}, & c=-1.
\end{cases}
\]
Next we let $R(v;p)$, $v\in J_p$, be the solution of the equation
\begin{equation}\label{e1340}
R''(v;p)+cR(v;p)=H(v;p), \quad v\in J_p
\end{equation}
satisfying the boundary conditions
\begin{equation}\label{e1330}
R(0;p)=R(\tau;p)=0,\end{equation}
where
\[
H(v;p):=C(t,y(v;p);p)y(v;p)
=C(t,y(v;p);p)R(v;p)+C(t,y(v;p);p)w(v;p).
\]
The latter, due to \eqref{e53}, implies that
\begin{equation}\label{e141}
|H(v;p)|\leq P(p)|R(v;p)|+P(p)q_c(|y_0(p)|+|y_{\tau}(p)|).
\end{equation}
To  formulate an integral form of the problem we follow an
elementary method and obtain
\begin{equation}\label{e136}
R(v;p)=d_1C_c(v)+d_2S_c(v)+\int_0^vS_c(v-s)H(s;p)ds,
\quad v\in J_p,
\end{equation}
for some constants $d_1, d_2$ to be determined from the
boundary values \eqref{e1330}. Indeed,
we have
$0=R(0;p)=d_1$ and
$$
0=R(\tau;p)=d_1C_c(\tau)+d_2S_c(\tau)+\int_0^\tau S_c(\tau-s)H(s;p)ds.
$$
This implies
$$
d_2=-\frac{1}{S_c(\tau)}\int_0^{\tau}S_c(\tau-s)H(s;p)ds$$
and so  we have
\begin{equation}\label{e138}
R(v;p)=\int_0^{\tau}G(v,s;p)H(s;p)ds,
\end{equation}
where the one-parameter Green's function $G$ is defined by
\begin{equation}\label{e139}
G(v,s;p):=\frac{-S_c(v)S_c(\tau-s)}{S_c(\tau)}+S_c(v-s)\chi_{[0,v]}(s).
\end{equation}
Here the symbol $\chi_{A}$ denotes the characteristic function
of the set $A$.
  From \eqref{e139} we can see that
\[
G(v,s;p)=\begin{cases}-\frac{S_c(s)S_c(\tau-v)}{S_c(\tau)},
&0\leq s\leq v\leq \tau\\[4pt]
-\frac{S_c(v)S_c(\tau-s)}{S_c(\tau)}, & 0\leq v\leq s\leq \tau
\end{cases}
\]
  From Condition \ref{ec1} and \eqref{e134} it follows that for all
$s, v\in[0,\tau]$ it holds
\begin{equation}\label{e139a}
\max\{|G(v,s;p)|,\; |\frac{\partial}{\partial v}G(v,s;p)|\}
\leq Q_c,
\end{equation}
where \[
Q_c:=\begin{cases}
\frac{1}{\min\{\sin(\sqrt{\theta}), \sin({\tau})\}}, & c=+1\\[4pt]
\frac{(\sinh({\tau}))^2}{\sinh(\sqrt{\theta})}, & c=-1.
\end{cases}
\]

Now we see that the integral form of the boundary-value
problem \eqref{e49}-\eqref{e132} is the following:
\begin{equation}\label{e1390}
y(v;p)=w(v;p)+\int_0^{\tau}G(v,s;p)C(\phi(s;p),y(s;p);p)y(s;p)ds,
\quad v\in J_p.
\end{equation}
 To show the existence of a solution of \eqref{e1390} we consider
the  space
$C(J_p,\mathbb{R})$ of all continuous functions $y:J_p\to\mathbb{R}$
endowed with the sup-norm
$\|\cdot\|$-topology. This is a Banach space. Fix a $p$ large
enough and define the operator
$A: C(J_p,\mathbb{R}) \to C(J_p,\mathbb{R})$ by
$$
(Az)(v):=w(v;p)+\int_0^{\tau}G(v,s;p)C(\phi(s;p),z(s);p)z(s)ds
$$
which is completely continuous
(due to Properties \ref{ec1} and \ref{ec2}).

To proceed we assume for a moment that
\begin{equation}\label{ea139}
1-P(p)v(1;p)Q_c=:\Delta(p)>0, \enskip p\enskip\text{large}
\end{equation}
where (recall that) $P(p)$ is defined in \eqref{e53}.
Take any large $p$ and let $\tau=v(1;p)=:v$. Then, from \eqref{ea139}, for some $\Delta>0$, we have
$$
1-P(p){{\tau}}Q_c\geq \Delta>0
$$
Consider the open ball $B(0,l)$ in the space $C(J_p,\mathbb{R})$, where
$$
l:=\frac{\|w\|}{1-P(p){{\tau}}Q_c}+1.
$$
Here $\|w\|$ is the sup-norm of $w$ on $J_p$.

Assume that the operator $A$ does not have any fixed point in $B(0,l)$.
Thus, due to Theorem \ref{thNA} and by setting $q=0$, there exists
a point $z$ in the boundary of $B(0,l)$ satisfying
$$
z=\lambda Az,
$$
for some $\lambda\in(0,1)$. This means that, for each $v\in J_p,$
it holds
\[
|z(v)|\leq \|w\|+\int_0^{\tau}|G(v,s;p)||C(\phi(s;p),z(s);p)||z(s)|ds.
\]
Then, from \eqref{e139a} we have
 $$
|z(v)|\leq \|w\|+Q_cP(p)\int_0^{\tau}|z(s)|ds.
$$
 Thus,
\begin{equation}\label{e142}
\begin{aligned}
|z(v)|\leq \|w\|+{Q_cP(p)\tau}\|z\|,\end{aligned}
\end{equation}
which leads to the contradiction
$$
l=\|z\|\leq \frac{\|w\|}{1-P(p){{\tau}}Q_c}=l-1.
$$
Taking into account the relation between the solutions of the original
problem and the solution of the problem \eqref{e361}-\eqref{e362},
as well the previous arguments,  we conclude the following result:

\begin{theorem}\label{th8}
If Properties \ref{ec1}, \ref{ec2} and \eqref{ea139} are true,
then the boundary-value problem \eqref{e361}-\eqref{e362}
admits at least one solution.
\end{theorem}

Now, we  give the main results of this section. First we define
the function
\begin{equation} \label{appr21}
\begin{aligned}
{\tilde{x}}(t;p)&:=\Big(\frac{b(0;p)}{b(t;p)}\Big)^{1/4}
 \exp\big(-\frac{1}{2}\int_0^ta(s;p)ds\big)
 \frac{1}{S_c(\int_0^1\sqrt{b(s;p)}ds)}\\
&\quad \times\Big\{x_0(p)S_c(\int_t^1\sqrt{b(s;p)}ds)\\
&\quad +x_1(p)\Big(\frac{b(1;p)}{b(0;p)}\Big)^{1/4}e^{\frac{1}{2}\int_0^1a(s;p)ds}
S_c(\int_0^t\sqrt{b(s;p)}ds)\Big\}\\
&=\frac{1}{S_c(\int_0^1\sqrt{b(s;p)}ds)}\Big\{\Big(\frac{b(0;p)}{b(t;p)}
 \Big)^{1/4}\\
&\quad \times\exp\big(-\frac{1}{2}\int_0^ta(s;p)ds\big)S_c(\int_t^1
 \sqrt{b(s;p)}ds)x_0(p)\\
&\quad +\Big(\frac{b(1;p)}{b(t;p)}\Big)^{1/4}e^{\frac{1}{2}
 \int_0^1a(s;p)ds}
S_c(\int_0^t\sqrt{b(s;p)}ds)x_1(p)\Big\}
\end{aligned}
\end{equation}
which, as we shall see,  is an approximate solution of the problem.

\begin{theorem} \label{th9}
Consider the boundary-value problem \eqref{e361}-\eqref{e362},
where  assume that Properties \ref{ec1}, \ref{ec2}, \ref{ec3},
 the  conditions (i), (ii) of Theorem \ref{th3} and assumption
\eqref{e1030} are satisfied. Also, assume that the boundary values
have a behavior like
\begin{equation}\label{e143}
x_0, \enskip x_1\in{\mathcal{A}_E}.\end{equation}

(a) If $\omega$ is given in assumption \eqref{e1030} and for  $$
\Omega:=\frac{1}{2}\int_0^1\omega(s)ds
$$ the condition
 \begin{equation}\label{e144}\begin{aligned}
&\min\{ {\mathcal{G}}_E(\Phi_j):  j=1,\dots,5\}+ \frac{3}{4}  {\mathcal{G}}_E(b(t;\cdot))-\Omega+\min\{  {\mathcal{G}}_E(x_0),   {\mathcal{G}}_E(x_1)\}\\
 &=:L_0>0\end{aligned}
 \end{equation}
is satisfied, then the existence of a solution  $x$  of the
problem is guaranteed, and if
\begin{equation}\label{ea212}
{\mathcal{E}}(t;p):=x(t;p)-{\tilde{x}}(t;p)\end{equation}
 is the error function, where ${\tilde{x}}$ is defined by \eqref{appr21},
 then  we have
\begin{equation}\label{e1130}
{\mathcal{E}}(t;p)\simeq 0,\quad p\to +\infty,\quad t\in Co([0,1]).
\end{equation}

Also, the growth index of the error function satisfies
 \begin{equation}\label{e1120}
   {\mathcal{G}}_E({\mathcal{E}}(t;\cdot))\geq L_0,\quad t\in Co([0,1]).
 \end{equation}

(b) Assume that  the condition
\begin{equation}\label{e7201}
\begin{aligned}
&\min\{ {\mathcal{G}}_E(\Phi_j):  j=1,\dots,5\}
 +\frac{3}{4}{\mathcal{G}}_E(b(t;\cdot))-\Omega
 +\min\{{\mathcal{G}}_E(x_0)+\frac{1}{2}{\mathcal{G}}_E(b(t;\cdot)), \\
& {\mathcal{G}}_E(x_0)+ {\mathcal{G}}_E(b(t;\cdot)),
 {\mathcal{G}}_E(x_1)+\frac{1}{2} {\mathcal{G}}_E(b(t;\cdot)),
 {\mathcal{G}}_E(x_0),{\mathcal{G}}_E(x_1)\}\\
&=:L_1>0,\quad t\in Co([0,1]),
\end{aligned}
\end{equation}
holds. Then  the existence of a solution  $x$  of the problem
is guaranteed and it satisfies
\begin{gather}\label{e7301}
\frac{d}{dt}{\mathcal{E}}(t;p)\simeq 0,\quad p\to +\infty,\;
 t\in Co([0,1]), \\
\label{e7401}
   {\mathcal{G}}_E\big(\frac{d}{dt}{\mathcal{E}}(t;\cdot)\big)
\geq L_1,\quad t\in Co([0,1]).
 \end{gather}
  \end{theorem}

\begin{proof} (a) Take any  $N\in(0, L_0)$ and, because
of  \eqref{e144}, we can choose $\zeta>0$ and real numbers
$\mu, \sigma, \varrho$ near $-{\mathcal{G}}_E(b(t;\cdot))$,
$-{\mathcal{G}}_E(x_0)$, $-{\mathcal{G}}_E(x_1)$, respectively,
 such that
\begin{equation}\label{e145}
 \min\{{\mathcal{G}}_E(\Phi_j):  j=1,\dots,5\}>\zeta\geq N+\frac{3\mu}{4}
+\Omega+\max\{\sigma, \varrho\}.
\end{equation}
  Thus, we have
\begin{equation}\label{e146}
 \frac{3\mu}{4}+\Omega+\max\{\sigma, \varrho\}-\zeta\leq -N
\end{equation}
 and, due to Lemma \ref{l1},
\begin{equation}\label{e147}
 P(p)\leq K(E(p))^{-\zeta},
 \end{equation}
for some $K>0$.
 Thus \eqref{ea139} is satisfied for $p$ large enough.
This makes Theorem \ref{th8} applicable and the existence of
a solution is guaranteed.

 Let ${\mathcal{E}}(t;p)$ be the error function defined
in \eqref{ea212}.  From \eqref{e138}, \eqref{e139a} and \eqref{e141}
we have
$$
|R(v;p)|\leq q_cQ_cP(p){{\tau}}(|y_0|+|y_{\tau}|)+Q_cP(p)\int_0^{\tau}|R(s;p)|ds,$$
  and therefore
\begin{equation}\label{e144a}
|R(v;p)|\leq \frac{q_cQ_cP(p){{\tau}}(|y_0|
 +|y_{\tau}|)}{1-Q_cP(p){{\tau}}}
\leq \frac{1}{\Delta}q_cQ_cP(p){{\tau}}(|y_0|+|y_{\tau}|),
\quad v\in J_p.
\end{equation}
Then observe that
\begin{equation}\nonumber\begin{aligned}
|{\mathcal{E}}(t;p)|
&=|x(t;p)-Y(t;p)w(v(t;p);p)|\\
&=|Y(t;p)||y(v(t;p);p)-w(v(t;p);p)|\\
&=|Y(t;p)||R(v(t;p);p)|,
\end{aligned}
\end{equation}
because of \eqref{e59}. Thus, from  \eqref{e144a} it follows
that for all $t\in[0,1]$ it holds
\begin{equation} \label{e1440}
\begin{aligned}
|{\mathcal{E}}(t;p)|
&\leq {\Delta}^{-1}|Y(t;p)|q_cQ_cP(p){{\tau}}(|y_0|+|y_{\tau}|)\\
&={\Delta}^{-1}\Big(\frac{b(0;p)}{b(t;p)}\Big)^{1/4}e^{-\frac{1}{2}\int_0^ta(s;p)ds}q_cQ_cP(p){\tau}\big(|y_0|+|y_{\tau}|\big)\\
&={\Delta}^{-1}q_cQ_c\sqrt{\|b(\cdot;p)\|}P(p)
 \Big[\Big(\frac{b(0;p)}{b(t;p)}\Big)
 ^{1/4}e^{-\frac{1}{2}\int_0^ta(s;p)ds}|x_0(p)|\\
&\quad +\Big(\frac{b(1;p)}{b(t;p)}\Big)^{1/4}e^{\frac{1}{2}
 \int_t^1a(s;p)ds}|x_1(p)|\Big].
\end{aligned}
\end{equation}
  From \eqref{e147} and \eqref{e1440} for all large $p$ (especially
for all $p$ with $q:=E(p)>1$) it follows that
\begin{align*}
|{\mathcal{E}}(t;p)|
&\leq {\Delta}^{-1}q_cQ_c{\tau}Kq^{-\zeta}
 \frac{K_1^{\frac{3}{4}}q^{\frac{3\mu}{4}}}{\theta^{1/4}}
 \exp\Big(\log(q)\frac{1}{2}\int_0^1\omega(s)ds\Big)
 \Big(K_2q^{\sigma}+K_3q^{\varrho}\Big)\\
&\leq K_4q^{-\zeta+\frac{3\mu}{4}+\Omega}(K_2q^{\sigma}
+K_3q^{\varrho}).
\end{align*}
  Finally, from  \eqref{e146} we obtain
\begin{equation}\label{e149}
|{\mathcal{E}}(t;p)|\leq {\hat{K}}q^{-N},
\end{equation}
for some $\hat{K}>0$, which, obviously, leads to \eqref{e1130}.
Relation \eqref{e1120} follows from \eqref{e149} as exactly
relation \eqref{e71} follows from \eqref{e89}.

(b) Next consider the first order derivative of the error function
${\mathcal{E}}(t;p)$. Due to \eqref{e7201}, given any  $N_1\in(0,L_1)$, we get
 reals $\zeta>0$  and  $\mu, \nu, \sigma, \varrho>0$, close to
 $-{\mathcal{G}}_E(b(t;\cdot))$, $ -{\mathcal{G}}_E(a(t;\cdot))$,
$ -{\mathcal{G}}_E(x_0)$, $-{\mathcal{G}}_E(x_1)$, respectively,
such that
\begin{equation}\label{ea21}
\begin{aligned}
&\min\{ {\mathcal{G}}_E(\Phi_j):  j=1,\dots,5\}\\
&>\zeta>N_1+\frac{3\mu}{4}+\Omega+\max\{\sigma+\frac{\mu}{2},
\sigma+ \nu, \varrho+\frac{\mu}{2},
\varrho+\nu, \mu+\varrho, \mu+\sigma\}.
\end{aligned}
\end{equation}
  From \eqref{e139} and \eqref{e139a} we observe that
\begin{align*}
|\frac{d}{dv}R(v;p)|
&=|\frac{d}{dv}\int_0^{\tau}G(v,s;p)H(s;p)ds|\\
&\leq Q_c{{\tau}}\big(P(p)|R(v;p)|+P(p)q_c(|y_0|+|y_{\tau}|)\big)\\
&\leq q_cQ_c{\tau}P(p)[{\Delta}^{-1}Q_c{\tau}P(p)+1](|y_0|+|y_{\tau}|).
\end{align*}
  From this relation it follows that
\begin{align*}
&|\frac{d}{dt}{\mathcal{E}}(t;p)|\\
&=|\frac{d}{dt}Y(t;p)R(v(t;p);p)+Y(t;p)\frac{d}{dv}R(v(t;p);p)
 \frac{d}{dt}v(t;p)|\\
&\leq|Y(t;p)|\Big\{\Big(\frac{\sqrt{\Phi_1(p)b(t;p)}}{4}
 +\frac{|a(t;p)|}{2}\Big)|R(v(t;p);p)|
 +|\frac{d}{dv}R(v(t;p);p)|\sqrt{b(t;p)}\Big\}\\
&\leq|Y(t;p)|\Big\{\Big(\frac{\sqrt{\Phi_1(p)b(t;p)}}{4}
 +\frac{|a(t;p)|}{2}\Big)\Delta^{-1}q_cQ_cP(p){{\tau}}
 (|y_0|+|y_{\tau}|)\\
&\quad +\sqrt{b(t;p)}q_cQ_c{\tau}P(p)
 [{\Delta}^{-1}Q_c{\tau}P(p)+1](|y_0|+|y_{\tau}|)\Big\}.
\end{align*}
Therefore, for all large $p$ (especially for $p$ with $q:=E(p)>1$)
we obtain
\begin{equation} \label{eae1}
\begin{aligned}
|\frac{d}{dt}{\mathcal{E}}(t;p)|
&\leq q_cQ_c\hat{\tau}P(p)\Big[|x_0(p)|\Big(\frac{b(0;p)}{b(t;p)}
 \Big)^{1/4}e^{-\int_0^ta(s;p)ds}\\
&\quad +|x_1(p)|\Big(\frac{b(1;p)}{b(t;p)}\Big)^{1/4}
 e^{\int_t^1a(s;p)ds}\Big]
\Big\{\Big(\frac{\sqrt{\Phi_1(p)b(t;p)}}{4}+\frac{|a(t;p)|}{2}\Big)
 {\Delta}^{-1}\\
&\quad +\sqrt{b(t;p)}[{\Delta}^{-1}Q_c{\tau}P(p)+1]\Big\}\\
&\leq q^{-\zeta+\Omega+\frac{3\mu}{4}}(M_1q^{\sigma+\frac{\mu}{2}}
 +M_2q^{\sigma+\nu}\\
&\quad +M_3q^{\varrho+\frac{\mu}{2}}+M_4q^{\varrho+\nu}
 +M_5q^{\varrho+\mu}+M_6q^{\varrho+\sigma}),
\end{aligned}\end{equation}
for some positive constants $M_1, M_2, M_3, M_4, M_5, M_6$ not
depending on the parameter $p$.
Taking into account the condition \eqref{ea21} we conclude that
$$
|\frac{d}{dt}{\mathcal{E}}(t;p)|\leq Mq^{-N_1},
$$
for all large $p$.
Now, the rest of the proof follows as previously.
\end{proof}

 From inequalities \eqref{e1440} and \eqref{eae1} we can easily see
that if the function $a(\cdot;p)$ is non-negative uniformly for
all $p$ and $x_1(p)=0$, or $a(\cdot;p)$
is non-positive  for all $p$ and $x_0(p)=0$, then
the conditions of Theorem \ref{th9} can be weakened. Indeed,
we have the following results, whose proofs follow
the same lines as in Theorem \ref{th9}.

\begin{theorem}\label{th10}
Consider the boundary-value problem \eqref{e361}-\eqref{e362},
where we assume that Properties \ref{ec1}, \ref{ec2}, \ref{ec3}
and  the  conditions (i), (ii) of Theorem \ref{th3} hold.
Also, assume that $a(t;p)\geq 0$
[respectively $a(t;p)\leq 0$], for all $t\in[0,1]$ and $p$ large,
as well as
\[
x_0\in{\mathcal{A}_E}\quad\text{and}\quad x_1(p)=0,
\quad\text{for all large}\quad p
\]
[resp.
\[
x_0(p)=0,\quad\text{for all large $p$ and }
 x_1\in {\mathcal{A}_E}].
\]
(a) If the condition
\[
\min\{ {\mathcal{G}}_E(\Phi_j):  j=1,\dots,5\}
+ \frac{1}{4}  {\mathcal{G}}_E(b(t;\cdot))+{\mathcal{G}_E}(x_0)
=:L_0>0
\]
 [resp.
 \[
 \min\{ {\mathcal{G}}_E(\Phi_j):  j=1,\dots,5\}
+\frac{1}{4}  {\mathcal{G}}_E(b(t;\cdot))+{\mathcal{G}_E}(x_1)=:L_0>0]
 \]
 is satisfied, then the existence of a solution  $x$  of the problem
is guaranteed and if
$$
{\mathcal{E}}(t;p)=x(t;p)-{\tilde{x}}(t;p)
$$
is the error function, where ${\tilde{x}}$ is defined by \eqref{appr21},
then  \eqref{e1130} holds.
Also, the growth index  of the error function satisfies
 \eqref{e1120}.

(b) If   the condition
\begin{align*}
&\min\{ {\mathcal{G}}_E(\Phi_j):  j=1,\dots,5\}
+\frac{1}{4}{\mathcal{G}_E}(b(t;\cdot))+{\mathcal{G}_E}(x_0)
+\min\{\frac{1}{2}{\mathcal{G}_E}(b(t;\cdot)),
{\mathcal{G}_E}(a(t;\cdot)) \}\\
&=:L_1>0
\end{align*}
[resp.
\begin{align*}
&\min\{ {\mathcal{G}}_E(\Phi_j):  j=1,\dots,5\}
+\frac{1}{4}{\mathcal{G}_E}(b(t;\cdot))+{\mathcal{G}_E}(x_1)
+\min\{\frac{1}{2}{\mathcal{G}_E}(b(t;\cdot)),
{\mathcal{G}_E}(a(t;\cdot)) \}\\
&=:L_1>0]
\end{align*}
 holds, then  the existence of a solution  $x$  of the problem
is guaranteed and it satisfies
\eqref{e7301} and \eqref{e740}.
\end{theorem}

\section{Applications}

1. Consider the equation
\begin{equation}\label{eq1}
x''+\frac{2}{\sin(1)}\cos(t)\log(p)x'-[1+p^{10}]x+p^{-1}x\sin(x)=0,
\end{equation}
associated with the boundary values
\begin{equation}\label{eq2}
x_0(p)=\frac{1}{5}p, \quad
x_1(p)=\frac{1}{10}(p+\frac{1}{p}).
\end{equation}
Conditions \eqref{e50}, \eqref{e51} and \eqref{e52} are satisfied,
if we set the functions
\[
\Phi_1(p)= \Phi_2(p)= \Phi_3(p)=\Phi_4(p)=k_1p^{-\frac{39}{4}},\quad
\Phi_5(p):=k_2{p^{-10}},
\]
for some $k_1, k_2>0$.
So  case (a) of Theorem \ref{th9} is applicable with $E(p):=p$.
It is not hard to see that an approximate solution of the problem
is the function
\[
{\tilde{x}}(t;p):=
e^{-\frac{\sin(t)}{\sin(1)}}\Big[p\frac{\sinh\Big((1-t)
\sqrt{1+p^{10}}\Big)}{\sinh\Big(\sqrt{1+p^{10}}\Big)}
+e(p+\frac{1}{p})\frac{\sinh\Big(t\sqrt{1+p^{10}}\Big)}{\sinh
\Big(\sqrt{1+p^{10}}\Big)}\Big],
\]
satisfying
$$
{\mathcal{G}_E}\big(x(t;\cdot)-{\tilde{x}}(t;\cdot)\big)
\geq \frac{1}{4}.
$$
The  function for the values of $p=1, 1.5, 2, 2.5$ has a graph
shown  in Figure \ref{fig3}.

\begin{figure}[th]
\begin{center}
 \includegraphics[width=0.4\textwidth]{fig3a} \hskip .2 in
 \includegraphics[width=0.4\textwidth]{fig3b} \vskip .1 in
 \includegraphics[width=0.4\textwidth]{fig3c} \hskip .2 in
 \includegraphics[width=0.4\textwidth]{fig3d} \hskip .1 in
\end{center}
\caption{Approximate solutions of
 \eqref{eq1}-\eqref{eq2}, when $p=1$, 1.5, 2, 2.5, respectively}
 \label{fig3}
\end{figure}

2. Consider the equation
\begin{equation}\label{eqa1}
x''+\frac{2}{\sqrt{p}}x'+[\frac{\pi}{4}+p^{-0.1}]x+\frac{x\sin(x)}{p}=0,
\end{equation}
 associated with boundary values
\begin{equation}\label{eqa2}
x_0(p)=0.2\sqrt{p}, \quad x_1(p)=0.
\end{equation}
We can take $E(p):=p$ and
$$
\Phi_1(p)= \Phi_2(p)= \Phi_3(p)=\Phi_4(p)=\Phi_5(p):=k_1p^{-0.9}.
$$
Then conditions \eqref{e50}, \eqref{e51} and \eqref{e52} are
satisfied and so Theorem \ref{th9} is applicable with
$L_0=\frac{3}{8}$ and $L_1=\frac{23}{40}.$  In this case it is
not hard to see that an approximate solution of the problem is
the function defined on the interval $[0,1]$ by the type
\[
{\tilde{x}}(t;p):=0.1\sqrt{p}(1+\cos(15\sqrt{t}))\exp
\Big(\frac{-t}{\sqrt{p}}\Big)\frac{\sin\Big((1-t)
\sqrt{\frac{\pi}{4}+p^{-0.1}}\Big)}{\sin
\Big(\sqrt{\frac{\pi}{4}+p^{-0.1}}\Big)}.
\]
The  graph of this function for the values of $p=4, 10, 20, 30$
is shown  in Figure \ref{fig4}.

\begin{figure}[th] 
\begin{center}
 \includegraphics[width=0.4\textwidth]{fig4a} \hskip .2 in
 \includegraphics[width=0.4\textwidth]{fig4b} \vskip .1 in
 \includegraphics[width=0.4\textwidth]{fig4c} \hskip .2 in
 \includegraphics[width=0.4\textwidth]{fig4d} \hskip .1 in
\end{center}
\caption{Approximate solutions of
 \eqref{eqa1}-\eqref{eqa2}, when $p=4$, 10, 20, 30 respectively}
 \label{fig4}
\end{figure}

\section{Approximate solutions of the boundary-value problem
\eqref{e361}-\eqref{bvc1}}

In this section we shall discuss the approximate solutions of the
problem \eqref{e361}-\eqref{bvc1}. We shall use the results of
section 3 to obtain  approximate solutions when the parameter $p$
tends to $+\infty$. Again, as in section 8 we define
$\tau:=v(1;p)$, $J_p:=[0,\tau]$ and use the symbols $S_c$ and
$C_c$. \par Our basic hypothesis which will be assumed in all the
sequel without any mention is that Properties \ref{ec1} and
\ref{ec2} will be satisfied for all $t\in [0,1]$.

 Assume that equation \eqref{e361} admits a solution satisfying the conditions
$$
x(0;p)=x_0(p),\quad  x(1;p)=m(p)x(\xi;p),
$$
for a certain point $\xi\in[0,1)$ and a real number $m(p)$.
Then Theorem \ref{th1} implies that a function $x(\cdot;p)$
is a solution of the problem, if and only if $y(\cdot;p)$
is a solution of equation \eqref{e49} and boundary conditions
\begin{equation}\label{ea132}
\begin{gathered}
y(0;p)=x_0(p)=:y_0(p)\\
\begin{aligned}
y(\tau;p)&=y(v(1;p);p)=\frac{x(1;p)}{Y(1;p)}
=m(p)\frac{x(\xi;p)}{Y(1;p)}\\
& =m(p)\frac{Y(\xi;p)}{Y(1;p)}y(v(\xi;p);p)
=:m^*(p)y(v(\xi;p);p).
\end{aligned}
\end{gathered}
\end{equation}

Before we seek for approximate solutions of the problem
\eqref{e361}-\eqref{bvc1} we shall impose conditions for the
existence of solutions. To do that we shall use, again, the
Fixed Point Theorem \ref{thNA}.
To proceed we assume the following condition.

 \begin{condition}\label{cond3} \rm
\begin{itemize}
\item[(i)] There is some $\rho>0$ such that
$$
\frac{S_c(\int_0^{\xi}\sqrt{b(s;p)}ds)}{S_c(\int_0^{1}\sqrt{b(s;p)}ds)}
\geq\rho,
$$ for all $p$ large enough.

\item[(ii)]  It holds  $\lim_{p\to+\infty}m(p)=+\infty$.

\item[(iii)] There is some ${\bar{a}}>0$ such that
 $0\leq a(t;p)\leq 2{\bar{a}},$ for all $t\in[0,1]$ and $p$ large enough.

\item[(iv)] There are $\theta, b_0>0$ such that
 $\theta\leq b(t;p)\leq b_0,$ for all $t\in(0,1)$ and $p$ large enough.
\end{itemize}
\end{condition}

 Before we seek for approximate solutions of the problem
\eqref{e49}-\eqref{ea132}, we shall investigate the existence
of solutions.
Let $w$ solve the equation $w''+cw=0$ and satisfy the conditions
\begin{gather*}
w(0;p)=y_0(p),\\
w(\tau;p)=m^*(p)w(v(\xi;p);p).
\end{gather*}
Solving this problem we obtain
\begin{equation}\label{eq22}
w(v;p)=\frac{S_c(\tau-v)-m^*(p)S_c(v(\xi;p)-v)}
{S_c(\tau)-m^*(p)S_c(v(\xi;p))}y_0(p).
\end{equation}

We shall show that the solution $w$ is bounded. Indeed,
 from \eqref{eq22} we observe that
$$
|w(v;p)|\leq\frac{S_c(\tau)+m^*(p)S_c(\tau)}{m^*(p)S_c(v(\xi;p))
-S_c(\tau)}|y_0(p)|
$$
and by using obvious bounds of all arguments involved we obtain
$$
|w(v;p)|\leq\frac{m(p)\Big(\frac{b(1;p)}{b(\xi;p)}\Big)^{1/4}
e^{\frac{1}{2}\int_0^1a(s;p)ds}+1}{m(p)\Big(\frac{b(1;p)}{b(\xi;p)}
\Big)^{1/4}\frac{S_c(\int_0^{\xi}\sqrt{b(s;p)}ds)}
{S_c(\int_0^1\sqrt{b(s;p)}ds)}-1}|y_0(p)|.
$$
Hence, because of Condition \ref{cond3}, we obtain
 \begin{equation}\label{eq24}
|w(v;p)|\leq\frac{m(p)\sqrt{b_0}e^{\bar{a}}
+(b_0\theta)^{1/4}}{m(p)\sqrt{\theta}\rho-(b_0\theta)^{1/4}}|y_0(p)|
\leq \rho_0|y_0(p)|,
\end{equation}
 for all large $p$, where
$$
\rho_0:=\Big(\frac{\sqrt{b_0}e^{\bar{a}}}{\sqrt{\theta}\rho}+1\Big).
$$

As in previous sections, we set $R:=y-w$. We shall search for constants
$d_1$ and $d_2$ such that the function
$$
R(v;p):=d_1C_c(v)+d_2S_c(v)+\int_0^vS_c(v-s)H(s;p)ds
$$
be a solution of the nonhomogeneous equation
$$
R''+cR=H
$$
satisfying the conditions
\begin{equation}\label{bvc3}
R(0;p)=0\quad\text{and}\quad R(\tau;p)
=y(\tau;p)-w(\tau;p)=m^*(p)R(v(\xi;p);p).
\end{equation}
 Here $H$ is the function defined by
\[
H(t;p):=C(t,y(v;p);p)R(v;p)+C(t,y(v;p);p)w(v;p),
\]
which, due to \eqref{eq24}, satisfies the inequality
\begin{equation}\label{e1410}
|H(v;p)|\leq P(p)|R(v;p)|+P(p)\rho_0|y_0(p)|.
\end{equation}
Then we obtain that $d_1=0$ and
\begin{align*}
d_2&=\frac{1}{S_c(\tau)-m^*(p)S_c(v(\xi;p))}\\
&\quad\times \Big[\int_0^{v(\xi;p)}S_c(v(\xi;p)-s)H(s;p)ds
 -\int_0^{\tau}S_c(\tau-s)H(s;p)ds\Big].
\end{align*}
Therefore, the solution $R(v;p)$ has the form
\begin{align*}
R(v;p)&=\frac{S_c(v)}{S_c(\tau)-m^*(p)S_c(v(\xi;p))}\\
&\quad\times \Big[\int_0^{v(\xi;p)}S_c(v(\xi;p)-s)H(s;p)ds
 -\int_0^{\tau}S_c(\tau-s)H(s;p)ds\Big]\\
&\quad +\int_0^vS_c(v-s)H(s;p)ds;
\end{align*}
namely,
\[
R(v;p)=\int_0^{\tau}G(v,s;p)H(s;p)ds,
\]
where the Green's function $G$ is defined by
\[
G(v,s;p):=\begin{cases}
\frac{S_c(v)\big[S_c(v_{\xi}-s)-S_c(\tau-s)\big]}
{S_c(\tau)-m^*(p)S_c(v(\xi;p))}+S_c(v-s),
& 0\leq s\leq v_{\xi}\leq v\\[4pt]
-\frac{S_c(v)S_c(\tau-s)}{S_c(\tau)-m^*(p)S_c(v(\xi;p))}+S_c(v-s),
& 0\leq v_{\xi}\leq s\leq v\\[4pt]
-\frac{S_c(v)S_c(\tau-s)}{S_c(\tau)-m^*(p)S_c(v(\xi;p))}, &
 0\leq v_{\xi}\leq v\leq s.
\end{cases}
\]

To obtain upper $C^1$ bounds of the kernel $G$ we distinguish
the following cases:

Case  $0\leq s\leq v_{\xi}\leq v$.
 In this case for $p$ large enough it holds
 \begin{align*}
|G(v,s;p)|
&\leq \frac{2(S_c(\tau))^2}{m^*(p)S_c(v(\xi;p))-S_c(\tau)}+S_c(\tau)\\
&\leq \frac{2S_c\big(\int_0^1\sqrt{b(s;p)}ds\big)} {m(p)
\Big(\frac{b(1;p)}{b(\xi;p)}\Big)^{1/4}
e^{\frac{1}{2}\int_0^1a(s)ds}\frac{S_c\big(\int_0^{\xi}
 \sqrt{b(s;p)}ds\big)}{S_c\big(\int_0^1\sqrt{b(s;p)}ds\big)}-1}
+S_c\big(\int_0^1\sqrt{b(s;p)}ds\big).
\end{align*}
Thus, due to Condition \ref{cond3}, there exists some $\hat{p}$
such that for all $p\geq \hat{p}$ it holds
\[
|G(v,s;p)|\leq \big[\frac{2}{m(p)(\frac{\theta}{b_0})^{1/4}\rho-1}
+1\big]k_1\leq 2k_1,
\]
where
\[
k_1:=\begin{cases}
e^{\sqrt{b_0}}, & c=-1\\
1, & c=1.\end{cases}
\]
Also, we can easily see that, for large enough $p$ the first
partial derivative of $G$ (with respect to $v$) satisfies
\begin{align*}
&\Big |\frac{\partial}{\partial v}G(v,s;p)\Big|\\
&\leq \frac{S_c(\tau)C_c(\tau)}{m^*(p)S_c(v(\xi;p))-S_c(\tau)}+C_c(\tau)\\
&\leq \frac{C_c\big(\int_0^1\sqrt{b(s;p)}ds\big)}{m(p)
\Big(\frac{b(1;p)}{b(\xi;p)}\Big)^{1/4}
e^{\frac{1}{2}\int_0^1a(s)ds}
 \frac{S_c\big(\int_0^{\xi}\sqrt{b(s;p)}ds\big)}
 {S_c\big(\int_0^1\sqrt{b(s;p)}ds\big)}-1}
 +C_c\big(\int_0^1\sqrt{b(s;p)}ds\big)\\
&\leq \frac{2k_1}
{m(p)(\frac{\theta}{b_0})^{1/4}\rho-1}+2k_1\\
&=2k_1\big[\frac{1}{m(p)
(\frac{\theta}{b_0})^{1/4}
\rho-1}+1\big]\leq 4k_1.
\end{align*}

Case $0\leq v_{\xi}\leq s\leq v$.
 In this case for $p$ large enough it holds
\begin{align*}
&|G(v,s;p)|\\
&\leq \frac{(S_c(\tau))^2}{m^*(p)S_c(v(\xi;p))-S_c(\tau)}+S_c(\tau)\\
&\leq \frac{S_c\big(\int_0^1\sqrt{b(s;p)}ds\big)}{m(p)
\Big(\frac{b(1;p)}{b(\xi;p)}\Big)^{1/4}
e^{\frac{1}{2}\int_0^1a(s)ds}\frac{S_c\big(\int_0^{\xi}
\sqrt{b(s;p)}ds\big)}{S_c\big(\int_0^1\sqrt{b(s;p)}ds\big)}-1}
+S_c\big(\int_0^1\sqrt{b(s;p)}ds\big)\\
&\leq\dots\leq 2k_1.
\end{align*}

Similarly, we can obtain that for $0\leq v_{\xi}\leq s\leq v$ and
$p$ large enough, it holds
$$
|G(v,s;p)|\leq 2k_1,\quad
\Big |\frac{\partial}{\partial v}G(v,s;p)\Big|\leq 4k_1,
$$
while, for
$0\leq v_{\xi}\leq v\leq s$, it holds
$$
|G(v,s;p)|\leq k_1,\quad
\Big |\frac{\partial}{\partial v}G(v,s;p)\Big|\leq 2k_1.
$$
Therefore, for all $s, v$ we have
\begin{equation}\label{eq25}
\max\{|G(v,s;p)|, \quad  \big |\frac{\partial}{\partial v}G(v,s;p)\big|\}
\leq 4k_1.
\end{equation}
Applying the previous arguments we obtain
\begin{equation}\label{eq34}
|R(v;p)|\leq \frac{4k_1\rho_0b_0^{1/2}}{\Delta_1}P(p)|x_0(p)|.
\end{equation}
Here $\Delta_1$ is defined as
\begin{equation}\label{eq27}
\Delta_1:=1-4k_1P(p)b_0^{1/2}=:\Delta_1(p)>0,
\end{equation}
where  $P(p)$ is defined in \eqref{e53}.

Hence the operator form of the boundary-value problem
\eqref{e49}-\eqref{ea132} is the following:
\begin{equation}\label{eq26}
y(v;p)=w(v;p)+\int_0^{\tau}G(v,s;p)C(\phi(s;p),y(s;p);p)y(s;p)ds,
\quad v\in J_p.
\end{equation}

To show the existence of a solution of \eqref{eq26}, as in Section 8,
we consider the  Banach space
$C(J_p,\mathbb{R})$ of all continuous functions $y:J_p\to\mathbb{R}$
endowed with the sup-norm
$\|\cdot\|$-topology. Fix a $p$ large enough and define the
operator  $A: C(J_p,\mathbb{R}) \to C(J_p,\mathbb{R})$ by
$$
(Az)(v):=w(v;p)+\int_0^{\tau}G(v,s;p)C(\phi(s;p),z(s);p)z(s)ds
$$
which is completely continuous (due to Properties \ref{ec1}
and \ref{ec2}).

To proceed we assume for a moment that  \eqref{eq27} holds for all $p$ large enough.

Take a large enough $p$ and set $\tau=v(1;p)=:v$.
Then we have $v\leq b_0^{1/2}$ and so it holds
$$
1-4k_1P(p){{\tau}}\geq \Delta_1>0.
$$
Consider the open ball $B(0,l_1)$ in the space $C(J,\mathbb{R})$, where
$$
l_1:=\frac{\|w\|}{1-4k_1P(p)\tau}+1.
$$
 As in Section 8, assume that the operator $A$ does not have
any fixed point in $B(0,l_1)$. Thus, due to Theorem \ref{thNA}
and by setting $q=0$, there exists a point $z$ in the boundary
of $B(0,l_1)$ satisfying
$z=\lambda Az$,
for some $\lambda\in(0,1)$. This means that, for each
$v\in J_p,$ it holds
\[
|z(v)|\leq \|w\|+\int_0^{\tau}|G(v,s;p)||C(\phi(s;p),z(s);p)||z(s)|ds.
\]
Then  we have
 $$
|z(v)|\leq \|w\|+4k_1P(p)\int_0^{\tau}|z(s)|ds.
$$
Therefore,
\[
|z(v)|\leq \|w\|+{4k_1P(p)\tau}\|z\|,
\]
which leads to the contradiction
$$
l_1=\|z\|\leq \frac{\|w\|}{1-4k_1P(p){\tau}}=l_1-1.
$$
Taking into account the relation between the solutions of the
original problem and the solution of the problem
\eqref{e361}-\eqref{bvc1}, as well the previous arguments,
we conclude the following result:

\begin{theorem}\label{th11}
If Properties \ref{ec1}, \ref{ec2} and \eqref{eq27} are satisfied,
then the boundary-value problem \eqref{e361}-\eqref{bvc1}
 admits at least one solution.
\end{theorem}

Now, we  give the main results of this section.
If $w$ is the function defined in \eqref{eq22}
we define the function
\begin{equation}\begin{aligned}\label{appr20}
{\tilde{x}}(t;p)&:=Y(t;p)w(v(t;p);p)\\
&=Y(t;p)\frac{S_c(\tau-v)-m^*S_c(v(\xi;p)-v)}
 {S_c(\tau)-m^*S_c(v(\xi;p))}y_0(p)\\
&=\Big(\frac{b(0;p)}{b(t;p)}\Big)^{1/4}\exp
\big(-\frac{1}{2}\int_0^ta(s;p)ds\big)\frac{X(t;p)}{X(0;p)}x_0(p),
\end{aligned}\end{equation}
where
\[
X(t;p):=S_c(\int_t^{1}\sqrt{b(s;p)}ds)
-m(p)\Big(\frac{b(1;p)}{b(\xi;p)}\Big)^{1/4}e^{\frac{1}{2}
\int_{\xi}^1a(s;p)ds}S_c(\int_t^{\xi}\sqrt{b(s;p)}ds),
\]
which, as we shall show, it is an approximate solution of the
 problem under discussion.

\begin{theorem} \label{th12}
Consider the boundary-value problem \eqref{e361}-\eqref{bvc1},
where we assume that Properties \ref{ec1}, \ref{ec2}, \ref{ec3},
the  conditions \eqref{eq27} and (i), (ii) of Theorem \ref{th3}
are satisfied. Also, assume that
$x_0\in{\mathcal{A}_E}$.

(a) If the condition
 \begin{equation}\label{eq44}
 \min\{ {\mathcal{G}}_E(\Phi_j):  j=1,\dots,5\}
+ {  {\mathcal{G}}_E(x_0)}=:L>0
 \end{equation}
 is satisfied, then the existence of a solution  $x$
of the problem is guaranteed and if
\[
{\mathcal{E}}(t;p):=x(t;p)-{\tilde{x}}(t;p)
\]
 is the error function, where ${\tilde{x}}$ is defined by \eqref{appr20},
 then  we have
\begin{equation}\label{eq36}
{\mathcal{E}}(t;p)\simeq 0,\quad p\to +\infty,\quad t\in Co([0,1]).
\end{equation}
Also, the growth index  of the error function satisfies
 \begin{equation}\label{eq37}
   {\mathcal{G}}_E({\mathcal{E}}(t;\cdot))\geq L,\quad t\in Co([0,1]).
 \end{equation}   \par

(b) Moreover we have
\begin{gather}\label{eq38}
\frac{d}{dt}{\mathcal{E}}(t;p)\simeq 0,\quad p\to
+\infty,\quad t\in Co([0,1]), \\
\label{eq39}
   {\mathcal{G}}_E\big(\frac{d}{dt}{\mathcal{E}}(t;\cdot)\big)\geq L,\quad t\in Co([0,1]).
\end{gather}
  \end{theorem}

\begin{proof}
(a) Take an $N\in(0, L)$ and choose $\zeta>0$ as well as
$-\sigma< {\mathcal{G}}_E(x_0)$ (thus we have
$$
|x_0(p)|\leq K_3(E(p))^{\sigma},
$$
for some $K_3>0$)  such that
\begin{equation}\label{eq33}
 \min\{ {\mathcal{G}}_E(\Phi_j):  j=1,\dots,5\}>\zeta
\geq  N+{\sigma}.\end{equation}
Therefore,
 \begin{gather}\label{eq30}
 \sigma-\zeta\leq -N, \\
\label{eq31}
 P(p)\leq K(E(p))^{-\zeta},
 \end{gather}
for some $K>0$. Thus \eqref{eq27} is satisfied for $p$ large enough.
This makes Theorem \ref{th11} applicable and the existence of
a solution is guaranteed.
 Let ${\mathcal{E}}(t;p)$ be the error function defined
in \eqref{ea212}.  From \eqref{eq34} it is easy to obtain
$$
|{\mathcal{E}}(t;p)|\leq \Lambda_1(E(p))^{\sigma-\zeta},
$$
for all large $p$, for some $\Lambda_1>0$. Obviously, this
relation implies \eqref{eq36} as well as \eqref{eq37}.

(b) Next consider the first order derivative of the error function
${\mathcal{E}}(t;p)$. Again, as above, we  obtain
\begin{align*}
|\frac{d}{dt}R(v(t;p);p)|
&=|\frac{d}{dv}\int_0^{\tau}G(v,s;p)H(s;p)ds\frac{d}{dt}v(t;p)|\\
&\leq Y(t;p)\Big[\frac{1}{4}\sqrt{\Phi_1(p)b(0;p)}+\frac{a(t;p)}{2}\\
&\quad +\int_0^{\tau}\Big(|G(v,s;p)|+|\frac{d}{dt}v(t;p)|
|\frac{\partial}{\partial v}G(v,s;p)|\Big)|H(s;p)|ds\Big].
\end{align*}
Now, we use \eqref{eq33}, \eqref{eq31}, \eqref{eq30}, \eqref{eq25},
\eqref{e1410} and \eqref{eq34} to conclude that for some
positive constants $k_3, k_4$ it holds
\[
|\frac{d}{dt}{\mathcal{E}}(t;p)|\leq k_3P(p)|x_0(p)|
\leq k_4(E(p))^{\sigma-\zeta}<k_4(E(p))^{-N},
\]
from which the result follows.
\end{proof}

\section{An application}

Consider the equation
\begin{equation}\label{equ1}
x''+x'+x+\frac{x\sin(x)}{p}=0, \quad t\in[0,1]
\end{equation}
associated with the following boundary value conditions:
\begin{equation}\label{equ2}
x(0;p)=p^{-1}, \quad x(1;p)=e^px(\frac{1}{2};p).
\end{equation}
We can easily see that with respect to the unbounded function
$E(p):=p$ we have
${\mathcal{G}}_E(\Phi_j)=1$, $j=1, 2, 3, 4, 5$ and
${\mathcal{G}}_E(x_0)=2$. Therefore
$L=2$ and, so, Theorem \ref{th12} applies.
This means that there is a solution of  \eqref{equ1}-\eqref{equ2}
and an approximate solution of it is the following
(according to \eqref{appr20}):
$$
{\tilde{x}}(t;p):=\frac{\sin(1-t)-e^pe^{1/4}
\sin(\frac{1}{2}-t)}{\sin(1)-e^pe^{1/4}\sin(\frac{1}{2})}
e^{-\frac{t}{2}}p^{-2}, \quad t\in[0,1].
$$
The  graph of this function for the values of
$p= 3.83, 6.33, 8.83, 15.50$ is shown  in Figure \ref{fig5}.

\begin{figure}[th] 
\begin{center}
 \includegraphics[width=0.45\textwidth]{fig5a} \hskip .2 in
 \includegraphics[width=0.45\textwidth]{fig5b} \vskip .1 in
 \includegraphics[width=0.45\textwidth]{fig5c} \hskip .2 in
 \includegraphics[width=0.45\textwidth]{fig5d}
\end{center}
\caption{Approximate solutions of
 \eqref{equ1}-\eqref{equ2}, when $p=3.83$, 6.33, 8.83, 15.50,
respectively}
\label{fig5}
\end{figure}

\section{Discussion}

We have presented a method of computing the approximate  solutions
of two initial-value problems and two boundary-value problems
concerning the second order ordinary differential equation
\eqref{e36}.  First of all, in section 2 we have given the meaning
of measuring the approximation, by introducing the growth index of
a function. It is proved that this meaning helps a lot to get
information on how close to the actual solution is the approximate
solution, as the parameter $p$ tends to $+\infty$. Section 3 of the
work provided the first step of the method and we have
shown the way of using \eqref{e55} to transform the original equation
to an auxiliary differential equation
\eqref{e49}. \par The sign of the response coefficient $b(t;p)$
plays an essential role. If this coefficient is positive, we have a wave
featured solution, while, in case it is negative, we have
exponential behavior. This is the reason for discussing the two
cases separately, especially in the initial-value problems. The
first case is exhibited in Section 4, where in Theorem \ref{th2}
we show first the existence of a solution of the initial-value
problem and prepare the ground for the proof of  existence of
$C^1$-approximate solutions presented in Theorems \ref{th3} and
Theorem \ref{th4}. The two theorems give, mainly, analogous results,
where in the first theorem we assumed that the coefficient $a(t;p)$
is positive and in the second one it is assumed that it may take
negative values as well.

 Applications of the results to 
examples  where the two coefficients $a(t;p)$ and $b(t;p)$ are
positive, are given in Section 5, where the $C^1$-approximate
solution is computed. The case of negative $b(t;p)$ is discussed
in section 6 and the results are applied to an
initial-value problem given in Section 7.

 The boundary-value problem
\eqref{e361}-\eqref{e362} is discussed in Section 8. First, by the
help of the (Fixed Point Theorem of) Nonlinear Alternative  in Theorem \ref{th8} we have
guaranteed the existence of solutions of the
problem. Then,  in Theorem \ref{th9} we have given estimates of the
error function $\mathcal{E}(t;p):=x(t;p)-{\tilde{x}}(t;p)$, where
${\tilde{x}}(t;p)$ is the $C^1$-approximate solution. We are
able to give simultaneously our results in the cases of positive
and negative functions $b(t;p)$. A specific case, when the function $a(t;p)$ is
nonnegative and the solution vanishes in an edge of the existence
interval, is discussed separately in Theorem \ref{th10}, while two
applications of the results were given in Section 9.

 In Section 10 we investigated the boundary-value problem
\eqref{e361}-\eqref{bvc1}. Again, first  in Theorem \ref{th11} we
solved the existence problem by using the Nonlinear Alternative Method
and then we proceeded to the proof of the existence of
$C^1$-approximate solutions in Theorem \ref{th12}. An application
to a specific equation was given in the last section 11.

All examples which we have presented are associated with the graphs \footnote{made with the help of  Graphing Calculator 3.5
of Pacific Tech} of the functions for various values of the parameter, which show the change of the variation of the
solutions, as the parameter $p$ takes large values and tends to
$+\infty$.

As we have seen, in order  to apply the method to a problem
we have to do two things: first, to transform the  original
equation to a new one, and then to transform the initial
values, or the boundary values, to the new ones. Both of them
are significant in the process of the method.

As the transformation of the original equation was
already given in \eqref{e49}, what one has to do is to
proceed to the transformation of the boundary values.
For instance, in case the boundary values of the original
problem are of the form
\[
x(0;p)=x'(0;p), \quad x(1;p)=x'(1;p),
\]
then, it is not hard to show that, under the transformation
$S_p$ the new function $y(\cdot;p)$ is required to satisfy
the boundary values
\[
y'(0;p)=\frac{1}{\sqrt{b(0;p)}}\Big[1+\frac{1}{4}
\frac{b'(0;p)}{b(0;p)}+\frac{1}{2}a(0;p)\Big]y(0;p)
\]
 and
 \[
y'(\tau;p)=\frac{1}{\sqrt{b(1;p)}}\Big[1+\frac{1}{4}
\frac{b'(1;p)}{b(1;p)}+\frac{1}{2}a(1;p)\Big]y(1;p).
\]
Now one can proceed to the investigation of the existence of
approximate solutions as well as to their computation.

\begin{thebibliography}{00}

\bibitem{agar1} R. P. Agarwal, D. O' Regan and S. St\v{a}nek;
Positive solutions of non-positone
Dirichlet boundary value problems with singularities in the phase variables,
\emph{Mathematische Nachrichten} \textbf{281} (2008), 612--625.

\bibitem{agar2} R. P. Agarwal and D. O'Regan;
\emph{Singular Differential and Integral Equations with
Applications}, Kluwer Academic Publishers, Dordrecht, 2002.

\bibitem{cohen} Donald S. Cohen;
 Singular Perturbation of Nonlinear Two-Point Boundary-Value Problems,
\emph{J. Math. Anal.} \textbf{43} (1973), pp. 151-160.

\bibitem{cui} M. Cui, F. Geng;
 A computational method for solving third-order singularly perturbed
 boundary-value problems, \emph{Appl. Math. Comput.} \textbf{198} (2008), pp. 896-903.

\bibitem{db} Zengji Du and Zhanbing Bai;
Asymptotic solutions for a second - order differential
equation with three-point boundary conditions, \emph{Appl. Math. Comput.}
\textbf{186} (2007), pp. 469-473.

\bibitem{dgz} Zengji Du, Weigao Ge, Mingru Zhou;
Singular perturbations for third-order nonlinear
multi-point boundary-value problem, \emph{J. Differ. Equations}
\textbf{218} (2005), pp. 69-90.

\bibitem{herc} Dragoslav Herceg, Djordje Herceg;
 On a fourth-order finite-difference method for singularly
perturbed boundary value problems, \emph{Appl. Math. Comput.}
\textbf{203} (2008), pp.  828-837.

\bibitem{DG}  J. Dugundji and A. Granas;
 \emph{Fixed Point Theory,} Vol. I, Monographie Matematyczne,
Warsaw, 1982.

\bibitem{eg} U. Elias and H. Gingold;
 Effects of varying nonlinearity and their singular perturbation flavour,
\emph{J. Math. Anal. Appl.}, \textbf{ 248}(2000),  pp. 309-326.

\bibitem{Ho} F. Hoppensteadt;
 Properties of solutions of ordinary differential equations with
small parameters, \emph{Comm. Pure Appl. Math.} 24(1971), pp. 807-840.

\bibitem{Ivan} Anatoli Ivanov, Eduardo Liz;
Periodic solutions of a singular differential delay equation
with the Farey-type nonlinearity, \emph{J. Comput. Appl. Math.}
\textbf{180} (2005), pp. 137-145.

\bibitem{kada1} Mohan K. Kadalbajoo, Kailash C. Patidar;
 A survey of numerical techniques for
solving singularly perturbed ordinary differential equations,
\emph{Appl. Math. Comput.} \textbf{130} (2002), pp. 457-510.

\bibitem{kada} Mohan K. Kadalbajoo, Devendra Kumar;
 A non-linear single step explicit scheme for non-linear two-point
singularly perturbed boundary value problems via initial value
technique, \emph{Appl. Math. Comput.} \textbf{202} (2008), pp. 738-746.

\bibitem{OM1} R. E. O'Malley;
 Boundary layer methods for nonlinear initial value problems,
\emph{SIAM Review}, 13(1971), pp. 425-434.

\bibitem{OM2} R. E. O'Malley;
 On initial value problems for nonlinear systems of differential
 equations with two small parameters, \emph{Arch. Rational Mech. Anal.}
 \textbf{40} (1972), pp. 209-222.

\bibitem{om} R. E. O'Malley;
 \emph{Singular Perturbation Methods for Ordinary Differential Equations},
 Springer, New York, 1991.

\bibitem{O'r} Donal O' Regan;
 \emph{Theory of Singular Boundary Value Problems},
World Scientific Publishing Co., Singapore, 1994.

\bibitem{lp} Lawrence Perko;
 \emph{Differential Equations and Dynamical Systems}, Springer-Verlag
New York, 1991.

\bibitem{ramos} J. I. Ramos;
 A smooth locally-analytical technique for singularly perturbed
two-point boundary-value problems, \emph{Appl. Math. Comput.}
\textbf{ 163} (2005), pp.  1123-1142.

\bibitem{ru} Walter Rudin;
 \emph{Real and Complex Analysis}, TATA McGraw-Hill Publ. Co.,
 8th Edition, New York, 1983.

\bibitem{sm} D. R. Smith;
 \emph{Singular perturbation theory}, Cambridge University Press,
New York, 1985.

\bibitem{stoja1} Mirjana Stojanovi\'c;
 A difference method for non-self-adjoint singular
perturbation problem of second order,
\emph{Appl. Math. Modelling} \textbf{15} (1991), pp. 381-385.

\bibitem{stoja} Mirjana Stojanovi\'c;
 Global convergence method for singularly perturbed
boundary value problems, \emph{J. Comput. Appl. Math.} \textbf{181} (2005),
pp. 326-335.

 \bibitem{swk} N. H. Sweilam and M. M. Khader; Approximate solutions to the nonlinear vibrations of multiwalled carbon
 nanotubes using Adomian decomposition method, \emph{Appl. Math. Comput.} \textbf{217} (2) (2010), pp. 495-505.

\bibitem{ver} Ferdinand Verhulst;
 \emph{Nonlinear Differential equations and Dynamical Systems},
Springer-Verlag, Berlin, 1990.

\bibitem{wang} Xiao-Yun Wang, Yao-Lin Jiang;
 A general method for solving singular  perturbed impulsive
differential equations  with two-point boundary conditions,
\emph{Appl. Math. Comput.} \textbf{171} (2005), pp. 775-806.

\end{thebibliography}

\end{document}
