
\documentclass[reqno]{amsart}

\AtBeginDocument{{\noindent\small
{\em Electronic Journal of Differential Equations},
Vol. 2005(2005), No. 19, pp. 1--21.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu  (login: ftp)}
\thanks{\copyright 2005 Texas State University - San Marcos.}
\vspace{9mm}}

\begin{document}
\title[\hfilneg EJDE-2005/19\hfil Existence and uniqueness of solutions]
{Existence and uniqueness of solutions to a super-linear three-point
boundary-value problem}
\author[B. Calvert, C. P. Gupta\hfil EJDE-2005/19\hfilneg]
{Bruce Calvert, Chaitan P. Gupta}  % in alphabetical order

\address{Bruce Calvert \hfill\break
Department of Mathematics\\
University of Auckland\\
Auckland, New Zealand}
\email{calvert@math.auckland.ac.nz}

\address{Chaitan P. Gupta \hfill\break
Department of Mathematics, 084\\
University of Nevada, Reno, NV 89557, USA}
\email{gupta@unr.edu}

\date{}
\thanks{Submitted November 16, 2004. Published February 7, 2005.}
\subjclass[2000]{34B10, 34B15}
\keywords{Super-linear; three-point boundary-value problem}

\begin{abstract}
 In previous papers, degree theory for nonlinear operators has 
 been used to study a class of three-point boundary-value problems
 for second order ordinary differential equations having a 
 super-linear term, and existence of a sequence of solutions has 
 been shown. In this paper, we forgo the previous approach in favor of
 the shooting method, which gives a drastically simpler existence
 theory, with fewer assumptions and easy calculation of solutions.
 We even obtain uniqueness in the simplest case.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{remark}[theorem]{Remark}
\newtheorem{definition}[theorem]{Definition}

\section{Introduction}

In the papers \cite{cap,cap2,hen1,Hthesis,hen2} the authors use
degree theory to give existence of a sequence of solutions to a super-linear
boundary value problem. More specifically, in \cite{Hthesis,hen2} they
give existence of solutions to
\begin{gather}
x''+g(x) =p(t,x,x')  \label{1} \\
x(0) =0,\quad  x(\eta )=\beta x(1).  \label{2}
\end{gather}
Here $\eta \in (0,1)$, making this a three-point boundary value problem. The
function $g$ is assumed to be super-linear, that is, it satisfies
$g(x)/x\to \infty $ as $|x|\to \infty $; in those papers $\beta =1$.
In \cite{cal} the case $\beta \neq 1$ is argued along similar lines.
In this paper, we obtain existence of solutions to (\ref{1}), (\ref{2})
for $\beta \neq 1$ via the intermediate value theorem, i.e. the
shooting method, giving a drastically simpler existence theory with fewer
assumptions.  Calculation of solutions numerically may be carried out by
the shooting method.  The shooting method is used theoretically in
\cite{din,kol,kwo}, and elsewhere.
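To illustrate the last remark, here is a minimal numerical sketch of the
shooting method for (\ref{1}), (\ref{2}), in Python with scipy; the
concrete choices $g(x)=x^{3}$, $p\equiv 0$, $\eta =1/2$, $\beta =2$, the
scanning range and the tolerances are ours, purely for illustration. The
sketch integrates the initial value problem $x(0)=0$, $x'(0)=\alpha $ and
locates sign changes of $\alpha \mapsto \beta x(1)-x(\eta )$, the function
called $H$ in Definition \ref{def1} below.
\begin{verbatim}
# Shooting sketch for x'' + g(x) = 0, x(0) = 0, x(eta) = beta*x(1).
# Illustrative choices (not from the paper): g(x) = x^3, p = 0.
import numpy as np
from scipy.integrate import solve_ivp
from scipy.optimize import brentq

eta, beta = 0.5, 2.0
g = lambda x: x**3

def H(alpha):
    # H(alpha) = beta*x(1) - x(eta) for the solution with x'(0) = alpha.
    sol = solve_ivp(lambda t, u: [u[1], -g(u[0])], [0.0, 1.0],
                    [0.0, alpha], dense_output=True,
                    rtol=1e-10, atol=1e-12)
    return beta * sol.y[0, -1] - sol.sol(eta)[0]

# Scan initial slopes for sign changes of H, then refine by bisection;
# each root is an initial slope x'(0) of a solution of the BVP.
alphas = np.linspace(1.0, 200.0, 2000)
vals = [H(a) for a in alphas]
roots = [brentq(H, lo, hi)
         for lo, hi, vlo, vhi in zip(alphas, alphas[1:], vals, vals[1:])
         if vlo * vhi < 0]
print(roots)
\end{verbatim}
Consistently with Theorem \ref{Theorem1} below, more and more roots appear
as the scanning range for $\alpha =x'(0)$ is enlarged, at least one for
each admissible winding angle $\varphi _{1}$.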

Uniqueness is studied by Kwong in \cite{kwo}, which recovers results such
as Moroney's theorem, giving uniqueness of a positive solution of a
boundary-value problem involving a superlinear function.
This builds on Kolodner's paper \cite{kol}, which gave the exact number of
solutions of a rotating string problem, given the angular velocity.
Similarly, in \cite{cos}, the boundary value problem
\begin{gather*}
 x''+\lambda x^+-\alpha x^- = \sin(t) \\
x(0)=x(\pi )=0
\end{gather*}
was shown to have exactly $2k$ solutions if $0<\alpha < 1$ and
$k^2 < \lambda < (k+1)^2$,
and Dinca and Sanchez \cite{din} pose the question of whether this uniqueness
result can be obtained by elementary methods.  Our uniqueness result, giving
uniqueness of solutions to (\ref{1}) and (\ref{2}) in case $p=0$, is elementary
and presumably new.  Our approach does not readily lend itself to the case of
nonzero $p$, and this gives an open question.

There has been much recent work on 3-point boundary value problems, and much of
it has concentrated on positive solutions, as in \cite{he,inf,kos,ma}.
He and Ge \cite{he} give the existence of three positive solutions to the
 B.V.P. (\ref{1}), (\ref{2}), but the condition (\ref{3.02}) of our uniqueness
theorem and their conditions (D2), (D3) cannot hold at the same time.
Thus their work cannot be used to show that Theorem 
\ref{Theorem2} may not hold for all $k$.  

Similarly, Infante and Webb \cite[Theorem 4.2]{inf} cannot
be used because their conditions ($S_1$) and ($S_2$) are incompatible
with (\ref{3.02}).

Ma \cite{ma} shows that one can get existence of positive solutions to
the B.V.P. (\ref{1}), (\ref{2}), assuming $g(x)/x \to 0$ as
$x \to 0$, and $p=0$, which does show that one can obtain existence theorems
like our Theorem \ref{Theorem1} for small $k$.  Infante and Webb \cite{inf} show that one
need not have positive coefficients in an $m$-point boundary value problem,
and in this work we can indeed take $\beta$ to be negative.

Capietto and Dambrosio \cite{cap} consider the case of asymmetric $g(x)$,
superlinear for positive $x$, and give an extensive review of superlinear
boundary value problems.

\section{Assumptions and Preliminaries}

A background on o.d.e.s involving functions satisfying Caratheodory's conditions
is given in Chapter 18 of \cite{kur}.

\noindent{\bf Assumption A}: Assume that
$g:{\mathbb{R}} \to {\mathbb{R}}$ is
a continuous super-linear function, that is, it satisfies
$\frac{g(x)}{x}\to \infty $ as $|x|\to \infty $.  Let
$p:[0,1]\times \mathbb{R}^{2}\to \mathbb{R}$ be a function satisfying
Caratheodory's
conditions, i.e. for every $(x,y)\in \mathbb{R}^{2}$, $p(t,x,y)$ is Lebesgue
measurable in $t$, and for a.e. $t\in [ 0,1]$, $p(t,x,y)$ is
continuous in $(x,y)$.  Suppose there exists an $M_{1}:[0,1]\times [
0,\infty )\to [ 0,\infty )$ such that (a) for each $s\in
[ 0,\infty )$, $M_{1}(\cdot ,s)$ is integrable on $[0,1]$, (b) for
each $t\in [ 0,1]$, $M_{1}(t,\cdot )$ is increasing on $[0,\infty )$
with $s^{-1}\int_{0}^{1}M_{1}(t,s)dt\to 0$ as $s\to \infty$, and (c)
 for all $t\in [ 0,1]$, and $(x,y)\in \mathbb{R}^{2}$,
\[
|p(t,x,y)|\leq M_{1}(t,\max (|x|,|y|)).
\]
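For example, $M_{1}(t,s)=\gamma (t)(1+\sqrt{s})$, with $0\leq \gamma \in
L^{1}[0,1]$, satisfies (a) and (b), since
$s^{-1}\int_{0}^{1}M_{1}(t,s)dt=s^{-1}(1+\sqrt{s})\int_{0}^{1}\gamma (t)dt
\to 0$ as $s\to \infty $; any $p$ with $|p(t,x,y)|\leq \gamma
(t)(1+\sqrt{\max (|x|,|y|)})$ then satisfies (c).
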
We need the next result, proved in \cite{cal} as Lemma 2.

\begin{lemma}
\label{Lemma1} Let $g$, $p$, and $M_{1}$ satisfy Assumption A. Suppose that
$\frac{g(x)}{x}\geq 1$ for $x\neq 0$. Suppose that $(x(t),y(t))$ is an
absolutely continuous solution for the initial value problem
\begin{gather}
x'(t) =y(t),  \label{eq1-1} \\
y'(t)+g(x(t)) = p(t,x(t),y(t)),\quad \text{for a.e. }t\in [ 0,1]
,  \label{eq1-2} \\
x(0) =0\,,  \label{eq1-3} \\
y(0) =\alpha \,.  \label{eq1-4}
\end{gather}
For $x\in \mathbb{R}$, let $G(x)=\int_{0}^{x}g(s)ds$. Let $\varepsilon >0$ be
given.  Then for $\alpha >0$ large enough, we have
\begin{gather}
|y(t)| \leq \alpha (1+\varepsilon ),  \label{eq2-1} \\
2G(x(t)) \leq \alpha ^{2}(1+\varepsilon ),  \label{eq2-2}
\end{gather}
for every $t\in [ 0,1]$.  Moreover,
\begin{equation}
|\frac{d}{dt}(y^{2}(t)+2G(x(t)))|\leq 2|y(t)|M_{1}(t,\max (|x(t)|,|y(t)|)),
\label{eq3}
\end{equation}
for a.e. $t\in [ 0,1]$.
\end{lemma}

We note that if we assume $g$ continuous and $g(x)/x \geq 1$ then the
function $G(x)=\int_{0}^{x}g(s)ds$ is defined for $x\in \mathbb{R}$
and is such that $G$ is strictly increasing on $[0,\infty )$ and is strictly
decreasing on $(-\infty ,0]$. Also, $G(x)>0$ for $x\in \mathbb{R}$, $x\neq 0$, and
$G(0)=0$. We denote the inverse of the function $G$ restricted to
$[0,\infty )$, $G\big|_{[0,\infty )}$, by $G_{+}^{-1}$ and the
inverse of the function $G\big|_{(-\infty ,0]}$ by $G_{-}^{-1}$.
We now need a new version of \cite[Lemma 3]{cal}, in which
(\ref{eq7}) and (\ref{eq8}) replace (13) and (14) of \cite{cal}.
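As a concrete illustration, for $g(x)=x^{3}$ we have $G(x)=x^{4}/4$,
$G_{+}^{-1}(v)=(4v)^{1/4}$ and $G_{-}^{-1}(v)=-(4v)^{1/4}$ for $v\geq 0$;
note that $G_{+}^{-1}$ is increasing while $G_{-}^{-1}$ is decreasing,
which is why the bounds in \eqref{eq7} and \eqref{eq8} below appear in
opposite orders.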

\begin{lemma} \label{Lemma2}
Let $\varepsilon >0$ be given and $g$, $p$, $M_{1}$ be as in
Lemma \ref{Lemma1}. Then there exists an $A>0$ such that if $(x(t),y(t))$ is
a solution for the initial value problem \eqref{eq1-1}, \eqref{eq1-2},
\eqref{eq1-3}, \eqref{eq1-4} and $t_{0}\in (0,1]$ is such that $x(t_{0})>0$,
$y(t_{0})=0$, then
\begin{equation}
G_{+}^{-1}(\frac{\alpha ^{2}}{2(1+\varepsilon )})\leq x(t_{0})\leq
G_{+}^{-1}(\frac{\alpha ^{2}}{2}(1+\varepsilon ))  \label{eq7}
\end{equation}
if $|\alpha |>A$. Similarly, if $x(t_{0})<0$, $y(t_{0})=0$, then
\begin{equation}
G_{-}^{-1}(\frac{\alpha ^{2}}{2}(1+\varepsilon ))\leq x(t_{0})\leq
G_{-}^{-1}(\frac{\alpha ^{2}}{2(1+\varepsilon )})  \label{eq8}
\end{equation}
if $|\alpha |>A$. Also,
\[
\min_{t\in [ 0,1]}\sqrt{x^{2}(t)+y^{2}(t)}\geq \frac{1}{2}\min
\{G_{+}^{-1}(\frac{\alpha ^{2}}{8}),\frac{\alpha }{2}\}.
\]
\end{lemma}

\begin{proof} We observe that the right inequality in (\ref{eq7}) follows
immediately from (\ref{eq2-2}). Accordingly, it suffices to show that
\begin{equation}
\frac{\alpha ^{2}}{2}\leq (1+\varepsilon )G(x(t_{0})),  \label{eq9}
\end{equation}
to prove that (\ref{eq7}) holds. Let us choose $A>0$ such that for
$|\alpha |>A$, both (\ref{eq2-1}) and (\ref{eq2-2}) hold with $\alpha $
replaced by $|\alpha |$. With $h(t):=\sqrt{y^2(t)+2G(x(t)) }$, we get,
by integrating (\ref{eq3}) from $0$ to $t$ and using
(\ref{eq2-1}),
\begin{equation}
h^{2}(t)-\alpha ^{2}+2|\alpha |(1+\varepsilon )\int_{0}^{t}M_{1}(s,\max
(|x(s)|,|y(s)|))ds\geq 0. \label{eq10}
\end{equation}
We now take an $\varepsilon _{1}>0$ such that $2\varepsilon
_{1}(1+\varepsilon )^{2}\leq \min \{\frac{\varepsilon }{1+\varepsilon },
\frac{1}{2}\}$. Next, we use the assumption $s^{-1}\int_{0}^{1}M_{1}(t,s)dt
\to 0$ as $s\to \infty $, from Assumption A, to enlarge $A>0$, if
necessary, so that for $\alpha >A$ and any $s\in [ 0,1]$ with
\begin{equation}
\max \{|x(s)|,|y(s)|\}\geq \frac{1}{2}\min \{G_{+}^{-1}(\frac{\alpha
^{2}}{8}),\frac{\alpha }{2}\}  \label{eq11}
\end{equation}
we have, with $M(x):=\int_0^1M_1 (t,x)dt$,
\begin{equation}
M(\max \{|x(s)|,|y(s)|\})<\varepsilon _{1}\max \{|x(s)|,|y(s)|\}.
\label{eq12}
\end{equation}
For $\alpha >A$, we get on using the inequalities (\ref{eq2-1}),
(\ref{eq2-2}), (\ref{eq12}) and the assumption $\frac{g(x)}{x}\geq 1$
for $x\neq 0$, that
\begin{equation}
M(\max \{|x(s)|,|y(s)|\})<\varepsilon _{1}\alpha (1+\varepsilon ),
\label{eq13}
\end{equation}
and hence, using (\ref{eq10}), we get
\begin{equation}
h^{2}(t)\geq \alpha ^{2}(1-2\varepsilon _{1}(1+\varepsilon )^{2}),
\label{eq14}
\end{equation}
provided (\ref{eq11}) holds for all $s\in [ 0,t]$. Since we chose
$\varepsilon _{1}>0$ such that $2\varepsilon _{1}(1+\varepsilon )^{2}\leq
\min \{\frac{\varepsilon }{1+\varepsilon },\frac{1}{2}\}$, we see that
\[
y^{2}(t)+2G(x(t))\geq \frac{\alpha ^{2}}{2},
\]
provided (\ref{eq11}) holds for all $s\in [ 0,t]$. Accordingly, either
$y^{2}(t)\geq \frac{\alpha ^{2}}{4}$ or $|x(t)|\geq G_{+}^{-1}(\frac{\alpha
^{2}}{8})$ and hence
\begin{equation}
\max \{|x(t)|,|y(t)|\}\geq \min \{G_{+}^{-1}(\frac{\alpha ^{2}}{8}),\frac{
\alpha }{2}\},  \label{eq15}
\end{equation}
provided (\ref{eq11}) holds for all $s\in [ 0,t]$. We observe that
(\ref{eq11}) holds near $s=0$ since $y(0)=\alpha $. Let us next assume that
(\ref{eq11}) holds for all $s\in [ 0,t]$, for some $t\in (0,1]$. If $0<t<1$, it follows from (\ref{eq15}) that there exists a $t_{1}>t$
such that (\ref{eq11}) holds for all $s\in [ 0,t_{1}]$. Accordingly,
it follows that (\ref{eq11}) holds for all $s\in [ 0,1]$. Finally, if
$y(t_{0})=0$, we see from (\ref{eq14}) and the assumption that $2\varepsilon
_{1}(1+\varepsilon )^{2}\leq \min \{\frac{\varepsilon }{1+\varepsilon },
\frac{1}{2}\}$, that
\[
\frac{\alpha ^{2}}{2}\leq (1+\varepsilon )G(x(t_{0})),
\]
and (\ref{eq9}) holds. This completes the proof that (\ref{eq7}) holds. A similar
proof works to prove that (\ref{eq8}) holds.
\end{proof}

\begin{definition} \label{def}\rm
For ${\bf u}(t)=(x(t),y(t))\in C^{1}([0,1],\mathbb{R}^{2}\backslash
\{(0,0)\})$ we define
\[
\varphi _{1}({\bf u})=\varphi _{1}(x,y)=-\int_{0}^{1}\frac{x(t)y'(t)-y(t)x'(t)}{x^{2}(t)+y^{2}(t)}dt,
\]
as the angle traversed clockwise from ${\bf u}(0)$ to ${\bf u}(1)$.
\end{definition}
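For orientation: if ${\bf u}(t)=(\sin t,\cos t)$, then
$x(t)y'(t)-y(t)x'(t)=-\sin ^{2}t-\cos ^{2}t=-1$ and $x^{2}(t)+y^{2}(t)=1$,
so that $\varphi _{1}({\bf u})=1$; the point moves clockwise through one
radian along the unit circle, confirming the sign convention.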

We need a variant of \cite[Lemma 4.3]{hen1} and  of \cite[Lemma 3]{hen2} to
show that the angle
$\varphi _{1}(x,y)\to \infty $, for solutions $(x,y)$ to
(\ref{eq1-1})-(\ref{eq1-2}), when
$\min_{t\in [ 0,1]}\|(x(t),y(t))\| \to \infty $.  We use the following assumption:
\smallskip

\noindent{\bf Assumption B}: Let $g:\mathbb{R}\to \mathbb{R}$ be continuous
and super-linear. Let $p:[0,1]\times \mathbb{R}\times \mathbb{R}
\to \mathbb{R}$ be a function satisfying Caratheodory's
conditions. Suppose that there exist $\mu \in (0,1]$, $\beta \geq 0$,
$0\leq \gamma \in L^{1}[0,1]$, and $M_{2}:\mathbb{R}^{2}\to \mathbb{R}$,
with $\frac{M_{2}(x,y)}{\|(x,y)\|}\to 0$ as $\|(x,y)\|\to \infty $,
such that for a.e. $t\in [ 0,1]$, and $(x,y)\in \mathbb{R}^{2}$,
\begin{equation}
\mathop{\rm sign}(x)p(t,x,y)\leq (1-\mu )\mathop{\rm sign}(x)g(x)+\beta |y|+\gamma (t)M_{2}(x,y).
\label{eq17}
\end{equation}
The inequality (\ref{eq17}) corresponds to inequality (4.3) in \cite{hen1},
i.e.,
\begin{equation}
|p(t,x,y)|\leq (1-\mu )|g(x)|+\beta |y|+\gamma ,  \label{eq18}
\end{equation}
where $\gamma \in \mathbb{R}$.

We shall provide the slight modifications needed in the proof of
Lemma 4.3 of \cite{Hthesis}, and Lemma 3 of \cite{hen2}
to cater for the difference between
(\ref{eq17}) and  (\ref{eq18}).

\begin{lemma} \label{Lemma3}
Suppose $g$ and $p$ satisfy Assumption B. Then for all $N\geq
0 $ there exists an $R\geq 0$ such that for all absolutely continuous
solutions $(x(t),y(t))$ for the system \eqref{eq1-1}, \eqref{eq1-2} with
$\min_{t\in [ 0,1]}\| (x(t),y(t))\| \geq R$, we have
$\varphi _{1}(x,y)\geq N$.
\end{lemma}

\begin{proof}  Since $\varphi _{1}({\bf u})=-\int_{0}^{1}
\frac{x(t)y'(t)-y(t)x'(t)}{x^{2}(t)+y^{2}(t)}dt$,
we see using (\ref{eq1-1}),(\ref{eq1-2}) that
\[
-x(t)y'(t)+y(t)x'(t)=y^{2}(t)+x(t)g(x(t))-x(t)p(t,x(t),y(t)).
\]
Let us set
\[
\theta (t)-\theta (0)=\int_{0}^{t}\frac{x(s)y'(s)-y(s)x'(s)}{x^{2}(s)+y^{2}(s)}ds,
\]
so that
\begin{align*}
-\theta '(t) &= -\frac{x(t)y'(t)-y(t)x'(t)}{
x^{2}(t)+y^{2}(t)} \\
&= \frac{y^{2}(t)+x(t)g(x(t))-x(t)p(t,x(t),y(t))}{x^{2}(t)+y^{2}(t)}.
\end{align*}
Let $N>0$ be given.  Since $g$ is super-linear, for each $K>0$ (to be
chosen later) there is an $M=M(K)$ such that if $|x|\geq M$ then
$\mu \frac{g(x)}{x}\geq K$.  Hence
\[
\mu \frac{g(x)}{x}+\frac{KM^{2}}{x^{2}}\geq K
\]
for all $x\neq 0$, and
$\mu xg(x)\geq Kx^{2}-KM^{2}$
for all $x\in \mathbb{R}$. Hence,
\begin{align*}
&y^{2}+xg(x)-xp(t,x,y) \\
&\geq y^{2}+xg(x)-(1-\mu )xg(x)-\beta |x||y|-\gamma (t)|x|M_{2}(x,y) \\
&\geq y^{2}+Kx^{2}-KM^{2}-\frac{\beta }{2}(\beta x^{2}+\frac{y^{2}}{\beta}
)-\gamma (t)|x|M_{2}(x,y) \\
&\geq \frac{y^{2}}{2}+(K-\frac{\beta ^{2}}{2})x^{2}-KM^{2}-\gamma
(t)|x|M_{2}(x,y) \\
&= \frac{y^{2}}{2}+\frac{k}{2}x^{2}-KM^{2}-\gamma (t)|x|M_{2}(x,y),
\end{align*}
where $k=2(K-\frac{\beta ^{2}}{2})$.
Then,
\begin{align*}
-\theta '(t) &= \frac{y^{2}(t)+x(t)g(x(t))-x(t)p(t,x(t),y(t))}{
x^{2}(t)+y^{2}(t)} \\
&\geq \frac{\frac{y^{2}}{2}+\frac{k}{2}x^{2}-KM^{2}-\gamma
(t)|x|M_{2}(x,y)}{x^{2}(t)+y^{2}(t)} \\
&\geq \frac{k}{2}(\frac{x^{2}+k^{-1}y^{2}}{x^{2}+y^{2}})-\frac{1}{\sqrt{k}}-
\frac{\gamma (t)M_{2}(x,y)}{\|(x,y)\|},
\end{align*}
assuming $\min_{t}\|(x(t),y(t))\|\geq M\sqrt{K}\sqrt[4]{k}$. We can then
write
\[
-\theta '(t)\geq \frac{k}{2}(\cos ^{2}\theta +k^{-1}\sin ^{2}\theta
)-\frac{1}{\sqrt{k}}-\frac{\gamma (t)M_{2}(x,y)}{\|(x,y)\|}.
\]
Next we estimate
\[
\int_{\theta (1)}^{\theta (0)}\frac{d\theta }{\cos ^{2}\theta +k^{-1}\sin
^{2}\theta },
\]
rather than estimating the integral $\int_{\theta (1)}^{\theta (0)}d\theta$.

Since $\frac{1}{\cos ^{2}\theta +k^{-1}\sin ^{2}\theta }\leq k$ we get
\[
-\frac{\theta '(t)}{\cos ^{2}\theta +k^{-1}\sin ^{2}\theta }
\geq \frac{k}{2}-\sqrt{k}-\frac{\gamma (t)M_{2}(x,y)k}{\|(x,y)\|}
\geq \frac{k}{2}-\sqrt{k}-\gamma (t),
\]
assuming $\frac{M_{2}(x,y)}{\|(x,y)\|}\leq \frac{1}{k}$, which holds if
$\min_{t}\|(x(t),y(t))\|\geq \xi (k)$, say.
Note
\begin{align*}
\int_{0}^{\frac{\pi }{2}}\frac{d\theta }{\cos ^{2}\theta +k^{-1}\sin
^{2}\theta } &= \int_{0}^{\frac{\pi }{2}}\frac{k\sec ^{2}\theta d\theta }{
k+\tan ^{2}\theta } \\
&= \int_{0}^{\infty }\frac{kdu}{k+u^{2}} \\
&= \frac{k}{\sqrt{k}}\tan ^{-1}(\frac{u}{\sqrt{k}})|_{0}^{\infty } \\
&= \frac{\sqrt{k}\pi }{2}.
\end{align*}
Since $\cos ^{2}\theta +k^{-1}\sin ^{2}\theta $ has period $\pi $, given an
interval $(a,b)$ we write $b-a=(n-f)\pi $, where $n$ is an integer and $f\in
[ 0,1)$. Then
\begin{align*}
\int_{a}^{b}\frac{d\theta }{\cos ^{2}\theta +k^{-1}\sin ^{2}\theta }
&\leq \int_{0}^{n\pi }\frac{d\theta }{\cos ^{2}\theta +k^{-1}\sin ^{2}\theta } \\
&\leq 2n\frac{\sqrt{k}\pi }{2} \\
&= \sqrt{k}\pi (\frac{b-a}{\pi }+f) \\
&\leq \sqrt{k}(b-a+\pi ).
\end{align*}
In particular, we get
\[
-\int_{\theta (0)}^{\theta (1)}\frac{d\theta }{\cos ^{2}\theta +k^{-1}\sin
^{2}\theta }\leq \sqrt{k}(\theta (0)-\theta (1)+\pi ).
\]
Next we change the variable of integration from $\theta $ to $t$ to get
\begin{align*}
\sqrt{k}(\varphi _{1}(x,y)+\pi ) &\geq -\int_{0}^{1}\frac{\theta '(t)dt}{\cos ^{2}\theta (t)+k^{-1}\sin ^{2}\theta (t)} \\
&\geq \int_{0}^{1}(\frac{k}{2}-\sqrt{k}-\gamma (t))dt \\
&= \frac{k}{2}-\sqrt{k}-\int_{0}^{1}\gamma .
\end{align*}
This gives
\begin{align*}
\varphi _{1}(x,y) &\geq \frac{\sqrt{k}}{2}-1-\int_{0}^{1}\gamma -\pi,
\quad \text{if }k\geq 1, \\
&= N,\quad \text{if }k=4(N+1+\int_{0}^{1}\gamma +\pi )^{2}\geq 1.
\end{align*}
Since
\[
k=2(K-\frac{\beta ^{2}}{2}),
\]
we choose $K=2(N+1+\int_{0}^{1}\gamma +\pi )^{2}+\frac{\beta ^{2}}{2}$.
Finally choosing
\[
R=\max (M\sqrt{K}\sqrt[4]{k},\xi (k)),
\]
we see that $\varphi _{1}(x,y)\geq N$ if $\min_{t\in [ 0,1]}\|
(x(t),y(t))\| \geq R$.
\end{proof}


\section{Existence of Solutions}

To fix ideas we study the case $\beta > 1$, as in \cite{cal}.

\begin{theorem}\label{Theorem1}
Let $\eta \in (0,1)$ and $\beta >1$ be given.  Let $g$ and
$p$ satisfy Assumptions A and B. Then, for each $k$ sufficiently large, there
are (at least) two solutions ${\bf u}(t)=(x(t),y(t))$ of
\begin{gather}
x'(t) = y(t)  \label{2.1} \\
y'(t) = -g(x(t))+p(t,x(t),y(t))  \label{2.2} \\
x(0) = 0  \label{2.3} \\
x(\eta ) = \beta x(1)  \label{2.4}
\end{gather}
with $\varphi _{1}({\bf u})\in (\frac{\pi }{2}+k\pi ,\frac{\pi }{2}+(k+1)\pi)$,
one with $x'(0)>0$ and the other with $x'(0)<0$.
\end{theorem}

When thinking of calculating these solutions, we may say that we have one
sequence $x_{n}$ of solutions to (\ref{1}), (\ref{2}) with
$x_{n}'(0)\to \infty $ and another sequence of solutions to (\ref{1}),
(\ref{2}) with $x_{n}'(0)\to -\infty $, with the angles they traverse as
above.

We break the proof of the theorem into two parts. We first prove existence
of the solution $x$ when $p$ is smooth enough, and then we see that we can
approximate $p$ by a sequence of smooth $p_{n}$, giving solutions $x_{n}$,
and then take limits to obtain existence when $p$ is not smooth. A
sufficiently smooth $p$ will satisfy the following Caratheodory-Lipschitz
condition.

\begin{definition} \cite{kur} \rm
 Let $U$ be open in $\mathbb{R}^{n}$, and let $[a,b]$ be an
interval of real numbers. Let $F:[a,b]\times U\to \mathbb{R}^{n}$ be
given. We say $F$ satisfies a Caratheodory-Lipschitz condition if for all
$x$, $t\mapsto F(t,x)$ is Lebesgue measurable, and for any
$(t_{0},x_{0})\in [a,b]\times U$, there are real valued integrable functions $m$ and $L$,
such that
\begin{gather}
\| F(t,x)-F(t,y)\| \leq L(t)\| x-y\|  \label{5} \\
\| F(t,x)\| \leq m(t)  \label{6}
\end{gather}
for all $x$ and $y$ in some neighbourhood of $x_{0}$, and $t$ a.e. in some
neighbourhood of $t_{0}$.
\end{definition}

We need the following definition.

\begin{definition}\label{def1}\rm
For $\alpha \in \mathbb{R}$ let $(x,y)$ be a solution of
(\ref{eq1-1}), (\ref{eq1-2}), (\ref{eq1-3}) and (\ref{eq1-4}),
and let $\eta \in (0,1)$ and $\beta >1$ be as in Theorem \ref{Theorem1}. We
define a function $H:(0,\infty )\to \mathbb{R}$ by $H(\alpha )=\beta
x(1)-x(\eta )$, for $\alpha \in (0,\infty )$.
\end{definition}

\begin{remark} \rm
Since the function $g:\mathbb{R}\to \mathbb{R}$ in Theorem \ref{Theorem1}
is assumed to be super-linear, we see that there exists an $M>0$
such that $\frac{g(x)}{x}\geq 1$ for $|x|\geq M$. Let us, now, define a
function $\widetilde{g}:\mathbb{R}\to \mathbb{R}$ by
\[
\widetilde{g}(x)=\begin{cases}
g(x), & \text{for }x\geq M \\
\frac{g(M)}{M}x,  & \text{for }0\leq x\leq M \\
\frac{g(-M)}{-M}x, & \text{for }-M\leq x\leq 0 \\
g(x), & \text{for }x\leq -M.
\end{cases}
\]
It then follows that $\frac{\widetilde{g}(x)}{x}\geq 1$ for all $x\neq 0$
and $g-\widetilde{g}$ is a bounded function on $\mathbb{R}$. Also, $p+
\widetilde{g}-g$ satisfies the same conditions as $p$ in Theorem \ref
{Theorem1}. Accordingly, we shall assume in the following that the function
$g$ in Theorem \ref{Theorem1} is such that $\frac{g(x)}{x}\geq 1$ for all
$x\neq 0$, by replacing $g$ by $\widetilde{g}$ and $p$ by
$p+\widetilde{g}-g$, if necessary.
\end{remark}

\begin{proof}[Proof of Theorem \ref{Theorem1}: smooth case]
 Here we assume, in
addition to Assumption A, that $p$ is Caratheodory-Lipschitz on $[0,1]\times
\mathbb{R}^{2}$, $g$ is locally Lipschitz, and for all nonzero $x$,
$g(x)/x \geq 1$.

We first see from Lemma \ref{Lemma3} and the last claim in Lemma
\ref{Lemma2}, that $\varphi _{1}(x,y)\to \infty $, as $y(0)\to \infty $.
 Accordingly, for every positive integer $k$, sufficiently large, there
exists an $h_{k}\in \mathbb{R}$ such that if $(x,y)$ is a solution of
\begin{equation}
\begin{aligned}
x' = y  \\
y' = -g(x)+p(t,x,y)   \\
x(0) = 0   \\
y(0) = h_{k}, 
\end{aligned}\label{h_k}
\end{equation}
then $\varphi _{1}(x,y)=\pi /2+k\pi $. There may be more than one value for
$h_{k}$, so we let $h_{k}^{\min }$ and $h_{k}^{\max }$ be the smallest and
largest such numbers.

Then for the function $H$, defined in Definition \ref{def1}, we claim
$H(h_{k})>0$, if $k$ is even. Since, now, $\varphi _{1}(x,y)=\pi /2+k\pi $
and $k$ is even, we see that $x(1)>0$ and $y(1)=0$, from the definition of
$\varphi _{1}(x,y)$ (see Definition \ref{def}).  Suppose, now, $x$ is
maximised at $\eta ^{\ast }\in (0,1]$. We then get from (\ref{eq7}) of Lemma
\ref{Lemma2} that $x(\eta )\leq x(\eta ^{\ast })\leq G_{+}^{-1}(\frac{
h_{k}^{2}}{2}(1+\epsilon ))$ and $\beta x(1)\geq \beta G_{+}^{-1}(\frac{
h_{k}^{2}}{2(1+\epsilon )})$, since $x(1)>0$ and $y(1)=0$.

Now,
\begin{equation}
\begin{aligned}
H(h_{k}) &= \beta x(1)-x(\eta )  \\
&\geq \beta G_{+}^{-1}(\frac{h_{k}^{2}}{2(1+\epsilon
)})-G_{+}^{-1}(\frac{
h_{k}^{2}}{2}(1+\epsilon ))  \\
&= (\beta -1)G_{+}^{-1}(\frac{h_{k}^{2}}{2(1+\epsilon
)})+G_{+}^{-1}(\frac{
h_{k}^{2}}{2(1+\epsilon )})-G_{+}^{-1}(\frac{h_{k}^{2}}{2}(1+\epsilon )).
\end{aligned}\label{eq16x} 
\end{equation}
We may assume that $0<\epsilon <1$, and let us set
\begin{equation}
t=G_{+}^{-1}(\frac{h_{k}^{2}}{2(1+\epsilon )})  \label{eq16a}
\end{equation}
and
\[
t+\delta =G_{+}^{-1}(\frac{h_{k}^{2}}{2}(1+\epsilon )).
\]
Then
\[
G(t)=\frac{h_{k}^{2}}{2(1+\epsilon )}
\quad\mbox{and}\quad
G(t+\delta )=\frac{h_{k}^{2}}{2}(1+\epsilon ).
\]
Next, we see that
\begin{equation}
h_{k}^{2}\epsilon \geq \frac{h_{k}^{2}}{2}(1+\epsilon
)-\frac{h_{k}^{2}}{2(1+\epsilon )}
= G(t+\delta )-G(t)
= \int_{t}^{t+\delta }g(s)ds
\geq t\delta ,  \label{eq16b}
\end{equation}
in view of our assumption $\frac{g(x)}{x}\geq 1$ for all $x\neq 0$. It then
follows from (\ref{eq16x}), (\ref{eq16a}), (\ref{eq16b}), the assumption
$0<\epsilon <1$ and the fact that $G_{+}^{-1}$ is an increasing function that
\[
H(h_{k}) = \beta x(1)-x(\eta )
\geq (\beta -1)G_{+}^{-1}(\frac{h_{k}^{2}}{4})
-\frac{h_{k}^{2}\epsilon}{G_{+}^{-1}(\frac{h_{k}^{2}}{4})}
>0
\]
if $\epsilon >0$ is chosen sufficiently small. Hence $H(h_{k})>0$.
Similarly, $H(h_{k})<0$ when $k$ is odd.

Now $H$ is continuous; indeed the map $\alpha \mapsto (x(1),x(\eta ))$,
where $(x,y)$ solves the initial value problem with $y(0)=\alpha $,
is locally Lipschitz by \cite{kur}.

By the intermediate value theorem, there is an $\alpha \in
(h_{2k}^{\max },h_{2k+1}^{\min })$ and a solution $(x,y)$ of (\ref{eq1-1}),
(\ref{eq1-2}), (\ref{eq1-3}), (\ref{eq1-4}) such that $H(\alpha )=0$. We claim
$\varphi _{1}(x,y)\in (\pi /2+2k\pi ,\pi /2+(2k+1)\pi )$. Suppose $\varphi
_{1}(x,y)\leq \pi /2+2k\pi $. Then, since $\varphi _{1}\to \infty $ as
$y(0)\to \infty $, the intermediate value theorem gives an $h\geq \alpha $
whose solution has $\varphi _{1}=\pi /2+2k\pi $, contradicting $\alpha
>h_{2k}^{\max }$; the upper bound is similar. This concludes the
proof of Theorem \ref{Theorem1} in the smooth case.
 \end{proof}

The next lemma is needed in the proof for the non-smooth case. Writing
$(x,y)={\bf u}$, we mollify the function $p(t,{\bf u})$ with respect to the
second variable ${\bf u}$.

\begin{lemma}
Suppose $p:[0,1]\times \mathbb{R}^{2}\to \mathbb{R}$ satisfies (a) the
Caratheodory-Lipschitz conditions, and
$M_{1}:[0,1]\times [ 0,\infty )\to [ 0,\infty )$ satisfies:
\begin{itemize}
\item[(b)] for all $t\in [ 0,1]$, $M_{1}(t,\cdot )$ is increasing on
$[0,\infty )$,
\item[(c)] for all $s\in [ 0,\infty )$, $M_{1}(\cdot ,s)$ is integrable on
$[0,1]$, and
\item[(d)] $s^{-1}\int_{0}^{1}M_{1}(t,s)dt\to 0$ as $s\to \infty $.
\end{itemize}
Suppose that for all $t$ and ${\bf u}$,
\begin{itemize}
\item[(e)] $|p(t,{\bf u})|\leq M_{1}(t,\| {\bf u}\| _{\infty })$.
\end{itemize}
 Let $\varphi \in C_{0}^{\infty }(\mathbb{R}^{2})$
have support in $\{{\bf u}\in \mathbb{R}^{2}:\| {\bf u}\| _{\infty }\leq
1\}$, $\varphi \geq 0$, $\int \varphi =1$.  Let $\epsilon >0$ be given. Let
$p^{\epsilon }(t,{\bf u})=\int p(t,{\bf u}-\epsilon {\bf v})\varphi ({\bf
v})d{\bf v}$,
and let $M_{1}^{\epsilon }(t,s)=M_{1}(t,s+\epsilon )$.
Then the pair of functions $p^{\epsilon }$ and ${M_{1}}^{\epsilon }$ satisfy
conditions (a) through (e).
\end{lemma}

\begin{proof}  To show $p^{\epsilon }$ satisfies (a), we first let
${\bf u}$ be given, and claim $t\mapsto p^{\epsilon }(t,{\bf u})$ is Lebesgue
measurable. That is,
\[
t\mapsto \epsilon ^{-2}\int p(t,{\bf x})\varphi ({\frac{{\bf u}-{\bf x}}{
\epsilon }})dx_{1}dx_{2}
\]
is measurable. For a.e. $t\in [ 0,1]$, $p(t,x)$ is continuous in $x$,
and so the integral is a Riemann integral. Accordingly,
\[
\epsilon ^{-2}\int p(t,{\bf x})\varphi ({\frac{{\bf u}-{\bf x}}{\epsilon }}
)dx_{1}dx_{2}=\lim_{n\to \infty }\sum_{{\bf x}\in P(n)}p(t,{\bf x}
)\varphi ({\frac{{\bf u}-{\bf x}}{\epsilon }}),
\]
where $\{P(n)\}$ is a sequence of partitions of $[0,1]\times [ 0,1]$.
Each of these sums is a measurable function of $t$ since $p$ satisfies the
Caratheodory conditions, Lebesgue measure is complete, and the Lebesgue
measurable functions form a vector space. The limit of a sequence of
measurable functions is measurable, and so we have proved the claim.

To show $p^{\epsilon }$ satisfies (a), let $(t_{0},{\bf u}_{0})\in [0,1]\times \mathbb{R}^{2}$ be given. We
claim there are real valued integrable functions $m$ and $L$, such that
\begin{gather}
\| p^{\epsilon }(t,{\bf u})-p^{\epsilon }(t,{\bf w})\|
\leq L(t)\| {\bf u}-{\bf w}\|  \label{15} \\
\| p^{\epsilon }(t,{\bf u})\| \leq  m(t)  \label{16}
\end{gather}
for all ${\bf u}$ and ${\bf w}$ in some neighbourhood of ${\bf u}_{0}$, and
for a.e. $t$ in some neighbourhood of $t_{0}$, i.e. (\ref{5}) and (\ref{6})
hold. Now
\[
\| p^{\epsilon }(t,{\bf u})-p^{\epsilon }(t,{\bf w})\| \leq \epsilon
^{-2}\int_{N(\epsilon )}|p(t,{\bf x})|\,|\varphi ({\frac{{\bf u}-{\bf x}}{
\epsilon }})-\varphi ({\frac{{\bf w}-{\bf x}}{\epsilon }})|dx_{1}dx_{2}
\]
where $N(\epsilon )$ stands for $\{{\bf x}:\| {\bf x}-{\bf u}\| \leq
\epsilon \}\cup \{{\bf x}:\| {\bf x}-{\bf w}\| \leq \epsilon \}$. Note
$N(\epsilon )\subset \{{\bf x}:\| {\bf x}\| \leq \max (\| {\bf u}
\| ,\| {\bf w}\| )+\epsilon \}$. By (b), with $K(\varphi )$ the
Lipschitz constant of $\varphi $,
\begin{align*}
RHS &\leq \epsilon ^{-3}\int_{N(\epsilon )}M_{1}(t,\max (\| {\bf u}\|
,\| {\bf w}\| )+\epsilon )K(\varphi )\| {\bf u}-{\bf w}\|
dx_{1}dx_{2} \\
&\leq 2\epsilon ^{-1}M_{1}(t,\max (\| {\bf u}\| ,\| {\bf w}\|
)+\epsilon )K(\varphi )\| {\bf u}-{\bf w}\| .
\end{align*}
Since $M_{1}$ is increasing, (b) shows that (\ref{15}) holds for all
${\bf u}$ and ${\bf w}$ in any given bounded set, and all $t\in [ 0,1]$.

For (\ref{16}), we check that $| p^{\epsilon }(t,0)| $ is integrable;
this and (\ref{15}) give (\ref{16}):
\begin{align*}
|p^{\epsilon }(t,0)|
&= | \int p(t,-\epsilon {\bf v})\varphi ({\bf v})d {\bf v}| \\
&\leq \int |p(t,-\epsilon {\bf v})\varphi ({\bf v})|d{\bf v} \\
&\leq \int_{\| {\bf v}\| \leq 1}M_{1}(t,\epsilon )\varphi ({\bf v})d{\bf v},
\quad \text{by (e),} \\
&= M_{1}(t,\epsilon ).
\end{align*}
To show (b) for ${M_{1}}^{\epsilon }$ we note that $s\mapsto
M_{1}(t,s+\epsilon )$ is increasing on $[0,\infty )$.
To show (c) for ${M_{1}}^{\epsilon }$ we note that for all $s$, $t\mapsto
M_{1}(t,s+\epsilon )$ is integrable on $[0,1]$.
To show (d) for ${M_{1}}^{\epsilon }$ we note that $s^{-1}
\int_{0}^{1}M_{1}(t,s+\epsilon )dt\to 0$ as $s\to \infty $.
To show (e) for $p^{\epsilon }$ and ${M_{1}}^{\epsilon }$ we note that
\begin{align*}
|p^{\epsilon }(t,{\bf u})|
&\leq \int |p(t,{\bf u}-\epsilon {\bf v})|\varphi
({\bf v})d{\bf v} \\
&\leq \int M_{1}(t,\| {\bf u}\| +\epsilon )\varphi ({\bf v})d{\bf v}
\\
&\leq \int M_{1}^{\epsilon }(t,\| {\bf u}\| )\varphi ({\bf v})d{\bf v}
\\
&= M_{1}^{\epsilon }(t,\| {\bf u}\| ).
\end{align*}
\end{proof}
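As an aside, the mollification $p^{\epsilon }(t,{\bf u})=\int p(t,{\bf u}
-\epsilon {\bf v})\varphi ({\bf v})d{\bf v}$ is easy to approximate
numerically. In the sketch below, the non-smooth $p(t,x,y)=|x|+|y|$ and
the particular bump $\varphi $ are our illustrative choices, not taken
from the lemma; $\varphi $ is supported in the Euclidean unit ball, hence
in $\{\| {\bf v}\| _{\infty }\leq 1\}$, and is normalised by quadrature.
\begin{verbatim}
# Mollification sketch: p_eps(t,u) = integral of p(t, u - eps*v) phi(v) dv.
import numpy as np

def bump(v1, v2):
    # Standard smooth bump, supported in the Euclidean unit ball.
    r2 = v1**2 + v2**2
    out = np.zeros_like(r2)
    inside = r2 < 1.0
    out[inside] = np.exp(-1.0 / (1.0 - r2[inside]))
    return out

n = 201
v = np.linspace(-1.0, 1.0, n)
V1, V2 = np.meshgrid(v, v)
dA = (v[1] - v[0])**2
W = bump(V1, V2)
W /= W.sum() * dA              # normalise so that the integral of phi is 1

def p(t, x, y):                # illustrative non-smooth p
    return np.abs(x) + np.abs(y)

def p_eps(t, x, y, eps):
    # Quadrature for the convolution; smooth in (x, y) for eps > 0.
    return np.sum(p(t, x - eps * V1, y - eps * V2) * W) * dA

print(p_eps(0.5, 0.0, 0.0, 0.1))   # near p(0.5,0,0) = 0, but smoothed
\end{verbatim}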

\begin{proof}[Proof of Theorem \ref{Theorem1}: non-smooth case]
 Given $g$, we will, by
adding a term to $g$ and subtracting it from $p$, assume that $g(x)/x\geq 1$
for all $x\neq 0$. For $\epsilon >0$, we take $g^{\epsilon }$ which is
locally Lipschitz, such that $g^{\epsilon }(x)\to g(x)$ as $\epsilon \to 0$,
uniformly on bounded sets, and $g^{\epsilon }(x)/x\geq 1$ for all $x\neq 0$.

For each large integer $k$, and $\epsilon >0$, we let $(x_{\epsilon
},y_{\epsilon })$ be a solution of (\ref{2.1}) to (\ref{2.4}) with
$g^{\epsilon }$ and $p^{\epsilon }$ replacing $g$ and $p$, satisfying
$\varphi _{1}(x_{\epsilon },y_{\epsilon })\in (\frac{\pi }{2}+k\pi ,\frac{\pi
}{2}+(k+1)\pi )$, with $y_{\epsilon }(0)>0$. Now we can check the
$(x_{\epsilon },y_{\epsilon })$ are uniformly bounded, and we can check they
are equi-continuous, since their derivatives are uniformly bounded. By the
Arzela-Ascoli Theorem, there is a sequence $\epsilon (n)\to 0$ with
$(x_{\epsilon (n)},y_{\epsilon (n)})$ converging to $(x,y)$, say, in
$C([0,1]; \mathbb{R}^{2})$. Now for all $t\in [ 0,1]$,
\begin{equation}
\begin{pmatrix}
x_{\epsilon } \\
y_{\epsilon }
\end{pmatrix}(t)
=\begin{pmatrix}
0 \\
y_{\epsilon }(0)
\end{pmatrix}
+\int_{0}^{t}\begin{pmatrix}
y_{\epsilon }(s) \\
-g^{\epsilon }(x_{\epsilon }(s))+p^{\epsilon }(s,x_{\epsilon
}(s),y_{\epsilon }(s))
\end{pmatrix} ds   \label{17}
\end{equation}
We use the dominated convergence theorem to let $n\to \infty $ with
$\epsilon =\epsilon (n)$.\\
 (a) We claim $p^{\epsilon }(s,x_{\epsilon}(s),y_{\epsilon }(s))$ converges
 to $p(s,x(s),y(s))$ for $s$ a.e. in $[0,1]$. Take $s$ so that $p(s,{\bf u})$
is continuous in ${\bf u}$. Then we note
\[
\int_{\mathbb{R}^{2}}p(s,x_{\epsilon (n)}(s)-{\epsilon }(n)v_{1},y_{\epsilon
(n)}(s)-{\epsilon }(n)v_{2})\varphi (v_{1},v_{2})dv_{1}dv_{2}\to
p(s,x(s),y(s)),
\]
proving the claim.
\\
(b) We note $g^{\epsilon (n)}(x_{\epsilon (n)}(s))$ converges to $g(x(s))$
for any $s$.
\\
(c) We claim $p^{\epsilon (n)}(s,x_{\epsilon (n)}(s),y_{\epsilon
(n)}(s))\leq M_{1}(s,K)$ for some $K>0$. Just take $K\geq \sup_{s\in [
0,1]}\sup_{n}\max (|x_{\epsilon (n)}(s)|,|y_{\epsilon
(n)}(s)|)+\max_{n}\epsilon (n)$.
\\
(d) We note there is $K$ such that for all $n$ and $s$, $|g^{\epsilon
(n)}(x_{\epsilon (n)}(s))|\leq K$.
\\
The dominated convergence theorem is applicable by (a) -- (d). Hence
\begin{equation}
\begin{pmatrix}
x \\
y
\end{pmatrix}(t)=\begin{pmatrix}
0 \\
y(0)
\end{pmatrix}
+\int_{0}^{t}\begin{pmatrix}
y(s) \\
-g(x(s))+p(s,x(s),y(s))
\end{pmatrix} ds \label{18}
\end{equation}
Hence the o.d.e. (\ref{2.1}) and (\ref{2.2}) hold for $(x,y)$. The boundary
conditions (\ref{2.3}) and (\ref{2.4}) hold for $(x,y)$, since they held for
the approximations $(x_{\epsilon },y_{\epsilon })$.

We note that $\varphi _{1}(x_{\epsilon (n)},y_{\epsilon (n)})\to
\varphi _{1}(x,y)$, noting that for all $n$, $(x_{\epsilon (n)},y_{\epsilon
(n)})$ are outside some neighbourhood of $(0,0)$. Hence $\varphi _{1}(x,y)\in
[ \frac{\pi }{2}+k\pi ,\frac{\pi }{2}+(k+1)\pi ]$, since
$\varphi_{1}(x_{\epsilon (n)},y_{\epsilon (n)})\in
(\frac{\pi }{2}+k\pi ,\frac{\pi}{2}+(k+1)\pi )$. Because of the boundary
condition (\ref{2.4}), $\varphi_{1}(x,y)\neq \frac{\pi }{2}+k\pi $ for all
large $k$. This ends the proof of the theorem.
\end{proof}

\section{Uniqueness}

We proved in Theorem \ref{Theorem1} that the equations (\ref{2.1}),
(\ref{2.2}), (\ref{2.3}), (\ref{2.4}) have at least one solution $(x,x')$
with $\varphi_{1}(x,x')\in (\frac{\pi }{2}+k\pi ,\frac{\pi }{2}+(k+1)\pi )$, and
$x'(0)>0$. In this section we shall show that the equations
(\ref{2.1}), (\ref{2.2}), (\ref{2.3}), (\ref{2.4}) have exactly one solution $(x,x')$ with
$\varphi _{1}(x,x')\in (\frac{\pi }{2}+k\pi ,\frac{\pi }{2}
+(k+1)\pi )$ and $x'(0)>0$, when $p\equiv 0$, $g$ is like the
function $x\mapsto |x|^{s}\mathop{\rm sgn}(x)$, for some $s>1$, and $\beta $, $\eta $
satisfy a suitable inequality. The arguments can be easily modified to prove
that the equations (\ref{2.1}), (\ref{2.2}), (\ref{2.3}), (\ref{2.4}) have
exactly one solution $(x,x')$ with $\varphi _{1}(x,x')\in (\frac{\pi }{2}
+k\pi ,\frac{\pi }{2}+(k+1)\pi )$, and $x'(0)<0$.
In Remark \ref{rem14} we give a result for $\beta < 1$.

\begin{theorem}\label{Theorem2}
Let $g:\mathbb{R}\to \mathbb{R}$ be a continuously
differentiable function. Suppose that there exist $p_{0}>0$, $p_{1}>0$, and
an $s>1$ such that for all $x\in \mathbb{R}$,
\begin{equation}
p_{0}|x|^{s}\leq g(x)\mathop{\rm sgn}(x)\leq p_{1}|x|^{s},  \label{3.01}
\end{equation}
and there exists a $h>0$ such that
\begin{equation}
\frac{g(x)}{x^{1+h}}\text{ is
increasing on }(0,\infty ) \text{ and } (-\infty , 0).
  \label{3.02}
\end{equation}
Let $\beta >1$ and $\eta \in (0,1)$ be such that
\begin{equation}
\beta ^{2}>\sqrt{1+\frac{\eta ^{4}}{4}}+\frac{\eta ^{2}}{2}.
\label{3.1}
\end{equation}
Then, for $k$ (an integer) sufficiently large, the solution of the system of
equations
\begin{gather}
x'(t)=y(t), \label{3-03} \\
y'(t)=-g(x(t)),\label{3-04} \\
x(0) = 0,  \label{3-05} \\
x(\eta ) = \beta x(1),  \label{3-06}
\end{gather}
with $\varphi _{1}(x,x')\in (\frac{\pi }{2}+k\pi ,\frac{\pi }{2}
+(k+1)\pi )$, and $x'(0)>0$, is unique.
\end{theorem}
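For instance, $g(x)=x^{3}$ satisfies (\ref{3.01}) with $s=3$ and
$p_{0}=p_{1}=1$, and satisfies (\ref{3.02}) with $h=1$, since
$g(x)/x^{2}=x$ is increasing on both half-lines; and if, say, $\eta =1/2$,
then (\ref{3.1}) reads $\beta ^{2}>\sqrt{1+1/64}+1/8\approx 1.133$, so any
$\beta >1.066$ will do.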

\begin{remark} {\rm
The existence of a solution for the system of equations (\ref{3-03}), (\ref
{3-04}), (\ref{3-05}), (\ref{3-06}) is obtained using Theorem
\ref{Theorem1}.}
\end{remark}

\begin{proof}[Proof of Theorem \ref{Theorem2}]
 Let $(x_{\alpha }(t),y_{\alpha }(t))$ be the
solution of the equations (\ref{3-03}), (\ref{3-04}) with $x(0)=0$,
$y(0)=\alpha $. Recalling $G(x)=\int_{0}^{x}g(t)dt$, we see, from
(\ref{3-03}), (\ref{3-04}) with $(x,y)=(x_{\alpha }(t),y_{\alpha }(t))$, that
\begin{equation}
G(x)+\frac{y^{2}}{2}=\frac{\alpha ^{2}}{2}=G(\gamma )=G(-\gamma ^{\ast }),
\nonumber \label{3.3}
\end{equation}
where $\gamma =G_+^{-1}(\alpha^2/2)$ and $-\gamma ^{\ast }=G_-^{-1}(\alpha^2/2)$.
Note that both $\gamma $ and $\gamma ^{\ast }$ are positive.

In the following, we shall use $\gamma $ to parametrise the solution, giving
$(x,y)=(x_{\alpha }(t),y_{\alpha }(t)):=(x(t,\gamma ),y(t,\gamma ))$. We
define $\gamma _{k}$ by setting $\gamma =\gamma _{k}$ when $\varphi
_{1}(x,y)=\frac{\pi }{2}+k\pi $. This corresponds to $\alpha =h_{k}$ (see
equation (\ref{h_k})). We next consider $\beta x(1,\gamma )$ and $x(\eta
,\gamma )$ for $\gamma \in (\gamma _{k},\gamma _{k+1})$. Now, from Theorem
\ref{Theorem1} we see for $k$ sufficiently large that there exists a $\gamma
_{0}\in (\gamma _{k},\gamma _{k+1})$ such that $\beta x(1,\gamma
_{0})=x(\eta ,\gamma _{0})$. To show uniqueness of $\gamma _{0}$ it suffices
to show that
\begin{equation}
|\beta \frac{\partial x}{\partial \gamma }(1,\gamma _{0})|>|
\frac{\partial x}{\partial \gamma }(\eta ,\gamma _{0})|.  \label{3.2}
\end{equation}
Let us define $\widetilde{\varphi }(t,\gamma )$ by setting
\begin{equation}
\widetilde{\varphi }(t,\gamma )
=\int_{0}^{t}\frac{x'(s)y(s)-y'(s)x(s)}{x^{2}(s)+y^{2}(s)}ds, \nonumber \label{3.4}
\end{equation}
where $(x,y)=(x(t,\gamma ), y(t,\gamma ))$. Now, we define a function
$\widetilde{t}(\varphi ,\gamma )$ by
\begin{equation}
t=\widetilde{t}(\varphi ,\gamma )\quad \Longleftrightarrow \quad \varphi =
\widetilde{\varphi }(t,\gamma ).   \label{3.5}
\end{equation}
We note that $\widetilde{t}(\varphi ,\gamma )$ is the time taken for the
solution $(x(t,\gamma ),y(t,\gamma ))$ to traverse the angle $\varphi $.
For $t=1$, we then have
\[
1=\widetilde{t}(\widetilde{\varphi }(1,\gamma ),\gamma ),
\]
from (\ref{3.5}). Next we use the implicit function theorem to get
\begin{equation}
\frac{\partial }{\partial \gamma }\widetilde{\varphi }(1,\gamma )
= -\frac{\frac{\partial \widetilde{t}}{\partial \gamma }(1,\gamma )}{\frac{\partial
\widetilde{t}}{\partial \varphi }(1,\gamma )}
= -\frac{\partial \widetilde{t}}{\partial \gamma }(\widetilde{\varphi }
(1,\gamma ),\gamma )\frac{\partial \widetilde{\varphi }}{\partial t}
(1,\gamma ).  \label{3.7}
\end{equation}
Let us define $\widetilde{x}(\varphi ,\gamma )$ as follows: since traversing
the angle $\varphi $ clockwise along the curve $G(x)+\frac{y^{2}}{2}
=G(\gamma )$ from $(0,\alpha )$ brings us to a point $(x,y)$, we define
\begin{equation}
\widetilde{x}(\varphi ,\gamma )=x. \nonumber \label{3.8}
\end{equation}
Note
\[
\widetilde{x}(\widetilde{\varphi }(1,\gamma ),\gamma )=x(1,\gamma ).
\]
From the chain rule,
\begin{equation}
\frac{\partial x}{\partial \gamma }(1,\gamma )=\frac{\partial
\widetilde{x}}{
\partial \gamma }(\widetilde{\varphi }(1,\gamma ),\gamma )+\frac{\partial
\widetilde{x}}{\partial \varphi }(\widetilde{\varphi }(1,\gamma ),\gamma )
\frac{\partial \widetilde{\varphi }}{\partial \gamma }(1,\gamma ).
\label{3.9}
\end{equation}
Similarly, we get
\begin{equation}
\frac{\partial x}{\partial \gamma }(\eta ,\gamma )=\frac{\partial
\widetilde{
x}}{\partial \gamma }(\widetilde{\varphi }(\eta ,\gamma ),\gamma )+\frac{
\partial \widetilde{x}}{\partial \varphi }(\widetilde{\varphi }(\eta ,\gamma
),\gamma )\frac{\partial \widetilde{\varphi }}{\partial \gamma }(\eta
,\gamma ).  \label{3.91}
\end{equation}
We express $\widetilde{t}(\varphi ,\gamma )$ in terms of $n$, the
number of times the solution $(x,y)$ goes around the origin, and the time
taken to traverse the angle $2\pi $. Let
\begin{equation}
\widetilde{t}(\varphi ,\gamma )=n(\varphi ,\gamma )t_{P}(\gamma
)+t_{Q}(\varphi ,\gamma ),  \label{3.10}
\end{equation}
where $t_{P}(\gamma )$ is the time for a full revolution, that is, the time
taken to traverse the angle $2\pi $, and
$t_{Q}\in [ 0,t_{P})$.
Note
\begin{equation}
\frac{n(\widetilde{\varphi }(\eta ,\gamma ),\gamma )}{n(\widetilde{\varphi
}(1,\gamma ),\gamma )}\to \eta \text{ as }\gamma \to
\infty .  \nonumber% \label{3.11}
\end{equation}
We differentiate $\widetilde{t}(\varphi ,\gamma )$ with respect to $\gamma $
in (\ref{3.10}) when $\widetilde{\varphi }(1,\gamma )$ is not a multiple of
${2\pi }$ to get
\begin{equation}
\frac{\partial \widetilde{t}}{\partial \gamma }(\varphi ,\gamma )=n\frac{
\partial t_{P}}{\partial \gamma }+\frac{\partial t_{Q}}{\partial \gamma }
.  \label{3.110}
\end{equation}
Next, define
\begin{equation}
\varphi _{\eta }:=\widetilde{\varphi }(\eta ,\gamma _{0})\text{ and }\varphi
_{1}:=\widetilde{\varphi }(1,\gamma _{0}). \nonumber \label{3.111}
\end{equation}
We evaluate (\ref{3.110}) at two points.
\begin{gather}
\frac{\partial \widetilde{t}}{\partial \gamma }(\varphi _{1},\gamma
_{0})=n(\varphi _{1},\gamma _{0})\frac{\partial t_{P}}{\partial \gamma }
(\varphi _{1},\gamma _{0})+\frac{\partial t_{Q}}{\partial \gamma }(\varphi
_{1},\gamma _{0}).  \label{3.12} \\
\frac{\partial \widetilde{t}}{\partial \gamma }(\varphi _{\eta },\gamma
_{0})=n(\varphi _{\eta },\gamma _{0})\frac{\partial t_{P}}{\partial \gamma
}(\varphi _{\eta },\gamma _{0})+\frac{\partial t_{Q}}{\partial \gamma }
(\varphi _{\eta },\gamma _{0}).  \label{3.121}
\end{gather}
Now, by  \cite[Lemma 4]{cal} we see that
\begin{equation}
\frac{\partial }{\partial \gamma }\frac{\partial \widetilde{t}}{\partial
\varphi }(\varphi ,\gamma )\leq 0\text{, for all }(\varphi ,\gamma ).
\nonumber \label{3.122}
\end{equation}
Also, notice that
$\frac{\partial \widetilde{t}}{\partial \gamma }(0,\gamma )=0$  for all
$\gamma$. So we get
\[
0\geq \frac{\partial t_{Q}}{\partial \gamma }(\varphi ,\gamma _{0})\geq
\frac{\partial t_{P}}{\partial \gamma }(\gamma _{0}).
\]
Now, to prove (\ref{3.2}) we compute the ratio of (\ref{3.91}) to
(\ref{3.9}). This gives
\[
\frac{\frac{\partial x}{\partial \gamma }(\eta ,\gamma )}{\frac{\partial
x}{
\partial \gamma }(1,\gamma )}=\frac{\frac{\partial \widetilde{x}}{\partial
\gamma }(\widetilde{\varphi }(\eta ,\gamma ),\gamma )+\frac{\partial
\widetilde{x}}{\partial \varphi }(\widetilde{\varphi }(\eta ,\gamma ),\gamma
)\frac{\partial \widetilde{\varphi }}{\partial \gamma }(\eta ,\gamma )}{
\frac{\partial \widetilde{x}}{\partial \gamma }(\widetilde{\varphi }
(1,\gamma ),\gamma )+\frac{\partial \widetilde{x}}{\partial \varphi }(
\widetilde{\varphi }(1,\gamma ),\gamma )\frac{\partial \widetilde{\varphi
}}{\partial \gamma }(1,\gamma )}.
\]
Using (\ref{3.7}) we then get
\begin{equation}
\frac{\frac{\partial x}{\partial \gamma }(\eta ,\gamma )}{\frac{\partial
x}{
\partial \gamma }(1,\gamma )}=\frac{\frac{\partial \widetilde{x}}{\partial
\gamma }(\widetilde{\varphi }(\eta ,\gamma ),\gamma )-\frac{\partial
\widetilde{x}}{\partial \varphi }(\widetilde{\varphi }(\eta ,\gamma ),\gamma
)\frac{\partial \widetilde{t}}{\partial \gamma }(\widetilde{\varphi }(\eta
,\gamma ),\gamma )\frac{\partial \widetilde{\varphi }}{\partial t}(\eta
,\gamma )}{\frac{\partial \widetilde{x}}{\partial \gamma }(\widetilde{
\varphi }(1,\gamma ),\gamma )-\frac{\partial \widetilde{x}}{\partial \varphi
}(\widetilde{\varphi }(1,\gamma ),\gamma )\frac{\partial \widetilde{t}}{
\partial \gamma }(\widetilde{\varphi }(1,\gamma ),\gamma )\frac{\partial
\widetilde{\varphi }}{\partial t}(1,\gamma )}.  \label{3.130}
\end{equation}
We note that
\begin{equation}
\frac{\partial x}{\partial t}(t,\gamma )=\frac{\partial \widetilde{x}}{
\partial \varphi }(\widetilde{\varphi }(t,\gamma ),\gamma )\frac{\partial
\widetilde{\varphi }}{\partial t}(t,\gamma )\text{ for all }(t,\gamma
). \nonumber
 \label{3.13}
\end{equation}
Hence, (\ref{3.130}) becomes
\[
\frac{\frac{\partial x}{\partial \gamma }(\eta ,\gamma )}{\frac{\partial
x}{
\partial \gamma }(1,\gamma )}=\frac{\frac{\partial \widetilde{x}}{\partial
\gamma }(\widetilde{\varphi }(\eta ,\gamma ),\gamma )-\frac{\partial
\widetilde{t}}{\partial \gamma }(\widetilde{\varphi }(\eta ,\gamma ),\gamma
)
\frac{\partial x}{\partial t}(\eta ,\gamma )}{\frac{\partial
\widetilde{x}}{
\partial \gamma }(\widetilde{\varphi }(1,\gamma ),\gamma )-\frac{\partial
\widetilde{t}}{\partial \gamma }(\widetilde{\varphi }(1,\gamma ),\gamma )
\frac{\partial x}{\partial t}(1,\gamma )}.
\]
Now, by (\ref{3.12}) and (\ref{3.121}) we get
\begin{equation}
\frac{\frac{\partial x}{\partial \gamma }(\eta ,\gamma _{0})}{\frac{\partial
x}{\partial \gamma }(1,\gamma _{0})}=\frac{NUM}{DENOM},  \label{3.14}
\end{equation}
where
\begin{equation}
NUM=\frac{\partial \widetilde{x}}{\partial \gamma }(\widetilde{\varphi }
(\eta ,\gamma _{0}),\gamma _{0})-\frac{\partial x}{\partial t}(\eta ,\gamma
_{0})[n(\varphi _{\eta },\gamma _{0})\frac{\partial t_{P}}{\partial \gamma
}
(\gamma _{0})+\frac{\partial t_{Q}}{\partial \gamma }(\varphi _{\eta
},\gamma _{0})],  \label{3.15}
\end{equation}
and
\begin{equation}
DENOM=\frac{\partial \widetilde{x}}{\partial \gamma }(\widetilde{\varphi }
(1,\gamma _{0}),\gamma _{0})-\frac{\partial x}{\partial t}(1,\gamma
_{0})[n(\varphi _{1},\gamma _{0})\frac{\partial t_{P}}{\partial \gamma }
(\gamma _{0})+\frac{\partial t_{Q}}{\partial \gamma }(\varphi _{1},\gamma
_{0})].  \label{3.16}
\end{equation}

Next, we need to show that the first term in (\ref{3.16}) is small compared
to the second term.  To do this we need the following lemma.
\end{proof}

\begin{lemma}\label{Lemma5}
Suppose that the assumptions (\ref{3.01}) and (\ref{3.02}) of
Theorem \ref{Theorem2} hold. Then
\begin{equation}
\frac{|\frac{\partial \widetilde{x}}{\partial \gamma }(\varphi _{1},\gamma
_{0})|}{n(\varphi _{1},\gamma _{0})|\frac{\partial t_{P}}{\partial \gamma }
(\gamma _{0})|\cdot |\frac{\partial x}{\partial t}(1,\gamma _{0})|}
\to 0,   \label{4.0}
\end{equation}
as $\gamma _{0}\to \infty $ (note that $\gamma _{0}$ depends on
$k$, and as $k\to \infty $, $\gamma _{0}\to \infty $).
\end{lemma}

\begin{proof} For better readability we break the proof into a
sequence of items.\\
\textbf{Item 1:-} We show that
\begin{gather}
\frac{\partial \widetilde{x}}{\partial \gamma }(\varphi ,\gamma )=\frac{
xg(\gamma )}{xg(x)+2(G(\gamma )-G(x))}=\frac{xg(\gamma
)}{xg(x)+y^{2}},   \label{4.03} \\
\frac{\partial \widetilde{y}}{\partial \gamma }(\varphi ,\gamma )=\frac{
yg(\gamma )}{xg(x)+2(G(\gamma )-G(x))}=\frac{yg(\gamma
)}{xg(x)+y^{2}}.   \label{4.04}
\end{gather}
We first note that as we hold $\varphi $ constant, we hold $y/x$
constant, i.e.
\[
\frac{\sqrt{2(G(\gamma )-G(x))}}{x}
\]
is constant, which in turn implies that
\[
\frac{\partial }{\partial \gamma }\Big( \frac{G(\gamma )-G(x)}{x^{2}}\Big) =0.
\]
Hence,
\[
(g(\gamma )-g(x)\frac{\partial \widetilde{x}}{\partial \gamma })x^{2}-2x
\frac{\partial \widetilde{x}}{\partial \gamma }(G(\gamma )-G(x))=0,
\]
i.e.
\[
\frac{\partial \widetilde{x}}{\partial \gamma }[g(x)x^{2}+2x(G(\gamma
)-G(x))]=x^{2}g(\gamma ),
\]
and so (\ref{4.03}) holds. Also, since we hold $\varphi $ constant, 
$y/x$ is constant and we get
\[
\frac{\partial \widetilde{y}}{\partial \gamma }=\frac{y}{x}\frac{\partial
\widetilde{x}}{\partial \gamma },
\]
giving (\ref{4.04}).
\\
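The identity (\ref{4.03}) is easy to check symbolically in a model case;
the following sketch, in Python with sympy, does so for the illustrative
choice $g(x)=x^{3}$ (ours, not part of the proof), by differentiating
implicitly while the level $(G(\gamma )-G(x))/x^{2}$ is held constant.
\begin{verbatim}
# Symbolic check of (4.03) for the model case g(x) = x^3.
import sympy as sp

x, gam, c = sp.symbols('x gamma c', positive=True)
g = lambda t: t**3
G = lambda t: t**4 / 4

level = (G(gam) - G(x)) / x**2 - c    # held constant as gamma varies
dx_dgamma = sp.idiff(level, x, gam)   # implicit derivative dx/dgamma
target = x * g(gam) / (x * g(x) + 2 * (G(gam) - G(x)))
print(sp.simplify(dx_dgamma - target))  # prints 0
\end{verbatim}
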
\textbf{Item 2:-} There exists a constant $K_{0}=K_{0}(s,p_{0},p_{1})$
such that
\begin{equation}
|\frac{\partial \widetilde{x}}{\partial \gamma }|\leq K_{0}(s,p_{0},p_{1})
, \nonumber \label{4.1}
\end{equation}
for all $(\varphi ,\gamma )$.
We first claim that for all $x\in \mathbb{R}$,
\begin{equation}
xg(x)-2G(x)\geq 0. \label{4.05}
\end{equation}
Indeed, both for $x>0$ and for $x<0$,
\[
G(x)=\int_{0}^{x}g(t)dt\leq \int_{0}^{x}\frac{tg(x)}{x}dt=\frac{xg(x)}{2},
\]
since $\frac{g(x)}{x}$ is increasing. Hence the claim.
Next, we claim that for all $x$,
\begin{equation}
\frac{p_{1}|x|^{s+1}}{s+1}\geq G(x)\geq \frac{p_{0}|x|^{s+1}}{s+1}.
\label{4.06}
\end{equation}
We see from (\ref{3.01}) that for $x>0$,
\[
G(x)\geq \int_{0}^{x}p_{0}t^{s}dt=\frac{p_{0}x^{s+1}}{s+1}.
\]
Similarly, we get for $x<0$,
\[
G(x)\geq \frac{p_{0}|x|^{s+1}}{s+1}.
\]
The proof of the left half of the inequality in (\ref{4.06}) is similar.
From (\ref{4.03}) we have
\begin{equation}
\begin{aligned}
\Big|\frac{\partial \widetilde{x}}{\partial \gamma }(\varphi ,\gamma )\Big|
&= \Big|\frac{xg(\gamma )}{xg(x)+2(G(\gamma )-G(x))}\Big|   \\
&\leq \frac{|x|g(\gamma )}{2G(\gamma )}\text{, by (\ref{4.05}),}  \\
&\leq \max (\gamma ,\gamma ^{\ast })\frac{p_{1}\gamma ^{s}(s+1)}{
2p_{0}\gamma ^{s+1}}.
\end{aligned} \label{4.061}
\end{equation}
Next, we see from (\ref{4.06}) that there exists a constant $K$, depending
on $p_{0}$, $p_{1}$, and $s$ such that
\begin{equation}
\gamma ^{\ast }\leq K\gamma \quad\text{and}\quad \gamma \leq K\gamma ^{\ast }.
\label{4.07}
\end{equation}
Using (\ref{4.07}) in (\ref{4.061}) the proof of Item 2 is immediate.
\\
\textbf{Item 3:-} There exists a constant $K_{1}=K_{1}(s,p_{0},p_{1})>0$
such that
\begin{equation}
|\frac{\partial x}{\partial t}(1,\gamma _{0})|\geq K_{1}\gamma _{0}^{\frac{
s+1}{2}},  \label{4.4}
\end{equation}
for all $\gamma _{0}$.
Since $g(x)/x$ is increasing, $G(x)/x$
is increasing by \cite[Lemma 3]{cal}. So
\[
G(\frac{\gamma }{\beta })\leq \frac{G(\gamma )}{\beta }.
\]
Hence, assuming $x(1,\gamma _{0})\geq 0$, so that $x(1,\gamma _{0})\leq
\frac{\gamma _{0}}{\beta }$, we have
\begin{align*}
\frac{1}{2}(\frac{\partial x}{\partial t}(1,\gamma _{0}))^{2} &= G(\gamma
_{0})-G(x(1,\gamma _{0})) \\
&\geq G(\gamma _{0})-G(\frac{\gamma _{0}}{\beta }) \\
&\geq (1-\frac{1}{\beta })G(\gamma _{0}) \\
&\geq (1-\frac{1}{\beta })\frac{p_{0}\gamma _{0}^{s+1}}{s+1},
\end{align*}
by (\ref{4.06}). Taking square roots, we get (\ref{4.4}), proving Item 3.
\\
\textbf{Item 4:-} Let $t_{R}$ be the time taken to go from $(0,\alpha )$
to $(\gamma ,0)$. Also let $t_{L}$ be the time taken to go from
$(0,-\alpha)$ to $(-\gamma ^{\ast },0)$. We shall show that
\begin{equation}
\frac{dt_{R}}{d\gamma }=\int_{0}^{\frac{\pi }{2}}\frac{xg(\gamma
)(g(x)-xg'(x))}{(xg(x)+y^{2})^{3}}(x^{2}+y^{2})d\varphi .
\label{4.5}
\end{equation}
We note that since
\begin{gather*}
x' = y, \\
y' = -g(x),
\end{gather*}
we have
\[
\frac{\partial \widetilde{\varphi }}{\partial t}(t,\gamma )=-\frac{
xy'-yx'}{x^{2}+y^{2}}=\frac{xg(x)+y^{2}}{x^{2}+y^{2}}.
\]
Accordingly,
\[
t_{R}(\gamma )=\int_{0}^{\frac{\pi }{2}}\frac{x^{2}+y^{2}}{xg(x)+y^{2}}
d\varphi .
\]
Using Leibniz's rule, (\ref{4.03}) and (\ref{4.04}), we get
\begin{align*}
\frac{dt_{R}}{d\gamma } &= \int_{0}^{\frac{\pi }{2}}\frac{1}{
(xg(x)+y^{2})^{2}}\{(2x\frac{xg(\gamma )}{xg(x)+y^{2}}
+2y\frac{yg(\gamma)}{xg(x)+y^{2}})(xg(x)+y^{2}) \\
&\quad -(x^{2}+y^{2})[(xg'(x)+g(x))\frac{xg(\gamma )}{xg(x)+y^{2}}+2y
\frac{yg(\gamma )}{xg(x)+y^{2}}]\}d\varphi  \\
&= \int_{0}^{\frac{\pi }{2}}\frac{2x^{2}g(\gamma )+2y^{2}g(\gamma )-\frac{
x^{2}+y^{2}}{xg(x)+y^{2}}[(xg'(x)+g(x))xg(\gamma )+2y^{2}g(\gamma)]
}{(xg(x)+y^{2})^{2}}d\varphi  \\
&= \int_{0}^{\frac{\pi }{2}}\frac{2g(\gamma )(xg(x)+y^{2})
-(xg'(x)+g(x))xg(\gamma )-2y^{2}g(\gamma )}{(xg(x)+y^{2})^{3}}
(x^{2}+y^{2})d\varphi ,
\end{align*}
which gives (\ref{4.5}).
\\
\textbf{Item 5:-} We change variables from $\varphi $ to $u$ to
calculate (\ref{4.5}). We parametrise the curve
\[
\frac{y^{2}}{2}+G(x)=G(\gamma ),
\]
in the first quadrant, by setting
\begin{gather*}
y = \sqrt{2G(\gamma )}\sin u \\
x = G_{+}^{-1}(G(\gamma )\cos ^{2}u).
\end{gather*}
So, with $\gamma $ kept fixed,
\begin{align*}
\frac{dy}{du} &= \sqrt{2G(\gamma )}\cos u \\
g(x)\frac{dx}{du} &= -2G(\gamma )\cos u\sin u,
\end{align*}
i.e.
\[
\frac{dx}{du}=-\frac{2G(\gamma )\cos u\sin u}{g(x)}.
\]
Now,
\begin{equation}
(x^{2}+y^{2})\frac{d\varphi }{du}=y\frac{dx}{du}-x\frac{dy}{du}.
\nonumber %\label{4.6}
\end{equation}
Hence,
\[
(x^{2}+y^{2})\frac{d\varphi }{du}=-\frac{(2G(\gamma ))^{\frac{3}{2}}\cos
^{2}u\sin u}{g(x)}-x\sqrt{2G(\gamma )}\cos u.
\]
Therefore,
\begin{equation}
\frac{dt_{R}}{d\gamma }=\int_{0}^{\frac{\pi }{2}}\frac{xg(\gamma
)(xg'(x)-g(x))}{(xg(x)+y^{2})^{3}}\Big[ \frac{(2G(\gamma
))^{\frac{3}{2}}\cos ^{2}u\sin u}{g(x)}+x\sqrt{2G(\gamma )}\cos u\Big] du,
\label{4.7}
\end{equation}
noting that as $\varphi $ varies from $0$ to $\pi/2$, $u$ also
varies from $0$ to $\pi/2$.

In the next items we estimate the various terms in (\ref{4.7}).
\\
\textbf{Item 6:-} There exist constants $K_{3}>0$ and $K_{4}>0$ depending
on $p_{0}$, $p_{1}$, and $s$ (but not $u$, $\gamma $) such that
\begin{gather}
x \leq K_{3}\gamma (\cos u)^{\frac{2}{s+1}},  \label{4.8} \\
x \geq K_{4}\gamma (\cos u)^{\frac{2}{s+1}}.  \label{4.9}
\end{gather}
Indeed,
\[
\frac{p_{1}\gamma ^{s+1}}{s+1}\cos ^{2}u\geq G(\gamma )\cos ^{2}u=G(x)\geq
\frac{p_{0}x^{s+1}}{s+1}.
\]
The inequality given by the two outside terms above gives (\ref{4.8}). We
obtain (\ref{4.9}) similarly.
\\
\textbf{Item 7:-} There exists a constant $K_{5}>0$ depending on $p_{0}$,
$p_{1}$, and $s$ (but not $\gamma $) such that
\begin{equation}
xg(x)+y^{2}\leq K_{5}\gamma ^{s+1}.  \label{4.10}
\end{equation}
Indeed,
\begin{gather*}
xg(x) \leq p_{1}x^{s+1}\leq \frac{p_{1}}{p_{0}}(s+1)G(x)\leq
\frac{p_{1}}{p_{0}}(s+1)G(\gamma )\leq \frac{p_{1}^{2}}{p_{0}}\gamma ^{s+1}, \\
y^{2} = 2G(\gamma )\sin ^{2}u\leq 2p_{1}\frac{\gamma ^{s+1}}{s+1}\sin ^{2}u,
\end{gather*}
and (\ref{4.10}) follows.
\\
\textbf{Item 8:-} There exists a constant $K_{6}>0$ depending on $p_{0}$,
$p_{1}$, and $s$ (but not $\gamma $) such that
\begin{equation}
xg'(x)-g(x)\geq K_{6}x^{s}.  \label{4.11}
\end{equation}
We see from (\ref{3.02}) that $\frac{g(x)}{x^{1+h}}$ is increasing, which
implies $\log \frac{g(x)}{x^{1+h}}$ is increasing, which implies
$\frac{d}{dx}(\log \frac{g(x)}{x^{1+h}})\geq 0$, which implies $\frac{g'(x)
}{g(x)}-\frac{1+h}{x}\geq 0$, which implies $xg'(x)-g(x)\geq
hg(x)\geq hp_{0}x^{s}$, thereby proving (\ref{4.11}).
\\
\textbf{Item 9:-} We find a lower bound for
$\frac{(2G(\gamma))^{\frac{3}{2}}\cos ^{2}u\sin u}{g(x)}+x\sqrt{2G(\gamma )}\cos u$,
 using (\ref{4.06}), (\ref{4.8}) and (\ref{4.9}). Indeed,
\begin{align*}
&\frac{(2G(\gamma ))^{\frac{3}{2}}\cos ^{2}u\sin u}{g(x)}+x\sqrt{2G(\gamma
)}\cos u   \\
&\geq 2\sqrt{2}\frac{p_{0}^{\frac{3}{2}}\gamma
^{\frac{3(s+1)}{2}}}{(s+1)^{
\frac{3}{2}}p_{1}x^{s}}\cos ^{2}u\sin u +K_{4}\gamma (\cos u)^{\frac{2}{s+1}}
\sqrt{2\frac{p_{0}}{s+1}}\gamma ^{\frac{s+1}{2}}\cos u   \\
&\geq 2\sqrt{2}\frac{p_{0}^{\frac{3}{2}}\gamma
^{\frac{3(s+1)}{2}}}{(s+1)^{
\frac{3}{2}}p_{1}K_{3}^{s}\gamma ^{s}(\cos u)^{\frac{2s}{s+1}}}\cos
^{2}u\sin u+K_{4}\gamma (\cos u)^{\frac{2}{s+1}}\sqrt{2\frac{p_{0}}{s+1}}
\gamma ^{\frac{s+1}{2}}\cos u  \nonumber \\
&\geq K_{7}\gamma ^{\frac{s+3}{2}}[(\cos u)^{\frac{2}{s+1}}\sin u
+(\cos u)^{\frac{s+3}{s+1}}].
\end{align*} %\label{4.12}
\textbf{Item 10:-} Now we use our estimates to find a lower bound for (\ref{4.7}).
\begin{align*}
&\frac{dt_{R}}{d\gamma }\\
&= \int_{0}^{\frac{\pi }{2}}\frac{xg(\gamma
)(xg'(x)-g(x))}{(xg(x)+y^{2})^{3}}\Big[ \frac{(2G(\gamma
))^{\frac{
3}{2}}\cos ^{2}u\sin u}{g(x)}+x\sqrt{2G(\gamma )}\cos u\Big] du  \\
&\geq \int_{0}^{\frac{\pi }{2}}\frac{K_{4}\gamma (\cos u)^{\frac{2}{s+1}
}g(\gamma )(K_{6}x^{s})}{(K_{5}\gamma ^{s+1})^{3}}K_{7}\gamma
^{\frac{s+3}{2}}[(\cos u)^{\frac{2}{s+1}}\sin u+(\cos u)^{\frac{s+3}{s+1}}]du \\
&\geq \int_{0}^{\frac{\pi }{2}}\frac{K_{4}\gamma (\cos u)^{\frac{2}{s+1}
}p_{0}\gamma ^{s}K_{6}(K_{4}\gamma (\cos u)^{\frac{2}{s+1}})^{s}}{(K_{5}
\gamma ^{s+1})^{3}}\\
&\quad\times K_{7}\gamma ^{\frac{s+3}{2}}\big[(\cos u)^{\frac{2}{s+1}
}\sin u+(\cos u)^{\frac{s+3}{s+1}}\big]du   \\
&\geq K_{8}\gamma ^{-\frac{s+1}{2}}. %\label{4.13}
\end{align*}
We note in a similar way that
\[
\frac{dt_{L}}{d\gamma }\geq K_{9}\gamma ^{-\frac{s+1}{2}},
\]
and hence
\[
\frac{dt_{P}}{d\gamma }\geq K_{10}\gamma ^{-\frac{s+1}{2}}.
\]
Next, to complete the proof of the lemma we use the various estimates
obtained above, in (\ref{4.0}).
\[
\frac{|\frac{\partial \widetilde{x}}{\partial \gamma }(\varphi _{1},\gamma
_{0})|}{n(\varphi _{1},\gamma _{0})|\frac{\partial t_{P}}{\partial \gamma }
(\gamma _{0})|\cdot |\frac{\partial x}{\partial t}(1,\gamma _{0})|}\leq
\frac{K_{11}}{n(\varphi _{1},\gamma _{0})\gamma _{0}^{\frac{s+1}{2}}\gamma
_{0}^{-\frac{s+1}{2}}}=\frac{K_{11}}{n(\varphi _{1},\gamma _{0})}
\to 0,
\]
as $\gamma _{0}\to \infty $.
\end{proof}

Next, we find an upper bound for $\frac{|y(\eta ,\gamma _{0})|}{|y(1,\gamma
_{0})|}$, using (\ref{3.1}).

\begin{lemma}\label{Lemma6}
Suppose $g$ is continuous, super-linear and $\frac{g(x)}{x}$
is increasing on $(0,\infty )$ and decreasing on $(-\infty ,0)$. Suppose (\ref{3.1})
holds. Then there exists a $\beta _{0}<\beta $ such that
\begin{equation}
\frac{|y(\eta ,\gamma _{0})|}{|y(1,\gamma _{0})|}\leq \frac{\beta _{0}}{
\eta }.  \label{5.1}
\end{equation}
\end{lemma}

\begin{proof}
We have $x(1,\gamma _{0})=\frac{x(\eta ,\gamma _{0})}{\beta }$,
giving
\[
-\frac{\gamma _{0}^{\ast }}{\beta }\leq x(1,\gamma _{0})\leq \frac{\gamma
_{0}}{\beta },
\]
where $\gamma _{0}^{\ast }>0$ is defined by $G(-\gamma _{0}^{\ast
})=G(\gamma _{0})$.
Suppose $x(\eta ,\gamma _{0})\geq \frac{\gamma _{0}}{\beta }$.  Then
$x(\eta ,\gamma _{0})\geq x(1,\gamma _{0})$, giving $|y(\eta ,\gamma
_{0})|\leq |y(1,\gamma _{0})|$, and (\ref{5.1}) holds for any $\beta _{0}\in
[ \eta ,\beta )$.  Similarly, if $x(\eta ,\gamma _{0})\leq -\frac{
\gamma _{0}^{\ast }}{\beta }$, then (\ref{5.1}) holds.  Hence we assume that
\[
-\frac{\gamma _{0}^{\ast }}{\beta }\leq x(\eta ,\gamma _{0})\leq \frac{
\gamma _{0}}{\beta },
\]
giving
\[
-\frac{\gamma _{0}^{\ast }}{\beta ^{2}}\leq x(1,\gamma _{0})\leq \frac{
\gamma _{0}}{\beta ^{2}}.
\]
This implies if $x(1,\gamma _{0})\geq 0$, that
\[
|y(1,\gamma _{0})|\geq \sqrt{2(G(\gamma _{0})-G(\frac{\gamma _{0}}{\beta
^{2}
}))}.
\]
Similarly, if $x(1,\gamma _{0})\leq 0$, then
\[
|y(1,\gamma _{0})|\geq \sqrt{2(G(\gamma _{0})-G(-\frac{\gamma _{0}^{\ast
}}{
\beta ^{2}}))}.
\]
Since $|y(\eta ,\gamma _{0})|\leq \sqrt{2G(\gamma _{0})}$, (\ref{5.1})
holds if
\[
\frac{\beta _{0}^{2}}{\eta ^{2}}\geq \frac{G(\gamma _{0})}{G(\gamma
_{0})-G(
\frac{\gamma _{0}}{\beta ^{2}})},
\]
i.e.
\begin{equation}
(\beta _{0}^{2}-\eta ^{2})G(\gamma _{0})\geq \beta _{0}^{2}G(\frac{\gamma
_{0}}{\beta ^{2}}),  \label{5.2}
\end{equation}
and also
\begin{equation}
(\beta _{0}^{2}-\eta ^{2})G(\gamma _{0})\geq \beta _{0}^{2}G(-\frac{\gamma
_{0}^{\ast }}{\beta ^{2}}).  \label{5.3}
\end{equation}
We claim that for all $z\in \mathbb{R}$ and $\delta \geq 1$,
\begin{equation}
G(\delta z)\geq \delta ^{2}G(z).  \label{5.4}
\end{equation}
Suppose $s>0$. Since $\frac{g(x)}{x}$ is increasing on $(0,\infty )$ and
$\delta s\geq s$, we have $\frac{g(\delta s)}{\delta s}\geq \frac{g(s)}{s}$,
i.e. $g(\delta s)\geq \delta g(s)$.  For $z>0$, the substitution $t=\delta s$
gives
\[
G(\delta z)=\int_{0}^{\delta z}g(t)dt=\int_{0}^{z}g(\delta s)\delta \,ds\geq
\delta ^{2}\int_{0}^{z}g(s)ds=\delta ^{2}G(z),
\]
proving the claim for $z>0$.  The case $z<0$ is similar, using that
$\frac{g(x)}{x}$ is decreasing on $(-\infty ,0)$.
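Indeed, for $z\leq s<0$ we have $\delta s\leq s<0$, so $\frac{g(\delta
s)}{\delta s}\geq \frac{g(s)}{s}$, and multiplying by $\delta s<0$ reverses
the inequality, giving $g(\delta s)\leq \delta g(s)$.  Hence
\[
G(\delta z)=\int_{0}^{\delta z}g(t)dt=\delta \int_{0}^{z}g(\delta s)ds
=-\delta \int_{z}^{0}g(\delta s)ds\geq -\delta ^{2}\int_{z}^{0}g(s)ds
=\delta ^{2}G(z).
\]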

In (\ref{5.4}) put $z=\frac{\gamma _{0}}{\beta ^{2}}$, and $\delta =\beta
^{2}$, giving $G(\gamma _{0})\geq \beta ^{4}G(\frac{\gamma _{0}}{\beta
^{2}}
)$ and $G(\gamma _{0})\geq \beta ^{4}G(-\frac{\gamma _{0}^{\ast }}{\beta
^{2}})$.  Hence, (\ref{5.2}) and (\ref{5.3}) hold if
\begin{equation}
(1-\frac{\eta ^{2}}{\beta _{0}^{2}})\beta ^{4}\geq 1.  \label{5.5}
\end{equation}
Taking $\beta _{0}=(1-\varepsilon )\beta $, (\ref{5.5}) holds if
\begin{equation}
\beta ^{2}\geq \frac{(\frac{\eta }{1-\varepsilon })^{2}}{2}+\sqrt{\frac{(
\frac{\eta }{1-\varepsilon })^{4}}{4}+1},  \label{5.6}
\end{equation}
and (\ref{5.6}) holds for some $\varepsilon >0$ by (\ref{3.1}).
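In detail: with $\beta _{0}=(1-\varepsilon )\beta $ and $a=(\frac{\eta }{
1-\varepsilon })^{2}$, (\ref{5.5}) reads
\[
\Big(1-\frac{a}{\beta ^{2}}\Big)\beta ^{4}\geq 1,\quad \text{i.e.}\quad
\beta ^{4}-a\beta ^{2}-1\geq 0,
\]
a quadratic inequality in $\beta ^{2}$, which holds exactly when
$\beta ^{2}\geq \frac{a}{2}+\sqrt{\frac{a^{2}}{4}+1}$.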
\end{proof}

\begin{proof}[Proof of Theorem \ref{Theorem2}, continued]
We divide $NUM$ from (\ref{3.15}) and
$DENOM$ from (\ref{3.16})
by $\frac{\partial x}{\partial t}(1,\gamma _{0})n(\varphi _{1},\gamma
_{0})
\frac{\partial t_{P}}{\partial \gamma }(\gamma _{0})$ to define
\begin{gather*}
NEWNUM = \frac{NUM}{\frac{\partial x}{\partial t}(1,\gamma _{0})n(\varphi
_{1},\gamma _{0})\frac{\partial t_{P}}{\partial \gamma }(\gamma
_{0})}\quad \text{and} \\
NEWDENOM = \frac{DENOM}{\frac{\partial x}{\partial t}(1,\gamma
_{0})n(\varphi _{1},\gamma _{0})\frac{\partial t_{P}}{\partial \gamma }
(\gamma _{0})}.
\end{gather*}
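Since $NUM$ and $DENOM$ have been divided by the same quantity, which is
nonzero for $\gamma _{0}$ large,
\[
\frac{NUM}{DENOM}=\frac{NEWNUM}{NEWDENOM},
\]
and it suffices to examine the behaviour of $NEWNUM$ and $NEWDENOM$ as
$\gamma _{0}\to \infty $.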
Now, $NEWDENOM\to -1$ as $\gamma _{0}\to \infty $
since the first summand converges to zero by Lemma \ref{Lemma5} and the
second summand is
\[
-(1+\frac{\frac{\partial t_{Q}}{\partial \gamma }(\varphi _{1},\gamma
_{0})}{
n(\varphi _{1},\gamma _{0})\frac{\partial t_{P}}{\partial \gamma }(\gamma
_{0})}),
\]
which converges to $-1$ since
\[
\frac{\frac{\partial t_{Q}}{\partial \gamma}
(\varphi _{1},\gamma _{0})}{\frac{\partial t_{P}}{\partial \gamma }(\gamma
_{0})}\leq 1
\]
and $n(\varphi _{1},\gamma _{0})\to \infty $.
Now,
\begin{align*}
NEWNUM &= \frac{\frac{\partial \widetilde{x}}{\partial \gamma }(\widetilde{
\varphi }(\eta ,\gamma _{0}),\gamma _{0})}{\frac{\partial x}{\partial t}
(1,\gamma _{0})n(\varphi _{1},\gamma _{0})\frac{\partial t_{P}}{\partial
\gamma }(\gamma _{0})} \\
&\quad -\frac{\frac{\partial x}{\partial t}(\eta ,\gamma _{0})[n(\varphi _{\eta
},\gamma _{0})\frac{\partial t_{P}}{\partial \gamma }(\gamma _{0})+\frac{
\partial t_{Q}}{\partial \gamma }(\varphi _{\eta },\gamma _{0})]}{\frac{
\partial x}{\partial t}(1,\gamma _{0})n(\varphi _{1},\gamma _{0})\frac{
\partial t_{P}}{\partial \gamma }(\gamma _{0})},
\end{align*}
and we set
\begin{gather*}
NEWNUM1 = \frac{\frac{\partial \widetilde{x}}{\partial \gamma
}(\widetilde{
\varphi }(\eta ,\gamma _{0}),\gamma _{0})}{\frac{\partial x}{\partial t}
(1,\gamma _{0})n(\varphi _{1},\gamma _{0})\frac{\partial t_{P}}{\partial
\gamma }(\gamma _{0})}, \\
NEWNUM2 = \frac{\frac{\partial x}{\partial t}(\eta ,\gamma _{0})[n(\varphi
_{\eta },\gamma _{0})\frac{\partial t_{P}}{\partial \gamma }(\gamma _{0})+
\frac{\partial t_{Q}}{\partial \gamma }(\varphi _{\eta },\gamma _{0})]}{
\frac{\partial x}{\partial t}(1,\gamma _{0})n(\varphi _{1},\gamma
_{0})\frac{
\partial t_{P}}{\partial \gamma }(\gamma _{0})}.
\end{gather*}
Item 2 of Lemma \ref{Lemma5} shows that $|\frac{\partial \widetilde{x}}{
\partial \gamma }(\widetilde{\varphi }(\eta ,\gamma _{0}),\gamma _{0})|\leq
K_{0}$, and hence Lemma \ref{Lemma5} applies to show that $NEWNUM1$
converges to zero as $\gamma _{0}\to \infty $. We rewrite
$NEWNUM2$ as
\[
\frac{\frac{\partial x}{\partial t}(\eta ,\gamma _{0})}{\frac{\partial x}{
\partial t}(1,\gamma _{0})}(\frac{n(\varphi _{\eta },\gamma _{0})}{n(\varphi
_{1},\gamma _{0})}+\frac{\frac{\partial t_{Q}}{\partial \gamma }(\varphi
_{\eta },\gamma _{0})}{n(\varphi _{1},\gamma _{0})\ \frac{\partial t_{P}}{
\partial \gamma }(\gamma _{0})}).
\]
Hence, since $\frac{\partial x}{\partial t}=y$, we get from Lemma \ref{Lemma6} that
\[
|NEWNUM2|\leq \frac{\beta _{0}}{\eta }\Big(\frac{n(\varphi _{\eta },\gamma
_{0})
}{n(\varphi _{1},\gamma _{0})}+\big|\frac{\frac{\partial t_{Q}}{\partial \gamma}
(\varphi _{\eta },\gamma _{0})}{n(\varphi _{1},\gamma _{0}) \frac{\partial
t_{P}}{\partial \gamma }(\gamma _{0})}\big|\Big).
\]
Since  $\frac{n(\varphi _{\eta },\gamma _{0})}{n(\varphi_{1},\gamma _{0})}\to \eta $
 and
\[
\Big|\frac{\frac{\partial t_{Q}}{\partial \gamma }(\varphi _{\eta },
\gamma _{0})}{n(\varphi _{1},\gamma_{0})
\frac{\partial t_{P}}{\partial \gamma }(\gamma _{0})}\Big|\to 0
\]
as $\gamma _{0}\to \infty $, we get $\limsup_{\gamma _{0}\to \infty
}|NEWNUM2|\leq \frac{\beta _{0}}{\eta }\cdot \eta =\beta _{0}<\beta $, so
that $|\frac{NEWNUM2}{\beta }|<1$ for $\gamma _{0}$ sufficiently large.
Hence (\ref{3.2}) holds, and the proof of the theorem is complete.
\end{proof}

\begin{remark}\label{rem14} \rm
Given $\eta \in (0,1)$, and $\beta \in (0,1)$, satisfying
\begin{equation}\label{93}
\beta < \frac{\eta}{\sqrt{0.5+\sqrt{ 0.25+\eta^4} } },
\end{equation}
the solution of (\ref{3-03})--(\ref{3-06}) satisfying
$\varphi_{\eta}(x,x') \in (\frac{\pi}{2} +k\pi ,\frac{\pi}{2} +(k+1)\pi)$
exists and is unique if $k$ is large.  Note that we have replaced $1$ by $\eta$
in Definition \ref{def}, to give
\[
\varphi _{\eta}(x,y)=-\int_{0}^{\eta}\frac{x(t)y'(t)-y(t)x'(t)}{x^{2}(t)
+y^{2}(t)}dt\,.
\]
The inequality (\ref{93}) follows from (\ref{3.1}) by replacing $\eta$ and
$\beta$ by their inverses; the change of variable $\tau = \eta^{-1}t$ leads
to this, using the fact that Theorem \ref{Theorem2} also holds for $\eta > 1$,
as the computation below shows.
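In detail (a sketch, taking (\ref{3.1}) in the form $\beta ^{2}>\frac{\eta
^{2}}{2}+\sqrt{\frac{\eta ^{4}}{4}+1}$): setting $u(\tau )=x(\eta \tau )$,
we get $u''+\eta ^{2}g(u)=0$ and $u(0)=0$, and the condition $x(\eta )=\beta
x(1)$ becomes $u(1)=\beta u(\eta ^{-1})$, a three-point condition with
$(\eta ,\beta )$ replaced by $(\eta ^{-1},\beta ^{-1})$.  Applying the
displayed form of (\ref{3.1}) to this pair gives
\[
\frac{1}{\beta ^{2}}>\frac{1}{2\eta ^{2}}+\sqrt{\frac{1}{4\eta ^{4}}+1}
=\frac{1+\sqrt{1+4\eta ^{4}}}{2\eta ^{2}},
\]
equivalently $\beta ^{2}<\frac{2\eta ^{2}}{1+\sqrt{1+4\eta ^{4}}}
=\frac{\eta ^{2}}{0.5+\sqrt{0.25+\eta ^{4}}}$, which is (\ref{93}).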
\end{remark}

\begin{thebibliography}{00}
\bibitem{cal} B. Calvert and C. Gupta; {\em Multiple solutions for a super-linear
three-point boundary value problem}, Nonlinear Analysis {\bf 50} (2002), 115--128.

\bibitem{cap} A. Capietto and W. Dambrosio; {\em Multiplicity results for some
two-point superlinear asymmetric boundary value problem}, Nonlinear Analysis
{\bf 38}(7) (1999), 869--896.

\bibitem{cap2} A. Capietto, M. Henrard, J. Mawhin, and F. Zanolin;
{\em Continuation approach to superlinear two points boundary value problem},
Topol. Methods Nonlinear Analysis {\bf 3} (1994), 81--100.

\bibitem{cos} D. Costa, D. G. de Figueiredo, and P. Srikanth;
{\em The exact number of solutions for a class of ordinary differential equations
through Morse index computation}, J. Diff. Eq. {\bf 96} (1992), 185--199.

\bibitem{din} G. Dinca and L. Sanchez; {\em Multiple solutions of boundary value
problems: an elementary approach via the shooting method}, Nonlinear Differential
Equations and Applications {\bf 1} (1994), 163--178.

\bibitem{he} X. He and W. Ge; {\em Triple solutions for second-order three-point
boundary value problems}, J. Math. Anal. Appl. {\bf 268} (2002), 256--265.

\bibitem{hen1} M. Henrard; {\em Degr\'{e} topologique et existence d'une infinit\'{e} de
solutions d'un probl\`{e}me aux limites pour une \'{e}quation singuli\`{e}re}, Portugal.
Math. {\bf 52} (1995), 153--165.

\bibitem{Hthesis} M. Henrard; {\em Topological degree in boundary value
problems: Existence and multiplicity results for second order differential
equations}, Thesis, Universit\'{e} Catholique de Louvain, Louvain-la-Neuve,
Belgium,  123pp (1995).

\bibitem{hen2} M. Henrard; {\em Infinitely many solutions of weakly coupled
super-linear systems}, Adv. Differential Equations {\bf 2} (1997), 753--778.

\bibitem{inf} G. Infante and J. R. L. Webb; {\em Positive solutions of some
nonlocal boundary value problems}, Abstract and Applied Analysis {\bf 2003},
No. 18 (2003), 1047--1060.

\bibitem{kol} I. I. Kolodner; {\em Heavy rotating string - a nonlinear
eigenvalue problem}, Comm. Pure Appl. Math. {\bf 8} (1955), 395--408.

\bibitem{kos} N. Kosmatov; {\em Semipositone m-point boundary value problems},
Electron. J. Diff. Eqns. {\bf 2004}, No. 119 (2004), 1--7.

\bibitem{kwo} M. K. Kwong; {\em On the Kolodner-Coffman method for the
uniqueness problem of Emden-Fowler BVP}, Journal of Applied Mathematics and
Physics (ZAMP) {\bf 41} (1990), 79--103.

\bibitem{kur} J. Kurzweil; {\em Ordinary Differential Equations}, Elsevier,
Amsterdam, 1986.

\bibitem{ma} R. Ma; {\em Positive solutions of a nonlinear three-point boundary
value problem}, Electron. J. Diff. Eqns. {\bf 1999}, No. 34 (1999), 1--8.
\end{thebibliography}

\end{document}
