\documentclass[reqno]{amsart}
\usepackage{hyperref}


\AtBeginDocument{{\noindent\small
{\em Electronic Journal of Differential Equations},
Vol. 2006(2006), No. 98, pp. 1--13.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu  (login: ftp)}
\thanks{\copyright 2006 Texas State University - San Marcos.}
\vspace{9mm}}

\begin{document}
\title[\hfilneg EJDE-2006/98\hfil Positive solutions]
{Positive solutions of a three-point boundary-value problem for
differential equations with damping and actively bounded delayed
forcing term}

\author[G. L. Karakostas \hfil EJDE-2006/98\hfilneg]
{George L. Karakostas}  

\address{George L. Karakostas \newline
 Department of Mathematics, University of Ioannina,
 451 10 Ioannina, Greece}
\email{gkarako@uoi.gr}

\date{}
\thanks{Submitted July 17, 2006. Published August 25, 2006.}
\subjclass[2000]{34K10}
\keywords{Boundary value problems; delay
differential equations; \hfill\break\indent  positive solutions;
actively bounded functions; Krasnoselskii fixed point theorem}

\begin{abstract}
 We provide sufficient conditions for the existence of positive
 solutions of a three-point boundary value problem concerning a
 second order delay differential equation with damping and forcing
 term whose delayed part is an actively bounded function,
 a notion introduced in \cite{k1}. By writing the damping term
 as a difference of two factors one can extract more information on
 the solutions. (For instance, in an application, given in the last
 section, we can give the exact value of the norm of the solution).
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{definition}[theorem]{Definition}

\section {Introduction}
 This paper is motivated by the work of Henderson \cite{h3}
where the existence of two positive solutions of the differential equation
$x''+f(x)=0$ satisfying the conditions $x(0)=0$ and $x(\eta)=x(1)$ is investigated. \par To say exactly what we shall
do in this paper we need some notation. For any interval
$Y$ of the real line ${\bf{R}}$ we shall denote by $C(Y)$ the Banach space of all continuous functions $x:Y\to\bf
R$ furnished with the usual sup-norm $\|\cdot\|_Y$. If, in addition, the set $Y$ contains the origin, we shall
write $C_0(Y)$ for the set of all $\psi\in C(Y)$ with
$\psi(0)=0$. In this paper we shall work, mainly, on sets of the form
$$
C_{0}^{+}(Y):=\{x\in C_{0}(Y): x(t)\geq 0, \; t\in Y\}.
$$
 Consider the sets $I:=[0,1]$ and $J:=[-r,0]$ for a fixed
$r\geq 0$.

 Our intention is to provide sufficient
conditions for the existence of positive solutions of a three-point boundary  value problem concerning the second
order delay differential equation
\begin{gather}\label{e1}
x''(t)+p(t)x'(t)+Q(t,x(t))+f(t,x_t)=0, \quad t\in I:=[0,1], \\
\label{e2}
 x_0=\phi,\quad x(\eta)= x(1),
\end{gather}
where $\phi\in C_0^+(J)$, $0<\eta<1$ and the delayed part $f(t,x_t)$
of the forcing term is an actively bounded function, in a sense introduced
in \cite{k1}. Our technique is based on the fact that the coefficient
$p(t)$ of the damping term can be written as the difference of two (suitable)
functions:
$$
p(t)=p_1(t)-p_2(t).
$$
The advantage of such an approach is that we can vary the
functions $p_1, p_2$. Then the conditions imposed as  well as
the existence range of the solutions also vary appropriately. A
sight of what we  mean is seen in the last section, where an
application is presented. On the other hand such a decomposition
of the damping term affects the  Green function of the problem.
Thus, our first intention is to construct such a kernel of the
integral operator, which plays the most crucial role in our
discussion.

  As is noted elsewhere (see, e.g. \cite{d3,h2}),
boundary-value problems associated with delay differential
equations are generated from physics and control theory and other
topics of applied mathematics. In the literature one can find a
relatively great number of works dealing with the existence of
solutions of boundary value problems which are not necessarily
associated with ordinary differential equations. For instance, in
\cite{a1} one can find such problems for difference and integral
equations, in \cite{b2} for equations whose solutions depend
on the past and on the future, in \cite{g1} for equations with
deviating arguments, etc. Moreover a great deal can be met in the
literature for the case of delay differential equations. We refer,
for instance to \cite{a2, a3, d1, d2, e2, g2, h3, h4, h5, j1, k1,
k2, k3, k4, l1, w1, w2} and to the references therein.

 Most of the works mentioned above make use of the following important Fixed
Point Theorem of Krasnoselskii.

\begin{theorem}[\cite{k5}] \label{thm1.1}
Let $\mathcal{B}$ be a Banach space and let $\mathcal{K}$ be a
cone in $\mathcal{B}$. Assume that $\Omega_1$ and $\Omega_2 $ are
open subsets of $\mathcal{B}$, with $0\in\Omega_1\subset
\overline{\Omega}_1\subset \Omega_2$, and let
$$
A: {\mathcal{K}}\cap ({\overline{\Omega}}_2\setminus\Omega_1)\to{\mathcal{K}}
$$
be a completely continuous operator such that either
$$
\|Au\|\leq \|u\|,\quad u\in {\mathcal{K}}\cap\partial\Omega_1
\quad \text{and}\quad\|Au\|\geq \|u\|, \quad u\in {\mathcal{K}}
\cap\partial \Omega_2 ,
$$
or
$$\|Au\|\geq\|u\|,\quad u\in {\mathcal{K}}\cap\partial\Omega_1
\quad\text{and}\quad\|Au\|\leq\|u\|, \quad u\in {\mathcal{K}}
\cap\partial \Omega_2 .
$$
Then $A$ has a fixed point in
${\mathcal{K}}\cap({\overline{\Omega}}_2\setminus\Omega_1 )$.
\end{theorem}

 We recall that an operator $A: X\to Y$ is  called
completely continuous if it is continuous and it maps bounded sets
into precompact sets.  We notice that when Theorem \ref{thm1.1} is applied
to boundary-value problems for functional differential equations,
usually the most crucial point is to provide suitable conditions
on the forcing delayed term which guarantee the fact that the
corresponding integral operator satisfies the two alternatives of
Krasnoselskii's fixed point theorem. As in \cite{k1}, in this
article, in order to cover the autonomous and nonautonomous cases,
the continuous and discrete delay, as well as the atomic and the
nonatomic response, we assume that the function $f$ is a so called
{\it actively bounded} function. To be more precise we shall
repeat its definition here.

\begin{definition}[\cite{k1}] \label{def2.1} \rm
We call a function $f(\cdot,\cdot):I\times
C_0^+(J)\to[0,+\infty)$ \emph{actively bounded}, if for each
$t\in I$ there exist a nonempty closed set $\Theta_t\subseteq J$
and two real nonnegative functions $L_0(t;\cdot,\cdot)$ and
$\omega(t;\cdot,\cdot)$, such that
$$
\omega(t;m,M)\leq f(t,\psi)\leq L_0(t;m,M),
$$
for all $t\in I$ and $\psi\in P(t;m,M)$, where
$$
P(t;m,M):=\{\psi\in C_0^+(J):  m\leq\inf_{s\in
\Theta_t}\psi(s),\;\|\psi\|_J\leq M\}.
$$
\end{definition}

Let $\Theta_t(f)$ be the smallest set of the form $\Theta_t$.
In \cite{k1} it was shown that the class of the actively bounded functions
is closed under summation
and multiplication. Also, several examples of such functions were given.

\section{Formulation of the BVP}

The basic theory of delay differential equations is exhibited in several
places of the literature. Especially we
refer to the classical books \cite{d2, h1}.  For any continuous
function $y$ defined on the interval
$[-r,1]$ and any $t\in [0,1]=:I$, the symbol $y_{t}$
(which appeared also in  \eqref{e2}) is used to denote the element of
$C(J)$ defined by
$$y_{t}(s)=y(t+s), \quad s\in J.$$

Our purpose is to establish sufficient conditions
for the existence of positive solutions of the boundary value
problem  \eqref{e1}-\eqref{e2}.
 Here we want to make clear what makes the difference between the ordinary
and the delay case and in particular what is going to be proved for
the delay boundary value problem.

 (We find it convenient to repeat some comments made, also, in \cite{k1}.)
It is well known that in the ordinary case, namely, when
$r=0$, (thus  \eqref{e1} is an ordinary differential equation), we look
for conditions which guarantee the truth of the following fact:
{\it There is a solution $x$ of the (ordinary differential equation)
  \eqref{e1} with $x(0)=0$ and satisfying condition  \eqref{e2}.}
It follows that uniqueness of such a solution means that
there is exactly one function with these properties.

 But in the (nontrivial) delay case the
problem is quite different. Indeed, here we are invited to give our
 response to the following challenge:
{\it Determine a class $S$ of initial functions with the property that
 for each $\phi\in S$ there is a solution $x$ of  \eqref{e1} satisfying
condition  \eqref{e2}.} (Notice that some authors choose to extend
the setting of the ordinary case by simply assuming that $\phi(s)=0$,
for all $s\in J$, see, e.g. \cite{b1}.)
Therefore uniqueness of solutions of the BVP  \eqref{e1}-\eqref{e2}
presupposes that there is only one solution with initial
value the fixed initial function $\phi$. Any new initial function from
the class $S$ implies new solution of the boundary value problem
\eqref{e1}-\eqref{e2}. As we shall see later, in this paper the set $S$
will be a closed ball in $C_0^+(J)$.


We shall reformulate the problem  \eqref{e1}-\eqref{e2} by transforming
it into a fixed point problem. Then the existence
of a solution of the latter is guaranteed by Theorem \ref{thm1.1}.

To proceed, fix a $\phi\in C_0^+(J)$. For each function
$x\in C_{0}(I)$ we shall denote by
$T(\cdot,x;\phi)$ the function defined on $[-r,1]$ by
 \[
  T(s,x;\phi) :=\begin{cases}
 \phi(s),&   s \in J,\\
 x(s), &   s \in I,
\end{cases}
\]
It is easy to see that
\begin{equation}\label{e3}
\|T_t(\cdot,x_1;\phi)-T_t(\cdot,x_2;\phi)\|_J\leq \|x_1-x_2\|_I,
\end{equation}
for all $t\in I$ and $x_1, x_2 \in C_0(I)$.
(Recall that for each $t\in I$ the symbol
$T_t(\cdot,x;\phi)$ denotes the element of
$C(J)$ defined by
$T_{t}(s,x;\phi):=T(t+s,x;\phi)$, $s \in J.$) Thus the
function
$$
x\to T_t(\cdot,x;\phi):C_0(I)\to C(J)
$$
is continuous (uniformly with respect to $t$).

 By a solution of the boundary-value problem  \eqref{e1}-\eqref{e2} we mean
a function $x\in C_{0}(I)$ satisfying  \eqref{e2} and its second
derivative $x''(t)$ exists for all $t\in I$ satisfying the
relation
\begin{equation}\label{e4}
x''(t)+p(t)x'(t)+Q(t,x(t))+f(t,T_{t}(\cdot,x;\phi))=0.
\end{equation}

Our first basic condition on the problem reads as follows:
\begin{itemize}
\item[(H)]  The functions $p:I\to {\bf R}$ and
$Q:I\times[0,+\infty)\to {\bf R}$ are continuous and such that
$p$ can be written in the form
$$
p=p_1-p_2,
$$
where $p_1$ is continuous, $p_2$ is positive
and differentiable and, moreover,
they satisfy the inequality
$$
Q(t,\xi)+(p'_2(t)+p_1(t)p_2(t))\xi\geq 0,$$
for all $t\in I$ and $\xi\geq 0$.
\end{itemize}

To simplify our presentation we set
\begin{gather*}
V(u,s,t):=e^{\int_u^sp_1(\theta)d\theta+\int_u^tp_2(\theta)d\theta},\\
Y(t):=\int_0^te^{\int_\theta^1(p_1(u)+p_2(u))du}d\theta,\\
\upsilon_i(s):=e^{-\int_{s}^1p_i(u)du}, \quad i=1,2.
\end{gather*}
Specially, we shall denote by $\upsilon$ the value
$\upsilon_2(\eta)$. Clearly it holds
$$
\int_0^{\theta}V(u,s,t)du=\upsilon_1(s)\upsilon_2(t)Y(\theta),
$$
 for all $\theta, s, t\in I$.
\smallskip

\noindent{\bf Remark.}
We observe that for all $s\geq\eta$ it holds
\begin{equation}\label{e5}
Y(s)-\upsilon
Y(\eta)=\int_{\eta}^se^{\int_v^1(p_1(u)+p_2(u))du}dv+(1-\upsilon)Y(\eta)>0.
\end{equation}

  To proceed, we set $y(t): =x'(t)$ and write equation \eqref{e4}
in the form
$$
y'(t)+p_1(t)y(t)-p_2(t)x'(t)+Q(t,x(t))+f(t,T_{t}(\cdot,x ;\phi))=0.
$$
Integrate from $t$ $(\geq 0)$ to 1 and get
\begin{equation}
\begin{aligned}
y(t)&=y(1)e^{\int_t^1p_1(s)ds}\\
&+\int_t^1[-p_2(u)x'(u)+Q(u,x(u))+f(u,T_{u}(\cdot,x;\phi))]e^{\int_t^up_1(s)ds}du,
\end{aligned}\nonumber
\end{equation}
 which leads to
\begin{equation}\nonumber
x'(t)-p_2(t)x(t)=[x'(1)-p_2(1)x(1)]e^{\int_t^1p_1(s)ds}
+\int_t^1z(u)e^{\int_t^up_1(s)ds}du,
\end{equation}
where, for simplicity, we have put
$$
z(u): =f(u,T_{u}(\cdot,x;\phi))+Q(u,x(u))+[p_1(u)p_2(u)+p_2'(u)]x(u),\quad u\in I.
$$
Thus the solution $x$ satisfies
\begin{equation}\label{e6}
x(t)=[x'(1)-p_2(1)x(1)]\upsilon_2(t) Y(t)+\int_0^t\int_u^1V(u,s,t)z(s)\,ds\,du,
\quad t\in I.
\end{equation}

 In \eqref{e6} we set $t=1$ and find
$$
x'(1)=\frac{1}{Y(1)}\Big[x(1)[1+p_2(1)Y(1)]-\int_0^1\int_u^1V(u,s,1)z(s)ds
du\Big].
$$
Substitute this value in \eqref{e6} and obtain
\begin{equation}\label{e7}
\begin{aligned}
x(t)&=\frac{\upsilon_2(t)Y(t)}{Y(1)}\Big[x(1)-\int_0^1\int_u^1V(u,s,1)z(s)\,ds\,du\Big]\\
&\quad +\int_0^t\int_u^1V(u,s,t)z(s)\,ds\,du,\quad
t\in I.
\end{aligned}
\end{equation}
Now take into account that $x(\eta)=x(1)$. From \eqref{e7} it follows that
$$
x(1)=\gamma\Big[Y(1)\int_0^{\eta}\int_u^1V(u,s,\eta)z(s)ds\,du
-\upsilon Y(\eta)\int_0^{1}\int_u^1V(u,s,1)z(s)ds\,du\Big],
$$
where
$$
\gamma:=\Big(Y(1)-\upsilon Y(\eta)\Big)^{-1}.
$$
Because of \eqref{e5} the constant $\gamma$ is
positive. Substituting this value to \eqref{e7}, after some manipulation,
we derive
\begin{equation}\label{e8}
\begin{aligned}
x(t)&=\gamma\upsilon_2(t)Y(t)\Big[\int_0^{\eta}\int_u^1V(u,s,\eta)z(s)\,ds\,du
-\int_0^{1}\int_u^1V(u,s,1)z(s)\,ds\,du\Big]\\
&\quad +\int_0^{t}\int_u^1V(u,s,t)z(s)\,ds\,du.\end{aligned}
\end{equation}

\begin{lemma} \label{lem2.1}
A function $x$ is a solution of the boundary-value problem \eqref{e1}-\eqref{e2} if
and only if it satisfies the operator equation
\begin{equation}\label{e9}
x=A_{\phi}x,
\end{equation}
 where $A_{\phi}$ is the operator
\begin{equation}\label{e10}
(A_{\phi}x)(t):=\int_0^1G(t,s)F(s,T_s(\cdot,x;\phi))ds,\quad x\in C_0^+(I).
\end{equation}
Here we have set 
$$F(s,T_{s}(\cdot,x;\phi)):=f(s,T_{s}(\cdot,x;\phi))+Q(s,x(s))+[p_1(s)p_2(s)+p_2'(s)]x(s),\quad s\in I
$$
and the kernel $G(t,s)$ is defined by
\begin{equation}
\begin{aligned}
G(t,s):&=\gamma\upsilon_1(s)\upsilon_2(t)\Big[\upsilon Y(s\land\eta)Y(t)-Y(t)Y(s)\nonumber\\
& +Y(1)Y(s\land t)-\upsilon Y(s\land t)Y(\eta)\Big],\end{aligned}
\end{equation}
 where, as usually, $\alpha\land \beta:=\min\{\alpha,\beta\}$.
\end{lemma}

\begin{proof}
 Assume that $x$ is a solution. Then it satisfies  \eqref{e8} and, so,
we have
\begin{equation}\label{e11}
x(t)=\int_0^1\int_u^1U(u,s,t)z(s)\,ds\,du,
\end{equation}
where
$$
U(u,s,t):=\gamma\upsilon_2(t)Y(t)\Big[V(u,s,\eta)\chi_{[0,\eta]}(u)-V(u,s,1)
\Big]+V(u,s,t)\chi_{[0,t]}(u),
$$
where $\chi_{[0,t]}(\cdot)$ stands for the characteristic function of the
interval $[0,t]$. We apply Fubini's Theorem in the right side of \eqref{e11}
and get
\begin{equation}
x(t)=\int_0^1G(t,s)z(s)ds,\nonumber
\end{equation}
where
\begin{equation}
G(t,s):=\int_0^sU(u,s,t)du.\nonumber
\end{equation}
The converse is shown by reversing the previous steps. The proof is complete.
\end{proof}

Next we simplify the form of the kernel $G$ by examining the following cases:

\noindent\textbf{Case 1.1:} $s\leq t\leq \eta$. Then we have
\begin{equation}
\begin{aligned}
G(t,s)&=\gamma\upsilon_1(s)\upsilon_2(t)\Big[\upsilon Y(s)Y(t)-Y(t)Y(s)+Y(1)Y(s)-\upsilon Y(s)Y(\eta)\Big]\\
&=\gamma\upsilon_1(s)\upsilon_2(t)Y(s)\Big[[Y(1)-Y(t)]+\upsilon [Y(t)-Y(\eta)]\Big]\\
&=\gamma\upsilon_1(s)\upsilon_2(t)Y(s)\Big[\int_t^1V(u,1,1)du-\upsilon \int_t^{\eta}V(u,1,1)du\Big]\\
&=\gamma\upsilon_1(s)\upsilon_2(t)Y(s)\Big[(1-\upsilon)\int_t^{\eta}V(u,1,1)du+\int_{\eta}^1V(u,1,1)du\Big].\nonumber
\end{aligned}
\end{equation}

\noindent\textbf{Case 1.2:} $ t<s\leq \eta$. Then we have
\begin{equation}\begin{aligned}
G(t,s)
&=\gamma
\upsilon_1(s)\upsilon_2(t)\Big[\upsilon Y(s)Y(t)-Y(t)Y(s)+Y(1)Y(t)-\upsilon Y(t)Y(\eta)\Big]\\
&= \gamma\upsilon_1(s)\upsilon_2(t)Y(t)\Big[[Y(1)-Y(s)]+\upsilon [Y(s)-Y(\eta)]\Big]\\
&=\gamma\upsilon_1(s)\upsilon_2(t)Y(t)\Big[\int_s^1V(u,1,1)du-\upsilon\int_s^{\eta}V(u,1,1)du\Big]\\
&=\gamma\upsilon_1(s)\upsilon_2(t)Y(t)\Big[(1-\upsilon)\int_s^{\eta}V(u,1,1)du+\int_{\eta}^1V(u,1,1)du\Big].\nonumber
\end{aligned}
\end{equation}

\noindent\textbf{Case 1.3:} $ t\leq \eta<s$. Then we have
\begin{equation}\begin{aligned}
G(t,s)&=\gamma
\upsilon_1(s)\upsilon_2(t)\Big[\upsilon Y(\eta)Y(t)-Y(t)Y(s)+Y(1)Y(t)-\upsilon Y(t)Y(\eta)\Big]\\
&=\gamma \upsilon_1(s)\upsilon_2(t)Y(t)\Big[Y(1)-Y(s)\Big]\\
&=\gamma \upsilon_1(s)\upsilon_2(t)Y(t)\Big[\int_s^1V(u,1,1)du\Big].\nonumber
\end{aligned}
\end{equation}

\noindent\textbf{Case 2.1:} $ s\leq \eta<t$. Then we have
\begin{equation}\begin{aligned}
G(t,s)&=\gamma
\upsilon_1(s)\upsilon_2(t)\Big[\upsilon Y(s)Y(t)-Y(t)Y(s)+Y(1)Y(s)-\upsilon Y(s)Y(\eta)\Big]\\
&=\gamma \upsilon_1(s)\upsilon_2(t)Y(s)\Big[[Y(1)-Y(t)]+\upsilon [Y(t)-Y(\eta)]\Big]\\
&=\gamma \upsilon_1(s)\upsilon_2(t)Y(s)\Big[\int_t^1V(u,1,1)du+\upsilon \int_{\eta}^tV(u,1,1)du\Big].\nonumber
\end{aligned}
\end{equation}

\noindent\textbf{Case 2.2:} $\eta<s\leq t$. Then we have
\begin{equation}\begin{aligned}
G(t,s)&=\gamma
\upsilon_1(s)\upsilon_2(t)\Big[\upsilon Y(\eta)Y(t)-Y(t)Y(s)+Y(1)Y(s)-\upsilon Y(s)Y(\eta)\Big]\\
&=\gamma \upsilon_1(s)\upsilon_2(t)\Big[Y(s)[Y(1)-Y(t)]+\upsilon Y(\eta)[Y(t)-Y(s)]\Big]\\
&=\gamma \upsilon_1(s)\upsilon_2(t)\Big[Y(s)\int_t^1V(u,1,1)du+\upsilon Y(\eta)\int_{s}^tV(u,1,1)du\Big].\nonumber
\end{aligned}
\end{equation}

\noindent\textbf{Case 2.3:} $\eta<t<s$. Then we have
\begin{equation}
\begin{aligned}
G(t,s)&=\gamma
\upsilon_1(s)\upsilon_2(t)\Big[\upsilon Y(\eta)Y(t)-Y(t)Y(s)+Y(1)Y(t)-\upsilon Y(t)Y(\eta)\Big]\\
&=\gamma \upsilon_1(s)\upsilon_2(t)Y(t)\Big[Y(1)-Y(s)\Big]\\
&=\gamma \upsilon_1(s)\upsilon_2(t)Y(t)\int_s^1V(u,1,1)du.\nonumber
\end{aligned}
\end{equation}

\section {Main Result}

Now we are ready to present our main result of this article.

\begin {theorem}  \label{thm3.1}
Suppose that assumption (H) is satisfied and $f(t,\phi)$ is an actively
bounded continuous function with $\Theta_{t}(f), t\in I$ being the
set-valued function defined in Definition \ref{def2.1}. Assume, also, that the functions
$L_0(\cdot;m,M)$ and
$\omega(\cdot;m,M)$ are measurable for all $m<M$. Finally, assume that
there is a $\delta\in[0,1]$ and two (distinct) real numbers
$\rho_1,\rho_2$ such that
\begin{gather}\label{e13}
\frac{1}{\rho_1}\int_0^1G(s,s)L(s,\frac{\mu}{\Lambda}\rho_1,\rho_1)ds
\leq\frac{1}{\Lambda},\\
\label{e14}
\frac{1}{\rho_2}\sup_{t\in
I}\int_{S}G(t,s)\omega(s,\frac{\mu}{\Lambda}\rho_2, \rho_1\vee\rho_2)ds\geq 1,
\end{gather}
where
\begin{gather}
L(t,m,M):=\sup_{0\leq \xi\leq M}\Big(Q(t,\xi)+[p'_2(t)+p_1(t)p_2(t)]\xi\Big)+
L_0(t,m,M).\notag\\
S:=\{s\in[0,1]: s+\theta\in [\delta,1], \;\theta\in\Theta_s(f)\},\notag\\
\label{e15}
\Lambda:=\max\{e^{\int_0^{\eta}p_2(v)dv},e^{\int_{\eta}^1p_2(v)dv}\}. \\
\label{e16}
\mu:=\min\Big\{\frac{Y(1)-Y(\eta)}
{Y(1)-\upsilon Y(\eta)},\quad
\frac{Y(\delta)}{Y(1)}\upsilon_2(0),\quad\frac{Y(\eta)}{Y(1)}{\upsilon}\Big\}.
\end{gather}
Then, for any $\phi\in C_0^+(J)$ with $\|\phi\|_J\leq \rho_1$,
there is a positive solution of the  boundary-value
problem \eqref{e1}-\eqref{e2} having norm in the interval with ends
the numbers $\rho_1,\rho_2$.
\end{theorem}

\begin{proof} First we shall obtain some properties of the kernel
$G$ of the operator $A_{\phi}$.  In  Case 1.1 we have
\begin{equation}\label{e17}
\begin{aligned}
\frac{G(t,s)}{G(s,s)}
&=\frac{\upsilon_2(t)\Big[(1-\upsilon)\int_t^{\eta}V(u,1,1)du+\int_{\eta}^1V(u,1,1)du\Big]}
{\upsilon_2(s)\Big[(1-\upsilon)\int_s^{\eta}V(u,1,1)du+\int_{\eta}^1V(u,1,1)du\Big]}\\
&\leq e^{\int_s^tp_2(v)dv}\leq e^{\int_0^{\eta}p_2(v)dv}\leq \Lambda,\end{aligned}
\end{equation}
 where $\Lambda$ is given by \eqref{e15}.
Also in this case we get
\begin{equation}\label{e18}
\begin{aligned}
\frac{G(t,s)}{G(s,s)}&\geq
\frac{\int_{\eta}^1V(u,1,1)du}
{(1-\upsilon)\int_0^{\eta}V(u,1,1)du+\int_{\eta}^1V(u,1,1)du}\\
& =\frac{Y(1)-Y(\eta)}{Y(1)-\upsilon Y(\eta)}\geq\mu.
\end{aligned}
\end{equation}
 In Cases 1.2, 1.3 and 2.3 we get
\begin{equation}\label{e19}
\frac{G(t,s)}{G(s,s)}=\frac{\upsilon_2(t)Y(t)}{\upsilon_2(s)Y(s)}\leq e^{-\int_t^sp_2(v)dv}\leq 1\leq \Lambda.
\end{equation}
If, in addition, we have $\delta\leq t$, then it follows that
\begin{equation}\label{e20}
\frac{G(t,s)}{G(s,s)}=\frac{\upsilon_2(t)Y(t)}{\upsilon_2(s)Y(s)}
=e^{-\int_t^sp_2(v)dv}\frac{Y(t)}{Y(s)}\geq e^{-\int_t^sp_2(v)dv}\frac{Y(\delta)}{Y(1)}\geq \mu.
\end{equation}
 In Case 2.1 it holds
\begin{equation}\label{e21}\begin{aligned}
\frac{G(t,s)}{G(s,s)}&=\frac{\upsilon_2(t)\Big[\int_t^1V(u,1,1)du+\upsilon
\int_{\eta}^tV(u,1,1)du\Big]}{\upsilon_2(s)\Big[\int_s^1V(u,1,1)du+\upsilon
\int_{\eta}^sV(u,1,1)du\Big]}\\
& \leq {\upsilon}^{-1}e^{\int_s^tp_2(v)dv}\leq e^{\int_0^{\eta}p_2(v)dv}\leq \Lambda.
\end{aligned}
\end{equation}
Also, we get
\begin{equation}\label{e22}
\frac{G(t,s)}{G(s,s)}\geq {\upsilon}e^{\int_s^tp_2(v)dv}\geq {\upsilon}\geq\mu.
\end{equation}
 Finally, in  Case 2.2 we have
\begin{equation}\label{e23}
\begin{aligned}
 \frac{G(t,s)}{G(s,s)}&=\frac{\upsilon_2(t)\Big[Y(s)\int_t^1V(u,1,1)du+\upsilon
Y(\eta)\int_{s}^tV(u,1,1)du\Big]}{\upsilon_2(s)\Big[Y(s)\int_s^1V(u,1,1)du\Big]}\\
& \leq e^{\int_s^tp_2(v)dv}\frac{Y(s)\int_t^1V(u,1,1)du+\upsilon
Y(s)\int_{s}^tV(u,1,1)du}{Y(s)\int_s^1V(u,1,1)du}\\
& \leq e^{\int_{\eta}^1p_2(v)dv}\leq\Lambda,
\end{aligned}
\end{equation}
 and moreover
\begin{equation}\label{e24}\begin{aligned}
 \frac{G(t,s)}{G(s,s)}&=\frac{\upsilon_2(t)\Big[Y(s)\int_t^1V(u,1,1)du+\upsilon
Y(\eta)\int_{s}^tV(u,1,1)du\Big]}{\upsilon_2(s)\Big[Y(s)\int_s^1V(u,1,1)du\Big]}\\
& \geq e^{\int_s^tp_2(v)dv}\frac{\upsilon
Y(\eta)\int_{s}^1V(u,1,1)du}{Y(s)\int_s^1V(u,1,1)du}\\
& \geq {\upsilon}\frac{Y(\eta)}{Y(1)}\geq\mu.
\end{aligned}
\end{equation}
 From \eqref{e17}, \eqref{e19}, \eqref{e21} and \eqref{e23} we see that
for all $s, t\in I$,
\begin{equation}\label{e25}
G(t,s)\leq \Lambda G(s,s),
\end{equation}
where (recall that) $\Lambda$ is the constant defined in \eqref{e15}. Also
from \eqref{e18}, \eqref{e20}, \eqref{e22} and \eqref{e24} we see that
for all $t\in [\delta,1]$ and $s\in [0,1]$  it holds
\begin{equation}\label{e26}
G(t,s)\geq \mu G(s,s),
\end{equation}
where $\mu$ is defined in \eqref{e16}.

 Now define the set
$${\mathcal{K}}:=\{x\in C^+_0(I):\quad x(t)\geq\frac{\mu}{\Lambda}\|x\|, \quad t\in [\delta,1]\}$$ and observe
that it is a cone in the space $C_0(I)$. \par
 Consider an initial function $\phi\in C_0^+(J)$
with $\|\phi\|_J\leq\rho_1$, where $\rho_1$ satisfies \eqref{e13}
and \eqref{e14}.

 Let $A_{\phi}$ be the corresponding operator
defined by \eqref{e10}. Because of Lemma \ref{lem2.1} it is enough to show
that the operator $A_{\phi}$ has a fixed point.  To this end
we let $x\in\mathcal{K}$. Then we have $(A_{\phi}x)(0)=0$ and from
\eqref{e25} we get
\begin{equation}\label{e27}\begin{aligned}
\|A_{\phi}x\|_I
&=\sup_{t\in I}\int_0^1G(t,s)F(s,T_s(\cdot,x;\phi))ds\\
&\leq\Lambda\int_0^1G(s,s)F(s,T_s(\cdot,x;\phi))ds.\end{aligned}
\end{equation}
 From  (H) and the definition of $f$ we have
$F(s,T_s(\cdot,x;\phi))\geq 0$, for all
$s\in I$ and therefore $(A_{\phi}x)(t)\geq 0$ for all $t\in I$.
Let $t\in [\delta, 1];$   from \eqref{e26} and \eqref{e27} we get
\begin{equation}\label{e28}\begin{aligned}
(A_{\phi}x)(t)&=\int_0^1G(t,s)F(s,T_s(\cdot,x;\phi))ds\\
&\geq
\mu\int_0^1G(s,s)F(s,T_s(\cdot,x;\phi))ds\geq\frac{\mu}{\Lambda}\|A_{\phi}x\|_I.\end{aligned}
\end{equation}
Relation \eqref{e28}  guarantees that the operator $A_{\phi}$ maps
the cone $\mathcal{K}$ into itself. Furthermore from \eqref{e3}
and the first argument in Definition \ref{def2.1} we conclude that the
function $y\to F(\cdot,T_{\cdot}(\cdot,y;\phi))$ is continuous and
it maps bounded sets into bounded sets; thus the operator
$A_{\phi}$ is completely continuous.

 Next take any $x\in \mathcal{K}$. By definition, for any $s\in S$ we have
$s+\theta\in [\delta,1]\subseteq I$, for all $\theta\in\Theta_s(f)$. Thus it holds
\begin{equation}\label{e29}
T_s(\theta,x;\phi)=x(s+\theta)\geq\frac{\mu}{\Lambda}\|x\|_I.
\end{equation}
 Let $x\in \mathcal{K}$ with
$\|x\|_I=\rho_1$. Taking this into account, together with the choice of $\|\phi\|_J$, we have
$\|T_s(\cdot,x;\phi)\|_J\leq\rho_1$. Thus, because of \eqref{e25},
Definition \ref{def2.1} and \eqref{e13} for all
$t\in I$ we have
\begin{equation}\label{e30}
\begin{aligned}
\|A_{\phi}x\|_I&=\sup_{t\in I}\int_0^1G(t,s)F(s,T_s(\cdot,x;\phi))ds\\
&\leq\Lambda\int_0^1G(s,s)F(s,T_s(\cdot,x;\phi))ds\\
&\leq \Lambda\int_0^1G(s,s)L(s,\frac{\mu}{\Lambda}\rho_1,\rho_1)ds\leq \rho_1=\|x\|_I.
\end{aligned}
\end{equation}
 Also, let  $x\in \mathcal{K}$, with $\|x\|_I=\rho_2$. Then we derive
$$
\|T_s(\cdot,x;\phi)\|_J\leq\rho,
$$
where, recall that, $\rho:=\rho_1\vee\rho_2$.
Consequently, because of (H), Definition \ref{def2.1} and \eqref{e14},
 we get
\begin{equation}\label{e31}\begin{aligned}
\|A_{\phi}x\|_I
&=\sup_{t\in I}\int_0^1G(t,s)F(s,T_s(\cdot,x;\phi))ds\\
&\geq \sup_{t\in I}\int_{S}G(t,s)F(s,T_s(\cdot,x;\phi))ds\\
&\geq \sup_{t\in I}\int_{S}G(t,s)\omega(s;\frac{\mu}{\Lambda}\|x\|_I,\rho)ds\geq \|x\|_I.
\end{aligned}
\end{equation}
Finally, define $\Omega_1$ and $\Omega_2$ to be the open balls with radius $\rho_1\wedge\rho_2$ and
$\rho_1\vee\rho_2$ respectively.
The previous arguments together with \eqref{e30} and \eqref{e31} permit us
 to apply Theorem \ref{thm1.1} to get the result.
\end{proof}

\section{An Application}

In this section we show that our technique (namely,  to write the damping term as the difference of two factors), helps a lot to
obtain more information on the existence of the solutions. We show that given any
$\rho>0$ there exist  solutions having norm equal to $\rho$.
Consider the delay differential equation
\begin{equation}\label{e32}
x''(t)+t e^{-x(t-{\frac{1}{2}})}e^{1000x(\frac{t}{2})}=0,\quad t \in[0,1],
\end{equation}
associated with the conditions
\begin{equation}\label{e33}
x_0=\phi,\quad x(\frac{1}{2})=x(1).\end{equation}
Here we have $\eta=\frac{1}{2}$, $r=\frac{1}{2}$ (thus $J=[-\frac{1}{2},0]$),
 $Q(t,\xi):=0$ and $p(t):=0$. Let $c$ be a positive parameter and
write $p(t)=c-c$.
First observe that the function
$$
f(t,\psi):=te^{-\psi(-\frac{1}{2})}e^{1000\psi(-\frac{t}{2})}
$$
is actively bounded with
\begin{gather*}
\Theta_t(f):=\{-\frac{1}{2},-\frac{t}{2}\}, \\
\omega(t;m,M):=te^{-M}e^{1000m}, \\
L_0(t,m,M):=te^{-m}e^{1000M}.
\end{gather*}
Hence
$$
L(t,m,M):=c^2M+te^{-m}e^{1000M}.
$$
 Choose  $\delta=\frac{1}{10}$, thus we obtain
$S=[\frac{3}{5}, 1]$.

 In the sequel all constants  involved in our conditions are given as expressions of the parameter $c$. So, first we obtain
\begin{gather*}
\upsilon_1(t;c)=\upsilon_2(t;c)=e^{-c(1-t)},\quad \upsilon(c)
=e^{-\frac{c}{2}}, \\
Y(t;c)=\frac{e^{c(2-t)}}{c}\sinh(ct), \\
\gamma(c)=\frac{ce^{-c}}{\sinh(c)-\sinh(\frac{c}{2})}.
\end{gather*}
Also we obtain
\begin{gather*}
\Lambda(c)=e^{\frac{c}{2}},\\
\mu(c)=\min\Big\{\frac{e^{\frac{-c}{2}}\sinh(\frac{c}{2})}{{\sinh(c)
-\sinh(\frac{c}{2})}},
\quad e^{-(\frac{11c}{10})}\frac{\sinh(\frac{c}{10})}{\sinh(c)},
\quad\frac{\sinh(\frac{c}{2})}{\sinh(c)}\Big\}.
\end{gather*}
We can see that for all $c>0$,
$$
\mu(c)=\frac{e^{-\frac{c}{4}}\sinh(\frac{c}{4})}{\sinh(c)}.
$$

 Next we compute $G(s,s)$ for $s\in I$:  If
$s\leq\frac{1}{2}$, then
\begin{equation}\nonumber
G(s,s)=\frac{(\sinh(cs))^2}{c(\sinh(c)-\sinh(\frac{c}{2}))}[1+e^{c(\frac{3}{4}-s)}\sinh(\frac{c}{4})]
+\frac{\sinh(cs)}{c},
\end{equation}
while, if $s\geq\frac{1}{2}$, then
\begin{equation}\nonumber
G(s,s)=\frac{\cosh(c)-\cosh(2cs-c)}{2c(\sinh(c)-\sinh(\frac{c}{2}))}.
\end{equation}
Also we obtain
$$
G(\frac{1}{2},s)=\frac{\sinh(\frac{c}{2})\sinh(c-cs)}{c(\sinh(c)
-\sinh(\frac{c}{2}))}.
$$
Now we seek for the existence of positive reals $\rho_1, \rho_2$
satisfying \eqref{e13}, namely,
\begin{equation}
\label{e36}
\begin{aligned}
&\frac{\rho_1}{\sinh(c)-\sinh(\frac{c}{2})}\Big[\frac{1}{2}\sinh(\frac{c}{4})
[c\sinh(\frac{3c}{4})-2\sinh(\frac{c}{2})\sinh(\frac{c}{4})]-2e^{-c}\Big]\\
&+\frac{e^{1000\rho_1}}{8c^2(\sinh(c)-\sinh(\frac{c}{2}))}
\Big[c\sinh(\frac{c}{4})\sinh(\frac{3c}{4})+2\sinh(c)\\
&+\sinh(\frac{c}{2})-\frac{3(1+c^2)}{2c}\cosh(c)\\
&+\frac{1}{2c}\Big]\exp\big(\frac{-e^{-(\frac{8c}{5})}\sinh(\frac{c}{10})}{\sinh(c)}\rho_1\big)
\leq\rho_1e^{-\frac{c}{2}}
\end{aligned}
\end{equation}
and \eqref{e14}. The latter becomes
$$
\int_{S}G(\frac{1}{2},s)\omega(s,\frac{\mu(c)}{\Lambda(c)}\rho_2,
\rho_1\vee\rho_2)ds\geq \rho_2,
$$
which takes the form
\begin{equation}\label{e37}
\begin{aligned}
\frac{\sinh(\frac{c}{2})}{c(\sinh(c)-\sinh(\frac{c}{2}))}&\Big(\frac{3}{5c}\cosh(\frac{2c}{5})+\frac{1}{c^2}\sinh(\frac{2c}{5})-\frac{1}{c}\Big)\\
&\exp(-1100c\rho_2\frac{\sinh(c/10)}{\sinh(c)})\geq\rho_2e^{\rho_1\vee\rho_2}.
\end{aligned}
\end{equation}

  Let us restrict our discussion to the case
\begin{equation}\label{e38}
\rho_1<\rho_2.
\end{equation}
By the use of a graphing calculator, we can take a view of the set of pairs
$(\rho_1, \rho_2)$ satisfying the implicit algebraic inequalities
\eqref{e36}, \eqref{e37} and \eqref{e38}. We find out that there are
two points $c_1$ and $c_2$ (approximately equal to 0.1 and 1.66527, respectively) such that for all
$c\in[c_1, c_2]$  inequalities \eqref{e36}, \eqref{e37} are satisfied by
all $\rho_1, \rho_2>0$.


  We shall show the following result.

\begin{theorem} \label{thm4.1}
 Let $\rho_2>0$ and any (initial) function
$\phi\in C_0^+(J)$ with $\|\phi\|_{J}\leq\rho_2$. Then there is a solution $x$
of the problem \eqref{e32}- \eqref{e33} such
that  $\|x\|_I=\rho_2$.
\end{theorem}

\begin{proof}
Consider a $c\in(c_1, c_2]$ and a (strictly)
increasing sequence of positive reals $R_n$ converging to $\rho_2$.
By the previous arguments
it follows that $\rho_2>R_n$ satisfies \eqref{e37} and $R_n$ satisfies
\eqref{e36}. By Theorem \ref{thm3.1} there is a
solution $x_n$ of \eqref{e32} such that $x_n(s)=\phi(s)$, for all
$s\in[-\frac{1}{2},0]$,
\begin{equation}\label{e39}
x_n(\frac{1}{2})=x_n(1)
\end{equation}
and
\begin{equation}\label{e40}
R_n\leq\|x_n\|_I\leq\rho_2,
\end{equation}
for all $n$. From \eqref{e39} it follows that there is a
$t_n\in [\frac{1}{2}, 1]$ such that $x'_n(t_n)=0$ and hence
from \eqref{e32} by integration we get
\begin{equation}\label{e41}
x'_n(t)=-\int_{t_n}^tse^{-x_n(s-\frac{1}{2})}e^{1000x_n(\frac{s}{2})}ds.
\end{equation}
This shows that $(x'_n)$ is bounded. Also, from \eqref{e32} we see that
$(x''_n)$ is bounded. Applying Arzela-Ascoli theorem twice it
follows that there is a subsequence $(x_{k_n})$ converging
(in the $C^1$ sense)  to some differentiable function
$x$ satisfying the integral equation \eqref{e41}. It is easy to see that
$x$ is a solution of the original problem,
and because of \eqref{e40}, it satisfies $\|x\|_{I}=\rho_2$.
The proof is complete.
\end{proof}

\begin{thebibliography}{00}

\bibitem {a1} R. P. Agarwal, D. O'Regan and P. J. Y. Wong,
{ \it Positive solutions of differential, difference and integral equations},  Kluwer Academic Publishers,
Boston, 1999.

\bibitem {a2} R. P. Agarwal and D. O'Regan, Some new
existence results for differential and integral equations
, {\it Nonlinear Anal., Theory Methods Appl.,} {\bf 29} (1997), pp. 679--692.


\bibitem {a3} V. Anuradha, D. D. Hai and R. Shivaji, Existence results for superlinear semipositone BVP's, {\it
Proc. Am. Math. Soc.}
{\bf 124} (1996), pp. 757--763.

\bibitem {b1} Dingyong Bai and Yuantong Xu, Existence of positive solutions for boundary-value problems of
second-order delay differential equations, {\it  Appl. Math. Lett.},
{\bf 18} (2005), pp. 621--630.

\bibitem {b2} Chuanzhi Bai and Jipu Ma, Eigenvalue criteria for existence of multiple positive
solutions to boundary value problems
of second-order delay differential equations, {\it J. Math. Anal. Appl.},
{\bf 301} (2005), pp. 457--476.

\bibitem {d1} J. M. Davis, K. R. Prasad and W. K. C. Yin,  Nonlinear eigenvalue problens involving two classes
for functional differential equations, {\it Houston J. Math.},
{\bf 26} (2000), pp.597--608.

\bibitem {d2} T. Dlotko, On a paper of Mawhin on second order differential equations
{\it Ann. Math. Silesianae (Katowice)}, {\bf 11} (1997), pp. 55--66.

\bibitem {d3} R. D. Driver, {\it Ordinary and delay differential
equations}, Springer Verlag, New York, 1976.

\bibitem {e1} L. H. Erbe, Qingai Kong and B. G. Zhang, {\it Oscillation Theory for Functional Differential
Equations}, Pure Appl. Math., 1994.

\bibitem {e2} L. H. Erbe and Q. K. Kong, Boundary value
problems for singular second order functional differential
equations, {\it J. Comput. Appl. Math.},
{\bf 53} (1994), pp. 640--648.

\bibitem {g1} L. J. Grimm and K. Schmitt, Boundary value
problems for differential equations with deviating arguments, {\it Aequationes Math.},
{\bf 4} (1970), pp. 176--190.

\bibitem {g2} G. B. Gustafson and K. Schmitt, Nonzero solutions of boundary value problems for second order
ordinary and delay-differential equations, {\it J. Differ. Equations}, {\bf 12} (1972), pp. 129--147.

\bibitem {h1} J. K. Hale and S. M. V. Lunel, {\it Introduction
to functional differential equations}, Springer Verlag, New York, 1993.

\bibitem {h2} J. Henderson, {\it Boundary Value Problems for
Functional Differential Equations}, World Scientific, 1995.


\bibitem {h3} J. Henderson, Double solutions of three point boundary value
problems for second order differential equations, {\it Electron. J. Differ. Equ.},
{\bf  2004} (2004), No. 115, pp. 1--7.


\bibitem {h4} J. Henderson and W. Hudson, Eigenvalue
problens for nonlinear functional differential equations, {\it  Commun. Appl. Nonlinear Anal.},
{\bf 3} (1996), pp. 51--58.

\bibitem {h5} J. Henderson and W. Yin, Positive solutions
and nonlinear eigenvalue problens for functional differential
equations, {\it  Appl. Math. Lett.},
{\bf 12} (1999), pp. 63--68.

\bibitem {j1} D. Jang and P. Weng, Existence of positive
solutions for boundary value problems of second order functional
differential equations, {\it Electron. J. Qual. Theory Differ. Equ.},
{\bf 6} (1998) pp.1--13.

\bibitem {k1} G. L. Karakostas, Positive solutions of a Boundary Value Problem for a delay differential equation
with damping and actively bounded delayed forcing term, {\it Electron. J. Differ. Equ.},
{\bf  2006}(2006), No. 73, pp. 1--12.

\bibitem {k2} G. L. Karakostas, K. G. Mavridis and P. Ch. Tsamatos, Triple Solutions for a Nonlocal
Functional Boundary Value Problem
by Leggett-Williams Theorem, {\it Appl. Anal.}, {\bf 83} (9) (2004), pp. 957--970.

\bibitem {k3} G. L. Karakostas, K. G. Mavridis and P. Ch. Tsamatos, Multiple positive solutions for a
functional  second order boundary value problem, {\it J. Math. Anal. Appl.},
{\bf 282} (2003), pp. 567--577.


\bibitem {k4} G. L. Karakostas and P. Ch. Tsamatos, Positive solutions and nonlinear eigenvalue problems for
retarded second order differential equations, {\it Electron. J. Differ. Equ.},
{\bf  2002}(2002), No. 59, pp. 1--11.


\bibitem {k5} M. A. Krasnoselskii, {\it Positive solutions of
operator equations}, Noordhoff, Groningen, 1964.

\bibitem {l1} Yongkun Li and Lifei Zhu, Positive periodic solutions of nonlinear
functional differential equations, {\it Appl. Math. Computat.},
{\bf 156} (2004), pp. 329--339.


\bibitem {s1} D. R. Smart, {\it  Fixed Point Theorems},
Cambridge University Press, Cambridge, 1980.

\bibitem {w1} P. Weng and D. Jiang, Existence of positive
solutions for a nonlocal boundary value problem of second-order
FDE, {\it  Comput. Math. Appl.},
{\bf 37} (1999), pp. 1--9.


\bibitem {w2} P. Weng  and Y. Tian,  Existence of positive
solutions for singular $(n,n-1)$ conjugate boundary value problem
with delay, {\it Far East J. Math. Sci.},
{\bf 1}(3) (1999), pp. 367--382.


\end{thebibliography}


\end{document}
