\documentclass[twoside]{article}
\usepackage{amssymb} % used for R in Real numbers
\usepackage{psfig} % For including ps figures
\pagestyle{myheadings}
\setcounter{page}{97}
\markboth{\hfil On Properties of Nonlinear Second Order Systems \hfil}%
{\hfil John R. Graef \& J\'anos Karsai \hfil}
\begin{document}
\title{\vspace{-1in}\parbox{\linewidth}{\footnotesize\noindent
{\sc Differential Equations and Computational Simulations III}\newline
J. Graef, R. Shivaji, B. Soni \& J. Zhu (Editors)\newline
Electronic Journal of Differential Equations, Conference~01, 1997, pp. 97--108. \newline
ISSN: 1072-6691. URL: http://ejde.math.swt.edu or http://ejde.math.unt.edu
\newline ftp 147.26.103.110 or 129.120.3.113 (login: ftp)}
\vspace{\bigskipamount} \\
On Properties of Nonlinear Second Order Systems under
Nonlinear Impulse Perturbations
\thanks{ {\em 1991 Mathematics Subject Classifications:} 34D05, 34D20, 34C15.
\hfil\break\indent
{\em Key words and phrases:} Asymptotic stability, attractivity of periodic
solutions, \hfil\break\indent
impulsive systems, nonlinear equations, second order systems.
\hfil\break\indent
\copyright 1998 Southwest Texas State University and University of
North Texas. \hfil\break\indent
Published November 12, 1998.} }
\date{}
\author{John R. Graef \& J\'anos Karsai}
\maketitle
\begin{abstract}
In this paper, we consider the impulsive second order system
\[
\ddot{x}+f(x)=0\quad (t\neq t_{n});\quad \dot{x}(t_{n}+0)=b_{n}\dot{x}(t_{n})
\quad (t=t_{n})
\]
where $t_n=t_0+n\,p$ $(p>0, n=1,2\dots )$. In a previous paper, the authors
proved that if $f(x)$ is strictly nonlinear, then this system has
infinitely many
periodic solutions. The impulses account for the main differences in
the attractivity properties of the zero solution. Here,
we prove that these periodic
solutions are attractive in some sense, and we give good estimates for
the attractivity region.
\end{abstract}
\newcommand{\rref}[1]{(\ref{#1})}
\newcommand{\inth}{\int\limits}
\newcommand{\sign}{\mathop{\rm sgn}}
\newtheorem{theorem}{\bigskip Theorem}
\newtheorem{lemma}{Lemma}
\section{Introduction}
Investigations of asymptotic stability problems for the intermittently
damped second order differential equation
\begin{equation}
\ddot{x}+g(t)\dot{x}+f(x)=0 \label{e:DE}
\end{equation}
have led to asymptotic stability investigations of the impulsive system
\begin{eqnarray}
& \ddot{x}+f(x)=0,\quad (t\neq t_{n})& \nonumber \label{e:IDE} \\
& x(t_{n}+0)=x(t_{n}),& \\
& \dot{x}(t_{n}+0)=b_{n}\dot{x}(t_{n}),& \nonumber
\end{eqnarray}
where $t_{n}\rightarrow \infty$ $(n\rightarrow \infty)$,
$xf(x)>0$ $(x\neq 0)$, and $f$ is continuous $(x\in {\mathbb R})$
(see \cite{b:imp2, b:KJ, b:imp1}).
Although there are analogies between the systems {\rref{e:DE}} and {\rref{e:IDE}}
in the case $0 \leq b_n \leq 1$, system {\rref{e:IDE}} has unexpected properties
due to the instantaneous effects. In addition, if $b_n<0$
(\cite{b:imp2, b:imp4}), there are some new beating phenomena, and the beating
impulses can stabilize the oscillatory behavior of the system
(see \cite{b:imp4}). In particular, in both the positive and negative impulse
cases, if $t_n=t_0+n\,p$ $(p>0)$, there can exist nonzero periodic solutions,
which are small or large depending on the nonlinearity of the function $f(x)$.
The existence of such solutions can destroy the global nature of the attractivity,
or the attractivity itself, of the zero solution. In this paper,
we investigate the attractivity properties of these periodic solutions. We
show that the periodic solutions are attractive in some sense, and we describe
the attractivity regions as well.
\section{Definitions and Preliminaries}
\label{s:lemma}
For the system {\rref{e:IDE}}, we use the following assumptions:
$t_n=t_0+n\,p$ $(p>0)$, $f(x)$ is continuous, $xf(x)> 0$ ($x \neq 0$),
and for the sake of simplicity, we assume that $f$ is an odd function,
i.e., $f(-x)=-f(x)$.
We say that the zero solution of \mbox{(\ref{e:IDE})} or \mbox{(\ref{e:DE})}
is {\it stable} if for any $\varepsilon > 0 $ there exists $\delta > 0 $
such that $|x(0)| + |\dot{x}(0)| < \delta $ implies $|x(t)| + |\dot{x}(t)| <
\varepsilon \,\, (t \geq 0) $. The zero solution is {\it asymptotically stable}
(a.s.) if it is stable and there exists $\delta > 0 $ such that $|x (0)| + |\dot{x} (0)| <
\delta $ implies $\lim_{t \rightarrow \infty} (x(t), \dot{x} (t)) = (0,0)$.
The asymptotic stability is {\it global} (g.a.s.) if $\delta = \infty $.
We investigate the solutions of the equations with the aid of the energy
function
\begin{equation} \label{e:energy}
V(x,y) = y^2 + 2 \int\limits_0^x f =: y^2 + F(x),
\end{equation}
and often use the notation $V(t)=V(x(t),\dot x(t))$ for the solutions of
system {\rref{e:IDE}}. Furthermore, without future reference, we assume that
\[
\lim_{x\rightarrow\pm \infty}F(x)=\infty.
\]
This condition allows us to obtain boundedness of the solutions from the
boundedness of the energy.
Let us consider the undamped equation
\begin{equation} \label{e:DE0}
\ddot u + f(u)=0.
\end{equation}
All solutions are periodic, and the energy is constant along each solution.
The distance between the extremal points is given by (see \cite{b:R-S-C})
\begin{equation}
\Delta (r) = \int\limits_{-F^{-1} (r)} ^ {F^{-1}(r)} \frac {dx} {\sqrt{r-F(x)%
}},
\end{equation}
where $F^{-1}$ is the inverse of the positive part of $F(x)$.
Calculations yield the following expressions for $\Delta(r) $ in the case
where $f(x)=|x|^\alpha \sign x$:
\begin{eqnarray} \label{e:delt}
\mbox{a)}& \alpha = 1, & \Delta(r) = \pi, \\
\mbox{b)}& \alpha \neq 1, & \Delta (r) = A r^\beta \ \ \mbox{with} \ \ \beta
= \frac {1-\alpha} {2(\alpha+1)} \quad \mbox{and} \nonumber \\
&& \quad A = 2 \left(\frac { \alpha +1} {2}\right)^ {\frac {1} {\alpha + 1}}
\frac{\sqrt \pi \Gamma \left( \frac {1} {\alpha+1}\right)} {(\alpha +1 )
\Gamma \left( \frac {3 +\alpha} {2 (1 + \alpha)} \right) }, \nonumber
\end{eqnarray}
where $\Gamma(\cdot)$ denotes Euler's $\Gamma $ function.
In the linear case, we obtain the known value $\pi$. Note that $\beta<0$
for $\alpha>1$ and $0<\beta<1/2$ for $0<\alpha <1$.
Consider the system {\rref{e:IDE}}. Since $\lim_{n \rightarrow \infty}
t_n =\infty$, every solution can be continued to $\infty$. In addition,
the solutions are differentiable, and $\dot x(t)$ is piecewise continuous
and continuous from the left at every $t>0$.
The variation of the energy along the solutions of \mbox{(\ref{e:IDE})} is
given by
\begin{eqnarray}
V(t_{n+1})-V(t_n) & = & V(t_n+0)-V(t_n) \nonumber \\
& = & \dot{x}^2 (t_n+0) + F(x(t_n+0)) - \dot{x}^2 (t_n)- F(x(t_n)) \label{e:ener0}\\
& = & b^2 _n \dot{x}^2 (t_n) - \dot{x}^2 (t_n) = - \dot{x}^2
(t_n)(1-b^2_n)=-a_n\dot{x}^2 (t_n) , \nonumber
\end{eqnarray}
where $a_n = 1 - b^2_n$ is the $n$-th energy quantum.
The energy is nonincreasing if $b^2_n \leq 1$, independent of the sign
of $b_n$, and it is constant between any $t_n$ and $t_{n+1}$. In case $b_n=0$,
the solutions of initial value problems are not
unique in the backwards direction. In this case, there can be solutions which are identically zero on
$[t_n,\infty).$
Consider the energy along the solutions of \mbox{(\ref{e:IDE})} on the
interval $[0,t]$. Using \mbox{(\ref{e:ener0})} repeatedly easily yields
\begin{equation} \label{e:Bell10}
V(t) = V(0)- \sum_{t_{n}<t}a_{n}\dot{x}^{2}(t_{n}).
\end{equation}
% NOTE(review): text lost here in extraction; the transition and the opening
% of the theorem are reconstructed -- verify against the published version.
The following theorem was proved in \cite{b:imp4}.
\begin{theorem}
\label{t:m1}
Let $p>0$ and $t_{n}=t_0+ n\,p$, and
let $D_{0}=\{r:\Delta (r)=p/k, \ k=1,2\dots \}$. The
solutions of \mbox{\rm(\ref{e:IDE})} with initial conditions satisfying
$F(x(t_{i}))=r\in D_{0},\ \ \dot{x}(t_{i})=0 \ (i=0,1, \dots)$ are
periodic and satisfy equation \mbox{\rm(\ref{e:DE0})}.
\end{theorem}
This theorem assures the existence of periodic solutions in both the
superlinear and sublinear cases. If $\limsup_{r\rightarrow\infty}\Delta(r)=0$,
then the set $D_0$ is unbounded, i.e., there are infinitely many periodic
solutions in some set $\{V(x,y)>r_0\}$. This is the case if
$f(x)=|x|^\alpha \sign x \ \ (\alpha>1)$. On the other hand, if
$\liminf_{r\rightarrow 0}\Delta(r)=0$, then $D_0$ has no positive infimum,
i.e., there are arbitrarily small periodic solutions
(e.g., $f(x)=|x|^\alpha \sign x \ \ (0<\alpha<1)$). If
$\lim_{r\rightarrow 0}\Delta(r)=\infty$, $D_0$ has a positive minimal element.
As consequence of the above statements, we obtain that the zero solution
of the system {\rref{e:IDE}}
cannot be globally asymptotically stable in the strictly superlinear case
($\limsup_{r\rightarrow\infty}\Delta(r)=0$), and it cannot be asymptotically
stable in the strictly sublinear case ($\liminf_{r\rightarrow 0}\Delta(r)=0$).
More precisely, a simplified version of Corollary 3.3 in \cite{b:imp4} is
the following.
\begin{theorem}
\label{t:as}
Let $t_{n}=t_0+ n\,p$ and $|b_n|\leq 1$, and assume that there exists a
sequence of integers $\{n_k\}$ such that
\begin{equation}
\sum_{k}\min (a_{n_k},a_{n_k+1})=\infty . \label{e:c1}
\end{equation}
Then:
\\
Case {\rm (a):} $0<\inf_{r>0}\Delta (r)$. If $p<\inf_{r>0}\Delta (r)$, the zero
solution is globally asymp{\-}totically stable. If $p>\inf_{r>0}\Delta (r)$,
the behavior depends on the shape of $\Delta (r)$.
\\
Case {\rm (b):} $\lim_{r\rightarrow 0}\Delta (r)=\infty$ and $\lim_{r\rightarrow
\infty }\Delta (r)=0$. The zero solution is asymptotically (but not
globally) stable.
\\
Case {\rm (c):} $\lim_{r\rightarrow 0}\Delta (r)=0$. The zero solution is not
asymptotically stable.
\end{theorem}
\section{Attractivity of the Periodic Solutions}
Following Theorem \ref{t:m1}, let $u_p$ denote the periodic solution of
equation {\rref{e:DE0}} (it is also a solution of system {\rref{e:IDE}})
such that $ r_p=V(u_p(t_0),\dot u_p(t_0))=\Delta^{-1}(p)$ and
$\dot u_p(t_0)=0 $ (i.e., $F(u_p(t_0))=\Delta^{-1}(p)$).
First, we consider which attractivity properties might reasonably be expected.
Let $x(t)$ be another solution of {\rref{e:IDE}} for which
% NOTE(review): text lost here in extraction; reconstructed -- verify
% against the published version.
$ r=V(x(t_0),\dot x(t_0))\neq r_p$, and let $I_p$ be an interval of energies
containing $r_p$ on which $\Delta(r)$ is monotone.
Let $u_p>0 $ be such that $F(u_p)=r_p$, and
define $ \tau(p,r)=\Delta(r)-\Delta(r_p)=\Delta(r)-p$.
If $\Delta(r)$ is increasing on the interval $I_p$, then $\tau(p,r) >0$,
and if $\Delta(r)$ is decreasing, then $\tau(p,r) <0$; moreover,
$|\tau(p,r)|$ is increasing for $x\geq u_0$.
Let $\gamma_+$ and $\gamma_-$
be the curves which are mapped to the sets $\{(-u_p,y): y\in {\mathbb R}\}$ and
$\{(u_p,y): y\in {\mathbb R}\}$, respectively, by the mapping $U(p;\cdot)$.
It is easy to see that this is equivalent to
$\gamma_+ =\{U(\tau(p,r);U_0): U_0=(u_p,y)\}$ and
$\gamma_- =\{U(\tau(p,r);U_0): U_0=(-u_p,y)\}$ where $r=V(u_p,y)$ is the
energy of $U(t;U_0)$. From the symmetry of $f(x)$, we see that
$\gamma_- =\{(x,y):(-x,-y)\in \gamma_+\}$. The mapping $ \delta_- \rightarrow
\gamma_+ \rightarrow \{(-x_p,y)\}$ is shown in Figure 1.
\begin{figure}
\psfig{file=gkfig1.eps,width=11cm}
\caption{The mapping $ \delta_- \rightarrow \gamma_+
\rightarrow \{(-x_p,y)\}$}
\end{figure}
The monotonicity of $\Delta(r)$ and the continuous dependence of solutions
on initial conditions imply that the curve $\gamma_+$ is a graph of a
continuous function of one variable, and so it can be written in the form
$\dot u(\tau(p,r);U_0)=\gamma_+(u(\tau(p,r);U_0))$.
To see this, assume the contrary. Let $(x,y_1),(x,y_2) \in \gamma_+$,
$|y_1|<|y_2|$,
and let $r_1=V(x,y_1) < r_2=V(x,y_2)$. Then
$$
|\tau(p,r_1)|=\inth_{u_p}^{x} \frac {ds}{\sqrt{r_1-F(s)}}>
\inth_{u_p}^{x} \frac {ds}{\sqrt{r_2-F(s)}}=
|\tau(p,r_2)|,
$$
which contradicts the monotonicity of $|\tau(r,p)|$. The case $y_1=-y_2$
cannot happen because of the uniqueness of solutions to initial value
problems for equation {\rref{e:DE0}}.
Note that $\gamma_+(x) $ is positive (negative) if $\Delta(r)$ is
increasing (decreasing), $x-u_p$ is small enough, and $x>u_p$.
Let $\hat x$ be the first zero of $\gamma_+$ if $F(\hat x)\in I_p$,
let $\bar x = \min \{\hat x, \sup I_p\}$, and let $\bar r=F(\bar x)$.
We also use the notation $\gamma_+^-=\{(x,y):(x,-y)\in \gamma_+\}$ and
$\gamma_-^-=\{(x,y):(x,-y)\in \gamma_-\}$.
Now, we can define the following closed sets:
\[
G_+:= \left \{
\begin{array}{lll}
\{ (x,y): x\geq x_p, \ V(x,y)\leq \bar r, \ y\leq \gamma_+(x) \},
& \mbox{ if } \Delta(r) \mbox{ is increasing,} \\
\{ (x,y): x\geq x_p, \ V(x,y)\leq \bar r, \ y\geq \gamma_+(x) \},
& \mbox{ if } \Delta(r) \mbox{ is decreasing,}
\end{array}
\right .
\]
\[
G_-:=\{ (x,y): (-x,-y)\in G_+ \},
\]
\[
G_+^-:=\{ (x,y): (x,-y)\in G_+ \},
\]
\[
G_-^-:=\{ (x,y): (x,-y)\in G_- \}.
\]
Next, we consider the impulsive system {\rref{e:IDE}}, and assume that
$X(t_{n-1}+0; X_0)\in G_+ \ \ (G_-)$ and $-1 \leq b_n\leq 0 $ for an impulse
at $t_{n-1}$. Then
$X(t_{n}-0; X_0) \in G_-^- \ \ (G_+^-)$ and
$X(t_{n}+0; X_0) \in G_- \ \ (G_+)$.
The first relation follows immediately from the properties of the sets $G_i^j\
(i,j \in \{+,-\})$.
For the second one, we only have to prove that $(x,y) \in G_+$
implies $(x,b\,y) \in G_+$ for every $0 \leq b\leq 1$. But this immediately
follows from the fact that $\gamma_+$ is a function of $x$.
Applying the above arguments, we can formulate the basic attractivity theorem
for the beating impulses.
\begin{theorem}
\label{t:at1}
Assume that $\Delta(r)$ is monotone on an interval $[r_{p},r_{p}+\varepsilon]$
or $[r_p,\infty)$, and $-1\leq b_n\leq 0$, and assume that condition
{\rm\rref{e:c1}} holds.
% NOTE(review): text lost here in extraction; the conclusion of the theorem
% and the beginning of the proof are reconstructed -- verify against the
% published version.
Then $\lim_{t\rightarrow\infty} V(x(t),\dot x(t))=r_p$ for every solution
$x(t)$ of {\rref{e:IDE}} starting in $G_+\cup G_-$.
\end{theorem}
\paragraph{Sketch of the proof.}
Suppose, to the contrary, that $V(x(t_n),\dot x(t_n))\rightarrow r>r_p$.
Then $\Delta(V(x(t_n),\dot x(t_n)))> p_1>p$ for $n>N$.
Now, it is easy to see that there exist a positive number $ \mu$,
independent of $n$, such that
$$
\max(\dot x^2 (t_{n}) , \dot x^2 (t_{n+1}))>\mu
$$
for $n>N$. Consequently,
\begin{eqnarray*}
V(t)& =& V(t_0)- \sum_{t_{n}<t}a_n\dot{x}^2 (t_n)\rightarrow -\infty
\quad (t\rightarrow \infty ),
\end{eqnarray*}
% NOTE(review): text lost here in extraction; the conclusion of the proof and
% the example header are reconstructed -- verify against the published version.
which contradicts $V(t)\geq 0$, and the proof is complete. \hfill$\Box$
\medskip
\paragraph{Example.} Let $p>0,\ t_n=np$, $b_{2n}=0$, and
$b_{2n-1}=-1$, $(n=1,2,\dots )$. Let ${x}(t)$ be any solution with
$\dot{x} (t_{2})=0$. This solution is periodic but does not satisfy the
equation $\ddot{x}+f(x)=0$.
\medskip
We can observe that the conditional stability of the solution $u_p$ is
satisfied independently of the specific values of the impulse constants $b_n$.
The negative sign guarantees that the sets $G_+ $ and $G_-$ map into each
other by the mapping $X(t_{n-1}+p+0; \cdot)$.
The case of positive impulses is different. If $b_n$ is positive,
such invariance will hold for much narrower sets under stronger conditions
on $b_n$.
To formulate results for the case $ 0 \leq b_n \leq 1$, we need some additional
definitions. Let $\delta_+$ and $\delta_-$ be the curves that are mapped
respectively to $\gamma_-$ and $\gamma_+$ by the mapping $U(p;\cdot)$.
Obviously, these curves can be defined analogously to the $\gamma$ curves
above, that is, $\delta_+=\{U(2\tau(p,r);U_0): U_0=(u_p,y)\}$ and
$\delta_-= \{U(2\tau(p,r);U_0): U_0=(-u_p,y)\}=\{(x,y):(-x,-y)\in \delta_+\}$.
The curves $\delta_+$ and $\delta_-$ also represent graphs of continuous
functions of one variable, and can be written in the form
$\delta_+(x)$ and $\delta_-(x)$.
Similarly, $\delta_+(x) $ is positive (negative) if $\Delta(r)$ is increasing
(decreasing), $x-u_p$ is small enough, and $x>u_p$.
Let $\check x$ be the first zero of $\delta_+$ if $F(\check x)\in I_p$,
let $\tilde x = \min \{\check x, \sup I_p\}$, and let $\tilde r=F(\tilde x)$.
We can then define the following closed sets:
\[
H_+:= \left \{
\begin{array}{lll}
\{ (x,y): x\geq x_p, \ V(x,y)\leq \tilde r, \ y\leq \delta_+(x) \},
& \mbox{ if } \Delta(r) \mbox{ is increasing,} \\
\{ (x,y): x\geq x_p, \ V(x,y)\leq \tilde r, \ y\geq \delta_+(x) \},
& \mbox{ if } \Delta(r) \mbox{ is decreasing,}
\end{array}
\right .
\]
\[
H_-:=\{ (x,y): (-x,-y)\in H_+ \}.
\]
The sets $G_+$, $G_-$, $H_+$, and $H_-$ are shown in Figure 2.
It follows immediately from the definition of the sets that the mapping
$ U(p;\cdot)$ maps the sets $H_+ $ and $H_-$ into
$G_- \cap \{V(x,y)\leq \tilde r\}$ and
$G_+ \cap \{V(x,y)\leq \tilde r\}$, respectively.
Now let $x(t)$ be a solution of \rref{e:IDE} such that
$X(t_{n-1}+0; X_0)\in H_+ \ \ (H_-)$ and $0\leq b_n\leq 1 $. Then
$X(t_{n}-0; X_0) \in G_-\cap \{V(x,y)\leq \tilde r\}
\ \ (G_+\cap \{V(x,y)\leq \tilde r\})$. To guarantee that
$ X(t_{n}+0; X_0) \in H_- \ \ (X(t_{n}+0; X_0) \in H_+) $,
we need an additional condition on $b_n$, such as
$b_n \leq \sup \{\delta_+(x)/\gamma_+(x): x\in (u_p, x(t_n-0))\}$.
The following theorem then holds.
\begin{theorem}
\label{t:at2}
Assume that $\Delta(r)$ is monotonic on an interval
$[r_{p},r_{p}+\varepsilon]$ or $[r_p,\infty)$.
Let $r_0 \leq \tilde r$, and assume that
\begin{equation}
\label{c:bmax}
0\leq b_n \leq \sup \{\delta_+(x)/\gamma_+(x): x\in (u_p, F^{-1} (r_0))\},
\quad n=1,2, \dots
\end{equation}
If a solution $x(t)$ of {\rm\rref{e:IDE}} satisfies
$(x(t_0+0),\dot x(t_0+0)) =X_0 \in H_+ \cap \{V(x,y)\leq r_0\} \
(H_- \cap \{V(x,y)\leq r_0\})$,
then $(x(t_0+2n\,p+0),\dot x(t_0+2n\,p+0)) \in H_+ \ (H_-)$.
In addition, $\lim_{t\rightarrow\infty} V(x(t),\dot x(t))=r_p$, i.e.,
$\lim_{t\rightarrow\infty} (x(t)-u_p(t))=0$.
\end{theorem}
For the proof of the last statement, we have only to note that condition
\rref{c:bmax} is stronger than \rref{e:c1} since the supremum in \rref{c:bmax}
is smaller than 1.
The right hand side of Figure 3 shows the attractivity of $u_p$ for
positive impulses.
\begin{figure}
\psfig{file=gkfig3.eps,width=12cm}
\caption{The attractivity of $u_p(t)$, $f(x)=x^{1/3}$, $b_n=0.7$ (l.h.s) and
$b_n=0.6$ (r.h.s)}
\end{figure}
To illustrate our theorem, in Figure 4 we show the values of the mappings
$X(t_0+np; \cdot)$ for $b_n=1,0.9,0.8,0.7,0.6,0.5$.
Later, we will return to the question of the sharpness of the estimates
of the fraction $\delta_+(x)/\gamma_+(x)$.
\begin{figure}
\psfig{file=gkfig4.eps,width=12cm}
\caption{Mappings $X(t_0+np; \cdot)$, $f(x)=x^{1/3}$, $p=3$,
$b_n=1,0.9,0.8,0.7,0.6,0.5$}
\end{figure}
Combining the arguments for negative and positive impulses, we can formulate
the following more general theorem.
\begin{theorem}
\label{t:at3}
Assume that $\Delta(r)$ is monotone on an interval $[r_{p},r_{p}+\varepsilon]$
or $[r_p,\infty)$, $r_0 \leq \tilde r$, and assume that conditions {\rm\rref{e:c1}}
and
\begin{equation}
\label{c:best}
-1\leq b_n \leq \sup \{\delta_+(x)/\gamma_+(x): x\in (u_p, F^{-1} (r_0))\}
\quad n=1,2, \dots
\end{equation}
hold. If a solution $x(t)$ of {\rm\rref{e:IDE}} satisfies
$(x(t_0+0),\dot x(t_0+0)) =X_0 \in H_+ \cap \{V(x,y)\leq r_0\leq \tilde r\}$
$ \ (H_- \cap \{V(x,y)\leq r_0\leq \tilde r\})$, then
$\lim_{t\rightarrow\infty} V(x(t),\dot x(t))=r_p$, i.e.,
$\lim_{t\rightarrow\infty} (x(t)-u_p(t))=0$.
\end{theorem}
The key to the applicability of our results is to either find the curves
$\gamma_+$ and $\delta_+$ analytically or to approximate them numerically.
In either case, computer algebra programs are very useful.
The curves in Figures 1 and 2 are obtained from the definitions of $\gamma_+$ and
$\delta_+$, interpolating the points
$\{ U(p,U_0): U_0 \in \{(u_0,i\,d), i=1,\dots\}\}$ and
$\{ U(2p,U_0): U_0 \in \{(u_0,i\, d), i=1,\dots\}\}$, respectively, where the
step size $d$ is small enough. This approach is quite fast and good enough
(although not analytically certain) to verify that a point $(x,y)$ is in
$G_+ \ (H_+)$, but it is not applicable to estimate the quotient
$\delta_+(x)/\gamma_+(x)$ if $x$ is close to $u_p$ since
$\lim_{x \rightarrow u_0+0} \delta_+(x)/\gamma_+(x)$ is of form $0/0$.
First, let us give estimates for the sets $G_+$ and $H_+$; for simplicity,
we assume that $f$ is monotonic.
From the definition, we have
$$
\gamma_+(x)=y_p-\inth_0^{\tau(p,r) } f(u(s;(u_p,y_p))) \, ds.
$$
Let $r_p<r_0\leq \bar r$, $x> u_p$ and
$V(x,\gamma_+(x))=r\leq r_0$. Since $\gamma_+(x)>0 \ (<0)$ and
$\tau(p,r)>0 \ (<0)$ for $x>u_0$, $r>r_p$, $\Delta(r)$ is increasing
(decreasing), and $f(x)$ is monotonic, we have
$$
\sqrt{|r-r_p|}- f(F^{-1}(r_p))|\Delta(r)-p| \geq |\gamma_+(x)|
\geq \sqrt{|r-r_p|}- f(F^{-1}(r_0))|\Delta(r)-p|,
$$
where $|y_p|=\sqrt{|r-r_p|}=\sqrt{|r-\Delta^{-1}(p)|}$. If, in addition,
$\Delta(r)$ is differentiable and $|\tau(p,r)|$ is concave up for
$r_p