\documentclass[twoside]{article}
\usepackage{amsfonts} % used for R in Real numbers
\pagestyle{myheadings}
\markboth{ Heteroclinic connections }
{ L. Sanchez }
\begin{document}
\setcounter{page}{257}
\title{\vspace{-1in}\parbox{\linewidth}{\footnotesize\noindent
USA-Chile Workshop on Nonlinear Analysis, \newline
Electron. J. Diff. Eqns., Conf. 06, 2001, pp. 257--266.\newline
http://ejde.math.swt.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.swt.edu or ejde.math.unt.edu (login: ftp)}
\vspace{\bigskipamount} \\
%
Heteroclinic connections for a class of non-autonomous systems
%
\thanks{ {\em Mathematics Subject Classifications:} 34B15, 34C37.
\hfil\break\indent
{\em Key words:} Heteroclinics, Fisher equation.
\hfil\break\indent
\copyright 2001 Southwest Texas State University.
\hfil\break\indent Published January 8, 2001.
\hfil\break\indent
Supported by Funda\c c\~ao para a Ci\^encia e a
Tecnologia and PRAXIS XXI.} }
\date{}
\author{ L. Sanchez }
\maketitle
\begin{abstract}
We prove the existence of heteroclinic connections for a system
of ordinary differential equations, with time-dependent coefficients,
which is reminiscent of the ODE arising in connection with traveling
waves for the Fisher equation.
The approach is elementary and it allows in particular the
study of the existence of positive solutions for the same system that
vanish on the boundary of an interval $(t_0,+\infty)$.
\end{abstract}
\section{Introduction}
When one looks for one-dimensional traveling waves $u(x-ct)$ for the
Fisher equation
$$\frac{\partial u}{\partial t}=\frac{\partial^2 u}{\partial x^2}+f(u)
$$
(that models a diffusion phenomenon in biomathematics), one finds
the ordinary differential equation
$$u''+cu'+f(u)=0\,.\eqno(1)$$
Here $c>0$ represents the admissible wave
speed; the function $f$ takes positive values between two zeros, say
0 and $a$ ($a>0$): see for example \cite{kpp,aw}.
In this paper we consider the following system, which is a
non-autonomous multi-dimensional analogue of (1):
$$u_i''+p_i(t)u_i'+f_i(u)=0,\quad i=1,\dots, n\,,\eqno(2)$$
where $u=(u_1,\cdots,u_n)$.
The vector field $f=(f_1,\cdots,f_n)$ is assumed to be defined in some
$n$-dimensional box
$[0,a_1]\times\cdots\times[0,a_n]$ ($a_i>0,\forall i=1,\dots, n$), the
vertices $(0,\cdots,0)$ and $(a_1,\cdots,a_n)$ being its only zeros.
More precisely, we state the following basic assumptions:
\begin{enumerate}
\item[(H1)] For each $i\in\{1,\cdots, n\}$,
$f_i:[0,a_1]\times\cdots\times[0,a_n]\rightarrow{{\mathbb R}_+}$ is a
Lipschitz continuous function
such that $ f_i(0,\cdots,0)=0=f_i(a_1,\cdots,a_n)$ and $f_i(u)>0$
if $u_i>0$ and $u\neq a:=(a_1,\cdots,a_n)$.
\item[(H2)] The functions $p_i:{\mathbb R}\to{{\mathbb R}_+}$ will be assumed throughout
to be continuous and $$c_i:={\displaystyle\inf_{t\in{\mathbb R}}p_i(t)}>0.$$
\end{enumerate}
We look for {\it positive solutions} $u(t)=(u_1(t),\cdots, u_n(t))$,
i.e., solutions that have positive components.
Accordingly, the word {\it solution} will be used to mean
{\it positive solution} throughout.
The relevant problem is
to find ``monotonic'' heteroclinics (in the sense that their
components are decreasing functions) that connect the equilibria
$(a_1,\cdots,a_n)$ and 0. This problem has been
very much studied for the autonomous scalar equation,
various approaches being available in a vast literature: we refer the
reader to \cite{aw,as,k} and the bibliography in those papers. The
autonomous system has been dealt with in \cite{al}; we owe a lot to
the ideas there, and we would like to stress that our approach, which
is also elementary, works in a slightly more general setting in the
sense that it allows not only time dependence but also consideration
of models where the vector field $f$ may vanish to a higher order at
$u=0$. In addition we could equally consider a more general form of
(2) where nonlinear terms $b_i(t)f_i(u)$ replace $f_i(u)$ and the
functions $b_i$ are bounded above and below by positive numbers
(see \cite{s}).
An important role is played by the functions $g_i(u)$, defined (for
those $u$ such that $u_i>0$) by $f_i(u)=u_ig_i(u)$.
\section{Preliminary lemmas}
% NOTE(review): this remark is reconstructed from a garbled source;
% verify against the published version (Remarks 1--2 appear to be missing).
\paragraph{Remark 3} Let $c>0$, $M>0$, $\mu>0$, $c^2\geq4M$ and
$0\leq \epsilon\leq \mu(\frac{c}{2}+\frac{\sqrt{c^2-4M}}{2})$.
Then the solution $u(t)$ of the initial value problem
$$\displaylines{
\hfill u''+cu'+Mu=0\hfill\llap{(3)}\cr
\hfill u(0)=\mu,\quad u'(0)=-\epsilon\hfill\llap{(4)}
}$$
is positive in $[0,+\infty)$ and tends to zero as $t\to+\infty$.
(See \cite{s}.)
\paragraph{Lemma 2.1} {\sl Let continuous functions $p,\,q,\, l,\, m$
be given such that $p(t)\geq q(t)>0$, $0\leq l(t)\leq m(t)$ in the
interval $[t_0,t_1]$. Let $u$ and $v$ be the respective solutions of
$$\displaylines{
\hfill u''+p(t)u'+l(t)u=0,\hfill\llap{(5)}\cr
\hfill v''+q(t)v'+m(t)v=0\hfill\llap{(6)}
}$$
such that $u(t_0)=v(t_0)\geq0$ and $u'(t_0)=v'(t_0)$. Assume in
addition that $p(t)\equiv q(t)$ in case $u'(t_0)=v'(t_0)>0$. Then if
$v(t)\geq0$ in $[t_0,t_1]$ we have $u(t)\geq v(t)$ in $[t_0, t_1]$.
}
\paragraph{Proof.} If $u(t_0)=v(t_0)=u'(t_0)=v'(t_0)=0$ or $p\equiv q $
and $l\equiv m$ there is nothing to prove. Otherwise, starting with
$u(t_0)=v(t_0)\geq0$ and $u'(t_0)=v'(t_0)+\epsilon$ ($\epsilon>0$)
it follows that $u>v$ in some interval
$(t_0,t_0+\delta)$. Suppose that there exists $\bar t\leq t_1$ such
that $u(t)>v(t)$ $\forall t\in (t_0,\bar t)$ and $u(\bar t)=v(\bar
t)$. Set $P(t):=\int_{t_0}^tp(s)\,ds$. Multiplying (5) and (6) by
$e^{P(t)}$, then (5)
by $v$, (6) by $u$, integrating in $[t_0,\bar t]$ and subtracting we
obtain, since $v'(t)<0$ in case $v'(t_0)\leq 0$ (according to
Remark 2)
\begin{eqnarray*}
0&>&[e^{P(t)}(u'(t)v(t)-u(t)v'(t))]_{t_0}^{\bar t}+\int_{t_0}^{\bar
t}e^{P(t)}(p(t)-q(t))v'(t)u(t)\,dt\\
&&+\int_{t_0}^{\bar t}e^{P(t)}(l(t)-m(t))u(t)v(t)\,dt=0,
\end{eqnarray*}
a contradiction. Hence
$u\geq v$ in $[t_0,t_1]$. In the limit as $\epsilon\to0^+$ the
statement follows. \smallskip
Let us write, in accordance with previous notation for the
$n$-dimensional case,
$$f(u)=ug(u),\quad 0\leq u\leq a.$$
\paragraph{Lemma 2.2} {\sl Assume $p$ is continuous in ${\mathbb R}$, $f$ is a
Lipschitz continuous function in $[0,\mu]$ such that $f(0)=0$ and
$f(u)>0$ if $0<u\leq\mu$; assume in addition that $c^2\geq4M$, where
% NOTE(review): hypotheses reconstructed from a garbled source -- verify.
$M:={\displaystyle\sup_{0<u\leq\mu}}g(u)$, and that $p(t)\geq c>0$
for $t\geq t_0$. Then the solution of the Cauchy problem
$$u''+p(t)u'+f(u)=0,\quad u(t_0)=\mu,\;\;u'(t_0)=-\epsilon
$$ where
we assume that $\mu$ and $\epsilon>0$ are as in Remark 3, is positive and
strictly decreasing in $[t_0,\infty)$ and vanishes at $+\infty$.}
\paragraph{Proof.} Apply Lemma 2.1 with $q(t)=c$, $l(t)=g(u(t))$,
$m(t)=M$. Take Remarks 2 and 3 into account.
The fact that $u(+\infty)=0$ is an easy consequence of the boundedness
of $p$, $u$ and $u'$ since for $t>t_0$ and some $t^*\in(t_0,t)$
$$u'(t)+\epsilon+p(t^*)(u(t)-u_0)+\int_{t_0}^tf(u(s))\,ds=0,\quad t>t_0
$$
and we infer that $\int_{t_0}^{+\infty} f(u(s))\,ds$ converges.
\paragraph{Remark.} It is immediately recognized that the above result
still holds if $\epsilon=0$ provided $f(u)>0$ $\forall u\in(0,\mu]$.
\section{Heteroclinics}
In this section we give a simple analytic argument to prove the
existence of heteroclinics under hypotheses (H1)-(H2). For the sake of
clarity we start with the case of the scalar equation
$$u''+p(t)u'+f(u)=0\,,\eqno(2_1)$$
where $f:[0,a]\to{\mathbb R}_+$ has the property (H1) for $n=1$ and we write
accordingly
$$c:={\displaystyle\inf_{t\in{\mathbb R}}p(t)}>0;\quad f(u)=ug(u).
$$
A basic assumption, which cannot be
improved when $p\equiv c$ is a constant and $g(u)$ is decreasing, is
$$c\geq2\sqrt{{\displaystyle\sup_{0<u\leq a}g(u)}}\,.\eqno(7)$$
% NOTE(review): the statement of Proposition 3.1 is reconstructed from a
% garbled source -- verify against the published version.
\paragraph{Proposition 3.1} {\sl Let $0<\mu\leq a$,
$N:={\displaystyle\sup_{0<u\leq\mu}}f(u)$, and assume that either
{\rm(i)} $c^2\geq4{\displaystyle\sup_{0<u\leq\mu}}g(u)$, or
{\rm(ii)} there exists $\nu$, $0<\nu<\mu$, such that
$c^2\geq4{\displaystyle\sup_{0<u\leq\nu}}g(u)$,
and $p(t)\geq c>0$ for $t\geq t_0$. Then for each
sufficiently small $\epsilon>0$ the solution $u(t,t_0,\epsilon)$ of (2)
such that $u(t_0,t_0,\epsilon)=\mu$ and
$u'(t_0,t_0,\epsilon)=-\epsilon$
is positive in $[t_0,+\infty)$ and
$$\lim_{t\to+\infty}u(t,t_0,\epsilon)=0.$$}
\paragraph{Proof.} In case (i) holds, this is only Lemma 2.2. Otherwise
note that the solution $u(t,t_0,\epsilon)$ has no critical points and
therefore is strictly decreasing. It cannot remain above a positive
constant by the argument used at the end of the proof of Lemma 2.2.
Let $ t_1$ be such that $u(t_1, t_0,\epsilon)=\nu$.
The equation itself shows that $u'( t_1, t_0,\epsilon)\geq -N/c$
(consider separately the cases where $t_1$ lies in an interval of
convexity or of concavity of the solution) and therefore Lemma 2.2 can
be applied.
\paragraph{Remark.}
According to the remark after Lemma 2.1 it is obvious, via the same
arguments, that the Proposition holds even if $\epsilon=0$ except in
case $f(\mu)=0$.
\paragraph{Theorem 3.2} {\sl Assume (H1)-(H2) with $n=1$ and, in
addition to the hypotheses of proposition 3.1 with $\mu=a$, that
$p(t)$ is bounded. Then ($2_1$) has a
strictly decreasing heteroclinic solution connecting $a$ and 0.}
\paragraph{Proof.} With respect to $\mu=a$ in Proposition 3.1 take a
sequence $t_m$ decreasing to $-\infty$ and consider the solution
$u(.,t_1,\epsilon_1)$ where
$\epsilon_1$ is a small positive number. According to proposition 3.1,
$0<u(t,t_1,\epsilon_1)<a$ for every $t>t_1$.
% NOTE(review): the Claim below is reconstructed from a garbled source --
% verify against the published version.
\paragraph{Claim.} There exist $\bar t$ and $m_2>1$ such that
$$u(\bar t, t_{m_2},\epsilon_1)<a/2.$$
Suppose not: then $u(\bar t, t_m,\epsilon_1)\geq a/2$ for every
$\bar t\geq t_m$ and every $m>1$ and, by a $t_m$ translation, this can be written
$$u_m(\bar t-t_m)\geq a/2\eqno(8)
$$
in terms of the solution of
$$u''_m+p_m(t)u'_m+f(u_m)=0,\quad u_m(0)=a,\;\;u'_m(0)=-\epsilon_1\,,
$$
where $p_m(t)=p(t+t_m)$. The boundedness of $p_m$, $u_m$ and $u'_m$
and Ascoli's theorem enable us, by extracting subsequences and a
diagonal procedure, to suppose that (where we set
$d:={\displaystyle\sup_{t\in{\mathbb R}}p(t)}$)
$$p_m\to p_{\infty}\;\;\mbox{in}\;\;L^{\infty}\mbox{weak-*},\;\;c\leq
p_{\infty}(t)\leq d
$$
$$u_m\to u\;\;\mbox{in}\;\; C^1(K),\;\; K\;\; \mbox{any compact interval
in}\;[0,+\infty).
$$
Since
$$u''+p_\infty(t) u'+f(u)=0,\quad u(0)=a,\;\;u'(0)=-\epsilon_1
$$
(and it is easy to see that proposition 3.1 still applies to
solutions in the Carath\'eodory sense) there exists $\tilde t$ such
that $u(\tilde t)=a/4$. Since $u_m\to u$ uniformly in
$[0,\tilde t]$ and $\bar t-t_m\to+\infty$ this contradicts (8) and so the
Claim holds.
To go on with the proof we observe that if $\delta>0$ is sufficiently
small we have
$u(\bar t, t_{m_2},\delta)>a/2$, since $u(.,t_{m_2},\delta)\to a$ as
$\delta\to0^+$ in
$[t_{m_2},\bar t]$. By the intermediate value theorem we can pick up
$0<\epsilon_2<\epsilon_1 $ such that $u(\bar t, t_{m_2},\epsilon_2)=a/2.$
This argument can be iterated so as to construct decreasing sequences
$\tau_k=t_{m_k}$ and $\epsilon_k$ with the property that
$u(\bar t, \tau_k,\epsilon_k)=a/2$.
Using again the boundedness of $u(.,\tau_k,\epsilon_k)$ and
$u'(.,\tau_k,\epsilon_k)$ and the diagonal procedure we can pass to a
subsequence (which for convenience is denoted by
the same symbol) so that for any compact interval $K\subset{{\mathbb R}}$,
$$u(.,\tau_k,\epsilon_k)\to u\;\;\mbox{in}\;\; C^1(K).$$
The limit function $u$ thus obtained is, of course, a decreasing
solution to ($2_1$), such that $u(\bar t)=a/2$ and
$0<u(t)<a$ for all $t\in{\mathbb R}$; hence $u(-\infty)=a$,
$u(+\infty)=0$, and $u$ is the desired heteroclinic.
% NOTE(review): the statement of the theorem for the system and the opening
% of its proof are reconstructed from a garbled source -- verify against the
% published version.
\paragraph{Theorem 3.3} {\sl Assume (H1)-(H2), that the functions
$p_i(t)$ are bounded and that
$c_i\geq2\sqrt{{\displaystyle\sup}\,g_i}$, $i=1,\dots,n$. Then (2) has a
heteroclinic solution with strictly decreasing components connecting
$(a_1,\cdots,a_n)$ and 0.}
\paragraph{Proof.} Consider the solutions $u(t,t_0,\epsilon)$ of (2)
such that $u_i(t_0)=a_i$, $u_i'(t_0)=-\epsilon$; for $\epsilon>0$
sufficiently small such solutions are defined in $[t_0,\infty)$, their
components being strictly decreasing and vanishing at $+\infty$.
Now take a sequence $t_m\to-\infty$ and $u(.,t_1,\epsilon_1)$
where $\epsilon_1$ is small. Selecting the first component, $u_1$,
we easily establish, as in the proof of theorem 3.2, that there exists
$\bar t$ and subsequences
$\tau_k=t_{m_k}\to-\infty,\;\epsilon_k\to0^+$ so that
$$u_1(\bar t, \tau_k,\epsilon_k)=a_1/2\eqno(9)
$$
(it is sufficient to argue as in the proof of theorem 3.2 with respect
to the equation for the first component).
Next consider the sequence $u_2(.,\tau_k,\epsilon_k)$ and let $s_k$ be
numbers such that
$$u_2(s_k, \tau_k,\epsilon_k)=a_2/2.$$
We claim that the sequence $s_k-\bar t$ is bounded: for suppose for
instance that along a subsequence $s_k-\bar t\to+\infty$ (the case
$s_k-\bar t\to-\infty$ is analogous); integrating the second equation
of the system (2) in $[\bar t, s_k]$ we obtain
\begin{eqnarray*}
u'_2(s_k,\tau_k,\epsilon_k)-u'_2(\bar
t,\tau_k,\epsilon_k)+p_2(t^*_k)(u_2(s_k,\tau_k,\epsilon_k)-u_2(\bar t,
\tau_k,\epsilon_k))&&\\
+\int_{\bar t}^{s_k}u_2(t,\tau_k,\epsilon_k)g_2(u(t,\tau_k,\epsilon_k))
\,dt&=&0\,,
\end{eqnarray*}
where $t^*_k\in[\bar t,s_k]$. Now the first factor in the integrand
is greater than $a_2/2$; using (H1) we see that the second is bounded away from zero
(because $u_1$ takes values $\leq a_1/2$); therefore we
have reached a contradiction.
Since this argument can be repeated with respect to the remaining
components, along with (9) we construct sequences $s_k^{(j)}$,
$j=2,\cdots,n$ such that
$$u_j(s_k^{(j)}, \tau_k,\epsilon_k)=a_j/2\eqno(10)$$
and $s_k^{(j)}-\bar t$ is bounded. Now, as in theorem 3.2 we go to
the limit through a diagonal subsequence: $u(.,\tau_k,\epsilon_k)\to v$ uniformly
in compact intervals, and $v$ is a solution of (2) with decreasing
components. Moreover we may assume that $s_k^{(j)}\to t_j$,
$j=2,\cdots,n$ and therefore on account of (9)-(10) we obtain
$$v_1(\bar t)=a_1/2,\quad v_j(t_j)=a_j/2,\;\;j=2,\cdots,n.\eqno(11)
$$
We assert that $v(-\infty)=a$ and $v(+\infty)=0$; for this it
suffices to show that $v'_1(t)\to0$ as $t\to\pm\infty$.
% NOTE(review): assertion reconstructed from a garbled source -- verify.
Suppose not: then there exist sequences $t_k$, $s_k$ and
$\delta>0$ with $v'_1(t_k)\to0$ and $\vert
$v_1'$ and integrating yields
$$\frac{1}{2}[v_1'^2(s_k)-v_1'^2(t_k)]+\int_{t_k}^{s_k}p_1v_1'^2
+\int_{t_k}^{s_k}f_1(v)v_1'=0,
$$
where, by the mean value theorem and what has been already proved, the
last summand tends to 0 as $k\to\infty$; this is a contradiction and
the proof is complete.
\paragraph{Remark.} As in theorem 3.2, we could use a set of conditions
like (7) to improve the lower
bounds on $c_i,\quad i=1,\dots, n$ in case the functions $g_i$ approach 0 as
$u_i\to0$.
\section{Positive solutions vanishing at the endpoints of an
unbounded interval}
In this section we consider $f$ satisfying \begin{enumerate}
\item[(H3)] For each $i\in\{1,\cdots,n\}$, $f_i:{{\mathbb R}_+}^n\rightarrow{{\mathbb R}_+}$
is a locally Lipschitz continuous function
such that $f_i(0)=0$ and $f_i(u)>0$ if $u_i>0$.
\end{enumerate}
We consider the problem of finding {\it nontrivial} positive solutions to
$$\displaylines{
\hfill u_i''+p_i(t)u_i'+f_i(u)=0,\quad i=1,\dots, n\hfill\llap{(12)}\cr
\hfill u(0)=0=u(+\infty)\hfill\llap{(13)}
}$$
where by {\it nontrivial} we mean that each component $u_i$ of such
solution is positive in $(0,+\infty)$. For definiteness the initial
endpoint is taken to be $t_0=0$, but our
results can obviously be restated with an arbitrary left endpoint.
\smallskip
Before stating the result we note the following fact: denote by
$u(\cdot ,A)$ the solution of the Cauchy problem
$$u_i''+p_i(t)u_i'+f_i(u)=0,\quad u_i(0)=0,\;\;u_i'(0)=A;$$
then $u_i(\cdot , A)$ has, for every $A>0$, a maximum $\mu_i(A)$ depending continuously on $A$
and $\mu_i(0^+)=0$,
$\mu_i(+\infty)=+\infty.$ To see this, take $t^*>0$ in a neighborhood of
0, $A_i^*=u_i'(t^*,A)>0$, $u_i^*=u_i(t^*,A)>0$. Let $K_i$ be the least
upper bound of the (scalar) solution of
$$z''+p_i(t)z'=0, \quad z(t^*)=u_i^*,\;\;z'(t^*)=A_i^*,
$$
and define $\delta_i:=\inf\{g_i(x):\;\;u_i^*\leq x_i
\leq K_i;\;x_j\leq K_j \;\;\mbox{if}\;j\neq i\}>0.$
Comparing $u_i(\cdot,A)$ with the solution to
$$v''+p_i(t)v'+\delta_i v=0, \quad v(t^*)=u_i^*,\;\;v'(t^*)=A_i^*,
$$
it is easy to see, using Lemma 2.1, that (since $v\leq z$)
$u_i(t,A)\leq v(t)$ as long as $u_i(t,A)>u_i^*$. But the behavior of
$v(t)$ implies that
$v(t)$ returns to the value $u_i^*$ and therefore $u_i(t,A)$
attains a maximum. The assertion about $\mu_i(0^+)$ comes from the fact
that $K_i\to0$ as $A\to0^+$. The other assertion is straightforward.
\paragraph{Proposition 4.1} {\sl Assume (H2)-(H3). Assume $p$ is bounded
in $[0,\infty)$ and let $c_i:={\displaystyle\inf_{t\geq0}p_i(t)}$.
Given positive numbers $\alpha_i$, $i=1,\cdots,n$ such
that
$M_i:=\sup\{g_i(u): \;\;0<u_j\leq\alpha_j,\;\forall j\}$ satisfy
% NOTE(review): constants reconstructed from a garbled source -- verify.
$c_i^2\geq4M_i$, and define
$A^*:=\sup\{A>0:\;\;(\mu_1(A),\cdots,\mu_n(A))\in(0,\alpha_1]
\times\cdots\times(0,\alpha_n]\}$.
Then if $0<A<A^*$