\documentclass[twoside]{amsart}
\usepackage{amssymb, amsmath}
\pagestyle{myheadings}
\usepackage{graphicx}
\AtBeginDocument{{\noindent\small
{\em Electronic Journal of Differential Equations},
Vol. 2002(2002), No. 51, pp. 1--27.\newline
ISSN: 1072-6691. URL: http://ejde.math.swt.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.swt.edu (login: ftp)}
\thanks{\copyright 2002 Southwest Texas State University.}
\vspace{1cm}}
\begin{document}
\title[\hfilneg EJDE--2002/51\hfil On combined asymptotic expansions]
{On combined asymptotic expansions in singular perturbations}
\author[E. Beno\^{\i}t, A. El Hamidi, \& A. Fruchard \hfil EJDE--2002/51\hfilneg]
{Eric Beno\^{\i}t, Abdallah El Hamidi, \& Augustin Fruchard }
\address{Eric Beno\^{\i}t, Abdallah El Hamidi, \& Augustin Fruchard \hfill\break
Laboratoire de Math\'ematiques \\
Universit\'e de La Rochelle \\
P\^ole Sciences et Technologie\\
Avenue Michel Cr\'epeau\\
17042 La Rochelle cedex 1, France}
\email{ebenoit@univ-lr.fr}
\email{aelhamid@univ-lr.fr}
\email{afruchar@univ-lr.fr}
\date{}
\thanks{Submitted March 10, 2002. Published June 3, 2002.}
\subjclass[2000]{34E05, 34E15, 34E18, 34E20}
\keywords{Singular perturbation, combined asymptotic expansion,
\hfil\break\indent
turning point, canard solution. }
\begin{abstract}
A structured and synthetic presentation of Vasil'eva's combined
expansions is proposed. These expansions take into account
the limit layer and the slow motion of solutions of a
singularly perturbed differential equation. An asymptotic
formula is established which gives the distance between two
exponentially close solutions. An ``input-output'' relation
around a {\it canard} solution is carried out in the case of
turning points. We also study the distance between two canard values
of differential equations with given parameter.
We apply our study to the Liouville equation and to the splitting
of energy levels in the one-dimensional steady Schr\"{o}dinger
equation in the double well symmetric case. The structured nature
of our approach allows us to give effective symbolic algorithms.
\end{abstract}
\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{definition}{Definition}[section]
\newcommand{\pref}[1]{{\rm(\ref{#1})}}
\section{Introduction}
The main motivation of this work is the study of the real steady
Schr\"{o}dinger equation
\begin{equation} \label{1}
\varepsilon^2\ddot\psi=(U(t)-E)\psi\,,
\end{equation}
where the dot denotes the derivative with respect to the space variable $t$,
the small parameter $\varepsilon>0$ is related to the Planck constant
($\varepsilon=\hbar/\sqrt{2m}$), $E\in\mathbb{R}$ is the energy, and $U$ is a symmetric
non-degenerated double well potential. Precisely, $U$ is assumed to be
a $C^\infty$ even function with three critical points: one local
maximum at the origin and two global minima
at $\pm t_0$, which are supposed to be quadratic.
By a translation on $U$ and $E$, we may suppose that $U$ vanishes at $\pm t_0$.
Hence the potential is of the form $U(t)=\varphi(t)^2$ where $\varphi$ is itself
a $C^\infty$ even function, and satisfies furthermore $\varphi(0)>0,\ \varphi(t_0)=0,\ \varphi'(t_0)\neq0$
and $\varphi$ decreasing on $\mathbb{R}^+$.
The simplest example, which has already been studied in \cite{de,z1,z2} is
\begin{equation} \label{2}
U(t)=(1-t^2)^2.
\end{equation}
The following description contains some statements which will be proven in subsection
\ref{sec3.4}. The asymptotic behaviour of the solutions in the
neighborhood of $+\infty$ is as follows: there is
a one dimensional subspace denoted by $V$ (in the two dimensional space of solutions) of exponentially
decaying solutions as $t \rightarrow +\infty$; the other solutions increase exponentially. The situation
is similar at $-\infty$.
A natural question is to find the energy values for which these two subspaces coincide.
This is equivalent to the fact that equation \pref1 has nontrivial solutions in $L^2(]-\infty,+\infty[)$ which
leads to energy quantization. These values of $E$
are related to the energy levels which correspond to {\it observable solutions} of \cite c.
We consider in this paper only solutions without zero in the neighborhood of $\pm t_0$. This
corresponds to the first energy level and this implies that $E/\varepsilon$ is
infinitely close to $-\varphi'(t_0)$.
It appears that the posed problem has two solutions, denoted by $E^\#(\varepsilon)$ and $E^\flat(\varepsilon)$. They are
{\it canard} values for the Riccati equation associated to \pref1:
\begin{equation} \label{ri}
\varepsilon v'=U(t)-E-v^2,
\end{equation}
that is to say, values for which
\pref{ri} has solutions with a particular asymptotic behaviour.
Those solutions, denoted by $v^\#$ and $v^\flat$, border the slow
curve $v=-\varphi(t)$ on $]-\infty,0[$, and the other slow curve
$v=\varphi(t)$ on $]0,+\infty[$. The solution $v^\#$ (resp.
$v^\flat$) takes the value $v=\infty$ (resp. $0$) at $t=0$ and is
a {\it canard} solution both at $t=-t_0$ and at $t=t_0$.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{figure}[ht]
\begin{center}
\includegraphics[width=6cm]{fig1.ps}
\vspace*{-0.1cm}
\caption{The solutions $v^\#$ and $v^\flat$, and a ``great canard solution''
$v^\natural$ for the potential \pref2 and $\varepsilon = 1/10$.}
\setlength{\unitlength}{1cm}
\vspace{-7cm}
%\hspace{-3cm}
\begin{picture}(12,7.3)\small
\put(3.6,2.2){$\varphi$}
\put(6.2,2.2){$v^\#$}
\put(8,5.8){$-\varphi$}
\put(5.5,5.8){$v^\#$}
\put(6.1,3.8){$v^\flat$}
\put(5.4,4.1){$v^\natural$}
\end{picture}
\end{center}
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Concerning the potential \pref2, it is proven in \cite{g} that these canard values are exponentially close to
each other; precisely:
\begin{equation} \label{4}
E^\#(\varepsilon)-E^\flat(\varepsilon)=\exp\Big(-\frac1\varepsilon\big(\frac43+o(1)\big)\Big),
\quad \varepsilon\to 0\,.
\end{equation}
In the general case, the same method yields the analogous result, where the constant $a$ that plays the
role of 4/3 is given by
\begin{equation} \label{a1}
a=2\int_0^{t_0}\varphi(t)dt\,.
\end{equation}
We present in this paper the following result.
\begin{theorem} \label{th1}
There are a constant $C$ and a real sequence
$(a_n)_{n\geq1}$ such that for any fixed integer $N\geq1$
one has, as $\varepsilon\to0$,
\begin{equation} \label{6}
E^\#(\varepsilon)-E^\flat(\varepsilon)=C\varepsilon^{1/2}\exp(-a/\varepsilon)\left(1+a_1\varepsilon+
\dots +a_{N-1}\varepsilon^{N-1}+O(\varepsilon^N)\right).
\end{equation}
\end{theorem}
\paragraph{Comments:} Before giving an idea of proof of this theorem, we
describe below some experimental results and related conjectures.
In the case of the potential \pref2 we found $C=\frac{16\sqrt2}{\sqrt\pi}$ and
$a_1=-\frac{71}{96}$, which were already found in \cite{de} (only
$a_1$ has to be replaced by $-\frac{71}{48}$ because \cite{de} uses the potential
$U(t)=t^2(1-t)^2$). See also \cite{z1,z2} for a related work.
Using Maple, we obtained the following terms $a_2=-\frac{6299}{18432},\ a_3=-\frac{2691107}{5308416}$.
Symbolic manipulations for other potentials led us to guess the following:
\paragraph{Conjecture 1} In the case of potential \pref2, all $a_n$ are
rational.
This conjecture has no particular physical relevance.
Moreover it cannot be generalized to other potentials, since we found several polynomial
potentials with rational coefficients for which the $a_n$ are not rational, see subsection \ref{symb}.
On the other hand, the conjectures that follow seem to us more interesting. Let $E^\natural$ be some parameter
value such that the corresponding solution $v^\natural$ of \pref{ri} borders the slow curve
$v=\varphi(t)$ from $-t_0$ to $+\infty$, {\it i.e.} a {\it great canard}.
Such a value is defined up to
exponentially small. Precisely, one shows as in \pref4 that, if $\overline E=\overline E(\varepsilon)$ is a given
great canard value, then it is the same for $E^\natural$ if and only if
$$\overline E(\varepsilon)-E^\natural(\varepsilon)=O\Big(\exp\big(-\frac1\varepsilon(2a+o(1))\big)\Big).
$$
We say in this case that $E^\natural$ {\it is defined
within an exponential of type $2a$}.
Since $E^\flat$, $E^\#$ are unique, the differences $E^\#-E^\natural$,
$E^\natural-E^\flat$ are known up to an exponential of type $2a$;
as these quantities are exponentials of
type $a$ ({\it cf.} \cite{g}), they are known in relative value up to an exponential of type $a$.
Therefore it is natural to expect an expansion in powers of $\varepsilon$ in the
expression of these differences. In any case, if such an expansion exists, it is necessarily unique,
({\it i.e.} independent of the chosen great canard value $E^\natural$).
Indeed, we obtain as in theorem \ref{th1} the analogous formulae:
\begin{gather} \label{6d}
E^\#(\varepsilon)-E^\natural(\varepsilon)=C^\#\varepsilon^{1/2}\exp(-a/\varepsilon)\left(1+a^\#_1\varepsilon+
\dots +a^\#_{N-1}\varepsilon^{N-1}+r^\#_N(\varepsilon)\right),\\
\label{6b}
E^\natural(\varepsilon)-E^\flat(\varepsilon)=C^\flat\varepsilon^{1/2}\exp(-a/\varepsilon)\left(1+a^\flat_1\varepsilon+\dots
+a^\flat_{N-1}\varepsilon^{N-1}+r^\flat_N(\varepsilon)\right)\,,
\end{gather}
where $r^\#_N(\varepsilon)=O(\varepsilon^N)$ and $r^\flat_N(\varepsilon)=O(\varepsilon^N)$.
Using Maple we found $a^\#_n=a^\flat_n=a_n$ for $n\leq4$ for several potentials. Hence we
are led to the following:
\paragraph{Conjecture 2}
With the assumptions on $\varphi$, we have $C^\#=C^\flat=\frac C2$ and for
all $n\in\mathbb{N}^*,\ a^\#_n=a^\flat_n=a_n$.
This result seems to be surprising insofar as the equation has no
symmetry according to the change of variable $u \mapsto 1/u$.
Concerning potential \pref2, in the approach of \cite{de}, this symmetry between the coefficients
$a^\#$ and $a^\flat$ seems to follow directly from the fact that $U$ is even.
Since this approach may be generalized to other polynomial symmetric
potentials, and due to its formal nature, conjecture 2 is highly believable.
We must point out here that, while the method in \cite{de} may work for polynomial ---
possibly analytic --- potentials, it is by no means applicable
to general $C^\infty$ potentials, as this approach makes a wide use of complex
analysis. Furthermore, our approach seems to be applicable to
the case of minima that are not quadratic. On the other hand, the
technique of \cite{de} allows a deep insight of the analytic structure of $E^\#$
and $E^\flat$, which is out of the range of our ``real'' methods.
Anyway, numerical computations (see section \ref{symb}) suggest the following:
\paragraph{Conjecture 3}
In the case of potential \pref2, the mean value $\frac12(E^\#+E^\flat)$ is
a great canard value.
More general conjectures for analytic potentials are available, but need to deal with
the singularities of the potential in the complex plane. As we chose to keep a real viewpoint in
this article, we do not formulate them.
A way to show part of this conjecture would be to prove that the asymptotic expansion
$1+\sum_{n\geq1}a_n\varepsilon^n$ is Gevrey-1 as well as the remainder terms, in other words,
that there are
$A,C,\varepsilon_0>0$ such that for all $n\in\mathbb{N}^*$ and all $\varepsilon\in]0,\varepsilon_0[$ one has
\begin{equation} \label{4.1}
|a_n|\leq A\,C^n\,n!,\quad |r^\#_n(\varepsilon)|\leq A\,C^n\,n!\,\varepsilon^n,
\quad |r^\flat_n(\varepsilon)|\leq A\,C^n\,n!\,\varepsilon^n,
\end{equation}
where $r^\#_n$ and $r^\flat_n$ are defined (within an exponential of type $4/3$) by
\pref{6d} and \pref{6b}. This point seems to be more accessible and allows one to prove that the solution
corresponding to $\frac12(E^\#+E^\flat)$ borders the slow curve $v=1-t^2$ on an interval
$]\alpha,+\infty[$ containing $0$ ({\it i.e.} a canard solution longer than $v^\#$ and $v^\flat$).
Actually, the classical results of Gevrey analysis and a study of potential \pref2 in the complex domain
allow to prove that $\frac12(E^\#+E^\flat)$ is a
great canard value if:
\begin{itemize}
\item for all $n\in\mathbb{N}^*$, one has $a^\#_n=a^\flat_n=a_n$
\item The expansion $1+\sum_{n\geq1}a_n\varepsilon^n$ as well as the remainders $r^\#_n$ and $r^\flat_n$ are
Gevrey-1 of type $3/4$, {\it i.e.} for all $\delta>0$ there is $A>0$ such that \pref{4.1} is satisfied with
$C=\frac34+\delta$.
\end{itemize}
These questions of Gevrey analysis are beyond the scope of the present article and will be the topic of
another study.
We now return to theorem \ref{th1}.
The principle of the proof is to consider an associated
Riccati equation to \pref1 (different from \pref{ri} for technical reasons).
To each family of exponentially decaying solutions of \pref1 described above correspond two
solutions (analogous to $v^\#$ and $v^\flat$) denoted
by $u^\#$ and $u^\flat$ of the Riccati equation with $E^\#$ and $E^\flat$ as values
of the energy.
Using the differential equation satisfied by $y= u^\#- u^\flat$ written in a linear form,
we express $E^\#-E^\flat$ in terms of integrals containing $u^\flat$ and $u^\#$; see subsection
\ref{sec3.4}.
This requires an accurate estimate of $u^\#$ and $u^\flat$, not only for the
slow motion, but also for the fast one.
We used for this purpose the combined asymptotic expansions introduced by A.B. Vasil'eva and V.F.
Butuzov \cite{v}.
In spite of the large use of these expansions in asymptotic analysis, we believe it useful to present
them in a structured and simplified version.
Indeed, the approach of Vasil'eva and
Butuzov is more general and therefore involves some technical difficulties.
Sporadic presentations of these expansions are given in
\cite{o}, \cite{w}, without complete proofs.
The algebraic properties of combined expansions are described in section \ref{sec2.3}.
Among them are proved general compatibility results with respect to usual operations
(algebraic, analytic and differential).
Some elementary results related to exponentially decaying functions,
which are used later, are included
in section \ref{sec2.2}.
The existence of combined expansions for solutions of singularly perturbed differential equations is
proved in section \ref{sec2.4}.
Next, an application for the estimation of the difference of two solutions in a slow-fast
differential equation is presented. The case without turning point is illustrated
on the Liouville equation in section \ref{sec3.2}. We briefly describe the turning point case,
together with a canard solution, and the input-output relation \cite{bcdd} is carried out.
Section \ref{sec3.4} is devoted to the proof of theorem \ref{th1} and the numerical results mentioned
above.
This paper is written in the framework of nonstandard analysis in its IST version, introduced by Edward
{\sc Nelson} \cite n.
However, the reader can easily ``translate'' all the statements and proofs into standard mathematical language.
Notions and notations related to nonstandard analysis are collected in section \ref{sec2}.
\section{Foundations}\label{sec2}
\subsection{Notations}\label{sec2.1}
$\mathbb{R}^d$ is equipped with the maximum norm. $T=[t_1,t_2]$ denotes the standard
interval in $\mathbb{R}$, $\varepsilon>0$ is
infinitesimally small, and $X$ denotes the
nonstandard interval $X=[0,(t_2-t_1)/\varepsilon]=\{x\in\mathbb{R}\ ;\ t_1+\varepsilon x\in T\}$. \\
The symbol $\pounds$ denotes any finite real number
(generally functions of $t$ or of $x$). Two occurrences of
this symbol have not necessarily the same value.\\
The symbol $\oslash$ denotes any infinitesimally small quantity.\\
The symbol $@$ denotes a positive, finite and non infinitesimally small
quantity.\\
The symbols $\forall^{st}$ and $\exists^{st}$ stand for the expressions
``for all standard" and ``there is a standard".\\
The notation $x\simeq y$ means ``$x-y$ is infinitesimally small".\\
$]\!\,]a,b]$ denotes the external set of points in $]a,b]$ which are not
infinitesimally close to $a$.
Given a function $f$ of class $C^1$ on an open subset $U$ in $\mathbb{R}^d$,
we introduce the notation
$$\Delta_if(x;h_i):= \begin{cases}
\frac1{h_i}(f(x_1,\dots ,x_i+h_i,\dots ,x_d)-f(x_1,\dots ,x_d)), &\mbox{if }
h_i\neq 0\\
\frac{\partial f}{\partial x_i}(x_1,\dots ,x_d) &\mbox{if } h_i = 0
\end{cases}
$$
We will use the following formula:
if $x=(x_i)_{i\in\{1,\dots ,d\}}$ and $h=(h_i)_{i\in\{1,\dots ,d\}}$ are such that
$x+(h_1,\dots ,h_k,0,\dots ,0)$ belongs to $U$, for any $k \in \{1,\dots ,d\}$, then
\begin{equation} \label{d}
f(x+h)=f(x)+\sum_{k=1}^dh_k\tilde\Delta_kf(x,h)\,,
\end{equation}
with $\tilde\Delta_kf(x,h):=\Delta_kf(x+(h_1,\dots ,h_{k-1},0,\dots ,0);h_k)$.
\subsection{Expansion} \label{sec2.1bis}
Given a finite quantity $q$, we say that $q$ has an {\it $\varepsilon$-expansion} if there is
a standard sequence $(q_n)_{n\in\mathbb{N}}$ such that for all standard integer $N\geq1$, we have
\begin{equation} \label{9}
q=\sum_{n=0}^{N-1}q_n\varepsilon^n+\pounds\varepsilon^N\,.
\end{equation}
The sequence $(q_n)$ is of course unique in this case and
we simply write
$$q\sim\sum_{n\geq0}q_n\varepsilon^n\,.$$
When $q$ is a function defined on an internal or external set $E$, the relation \pref9 must be satisfied
{\it for every element of $E$,
standard or not}. In classical terms, the notion of $\varepsilon$-expansion for every element
of $E$ (resp.\ for every standard element of $E$) corresponds to the uniform asymptotic
expansion (resp.\ the pointwise asymptotic expansion).
Uniform expansion on any compact subset of some domain $D$ would correspond to $\varepsilon$-expansion
on the {\it $S$-interior of $D$}, which is the external set of limited points of $D$ that are not
i-close to the boundary of $D$.
Given an integer $k$ and a function $f$ defined on a standard open subset $U$ in $\mathbb{R}^d $ into $\mathbb{R}^p$, we say
that $f$ is of {\it class $S^k$ on $U$} if $f$ is of {\rm class $C^k$ on $U$}, if $f$ has a shadow
${\,^\circ} f$ of {\rm class $C^k$ on $U$} and if
$$\forall j\leq k\ ,\ \ f^{(j)}\mbox{\rm\ is S-continuous and }
\ {\,^\circ}(f^{(j)})=({\,^\circ} f)^{(j)}\,.$$
By ``S-continuous'' we mean: $x\simeq y\Rightarrow f(x)\simeq f(y)$.
This corresponds to ``uniformly S-continuous'' in other texts.
We recall that the shadow of a function $f$ defined on a standard set is the only standard function
${\,^\circ} f$ that takes at any standard $x$ the standard part of $f(x)$.
We say that a function $f:U\subset\mathbb{R}^d\to\mathbb{R}^p$ {\it has a regular $\varepsilon$-expansion in $U$}
if $f$ is of class $S^\infty$ on $U$ and if $f$ has an $\varepsilon$-expansion on
$U$ as well as all its derivatives of standard order.
Be aware that not only the expansion is ``regular''.
It is known \cite{dx} that, if $f$ has a regular $\varepsilon$-expansion in $U$, then
the expansion of $f$ commutes with the derivation: there exists a standard sequence $(f_n)_{n\in\mathbb{N}}$
of $C^\infty$ functions such that
\begin{equation} \label{s}
\forall x\in U\ \forall^{st} k\in\mathbb{N}\ \forall^{st} N\in\mathbb{N}^*,\quad
f^{(k)}(x)=\sum_{n=0}^{N-1}f^{(k)}_n(x)\varepsilon^n+\pounds\varepsilon^N\,.
\end{equation}
We will also use the following.
\begin{proposition} \label{expa}
\begin{enumerate}
\item If $f$ and $g$ have regular $\varepsilon$-expansions, then the same holds for $f'$ and $\int\!\!f$,
for $f+g$, for $fg$, for $f\circ g$ and for $\Delta f$.
\item Let $f$ have a regular $\varepsilon$-expansion, and let $y=y(x,c)$ denote the solution of the initial value problem
$$ y'=f(x,y)\,,\quad y(x_0)=c\,.$$
Then $y$ has a regular $\varepsilon$-expansion with respect to $x$ and $c$.
\end{enumerate}
\end{proposition}
\paragraph{Proof} (1) Since these results are well known for usual $\varepsilon$-expansions, we only check the property ``regular''.
For $f'$ and $\int\!\!f$ it is obvious. For $fg$, use Leibnitz formula.
For $f\circ g$, use $(f\circ g)'=f'\circ g\times g'$.
For $\Delta f$, use $\Delta f(x;h)=\frac1h\int_0^hf'(x+u)du$
for dimension 1 and similar formulae for higher dimension.\\
(2) For $x$, use the result for $f\circ g$: $f$ and $y$ $\varepsilon$-expandable implies that $y'$ is, too.
For $c$, the variation equation yields the formula
$$
\frac{\partial y}{\partial c}(x,c)
=\exp\Big(\int_{x_0}^x\frac{\partial f}{\partial y}(\xi,y(\xi,c))d\xi\Big)
$$
which shows that $\frac{\partial y}{\partial c}$ has an $\varepsilon$-expansion. It is then clear that
expansion w.r.t $\varepsilon$ and derivation w.r.t. $c$ commute. Conclude by induction.
\hfill $\Box$
A free Maple package for these computations is available at
http://www.univ-lr.fr/Labo/MATH/DAC.
\subsection{Functions with exponential decay and with S-exponential decay}
\label{sec2.2}
The Laplace method will be used later in the following form:
\begin{proposition} \label{int}
Let $t_1<0<t_2$ be standard, and let $f$ and $g$ have regular $\varepsilon$-expansions
$f\sim\sum_{n\geq0}f_n\varepsilon^n$ and $g\sim\sum_{n\geq0}g_n\varepsilon^n$ on $T=[t_1,t_2]$.
Assume that $f_0(0)=0$, that $f_0''(0)=2a$ with $a>0$, and that,
for all $t\in T\setminus\{0\}$, $f_0(t)>0$.
Then
$$ I=\int_{t_1}^{t_2}\exp\big(\frac{-f(t)}{\varepsilon}\big) g(t)\,dt
$$
has an $\eta-$shadow expansion, with $\eta=\sqrt\varepsilon$.
Furthermore, if $g_0(0)\neq0$ we have $I=@\eta$. Namely,
$I=\sqrt\frac\pi a\ g(0)\ \eta+\oslash\eta$.
\end{proposition}
\paragraph{Proof}
There is a standard $k>0$ such that for all $t$ in $T$ one has
$f_0(t)\geq kt^2$. Thus, for
every $t\in T$ one has $f(t)\geq kt^2-C\varepsilon$, with $C=\sup_{t\in T}|f_1(t)|+1$
for example. Using the change of variable $t=\eta\tau$ and Taylor expansion
$f_0(t)=at^2+\sum_{i=3}^{2N+1}a_it^i+\pounds t^{2N+2}$,
one has, for $\tau$ limited,
$$
f_0(\eta\tau)/\varepsilon=a\tau^2+\sum_{i=3}^{2N+1}a_i\tau^i\eta^{i-2}+\pounds\varepsilon^N\,.
$$
When we expand each $f_n$ and $g_n$ using Taylor's formula,
we find that the function
$$G(\tau)=\exp\Big( a\tau^2\!-\frac{f(\eta\tau)}\varepsilon\Big) g(\eta\tau)
$$
admits an expansion in powers of $\tau$ (for limited $\tau$) whose coefficients, denoted $G_n(\eta)$,
admit an $\eta-$shadow expansion with valuation at least $n-2$.
Indeed, with the notation $f_j(t)=\sum_{i\geq0}a_{ij}t^i$ and
$g_j(t)=\sum_{i\geq0}b_{ij}t^i$ we have
\begin{align*}
G(\tau)=&\exp\Big(-\sum_{i=3}^{2N+1}a_i\tau^i\eta^{i-2}
-\sum_{j=1}^{N}\sum_{i=0}^{2(N-j)+1}a_{ij}\tau^i\eta^{2j+i-2}\Big)\\
&\times\sum_{\begin{smallmatrix} 0\leq j\leq N-1 \\ 0\leq i\leq2(N-j)-1
\end{smallmatrix}}
b_{ij}\tau^i\eta^{2j+i}+\eta^{2N}\pounds\,.
\end{align*}
With this notation, the new integrand is $\eta\exp(-a\tau^2)G(\tau)$. Since
$\eta\exp(-k\tau^2+C)(\sup_{t\in T}|g_0(t)|+1)$ bounds this
integrand on $\tilde T:=\{\tau\in\mathbb{R}\ ;\ \eta\tau\in T\}$, the dominated convergence theorem implies:
$$
\int_{t_1}^{t_2}\exp(-f(t)/\varepsilon)g(t)\,dt=
\sum_{n=0}^{2N}\eta\,G_n(\eta)\int_{-\infty}^\infty e^{-a\tau^2}\tau^nd\tau+\pounds\varepsilon^{N}\,.
$$
To conclude this proof, it suffices to rearrange these terms.
\hfill $\Box$
\begin{definition} \label{df1} \rm
Let $I=[0,x^*]$ with $x^*\in\mathbb{R}^+$ not necessarily limited.
A function $f:I\to\mathbb{R}$ is said to have {\it S-exponential decay}
(notation $f(x) = \pounds e^{-@x}$) if there are standard constants $c$, $C>0$
such that
$$ \forall x\in I,\quad |f(x)|\leq C\,e^{-cx}\,. $$
\end{definition}
\begin{proposition} \label{pr1}
Let $a$ be a continuous function on $\mathbb{R}^+$ and let $x_0\geq0$ be limited, such that, for $x>x_0$,
$a(x)$ is appreciably negative.
Let $b$ be a continuous function on $\mathbb{R}^+$ with S-exponential decay.
If $y$ is a solution of the differential equation
$$ y'=a(x)y+b(x) $$
with $y(0)$ limited, then $y$ has an S-exponential decay. In other words,
if $y'=\pounds y+\pounds$
for $x\leq x_0$, and $y'=-@y+\pounds e^{-@x}$ for $x>x_0$ with $y(0)=\pounds$,
then $y(x)=\pounds e^{-@x}$. As a consequence, $y'$ itself has
S-exponential decay.
\end{proposition}
\subsection{Combined expansions: Algebraic properties}\label{sec2.3}
\begin{definition} \label{df2}
Consider again $T=[t_1,t_2]$.
A function $\varphi:T\to\mathbb{R}^d$ admits a {\it combined expansion} if there are two standard sequences
of $C^\infty$ functions $(\varphi_n)_{n\in\mathbb{N}}$,
$(\psi_n)_{n\in\mathbb{N}}$,
$\varphi_n:T\to\mathbb{R}^d$, $\psi_n:\mathbb{R}^+\to\mathbb{R}^d$ such that
\begin{itemize}
\item For all $N\in\mathbb{N}$ and all $t\in T$,
\begin{equation} \label{combex}
\varphi(t)=\sum_{n=0}^{N-1}\big(\varphi_n(t)
+\psi_n(\frac {t-t_1}\varepsilon)\big)\varepsilon^n+\pounds\varepsilon^N
\end{equation}
\item $\psi_n$ has exponential decay at infinity.
\end{itemize}
\end{definition}
The sequence $(\varphi_n)_{n\in\mathbb{N}}$ is called the {\it slow part} and $(\psi_n)_{n\in\mathbb{N}}$
the {\it fast part} of the combined expansion. The dimension $d$ will allow us in the next sections to
consider two different solutions of an ordinary differential equation as a function on $\mathbb{R}^2$
with a combined expansion. The variable $t$ will be considered itself as a third component.
\begin{proposition} \label{pr2} \begin{enumerate}
\item Combined expansions are unique.
\item A vector function has a combined expansion if, and only if,
each of its components has.
\item Let $f$ be a $C^\infty$ standard function from an open subset $U$
of $\mathbb{R}^d$ to $\mathbb{R}^p$ and
$\varphi$ a function from $T$ to $\mathbb{R}^d$ having a combined expansion
$(\varphi_n,\psi_n)$.
Suppose that for all $t\in T$, $\varphi_0(t)\in U$ and for all $x\in\mathbb{R}^+$,
$\varphi_0(t_1)+\psi_0(x)\in U$. Then
$f\circ\varphi$ is well defined and has a computable combined expansion.
\item If $\varphi$ has a combined expansion $(\varphi_n,\psi_n)$, then
$\Phi:[t_1,t_2]\to\mathbb{R}^d,t\mapsto\int_{t_1}^t\varphi(\tau)d\tau$ has a
combined expansion
$(\Phi_n,\Psi_n)$ given, for every $x$ in $X$ and $t$ in $T$, by
$$
\Phi_0(t)=\int_{t_1}^{t}\varphi_0(\tau)d\tau\ ,\quad \Psi_0(x)=0
$$
and for $n\geq1$:
$$\Phi_n(t)=\int_{t_1}^{t}\varphi_n(\tau)d\tau+\int_0^{+\infty}\psi_{n-1}(x)\,dx\,
\quad
\Psi_n(x)=-\int_x^{+\infty}\psi_{n-1}(\xi)\,d\xi\, .
$$
In particular, we have
$$
\int_{t_1}^{t_2}\varphi(t)dt\sim\int_{t_1}^{t_2}\varphi_0(t)dt+
\sum_{n\geq1}\Big(\int_{t_1}^{t_2}
\varphi_n(t)dt+\int_0^{+\infty}\psi_{n-1}(x)dx\Big)\varepsilon^n .
$$
\end{enumerate}
\end{proposition}
The word ``computable'' in statement 3 means that algorithms exist, but are not described in
the present article. They allow one to calculate the expansion of $f \circ \varphi$. Actually, the
reader may find a free Maple
package already mentioned at http://www.univ-lr.fr/Labo/MATH/DAC, see procedure called subsDAC
(only for $f:\mathbb{R}\to\mathbb{R}$).
\paragraph{Proof} (1) By contradiction.
If a function admits two different combined expansions, then their difference is a
non trivial combined expansion $(\varphi_n,\psi_n)$ of $0$. Let $n_0$ be the first
index such that $\varphi_{n_0}$ or $\psi_{n_0}$ is not the zero function. Using the
transfer principle, $n_0$ is standard. Taking $N=n_0+1$ and multiplying by $\varepsilon^{-n_0}$,
one obtains, for any $t$ in $T$, that $\varphi_{n_0}(t)+\psi_{n_0}\left(\frac{t-t_1}\varepsilon\right)\simeq0$.
Since $\psi_{n_0}$ has exponential
decay, for any standard $t$ in $T\setminus\{t_1\},\ \varphi_{n_0}(t)\simeq0$ and consequently
$\varphi_{n_0}(t)=0$. By transfer, this remains valid for every $t$ in $T\setminus\{t_1\}$,
and by continuity for $t=t_1$. Now, for a standard $x$ in $\mathbb{R}^+$ one obtains $\psi_{n_0}(x)\simeq0$, therefore
$\psi_{n_0}(x)=0$, and this remains valid for any real $x$ by transfer.
This leads to the contradiction.\\
(2) This statement is obvious.\\
(3) Denote by $\hat\varphi^i$ the components of
$\hat\varphi:=\sum_{n\geq0}\varphi_n\varepsilon^n$
(and similarly for $\hat\psi$). Formula \pref d yields
$$
f(\hat\varphi+\hat\psi)=f(\hat\varphi)
+\sum_{k=1}^d\tilde\Delta_kf(\hat\varphi,\hat\psi)(\hat\psi^k).
$$
The first term $f(\hat\varphi)$ gives the slow part $(f_n)_{n\in\mathbb{N}}$, since the image of an
usual asymptotic expansion by a $C^\infty$ mapping is an asymptotic expansion (expand $f$ at
$\varphi_0(t)$ with Taylor formula).
For the fast part, given by the sum $\sum_{k=1}^d(\hat\psi^k)\tilde\Delta_kf(\hat\varphi,\hat\psi)$,
one first expands each function $\varphi_i^j(t)=\varphi_i^j(t_1+\varepsilon x)$ up to order $(N-1)$ by Taylor formula.
Using the same Taylor expansion of order $(N-1)$ to the function $\tilde\Delta_kf$ at
$(\varphi_0(t_1),\psi_0(x))$ and multiplying it by the expansion of
$(\hat\psi^k)$, one obtains an $\varepsilon$-expansion. These coefficients $g_n$ are
polynomial in $x$ and $\psi_i^j(x)$ and derivatives of $\tilde\Delta_kf$ at
$(\varphi_0(t_1),\psi_0(x))$. As $\psi_0$ is bounded on $\mathbb{R}^+$, each of these derivatives is a bounded function of $x$.
Moreover, each of the monomial terms of $g_n$ contains
at least one term $\psi_i^j$.
Therefore, these functions $g_n$ have exponential decay.
Concerning the remainder term
$$
R_N(x):=\sum_{k=1}^d\tilde\Delta_kf(\hat\varphi,\hat\psi)(\hat\psi^k)
-\sum_{n=0}^{N-1}g_n(x)\varepsilon^n,
$$
if each of the former Taylor expansions is written with a remainder term of the form
$\frac1{N!}\varphi_i^{j\, (N)}(\tau_{ij})\varepsilon^N$, $\ \tau_{ij}\in T$
(similarly for $\tilde\Delta_kf$),
we see that $R_N$ is polynomial in $x,\ \varepsilon,\ $ $ \varphi_i^{j\, (N)}(\tau_{ij}),\ \psi_i^j(x)$
and in the differentials of $\tilde\Delta_kf$ at some points $(\alpha,\beta)$
with $\alpha$ i-close to $\varphi_0(T)$ and $\beta$ i-close to $\psi_0(\mathbb{R})$.
Moreover, each monomial term of
$R_N$ contains at least one term $\psi_i^j$ and a power of $\varepsilon$ greater or equal to $N$. Thus, $R_N\varepsilon^{-N}$ has
S-exponential decay, hence is limited on $T$.
\noindent
(4) Formula (\ref{combex}) gives:
\begin{align*}
\int_{t_1}^{t}\varphi=&\int_{t_1}^{t}
\Big(\sum_{n=0}^{N-1}\varphi_n(\tau)\varepsilon^n+
\sum_{n=0}^{N-1}\psi_{n}\big(\frac{\tau-t_1}\varepsilon\big)\varepsilon^n+\pounds\varepsilon^N\Big)
\, d\tau\\
=&\sum_{n=0}^{N-1}\Big(\int_{t_1}^{t}\varphi_n(\tau)d\tau+
\int_0^{+\infty}\psi_{n-1}(\xi)\, d\xi \Big)\varepsilon^n\\
&-\sum_{n=0}^{N-1}\int_{\frac{t-t_1}{\varepsilon}}^{+\infty}\psi_{n}(\xi)\, d\xi\,
\varepsilon^{n+1}+\pounds\varepsilon^N.
\end{align*}
Since $\psi_n$ has exponential decay,
$\Psi_{n+1}(x):=-\int_x^{+\infty} \psi_{n}(s) \, ds$ has exponential decay.
In particular, since $t_2$ is standard,
we conclude that $\int_{(t_2-t_1)/\varepsilon}^{+\infty}\psi_{n}(x)dx=e^{-@/\varepsilon}=\pounds\varepsilon^N$.\hfill $\Box$
\paragraph{Remarks on statement 3.}
(1) We detail here the computation of these expansions in the cases of dimensions $d$ and $p$ equal to $1$.
With $f\ C^\infty$ standard, $t=t_1+\varepsilon x$ and
$\varphi(t)=\sum_{i=0}^n(\varphi_i(t)+\psi_i(x))\varepsilon^i+\pounds\varepsilon^{n+1}$,
we look for an expression of $f\circ\varphi$ in the form
\begin{equation} \label{lr}
f(\varphi(t))=\sum_{i=0}^n(f_i(t)+g_i(x))\varepsilon^i+\pounds\varepsilon^{n+1}.
\end{equation}
The slow expansion is the usual asymptotic expansion of a composition of two
expansions, given by: $f_0(t)=f(\varphi_0(t))$, and for $n\geq1$:
\begin{equation} \label{len}
f_n(t)=\sum_{1\leq p_i\leq n,\; 1\leq k\leq n,\; p_1+\dots +p_k=n}
\frac1{k!} f^{(k)}(\varphi_0(t))\varphi_{p_1}(t)\dots
\varphi_{p_k}(t)\, .
\end{equation}
Here and in the sequel, we will use bold letters for multi-indices.
We denote by $E_n$ the set of finite sequences of positive integers that are smaller than or equal to $n$, i.e.
$$
E_n:=\cup_{d=0}^{+\infty}\{1,\dots ,n\}^d.
$$
For ${\bf p}=(p_1,\dots ,p_d)\in E_n$, we denote its length by
$\#({\bf p}):=d$ and its size by $|{\bf p}|:=p_1+\dots +p_d$.
We denote by $\Phi_{\bf p}$ the product $\Phi_{\bf p}=\varphi_{p_1}\dots \varphi_{p_d}$ (with the usual convention $\Phi_\emptyset=1$).
For instance \pref{len} becomes, with this notation,
$$
\forall n\geq0,\quad f_n(t)
=\sum_{{\bf p}\in E_n ,\; |{\bf p}|=n}
\frac1{\#({\bf p})!}f^{(\#({\bf p}))} (\varphi_0(t))\Phi_{\bf p}\,.
$$
For the fast expansion, given some $n\in\mathbb{N}$, with the notation
$\varphi(t)=\phi(t)+\psi(x)$, $\psi:=\sum_{i=0}^n\psi_i\varepsilon^i$
(hence $\phi(t)=\sum_{i=0}^n\varphi_i(t)\varepsilon^i+\pounds\varepsilon^{n+1}$
for all $t\in T$) we have
\begin{equation} \label{fi}
f(\phi(t)+\psi(x))=f(\phi(t))+\Delta f(\phi(t);\psi(x))\psi(x)\, .
\end{equation}
The first term $f(\phi(t))$ yields the slow part already calculated, and the
second part (which has S-exponential decay) corresponds to the fast part.
We then use the Taylor formula in the form
\begin{equation} \label{dd}
\Delta f(u;v)=\sum_{\begin{smallmatrix} i,j\geq 0\\ i+j\leq n\end{smallmatrix}}
\Delta_{ij}(u_0,v_0) (u-u_0)^i(v-v_0)^j+\pounds(u-u_0)^{n+1}
+\pounds(v-v_0)^{n+1}
\end{equation}
with $\Delta_{ij}:=\frac1{i!j!}\frac{\partial^{i+j}}{\partial u^i\partial v^j}
\Delta f$.
We apply it to $u:=\phi(t)$, $u_0:=\varphi_0(t_1)$, $v:=\psi(x)$,
$v_0:=\psi_0(x)$.
Using Taylor formula for $\phi$ at point $t=t_1$, we get
$$
u=\sum_{k=0}^nu_k(x)\varepsilon^k+\pounds x^{n+1}\varepsilon^{n+1}\quad\mbox{with}\quad
u_k(x):=\sum_{j=0}^k\frac1{j!}\varphi_{k-j}^{(j)}(t_1)x^j
$$
(notice that $u_0(x)$ is constant equal to $u_0=\varphi_0(t_1)$).
In addition, we simplify the notation
$$
f_{ij}(x):=\Delta_{ij}(\varphi_0(t_1),\psi_0(x))\,.
$$
Altogether, using $u-u_0=\pounds x\varepsilon$ and $v-v_0=\pounds\varepsilon$, \pref{dd} gives
\begin{equation} \label{ddd}
\Delta f(u;v)=\sum_{i,j\geq 0,\; i+j\leq n}f_{ij}(x)
\Big(\sum_{k=1}^nu_k(x)\varepsilon^k\Big)^i
\Big(\sum_{l=1}^n\psi_l\varepsilon^l\Big)^j+\pounds(1+x^{n+1})\varepsilon^{n+1}
\end{equation}
Taking into account the last term $\psi(x)$ in \pref{fi}, whose S-exponential decay implies that
$\pounds(1+x^{n+1})\varepsilon^{n+1}\psi(x)=\pounds\varepsilon^{n+1}$ on $T$,
we obtain the coefficients of the fast expansion of \pref{lr}:
\begin{gather*}
g_0(x)=f(\varphi_0(t_1)+\psi_0(x))-f(\varphi_0(t_1))\,,\\
g_n(x):=\sum f_{ij}(x)\;u_{k_1}(x)\dots u_{k_i}(x)\;\psi_{l_1}(x)
\dots \psi_{l_j}(x)\,\psi_m(x),
\end{gather*}
for $n\geq 1$, where the summation is taken over
$i,j,m\geq0$, $1\leq k_1,\dots ,k_i\leq n$, $1\leq l_1,\dots ,l_j
\leq n$, and $k_1+\dots +k_i+l_1+\dots +l_j+m=n$.
More concisely, using the notation below \pref{len},
$$
g_n=\sum_{\begin{smallmatrix} {\bf k},{\bf l}\in E_n,\; m\geq 0\\
|{\bf k}|+|{\bf l}|+m=n\end{smallmatrix}}
f_{\#({\bf k})\ \#({\bf l})}\,
{\bf u}_{\bf k}\ {\Psi}_{\bf l}\ \psi_m\,.
$$
For the implementation of this formula, we refer the reader to the
Maple package already mentioned.
\noindent(2) We insist on the fact that the fast expansion of $f\circ\varphi$ depends of the slow and fast
expansions of $\varphi$, whereas the slow expansion of $f\circ\varphi$ depends only of the
slow expansion of $\varphi$. Consider, for example, the product of two real combined expansions
$\varphi$ and $\tilde\varphi$. Using capital letters for the resulting combined expansion, we have
$$
\Phi_n(t)=\sum_{k=0}^n\varphi_k(t)\tilde\varphi_{n-k}(t)\,.
$$
To obtain $\Psi_n(x)$, we consider the other terms,
\begin{gather*}
\Big(\sum_{\nu\geq0}\varphi_\nu(t_1+\varepsilon x)\varepsilon^\nu\Big)
\Big(\sum_{\nu\geq0}\tilde\psi_\nu(x)\varepsilon^\nu\Big)
+
\Big(\sum_{\nu\geq0}\psi_\nu(x)\varepsilon^\nu\Big)
\Big(\sum_{\nu\geq0}\tilde\varphi_\nu(t_1+\varepsilon x)\varepsilon^\nu\Big)\\
+\Big(\sum_{\nu\geq0}\psi_\nu(x)\varepsilon^\nu\Big)
\Big(\sum_{\nu\geq0}\tilde\psi_\nu(x)\varepsilon^\nu\Big),
\end{gather*}
and expand each term $\varphi_\nu(t_1+\varepsilon x)$ with Taylor formula.
Then, $\Psi_n(x)$
will be the $n$-th term of the obtained expansion in powers of $\varepsilon$.
\subsection{Combined expansions in singular perturbation theory}\label{sec2.4}
Consider the singularly perturbed real differential equation
\begin{equation} \label{3.1}
\varepsilon\dot u=f(t,u)
\end{equation}
with the following hypotheses. \begin{enumerate}
\item[H1] The function $f$ is $S^\infty$ and has a regular
$\varepsilon$-expansion in a standard open subset $U$ of $\mathbb{R}^2$.
\item[H2] There is a slow curve $u=u_0(t)$ in $U$ defined and $C^\infty$
on a standard compact interval $T=[t_1,t_2]$ , i.e.
\begin{equation} \label{21}
\forall t\in[t_1,t_2]\ ,\ (t,u_0(t))\in U \ {\rm and} \ f_0(t,u_0(t))=0\,.
\end{equation}
Let $c_0$ be such that the segment $\{t_1\}\times[u_0(t_1),c_0]$ is in $U$
(in the case $c_0<u_0(t_1)$, exchange the bounds of the segment); suppose
moreover that the slow curve is defined on $[-\delta,t_2]$, with $\delta>0$ standard and
sufficiently small. We can then consider $u^\natural$ as the solution of \pref1 with
the initial condition $u^\natural(-\delta)=u_0(-\delta)$.
The attractiveness of the slow curve implies that this solution is defined and has a regular $\varepsilon$-expansion in
$[0,t_2]$, with $C^\infty$ coefficients $u_n$. This allows to isolate the slow part of the
expansion \pref{tta}.
Set $\tilde y:=u- u^\natural$; this leads to
\begin{equation} \label{61}
\varepsilon\dot{\tilde y}=g(t,\tilde y)\tilde y\ ,
\end{equation}
where $g(t,\tilde y):=\Delta_2f(t, u^\natural(t);\tilde y)$
and $\Delta_2f$ is defined just above \pref d.
Since $u^\natural$ has itself a regular expansion, $g$ has, too; we denote by $g_i$ its coefficients.
Moreover $g$ remains appreciably negative on a standard neighborhood $V$ of
$$
L:=(\{0\}\times[0,c_0])\cup([0,t_2]\times\{0\})\,.
$$
Indeed, the shadow of $g$, denoted by $g_0$, satisfies $g_0(t,0)=a(t,u_0(t))$
for any $t\in[0,t_2]$ and $g_0(0,y)=\frac1y\int_0^ya(0,v)dv$ for any
$y\in]0,c_0]$.
Let $t=\varepsilon x$ and $y(x):=\tilde y(\varepsilon x)$. Then
\begin{equation} \label{71}
y'(x)=g(\varepsilon x,y(x))y(x)\ ,\ \ y(0)=c- u^\natural(0)\,
\end{equation}
where ${}'$ denotes the derivation with respect to $x$.
It will be shown that this solution admits an $\varepsilon$-expansion with coefficients
$y_n$ exponentially decreasing.
First of all, $y$ is decreasing, hence $y'$ is limited on $X:=[0,t_2/\varepsilon]$.
Therefore $y$ is S-continuous,
hence has a shadow, denoted by $y_0$. This shadow satisfies
\begin{equation} \label{81}
y'_0(x)=g_0(0,y_0(x))\ y_0(x)\ ,\ \ y_0(0)=c_0\,.
\end{equation}
This implies that $y_0$ is decreasing on $\mathbb{R}^+$ and has exponential decay at
infinity, by statement 2 of proposition
\ref{pr1}. By definition one has a priori $y(x)\simeq y_0(x)$ only for
limited values of $x$, but this
remains true for all $x\in X$, as both functions are i-small for $x$ i-large.
We examine now the formal solutions. We write the coefficients of $g$ in the
form
$$ g_i(t,y)=\sum_{j,k\geq0}g_i^{jk}(x)t^j(y-y_0(x))^k $$
with
$$ g_i^{j k}(x):=\frac1{j!k!} \frac{\partial^{j+k}g_i}
{\partial t^j\partial y^k}(0,y_0(x))\,.
$$
We omit in the sequel the dependence in $x$ of $g_i^{jk}$ and $y_l$.
With this notation, one has
\begin{equation} \label{som}
\sum_{i\geq0}y'_i\varepsilon^i
=\sum_{i,j,k\geq0}g_i^{jk}x^j\varepsilon^{i+j}\Big(
\sum_{\nu\geq1}y_\nu\varepsilon^\nu\Big)^k\sum_{l\geq0}y_l\,\varepsilon^l .
\end{equation}
Symbolic identification yields a linear differential equation
for $y_n$; namely
$$
y_n'= \varepsilon\mbox{-terms of order $n$ in the right-hand
side of the above equation.}
$$
The terms in this expression are of the form
$g_i^{j k}x^jy_{\nu_1}\dots y_{\nu_k}y_l$, $\nu_p\geq1$.
Later, we will write separately those terms containing $y_n$;
namely $(g_0^{00}+g_0^{01}y_0)y_n$.
It is convenient to introduce the following indexed set:
\begin{equation} \label{k}
M_n:=\bigcup_{k=0}^n\left\{\mu=(i,j,l,\nu_1,\dots ,\nu_k)\in\{0,\dots ,n\}^{k+3}\ ;\ \nu_p\geq1\right\}\ ,
\end{equation}
and the notation, for $\mu=(i,j,l,\nu_1,\dots ,\nu_k)\in M_n$:
\begin{equation} \label{ka}
|\mu|:=i+j+l+\nu_1+\dots +\nu_k \quad \mbox{and} \quad
{\bf y}_\mu:= g_i^{j k}x^jy_{\nu_1}\dots y_{\nu_k}y_l .
\end{equation}
In summary, $y_n$ satisfies
\begin{equation} \label{3.2}
y'_n=\sum_{\mu\in M_n,|\mu|=n}{\bf y}_\mu \,.
\end{equation}
We notice (this will be used later) that, if $m<n$, then $M_m\subset M_n$ and the equation
for $y_m$ is obtained from \pref{3.2} with $n$ replaced by $m$.
Set $Y_n:=\sum_{l=0}^ny_l\varepsilon^l$; a computation analogous to \pref{som} then gives
$$g(\varepsilon x,Y_n)Y_n-Y_n'=\sum_{\mu\in M_n,\ |\mu|>n} {\bf y}_\mu\varepsilon^{|\mu|}+(\pounds+\pounds x^{n+1})Y_n\varepsilon^{n+1}\,.$$
Each term ${\bf y}_\mu=g_i^{jk}x^jy_{\nu_1}\dots y_{\nu_k}y_l$
contains at least one factor $y_l$, hence has S-exponential decay,
and $Y_n$ has S-exponential decay too.
To sum up,
$$g(\varepsilon x,Y_n)Y_n-Y_n'=(\pounds+\pounds x^{n+1})e^{-@x}\varepsilon^{n+1}=\pounds e^{-@x}\varepsilon^{n+1}\,.$$
This shows that $b$ has S-exponential decay.
\hfill $\Box$
\paragraph{Remarks:}
(1) The more general hypothesis ``$f$ admits an $\varepsilon$-expansion'' instead
of ``$f$ standard'' is useful for the following:
\begin{itemize}
\item This allows to treat problems where the initial instant $t_1$ is not
standard but has only an
$\varepsilon$-expansion. Indeed, if $\varphi$ is standard and $C^\infty$ --- or
has an $\varepsilon$-expansion --- and if
$t_1$ has an $\varepsilon$-expansion, then
$\tilde \varphi:[0,t_2-t_1]\to\mathbb{R}^d,\ s\mapsto \varphi(t_1+s)$ has an
$\varepsilon$-expansion and is $S^\infty$.
\item Furthermore, the Schr\"{o}dinger equation (we will study) may contain
a nonstandard parameter (canard value).
\end{itemize}
\noindent (2) Theorem \protect\ref{tata} will be applied in more general situations,
for instance when the equation has a turning point in
$]t_1,t_2[$, or when the starting point $t_1$ is not standard.
Therefore we present the following result.
\begin{proposition} \label{pr3}
(1) Theorem \ref{tata} remains valid if (H3) is replaced by the
following hypotheses:
\begin{enumerate}
\item[(i)] There is a $S^{\infty}$ solution of \pref{3.1}, close to the slow
curve $u=u_0(t)$ on a standard open interval containing $[t_1,t_2]$.
\item[(ii)] For every $u\in[u_0(t_1),c_0]$ one has $\ a(t_1,u)<0$.
\item[(iii)] For every $t\in]t_1,t_2]$ one has $\ A_0(t)<0$,
where $A_0$ is given by \pref{a0}.
\end{enumerate}
(2) Theorem \ref{tata} remains valid if $t_1$ is only $\varepsilon$-expandable
instead of standard.
\end{proposition}
\paragraph{Proof}
(1) Assume that there is already a $S^{\infty}$ canard solution
$u^\natural$ close to the slow curve on a standard open interval containing
$[t_1,t_2]$. In that case, the solutions $u$ and $u^\natural$ are
exponentially close to each other as soon as $t$
is appreciably greater than $t_1$ and as far as the ``accumulated stability"
is positive: more precisely, if $A_0$ is given by
\begin{equation} \label{a0}
A_0(t)=\int_{t_1}^ta(\tau,u_0(\tau))d\tau
\end{equation}
(recall that $a(t,u)=\frac{\partial f_0}{\partial u}(t,u)$).
Then
$$
u(t)- u^\natural(t)=\exp\left((A_0(t)+\oslash)/\varepsilon\right).
$$
As far as $A_0$ is appreciably negative (the ``accumulated stability"
would be defined as $-A_0$).
Since $u^\natural$ is $S^{\infty}$ and defined on
$[t_1-\delta,t_2+\delta]$ for some $\delta >0$ standard,
it admits an $\varepsilon$-expansion on $[t_1,t_2]$.
Theorem \ref{tata} applied to $[t_1,t_1+\delta]$, $\delta >0$ standard
sufficiently small, yields a combined expansion for $u$ on $[t_1,t_1+\delta]$.
As $u$ is exponentially close to $u^\natural$, this
combined expansion remains valid on $[t_1+\delta,t_2]$.
This proves the first part.
\noindent
(2) Put $\alpha=t_1-^\circ t_1,\ t=s+\alpha,\ u(t)=v(s)$.
Then $v$ satisfies $\varepsilon\dot v=g(s,v)$ with
$g(s,v):=f(s+\alpha,v)$. The function $g$ has a regular $\varepsilon$-expansion;
hence $v$ has a combined expansion
$v(s)=\sum v_n(s)\varepsilon^n+ \sum y_n(x)\varepsilon^n,\ x:=(s-^\circ t_1)/\varepsilon$.
Taylor formula shows that
$\sum v_n(t-\alpha)\varepsilon^n$ has a regular $\varepsilon$-expansion $\sum u_n(t)\varepsilon^n$.
This gives the slow part.
The fast part is the same, since $x$ is also equal to $(t-t_1)/\varepsilon$.
\hfill $\Box$
\section{Applications}\label{sec3}
\subsection{Towards a transasymptotic expansion}\label{sec3.2}
The results of section \ref{sec2} yield an estimate of the distance between two slow solutions of a
slow-fast differential equation. In this section, we will study this distance, first for equations without
parameter, and then for equations with parameter. Consider again equation
\pref{3.1}, $\varepsilon\dot u=f(t,u)$,
with the hypotheses (H1)--(H3) in section \ref{sec2.4} ($f$ is $C^\infty$ and has a regular
$\varepsilon$-expansion and a slow
curve $u=u_0(t)$; we assume as above that this slow curve is attractive on a
standard compact $T=[t_1,t_2]$; the situation with a turning point will be
detailed at the end of this section).
Let $u^\natural$ be a solution close to the slow curve on a standard open interval containing $[t_1,t_2]$ (for example, the
solution with initial condition $u^\natural(t_1-\delta)=u_0(t_1-\delta)$ where
$\delta>0$ is standard sufficiently small).
Consider now an initial condition $c^\#$ having an $\varepsilon$-expansion and sufficiently close to the slow
curve such that the solution $u^\#$, issuing from $c^\#$ at $t_1$, has a boundary layer at
$t_1$ and borders the slow curve, at least, until $t_2$ (i.e. hypotheses 2 and
3 in \ref{sec2.4} for $c_0={\,^\circ} c^\#$).
These two solutions are exponentially close to each other as soon as $t\gg t_1$. Namely, it is easy to see
that
$$
u^\#(t)- u^\natural(t)=\exp\Big(\frac1\varepsilon\Big(\int_{t_1}^t
\frac{\partial f_0}{\partial u}(\tau,u_0(\tau))d\tau+\oslash\Big)\Big)\,.
$$
A more precise result will be given in this section. As the essential task is to introduce
the notations, the statement will be given after its proof.
If $t=t_1+\varepsilon x$, theorem \ref{tata} provides two combined expansions:
$$u^\#(t)\sim\sum_{n\geq0}(u_n(t)+ y^\#_n(x))\varepsilon^n, \quad
u^\natural(t)\sim\sum_{n\geq0}u_n(t)\varepsilon^n\ ,$$
where $ y^\#_0$ is the solution of the differential equation
\begin{equation} \label{y}
y_0'=\frac{\partial f_0}{\partial u}(t_1,u_0(t_1)+y_0)y_0
\end{equation}
with $ y^\#_0(0)= c^\#_0-u_0(t_1)$.
Moreover, $\tilde y:= u^\#- u^\natural$ satisfies the differential equation \pref{61} rewritten below in a linear form:
\begin{equation} \label{g}
\varepsilon\dot{\tilde y}=a(t)\tilde y\quad \mbox{with}\quad
a(t):=g(t, u^\#(t)-u^\natural(t))
\end{equation}
with the notation $g(t, y)=\Delta_2f(t, u^\natural(t); y)$ of \ref{sec2.4}.
Hence with $c^\natural= u^\natural(t_1)$ one has:
\begin{equation} \label{u}
u^\#(t)- u^\natural(t)=( c^\#- c^\natural)
\exp\Big(\frac1\varepsilon\int_{t_1}^ta(\tau)d\tau\Big).
\end{equation}
According to the statement 3 of the proposition \ref{pr2}, the function $a$
admits a combined asymptotic expansion
$a(t)\sim\sum_{n\geq0}(a_n(t)+b_n(x))\varepsilon^n$ whose first terms are clarified
below
\begin{gather} \label{a}
a_0(t)=\frac{\partial f_0}{\partial u}(t,u_0(t)),\quad
a_1(t)=u_1(t)\frac{\partial^2f_0}{\partial u^2}(t,u_0(t))
+\frac{\partial f_1}{\partial u}(t,u_0(t)), \\
\label{b}
b_0(x)=\Delta_2f_0(t_1,u_0(t_1); y^\#_0(x))-
\frac{\partial f_0}{\partial u}(t_1,u_0(t_1)),
\end{gather}
where $y^\#_0$ is given by \pref y.
According to the statement 4 of this same proposition \ref{pr2}, the primitive of $a$ has an $\varepsilon$-expansion
\begin{equation} \label{an}
\int_{t_1}^ta(\tau)d\tau\sim\sum_{n\geq0}A_n(t)\varepsilon^n
\end{equation}
for $t\in]\,\!]t_1,t_2]$, the first terms of which are:
\begin{equation} \label{An}
A_0(t)=\int_{t_1}^ta_0(\tau)d\tau\,,\quad
A_1(t)=\int_{t_1}^ta_1(\tau)d\tau+\int_0^{+\infty}b_0(\xi)d\xi\,.
\end{equation}
Since $c^\#- c^\natural$ has also an $\varepsilon$-expansion (its first term is
$c^\#_0- c^\natural_0$), formula \pref u shows the following result.
\begin{theorem} \label{th3}
With the notation and hypotheses above, there is a standard sequence of
functions $(r_n)_{n\in\mathbb{N}}$ such that the difference $u^\#- u^\natural$
satisfies for any $t\in]\,\!]t_1,t_2]$:
\begin{equation} \label{rn}
\left(u^\#(t)- u^\natural(t)\right)\exp\left(-\frac{A_0(t)}\varepsilon\right)\; \sim\;
\sum_{n \geq 0}r_n(t)\varepsilon^n\,,
\end{equation}
with $r_0(t)=( c^\#_0- c^\natural_0)\,\exp(A_1(t))$.
\end{theorem}
\paragraph{Remarks:}
(1) Concerning values of $t$ close to $t_1$, an analogous formula is
available with combined expansions.
We do not mention it for simplicity.
\noindent (2) As mentioned in the introduction, the functions $r_n$ can explicitly be
computed with an algorithm based on the previous proof (subject to compute
the primitives of the occurring functions). Instead of detailing this
algorithm in the general case, we find it is more useful to illustrate it on
the Liouville equation, see below. We refer to the Maple package already mentioned for the general case.
\noindent(3) If a standard $t$ is fixed, formula \pref{rn} gives immediately an
asymptotic expression of $u^\#(t)- u^\natural(t)$ with standard constants.
If $t$ has an $\varepsilon$-expansion,
such an asymptotic expression is also possible, but one has to expand each
term $A_i(t),\ i=0,1$ and $r_j(t),\ j\geq1$ with Taylor formula.
\noindent(4) As for combined asymptotic expansions in \ref{sec2.4} (proposition \ref{pr3}
of remark 2), this result remains valid if the slow curve is not attractive
on the whole interval $[t_1,t]$. The existence of a solution close the slow
curve on an open interval containing $[t_1,{\,^\circ} t]$ and the assumption that
$A_0$ is negative on $]t_1,{\,^\circ} t]$ are enough.\medskip
\noindent(5) Formula \pref{u} allows to find an $\varepsilon$-expansion of $c^\#$ from an
$\varepsilon$-expansion of $t$ and $u^\#(t)- u^\natural(t)$.
More precisely, we have the following.
\begin{corollary} \label{cor10}
Denote by $I=]c_{{\rm min}},c_{{\rm max}}[$ the set of all numbers
$c\in\mathbb{R}$ which satisfy hypothesis 3 of subsection \ref{sec2.4}.
Let $t^*\in]t_1,t_2]$ $\varepsilon$-expandable with ${\,^\circ} t^*>t_1$.
Denote by $u^\#=u^\#(t,c)$ the solution of \pref{3.1} with boundary condition
$u^\#(t_1)=c$.
Consider the function
$$
\varphi:I\to\mathbb{R},\ c\mapsto \left(u^\#(t^*)- u^\natural(t^*)\right)
\exp\Big(-\frac{A_0(t^*)}\varepsilon\Big).
$$
Then $\varphi$ is an S-diffeomorphism from the S-interior of $I$ to its image.
(In classical terms:
$\varphi=\varphi_\varepsilon$ is a diffeomorphism from $I$ to $\varphi(I)$ and for any fixed $c\in I$
there is a constant $M>0$ independent of $\varepsilon$ such that
$\frac1M\leq\varphi_\varepsilon'(c)\leq M$.)
As a consequence, given $\alpha\in\varphi(I)$ with
$c^\#(\alpha):=\varphi^{-1}(\alpha)\in
]\,\!]c_{\rm min},c_{\rm max}[\,\![$, if $\alpha$ is $\varepsilon$-expandable,
then $c^\#(\alpha)$ is $\varepsilon$-expandable
and by theorem \ref{tata}, $u^\#(\cdot,c^\#(\alpha))$ has a combined
expansion on $[t_1,t_2]$.
\end{corollary}
\paragraph{Proof} With \pref u we get
$$
\varphi(c)=(c-c^\natural)\exp\Big(\frac1\varepsilon\int_{t_1}^{t^*}
(g(t,\tilde y(t,\varepsilon))-g_0(t,0))dt\Big)
$$
with $\tilde y:(t,c)\mapsto u^\#(t,c)-u^\natural(t)$,
$g=\Delta_2f(\cdot,u^\natural,\cdot)$ as before,
and $g_0$ is the first term of the $\varepsilon$-expansion of $g$.
Therefore,
$$\varphi(c)=(c-c^\natural)M\exp(J(c))$$
with
$$M:=\exp\Big(\frac1\varepsilon\int_{t_1}^{t^*}(g(t,0)-g_0(t,0))dt\Big)
$$
independent of $c$, and
\begin{equation} \label{j}
J(c):=\frac1\varepsilon\int_{t_1}^{t^*}(g(t,\tilde y(t,c))-g(t,0))dt\,.
\end{equation}
The function $\tilde y$ is the solution of the initial-value problem
$\varepsilon\dot y=g(t,y)y$, $y(t_1)=c-c^\natural$,
hence is monotonous on $[t_1,t^*]$ and can be used as a change of variable.
For convenience we use the notation $y^*:=\tilde y(t^*,c)$
(it is an exponentially small number which depends on $c$)
and $\tilde t=\tilde t(y,c)$ the ``inverse'' function of $\tilde y$.
The change of variable $y=\tilde y(t,c)$ yields $dy=\frac1\varepsilon g(t,y)ydt$ and
$$ J(c)=\int_{c-c^\natural}^{y^*}\psi(y)\,dy $$
with
$$ \psi(y):=\frac{\Delta_2g(\tilde t(y,c),0;y)}{g(\tilde t(y,c),y)}\,.
$$
This function is not S-continuous, but is continuous and limited on $[c-c^\natural,y^*]$.
Moreover, for $y\not\simeq0$, one has
$$\psi(y)\simeq\psi_0(y):=\frac{\Delta_2g_0(0,0;y)}{g_0(0,y)}\,.$$
Hence $J$ is S-continuous and its shadow satisfies
$$ {\,^\circ}J:c\mapsto\int_{c-c^\natural}^0\psi_0(y)dy\,. $$
We will show later that $J$ has a regular expansion. This implies that
$$
{\,^\circ}(J'(c))=({\,^\circ}J)'(c)=-\psi_0(c-c^\natural)=
\frac1{c-c^\natural}\Big(\frac{g_0(0,0)}{g_0(0,c-c^\natural)}-1\Big)\,.
$$
Since we have $\frac{\varphi'}\varphi(c)=\frac1{c-c^\natural}+J'(c)$, we deduce that
$(c-c^\natural)\frac{\varphi'}\varphi(c)\simeq\frac{g_0(0,0)}{g_0(0,c-c^\natural)}$ is appreciable.
Using $M=@$ and $J(c)=\pounds$, this gives $\varphi'(c)=@$
which shows that
$\varphi$ is a S-diffeomorphism. Furthermore $\varphi$ has an $\varepsilon$-expansion
(by composition of the expansion \pref{rn} and the expansion of $t^*$).
Hence by a well-known result on classical expansions, $\varphi^{-1}$ has an $\varepsilon$-expansion, too.
The consequence is clear, again by composition of the expansions of $\alpha$
and $\varphi^{-1}$.
It remains to prove that $J$ has a regular expansion. For that purpose,
it is better to use the change of variable
$t=t_1+\varepsilon x$ in (\ref{j}). This gives, with $x^*:=(t^*-t_1)/\varepsilon$
(independent of $c$)
and $\hat y(x,c):=\tilde y(t_1+\varepsilon x,c)$:
\begin{equation} \label{jj}
J(c)=\int_0^{x^*}G(x,c)dx
\end{equation}
with
$$ G(x,c):=(g(t_1+\varepsilon x,\hat y(x,c))-g(t_1+\varepsilon x,0))\,. $$
At this scale, the function $\hat y$ has S-exponential decay in $x$ and a regular
$\varepsilon$-expansion $\hat y(x,c)\sim\sum_{n\geq0}y_n(x,c)\varepsilon^n$ with respect to $c$
(see the end of \ref{sec2.2}),
where the $y_n$ have exponential decay in $x$ (see the end of \ref{sec2.2}).
Hence the same holds for $G$, and $J$ has a regular
$\varepsilon$-expansion with respect to $c$.
\medskip
\noindent(6) Using classical results of Gevrey analysis, it is possible to
give an exact
meaning to an expression like:
$$
u^\#(t)\sim\sum_{n\geq 0}u_n(t)\varepsilon^n+\exp\big(\frac{A_0(t)}\varepsilon\big)
( c^\#_0- c^\natural_0)\exp(A_1(t))
\Big( 1+ \sum_{n\geq 1}r_n(t)\varepsilon^n\Big)\,.
$$
This expression may be considered as a start of {\it transasymptotic expansion}.
The first expansion $\sum_{n\geq0}u_n(t)\varepsilon^n$ is the classical expansion of
slow curves, and the exponential term which follows arises from the boundary
layer at $t_1$. Thanks to Gevrey analysis, --- under analyticity hypothesis of
$f$ according to $t$ and a simple geometrical hypothesis concerning the {\it
relief} function $a:t\mapsto\frac{\partial f_0}{\partial u}(t,u_0(t))$ --- it
is possible to show that the first expansion $\sum_{n\geq0}u_n(t)\varepsilon^n$ is
defined up to exponentially small (of type strictly greater than $A_0(t)$).
Therefore, a summation "to the least term" of both expansions accounts for the
boundary layer.
\paragraph{Liouville's equation}
We consider here Liouville's equation in its singularly perturbed form
\begin{equation} \label{l}
\varepsilon\dot u=u^2-t.
\end{equation}
This equation has a repulsive river $u^\natural$ which is asymptotic to $\sqrt
t$. Consider a solution $u^\flat$ that borders this river, on an appreciable interval, and
join the attractive river. We will give an estimate of $u^\flat-u^\natural$ before the
limit layer of $u^\flat$. The first two terms $r_1$ and $r_2$ will be explicit.
To set the ideas, we assume that $u^\flat$ links both rivers through $0$ rather than infinity.
Choosing adequately the parameter $\varepsilon$ and doing linear change of variables of $u$ and $t$, we can
suppose that $u^\flat(1)=0$.
Given $t$ fixed and standard in $]0,1[$, we want to compute an $\varepsilon$-expansion
of $u^\flat(t)-u^\natural(t)$.
The first terms of the $\varepsilon$-expansion are
$$
u^\natural(t)=\sqrt t+\frac14t^{-1}\varepsilon
-\frac5{32}t^{-5/2}\varepsilon^2+\frac{15}{64}t^{-4}\varepsilon^3
+\pounds\varepsilon^4\,.
$$
The function $y(x):= u^\flat(1+\varepsilon x)-u^\natural(1+\varepsilon x)$,
already introduced satisfies the differential equation
$$ y'=(2u^\natural(1+\varepsilon x)+y)y\, ,$$
approximated by
$$ y'=\left(2+(x+\frac12)\varepsilon-(\frac{x^2}4+\frac
x2+\frac5{16})\varepsilon^2+(\frac{x^3}8+\frac{x^2}2+\frac{25x}{32}+
\frac{15}{32})\varepsilon^3+\pounds\varepsilon^4+y\right) y
$$
with the initial condition
$y(0)= -u^\natural(1)=-1-\frac14\varepsilon+\frac5{32}
\varepsilon^2-\frac{15}{64}\varepsilon^3+\pounds\varepsilon^4$.
We then deduce the beginning of the $\varepsilon$-expansion
$y(x)\sim\sum_{n\geq0}y_n(x)\varepsilon^n$ of $y$:
$$ y'_0=(2+y_0)y_0\,,\quad y_0(0)=-1, $$
hence $y_0(x)=-1-\tanh x$,
$$
y'_1=a(x)y_1+(x+\frac12)y_0\,, \quad y_1(0)=-\frac14
$$
with $a(x)=2+2y_0(x)=-2\tanh x$, hence
$\exp\big(\int_u^xa\big)=\frac{\cosh^2u}{\cosh^2x}$;
this gives
$$
y_1(x)=\frac{-1}{4\cosh ^2x}\Big(1+\int_0^x(2u+1)\left( e^{2u}+1\right) du\Big)
=\frac{-1}{4\cosh ^2x}\left(1+x+x^2+x\,e^{2x}\right)\,.
$$
Using the symbolic manipulation language Maple, we computed $y_2$.
It is possible to prove that $y_n$ is of the form $y_n(x)=\frac{P_n(x,e^x)}{e^{2x}+1}$.
We do not explicit
the rather long expression $y_2$ because we need only the value of
the integrals
$I_n:=\int_0^{-\infty}y_n(x)dx$. One finds for the first terms:
$$I_0=\ln 2\,,\quad I_1=\frac{1}{4}\,,\quad I_2=\frac{1}{32}\,.
$$
We remark that, despite the complexity of the functions $y_n$, their integrals on $\mathbb{R}^-$ are very simple.
In particular, it
is possible to show (using the associated linear Airy equation) that these
numbers are rational for all $n\geq1$.
Then, $u^\flat(t)-u^\natural(t)=\left( u^\flat(1)-u^\natural(1)\right)
\exp(\frac{A(t)}\varepsilon)$ with
$$
u^\flat(1)- u^\natural(1)= -u^\natural(1)=-1-\frac\varepsilon{4}+\frac 5{32}\varepsilon^2-\frac{15}{64}\varepsilon^3
+\pounds\varepsilon^4,
$$
\begin{eqnarray*}
A(t)&=&\int_1^t\big( u^\natural(\tau)+ u^\flat(\tau)\big) d\tau\\
&=&2\int_1^t u^\natural(\tau)d\tau+\varepsilon\int_0^\frac{t-1}\varepsilon y(x)dx\\
&=&2\sum_{n=0}^3\Big(\int_1^tu_n(\tau)d\tau\Big)\varepsilon^n
+\sum_{n=0}^2\Big(\int_0^{-\infty}y_n(x)dx\Big)\varepsilon^{n+1}+\pounds\varepsilon^4\\
&=&\frac43\left( t^{3/2}-1\right)+\left(\frac12\ln t\right)\varepsilon+\frac5{24}\left(
t^{-3/2}-1\right)\varepsilon^2+ \frac5{32}\left(1-t^{-3}\right)\varepsilon^3\\
&&
+\varepsilon\ln2+\frac14\varepsilon^2+\frac1{32}\varepsilon^3 +\pounds\varepsilon^4
\end{eqnarray*}
Hence, we deduce $\exp\left(\frac{A(t)}\varepsilon\right)$ and obtain finally:
\begin{eqnarray*}
u^\natural(t)- u^\flat(t)&=&\exp\!\left\{\frac4{3\varepsilon}\left(
t^{3/2}-1\right)\right\}2\sqrt{t}\ \left[\ 1+\frac1{24}\left(5
t^{-3/2}+7\right)\varepsilon\right.\\
&&\left.
+\frac1{1152}\left(49+70\,t^{-3/2}-155\,t^{-3}\right)\varepsilon^2+\pounds\varepsilon^3\ \right]\,.
\end{eqnarray*}
\paragraph{Remark.}
This formula is valid only for $t$ appreciable, but
for Liouville equation, a formula can be given for $t$ infinitely small, for
example for $t=0$. Indeed, only the computation of the integral
$\int_1^t u^\natural(\tau)d\tau$ of $A(t)$ poses a problem. But here, $u^\natural$ is
linked to the logarithmic derivative of the Airy function.
This yields an answer to the following natural question.
\smallskip
Consider the Liouville equation
\begin{equation} \label{li}
U'=U^2-T
\end{equation}
and its unique solution asymptotic to $\sqrt T$ at $+\infty$ given by
$U^\natural(T)=-{\rm Ai}'(T)/{\rm Ai}(T)$.
Consider another solution $U^\flat$ with the
initial condition $U^\flat(0)=U^\natural(0)-\delta$, $0<\delta\simeq0$. This
solution vanishes only at an infinitely large value $\omega$. What is the
asymptotic relation between $\delta$ and $\omega$?\smallskip
The change of variables $T=\omega t,\ U=\sqrt\omega\ u$, with $\varepsilon=\omega^{-3/2}$ leads to
the previous equation \pref{li}. We deduce that
$\exp\big(\frac1\varepsilon\int_0^1 u^\natural(\tau)d\tau\big)=
\big(\frac{{\rm Ai}(\omega)}{{\rm Ai}(0)}\big)^2$ with
$\omega=\varepsilon^{-2/3}$ (and ${\rm Ai}(0)=3^{-2/3}/\Gamma(2/3)$). The asymptotic expansion of ${\rm Ai}$ implies:
$$
\delta=\frac1{2\pi{\rm Ai}(0)^2}
\exp\big(-\frac{4\omega^{3/2}}3\big)\big(1+\alpha_1
\omega^{-3/2}+\dots +\alpha_n\omega^{-3n/2}+\pounds\omega^{-3(n+1)/2}\big)
$$
with $\alpha_1=\frac7{24}$ and $\alpha_2=-\frac{49}{1152}$.
As mentioned above, concerning the Riccati equations associated to classical linear equations, it
is more judicious to solve the problem directly with the linear equation.
Nevertheless the method presented here can be applied to other types of
equations. Liouville's equation is here only for illustration.
\subsection{The situation with a turning point}
\label{sec3.3}
Consider equation~\pref{3.1}, $\varepsilon\dot u=f(t,u)$, with the hypotheses (H4) and
(H5) below:
\begin{enumerate}
\item[H4] The function $f$ is $S^\infty$ and has a regular
$\varepsilon$-expansion in an open subset $U$ of $\mathbb{R}^2$ ({\it cf.}
\ref{sec2.1bis}).
\item[H5] There is a $C^\infty$ slow curve $u=u_0(t)$ in $U$ on a
standard compact interval $T=[t_1,t_2]$, i.e.
\begin{equation} \label{21bis}
\forall t\in[t_1,t_2]\ ,\ (t,u_0(t))\in U \ {\rm and}\ f_0(t,u_0(t))=0\,.
\end{equation}
\end{enumerate}
Now, the slow curve $u=u_0(t)$ is assumed to be attractive for $t_1\leq t<t_0$
and repulsive for $t_0<t\leq t_2$:
\begin{enumerate}
\item[H6] The function $a(t):=\frac{\partial f_0}{\partial u}(t,u_0(t))$
satisfies
% NOTE(review): hypothesis (H6) was garbled in the source; the condition and
% the label below are reconstructed from the later use of \pref{aa} -- to be confirmed.
\begin{equation} \label{aa}
a(t)<0 \ \mbox{ for }\ t_1\leq t<t_0\,,\quad a(t)>0 \ \mbox{ for }\ t_0<t\leq t_2\,.
\end{equation}
\end{enumerate}
Moreover, we assume that there is a canard solution $u^\natural$ that borders the slow curve from
$t_1$ to $t_2$. We are interested in the input-output relation about this
canard solution.
Let $t_e$ and $t_s$ be the input and output instants respectively, such that
$t_1<{\,^\circ} t_e<t_0<{\,^\circ} t_s<t_2$, and let $u_e$, $u_s$ be corresponding
input and output values with
% NOTE(review): this condition was garbled in the source; reconstructed -- to be confirmed.
\begin{equation}
u_e- u_0({\,^\circ} t_e)>0\,.
\end{equation}
We assume that there is a solution $u^\#$ of \pref{3.1} with $u^\#(t_e)=u_e,\ u^\#(t_s)=u_s$. In this case,
it is known \cite{bcdd} that the input-output relation is given by
\begin{equation} \label{es}
\int_{{\,^\circ} t_e}^{{\,^\circ} t_s}a(t,u_0(t))dt=0\,.
\end{equation}
\begin{proposition} \label{pr11}
With the above notation and hypotheses (H4)--(H6), if three of the
four quantities $t_e,t_s,u_e,u_s$ have an $\varepsilon$-expansion, then so does the
fourth one, and this expansion can be computed modulo an
inversion of diffeomorphism.
\end{proposition}
\paragraph{Proof} The quantities $t_e,t_s,u_e,u_s$ are linked by the relation
$$
u_s- u^\natural(t_s)=(u_e- u^\natural(t_e))\exp
\Big\{\frac{1}{\varepsilon}\int_{t_e}^{t_s}\Delta_2f\left( t, u^\natural(t);
u^\#(t)- u^\natural(t)\right) dt\Big\}\,.
$$
The solution $u^\#$ presents two limit layers. First, we split this integral
in two parts, for example at $t_0$.
For each limit layer, we use the combined asymptotic expansions, more
especially proposition \ref{pr3}.
When the three quantities $t_e,t_s$
and $u_e$ have an $\varepsilon$-expansion, proposition \ref{pr3} shows that
$u^\#(t_0)- u^\natural(t_0)$ is of the form $\exp\left(\frac{A_0}\varepsilon+b\right)$ where $A_0$ is
standard and $b$ has an $\varepsilon$-expansion. Hence, by corollary \ref{cor10},
$u_s$ has an $\varepsilon$-expansion. The case where
the three given quantities are $t_e,t_s$ and $u_s$ is similar.
If the three quantities are $t_e,u_e$ and $u_s$, then the previous case is used as follows:
we set the change of variable $t_s={\,^\circ} t_s+\varepsilon x_s$, where ${\,^\circ} t_s$ is
given by the input-output relation \pref{es}. When $t_e$ and $u_e$ are fixed,
the previous case shows that $u_s$ is of the form
$u_s=\varphi(x_s)$ where $\varphi$ is $S^\infty$ and has an $\varepsilon$-expansion. Moreover, we have
$$
\varphi'(x_s)=\frac d{dx} u^\#({\,^\circ} t_s+\varepsilon x_s)=f({\,^\circ} t_s+\varepsilon
x_s, u^\#({\,^\circ} t_s+\varepsilon x_s))=f(t_s,u_s)\,,
$$
which is appreciable thanks to \pref{aa}, to $f(t_s, u^\natural(t_s))\simeq 0$ and
to $u_s\not\simeq u^\natural(t_s)$. Thus, $\varphi$ is a S-diffeomorphism (i.e. its shadow
is a diffeomorphism) and its inverse is
$S^\infty$ and has an $\varepsilon$-expansion. We conclude that
$x_s=\varphi^{-1}(u_s)$, and hence $t_s={\,^\circ} t_s+\varepsilon x_s$, has an $\varepsilon$-expansion. \hfill $\Box$
\subsection{Equations with parameter}
\label{sec3.4}
Here, we prove theorem \ref{th1}.
It is classical \cite{c} to consider the associated Riccati equation by setting
$v=\frac{\varepsilon\dot\psi}\psi$. This leads to
\begin{equation} \label{3}
\varepsilon\dot v=U(t)-E-v^2\,.
\end{equation}
Recall that the solutions of a Riccati equation are naturally considered on the cylinder
$\mathbb{R}\times(\mathbb{R}\cup\{\infty\})$, whose variable $v$ is one chart among others.
In particular, it is natural to consider solutions with poles: the passage
through infinity is perfectly regular.
The equation \pref3 is of slow-fast type and usual techniques are applied.
For any infinitesimal value of $E$, we have two
repulsive rivers; the first one is close to $-\varphi(t)$ for $t\to-\infty$ and the
second one is close to $\varphi(t)$ for $t\to+\infty$ (here, repulsiveness does not
take into account the direction of $t$). For each river, there is a unique solution,
called ``exceptional'' which borders it, and all other solutions join the
attractive river ($\varphi(t)$ for $t\to-\infty$, $-\varphi(t)$ for $t\to+\infty$).
Moreover, the formula $ \psi=\exp(\int v/\varepsilon)$ shows that to each
solution bordering an attractive river corresponds a subspace of solutions of
the corresponding linear equation with exponential growth. The subspace
associated to an exceptional solution is constituted by solutions with
exponential decay.
For $E$ fixed, the symmetry of the equation
shows that if $v$ is a solution of \pref3, then so is $\tilde v:t\mapsto-v(-t)$. Hence,
a solution bordering both repulsive rivers is necessarily an odd function.
The posed problem has two solutions, denoted by $(E^\#, v^\#)$ and $(E^\flat, v^\flat)$
with $v^\#(0)=\infty,\ v^\flat(0)=0$, which are canard solutions. It is more
convenient to work in another chart. We therefore turn the Riccati cylinder by a quarter of turn.
The change of the unknown $u:=\frac{v-1}{v+1}$ leads to
$$\varepsilon\dot u=\frac12(U(t)-E-1)(1+u^2)-(U(t)-E+1)u,$$
with $u^\#(0)=1,\ u^\flat(0)=-1$ (and respectively with the values $E^\#$
and $E^\flat$ for $E$).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\begin{figure}[ht]
\begin{center}
\includegraphics[width=6cm]{fig3.ps}
\vspace*{-0.1cm}
\caption{The solutions $u^\#$, $u^\flat$ and $u^\natural$ for the potential
\pref2 and $\varepsilon=\frac{1}{10}$. $\Phi_+:=\frac{\varphi-1}{\varphi+1}$,
$\Phi_-:=\frac{-\varphi-1}{-\varphi+1}$.}
\setlength{\unitlength}{1cm}
\vspace{-8cm}
\begin{picture}(12,7.45)\small
\put(4.2,5){$\Phi_+$}
\put(6,5){$u^\#$}
\put(7.5,5){$u^\#$}
\put(7.5,4.2){$u^\flat$}
\put(7.5,4.6){$u^\natural$}
\put(8.2,5){$\Phi_+$}
%%%%%%%%%%%%
\put(5.9,2.2){$u^\flat$}
\put(5.3,2.9){$u^\natural$}
%%%%%%%%%%%%
\put(3.5,1.2){$\Phi_+$}
\put(5,1.2){$\Phi_-$}
\put(5.8,1.2){$u^\#$}
\put(6.6,1.2){$\Phi_-$}
%%%%%%%%%%%%%
\end{picture}
\end{center}
\end{figure}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
The difference $y:= u^\#- u^\flat$ satisfies the equation
$$ \varepsilon\dot y=A(t)y-B(t)(E^\#-E^\flat)\ ,\ \ \ y(+\infty)=0 $$
with
$$
A(t)=\frac12(U(t)-E^\flat-1)( u^\#+ u^\flat)-(U(t)-E^\flat+1),\quad
B(t)=\frac12(1- u^\#)^2\,.
$$
Here occurs a technical difficulty
which does not appear in theory if one uses the Riccati cylinder,
but in practice the cylindrical coordinates are difficult to use.
The difficulty comes from $A$ and $B$ possibly having poles, together with
$u^\#$ and $u^\flat$, for some values of $t$ near $t^*$ that satisfies
$\varphi(t^*)=-1$.
Therefore, we consider some standard $t_1>t_0$ close enough to $t_0$ such that $A$ and $B$
are limited on $[0,t_1]$, and we apply the variation of constants formula between $0$ and $t_1$, in the form
\begin{equation} \label{yy}
y(0)=y(t_1)\exp\Big(\frac1\varepsilon\int_{t_1}^0A\Big)
-\frac{E^\#-E^\flat}\varepsilon\int_{t_1}^0B(t)
\exp\left(\frac1\varepsilon\int_t^0A\right) dt\,.
\end{equation}
Notice that, if $t\in\,]0,t_1]$, then
$A(t)\simeq\frac12\left(\varphi(t)^2-1\right)2\frac{\varphi(t)-1}{\varphi(t)+1}
-\left(\varphi(t)^2+1\right)=-2\varphi(t)$.
Furthermore, $u^\#$ and $u^\flat$ admit combined expansions and $E^\flat$ has
an $\varepsilon$-expansion which is the $\varepsilon$-expansion of every canard
value. Therefore $A,\ B$ and the primitive of $A$ also have combined expansions,
and the $\varepsilon$-expansion of $\int_{t_0}^0A$ begins with $a$ given by \pref{a1}.
Notice also that $y(t_1)=\pounds\left(E^\#-E^\flat\right)$ by item 2 of proposition \ref{pr1} in section \ref{sec2.2}.
Since we already know that $E^\#-E^\flat$ is exponentially small of order $a$, and since
$\int_{t_1}^0A\ll\int_{t_0}^0A\simeq a$, the first term of \pref{yy}, namely
$y(t_1)\exp\left(\frac1\varepsilon\int_{t_1}^0A\right)$, is exponentially small (of order
$\int_{t_0}^{t_1}(-2\varphi)$).
With $y(0)=2$, formula \pref{yy} then yields:
$$
E^\#-E^\flat=2\varepsilon\Big(\int_0^{t_1}B(t)\exp\Big(-\frac1\varepsilon\int_0^tA\Big)
dt\Big)^{-1}\left(1+\pounds e^{-@/\varepsilon}\right)\,,
$$
which can be rewritten as
\begin{align*}
E^\#-E^\flat=&2\varepsilon\exp\big(-\frac a\varepsilon\big)
\exp\Big(\frac1\varepsilon\int_0^{t_0}(A-2\varphi)\Big)\\
&\times\Big(\int_0^{t_1}B(t)\exp\big(-\frac1\varepsilon\int_{t_0}^tA\big)
dt\Big)^{-1}\big(1+\pounds e^{-@/\varepsilon}\big)\,.
\end{align*}
Now the Laplace method (proposition \ref{pr1} in section \ref{sec2.2})
shows that the integral
$$
\int_0^{t_1}B(t)\exp\Big(-\frac1\varepsilon\int_{t_0}^tA\Big)dt
$$
has an expansion in powers of $\sqrt\varepsilon$ with a non-zero first term,
i.e. is of order $\sqrt\varepsilon$ since $B(t_0)\simeq\frac12$.
This completes the proof of theorem \ref{th1}.
\subsection{Symbolic and numerical results}\label{symb}
Using the Maple package at \\
http://www.univ-lr.fr/Labo/MATH/DAC,
we studied the expansion \pref6
for several symmetric potentials. For instance, the potential
$U(t)=(2\cosh (t)-5/2)^2$ yields
\begin{align*}
E^\#-E^\flat=&\frac{3^{5/2}}{\sqrt{2\pi}}
\varepsilon^{1/2}\exp\left(-\frac1\varepsilon(5\ln 2-3)\right)\\
&\times\left(1-\frac{793}{324}\varepsilon-\frac{534959}{209952}\varepsilon^2
-\frac{2490060889}{204073344}\varepsilon^3+O\left(\varepsilon^4\right)\right)\,.
\end{align*}
The polynomial potential with rational coefficients
$U(t)=(1-t^4)^2$
yields an $\varepsilon$-expansion with irrational coefficients:
\begin{align*}
E^\#-E^\flat=&\frac{64e^{\pi/2}}{\sqrt{\pi}}
\sqrt{\varepsilon}e^{-8/(5\varepsilon)}\Big(1+\big(-{ \frac {131}{64}}
-{ \frac {3}{32}}\pi \big)\varepsilon\\
&+\big(-{ \frac {5923}{8192}}-{ \frac {57}{2048}}\pi
+{\frac {9}{2048} }{\pi }^{2}\big ){\varepsilon}^{2}+ O(\varepsilon^3)
\Big).
\end{align*}
Concerning the potential \pref2, numerical simulations give the following
results:
\begin{itemize}
\item $a^\#$ is given by the relation $E^\# - E^\flat=\frac{16
\sqrt{2\varepsilon}}{\sqrt{\pi}}\exp{(\frac{-4}{3\varepsilon})} a^\#$,
%%%%%%%%%
\item $\tilde{a}^\#=1-\frac{71}{96} \varepsilon-
\frac{6299}{18432}\varepsilon^2-\frac{2691107}{5308416}\varepsilon^3$
is the value of the same parameter obtained by symbolic manipulation with three terms of the
asymptotic expansion,
%%%%%%%%%
\item $\Delta E = \frac{E^\#+E^\flat}{2}-E^\natural $ is expected to be of order $\exp(-\frac{8}{3 \varepsilon})$.
%%%%%%%%%
%\item $\delta$ is given by the relation $\Delta E = \exp(-\frac{8}{3 \varepsilon} (1+\delta))$.
\end{itemize}
\vspace*{0.2cm}
\noindent\begin{scriptsize}
\begin{center}
\begin{tabular}{|c|c|c|c|c|}
\hline
$1/\varepsilon$ & $E^\#$
& $a^\#$ & $\tilde{a}^\#$ & $a^\#-\tilde{a}^\#$ \\
\hline
2 & 1.04614053857975 & 0.55129173 & 0.48140379 & 6.98879402e-02 \\
4 & 0.47412601902887 & 0.77628568 & 0.78582414 & -9.5384546e-03 \\
6 & 0.31849701709428 & 0.86337190 & 0.864896269 & -1.52436242e-03 \\
\hline
8 & 0.24157410341774 & 0.90083691 & 0.90122221 & -3.85297223e-04 \\
10 & 0.19467902372582 & 0.92197555 & 0.92211728 & -1.41734313e-04 \\
12 & 0.16301207073454 & 0.93563721 & 0.93570146 & -6.42491620e-05 \\
\hline
14 & 0.14019322986821 & 0.94521099 & 0.94524428 & -3.32946274e-05 \\
16 & 0.12297222653846 & 0.95229836 & 0.95231734 & -1.91678736e-05 \\
18 & 0.10951599390792 & 0.95775964 & 0.95777035 & -1.57096142e-05 \\
\hline
20 & 0.09871245541601 & 0.96211977 & 0.96210310 & -1.11318467e-05 \\
22 & 0.08984800332140 & 0.96677395 & 0.96562888 & 6.41511616e-04 \\
24 & 0.08244380009277 & 0.98149952 & 0.96855405 & 1.59136097e-03 \\
\hline
\end{tabular}
\end{center}
\smallskip
\vspace*{0.2cm}
\begin{center}
\noindent\begin{tabular}{|c|c|c|c|c|}
\hline
$1/\varepsilon$ & $E^\natural$ & $(E^\#+E^\flat)/2$ & $\Delta E$ &
$-(3 \varepsilon /8) \ln \{\Delta E\}$
\\ \hline
2 & 0.72722933436087 & 0.87324733697904 & 1.46018002e-01 & 3.60754754e-01\\
4 & 0.46065652094628 & 0.46216455343306 & 1.50803248e-03 & 6.09089012e-01\\
6 & 0.31773469668930 & 0.31774227863839 & 7.58194908e-06 & 7.36858765e-01\\
\hline
8 & 0.24152668022043 & 0.24152671668585 & 3.64654158e-08 & 8.02823509e-01\\
10 & 0.19467600946049 & 0.19467600963480 & 1.74309428e-10 & 8.42632090e-01\\
12 & 0.16301187671876 & 0.16301187671959 & 8.31751334e-13 & 8.69226339e-01\\
\hline
14 & 0.14019321725968 & 0.14019321725968 & 4.38538094e-15 & 8.85549104e-01\\
16 & 0.12297222571282 & 0.12297222571282 & -8.88178419e-16 & 8.12281852e-01\\
18 & 0.10951599385352 & 0.10951599385352 & -9.71445146e-17 & 7.68131915e-01\\
\hline
20 & 0.09871245541241 & 0.09871245541241 & 2.77555756e-17 & 7.14808029e-01\\
22 & 0.08984800332116 & 0.08984800332116 & 1.24900090e-16 & 6.24187798e-01\\
24 & 0.08244380009275 & 0.08244380009275 & -1.66533453e-16 & 5.67677116e-01\\
\hline \end{tabular}
\end{center}
\end{scriptsize}
\vspace*{0.2cm}
We can observe
that $a^\#-\tilde{a}^\#$ has approximately
the same order as $\varepsilon^4$, except for $\varepsilon \leq 1/20$. The results of the second table are
in accordance with our third conjecture: the last column seems to tend
to $1$ (as we ask an exponential order $2 \, a$, which is twice that of $ E^\# - E^\flat $, only the upper half
of the table is relevant).
To determine $E^\flat$, we initialize
$E:=2\varepsilon$ and compare the values of the solutions $v_{+}$
and $v_-$, at $t=1$, of the following Cauchy problems
\begin{equation}\label{vplus}
\begin{gathered}
\varepsilon\dot{v}_+ = (t^2-1)^2-E-v^2_+\\
v_+(0)=0
\end{gathered}
\end{equation}
and
\begin{equation} \label{vmoins}
\begin{gathered}
\varepsilon\dot{v}_- = (t^2-1)^2-E-v^2_-\\
v_-(2)=-(2^2-1)=-3.
\end{gathered}
\end{equation}
For this purpose, we used an adaptive fourth-order Runge--Kutta method such that the
computations are valid for $\varepsilon \ge 1/20$.
If we set $\zeta(E)=v_+(1)-v_-(1)$, then the computation of $E^\flat$
consists in finding the zero of $\zeta$. This computation was performed with
the secant method.
To compute $E^\natural$, we consider the problems (\ref{vplus}) and
(\ref{vmoins}) respectively with the initial conditions
$v_+(-0.95)=\varphi(-0.95)$ and $v_-(2)=\varphi(2)$.
Concerning the computation of $E^\#$, the initial condition $v(0)=\infty$
is replaced by $w(0)=0$, where $w=1/v$. This change of chart produces a pole
(for $w$) in the neighborhood of $t=1$, thus we return to the first chart at,
for example, $t=1/2$, with the initial condition $v_+(1/2)=1/w(1/2)$. We
compare finally the values of $v_+(1)$ to $v_-(1)$ where $v_-$ is the
solution of (\ref{vmoins}) with the initial condition $v_-(2)=\varphi(2)$.
\begin{thebibliography}{99} \frenchspacing
\bibitem{bcdd}
\sc{E. Beno\^{\i}t, J.L. Callot, F. Diener, M. Diener},
{\it Chasse au canard},
Collect. Math., 31, 1-3 (1981) 37-119.
\bibitem{c}
{\sc J.L. Callot},
{\it Solutions visibles de l'\'equation de Schr\"odinger},
Math\'ematiques finitaires et analyse non standard, Tome
1, Publi. Math. de l'Univ. Paris VII n$^\circ$ 31 (1985) 105-119.
\bibitem{de}
{\sc E. Delabaere, H. Dillinger, F. Pham},
{\it Exact semiclassical expansions for one-dimensional
quantum oscillators},
J. Math. Phys. 38 (1997), no. 12, 6126-6184.
\bibitem{dx}
{\sc A. Delcroix},
{\it Propri\'et\'es asymptotiques des champs de vecteurs lents-rapides},
Th\`ese de doctorat de l'Universit\'e de Poitiers,
N$^\circ$ d'ordre : 254 (1989).
\bibitem{d}
{\sc F. Diener},
{\it M\'ethode du plan d'observabilit\'e},
Th\`ese de doctorat d'\'Etat,
pr\'epublication IRMA, CNRS, Strasbourg, France (1981).
\bibitem{g}
{\sc A. Gaignebet},
{\it \'Equation de Schr\"{o}dinger unidimensionnelle stationnaire.
Quantification dans le cas d'un double puits de potentiel sym\'{e}trique},
C. R. Acad. Sci. Paris, t.315, S\'{e}rie I (1992) 113-118.
\bibitem{gg}
\sc{C. G\'erard, A. Grigis},
{\it Precise estimates of tunneling and eigenvalues near a potential barrier},
J. Differential Equations 72 (1988), no. 1, 149-177.
\bibitem{b}
{\sc F. Koudjeti, I. P. van den Berg},
{\it Neutrices, external numbers, and external calculus},
In F. Diener and M. Diener, editors, Nonstandard Analysis in Practice, pages 145-170. Springer,
1995.
\bibitem{s}
{\sc C. Lobry, T. Sari, S. Touhami},
{\it On Tykhonov's theorem for convergence of solutions of slow and fast
systems},
Electronic Journal of Differential Equations, Vol. 1998 (1998), No. 19, pp. 1-22.
\bibitem{n}
{\sc E. Nelson},
{\it Internal Set theory},
Bull. Amer. Math. Soc. 83 (1977) 1165-1198.
\bibitem{Olver}
{\sc F.W.J. Olver},
{\it Introduction to Asymptotics and Special Functions},
Academic Press, New York (1974).
\bibitem{o}
{\sc R. E. O'Malley, Jr},
{\it Singularly Perturbed Methods for Ordinary Differential Equations},
Appl. Math. Sci. 89, Springer (1991).
\bibitem{v}
{\sc A.B. Vasil'eva, V.F. Butuzov},
{\it Asymptotic expansions of the solutions of singularly perturbed equations},
(in Russian) Izdat. ``Nauka'', Moscow (1973) 272 p.
\bibitem{w}
{\sc W. Wasow},
{\it Asymptotic Expansions for Ordinary Differential Equations},
Interscience (1965) section 40.
\bibitem{z1}
{\sc J. Zinn-Justin},
{\it Quantum field theory and critical phenomena},
Oxford Univ. Press (1989).
\bibitem{z2}
{\sc J. Zinn-Justin},
{\it From instantons to exact results},
``Analyse alg\'ebrique des perturbations
singuli\`eres I : M\'ethodes r\'esurgentes".
Travaux en cours, Hermann, 51-68 (1994).
\end{thebibliography}
\end{document}