
\documentclass{article}
\usepackage{amsmath, amssymb, epsfig}
\pagestyle{myheadings}
\markboth{\hfil Traveling waves in rapid solidification \hfil EJDE--2000/16}
{EJDE--2000/16\hfil Karl Glasner \hfil}
\begin{document}
\title{\vspace{-1in}\parbox{\linewidth}{\footnotesize\noindent
{\sc  Electronic Journal of Differential Equations},
Vol.~{\bf 2000}(2000), No.~16, pp.~1--28. \newline
ISSN: 1072-6691. URL: http://ejde.math.swt.edu or http://ejde.math.unt.edu
\newline ftp  ejde.math.swt.edu \quad ftp ejde.math.unt.edu (login: ftp)}
 \vspace{\bigskipamount} \\
%
 Traveling waves in rapid solidification 
\thanks{ {\em 1991 Mathematics Subject Classifications:} 80A22, 74J30.
\hfil\break\indent
{\em Key words and phrases:} Traveling waves, Phase field models.
\hfil\break\indent
\copyright 2000 Southwest Texas State University  and University of
North Texas. \hfil\break\indent
Submitted January 4, 2000. Published February 25, 2000.} }
\date{}
%
\author{ Karl Glasner }
\maketitle

\begin{abstract}
 We analyze rigorously the one-dimensional traveling wave problem for a
 thermodynamically consistent phase field model.  Existence is proved 
 for two new cases:  one where the undercooling is large but not in the 
 hypercooled regime, and the other for waves which leave behind an unstable 
 state.  The qualitative structure of the wave is studied, and under 
 certain restrictions monotonicity of front profiles can be obtained.  
 Further results, such as a bound on propagation velocity and non-existence 
 are discussed.  Finally, some numerical examples of monotone and 
 non-monotone waves are provided. 
\end{abstract}

\newtheorem{proposition}{Proposition}
\newtheorem{lemma}{Lemma}
\newtheorem{theorem}{Theorem}
\newtheorem{corollary}{Corollary}
\numberwithin{equation}{section}


\section{Introduction}

Continuum descriptions of phase transitions known as phase field
models have become popular in describing solidification processes
\cite{langer86b,collins85,penrose90,wang93,caginalp93,wheeler93b}.
All phase field models introduce an abstract order parameter $\phi$
which designates the phase of the system.  Without any loss of
generality we will take these to be $\phi = 1$ (solid, for example)
and $\phi = -1$ (liquid, for example).  The construction of these models
typically begins with an assumption about the free energy
(or alternatively, the negative entropy) of the system
as a function of the phase $\phi$ and the internal energy density
$e$.  We will consider the specific functional \cite{penrose90,wang93}
$$
{\cal F}(\phi, e) = \int \tfrac12 |\nabla \phi|^2 + F(\phi, e) dx
$$
which has been written in nondimensional form.
The gradient part accounts for surface energy, and the function
$F(\phi,e)$ is the bulk free energy density.

We will consider a form for $F$ similar to that proposed by Wang
{\it et al.} \cite{wang93}:
\begin{equation}
  \label{specificS}
F(\phi, e) = g(\phi) + \frac{\lambda}{2} u^2 = 
            g(\phi) + \frac{\lambda}{2} \left(e+\tfrac12 p(\phi)\right)^2.
\end{equation}
The constitutive function $g(\phi)$ is typically a positive,
symmetric, double well potential with minima at $\pm 1$, but we shall
not always require this.  The parameter $\lambda$
designates the coupling between the two fields; for narrow phase 
interfaces, it is physically the ratio of interface width to capillary
length \cite{karma96b}.
We assume $F(\phi; e) \in C^2$ and has the following properties:
\begin{itemize}
\item
For each fixed value of $e$, the function $F(\phi, e)$ will have exactly
two minima at $\pm 1$.  Specifically we suppose that
\begin{equation}
  \label{atmin}
\frac{\partial^2}{\partial \phi^2} F(\pm 1, e) = 
 \sigma^s_{\pm}(e) > 0.
\end{equation}
\item
For each fixed value of $e$, $F(\phi, e)$
possesses a unique intermediate maximum
at some point $\hat{\phi}$ (as an example, see figure \ref{fig:energy}).
\item
Outside of the interval $\phi \in (-1,1)$, $\partial F/ \partial \phi$
will be set to zero without any loss of generality.
\end{itemize}

To complete the model's description, a relationship between
internal energy and temperature $u$ is needed.  The choice we make has
the general form
$$
e = u - \tfrac12 p(\phi)
$$
where we will take $p$ to be an increasing function of $\phi$.
The dependence on $\phi$ accounts for latent heat released
during the phase change.  It can always be assumed (by a linear change
of the variable $u$, for example) that
$$
p(\pm 1) = \pm 1.
$$

The dynamics which arise from the above constitutive model result
from a gradient flow of ${\cal F}$ which simultaneously conserves total
internal energy.  In one dimension this yields the system
(see \cite{wang93} for the derivation)
\begin{eqnarray}
   \label{phase1}
\phi_t &=& \phi_{xx} + f(\phi, u) \\
   \label{phase2}
e_t &=& D u_{xx}
\end{eqnarray}
where
$$f(\phi, u) = -\left. \frac{\partial F}{\partial \phi} \right|_e
(\phi, u) $$
and the parameter $D$ is a nondimensional diffusion coefficient.

The problem we are interested in is where a phase interface
is propagating from left to right into the state $\phi = -1$.
We also suppose that temperature approaches constants
$u_-, u_+$ far to the left and right, respectively.
If we look for constant velocity traveling wave solutions of the form
$\phi = \phi(x- Vt)$, $u = u(x-Vt)$, we obtain the traveling wave
problem
\begin{eqnarray}
\phi'' + V \phi' + f(\phi, u) &=& 0 \label{ode1} \\
D u' + Vu - \frac12 V p(\phi) - Ve_\infty &=& 0 \label{ode2} \\
\phi \to -1, \quad u \to  u_+ \quad \text{as} \quad x &\to&
  +\infty \label{bc1} \\
\phi \to  1, \quad u \to  u_- \quad \text{as} \quad x &\to&
   -\infty \label{bc2} 
\end{eqnarray}
where $e_\infty = u_- - \tfrac12 p(1)$ is a constant which comes from
one integration of the second equation.  Since $u_x \to 0$
as $x \to \pm \infty$, $u_-, u_+$ must satisfy
\begin{equation}
  \label{ec}
u_- - u_+ = \tfrac12[p(1) - p(-1)] = 1.
\end{equation}
We define the nondimensional ``undercooling'' $\Delta$ to be
\begin{equation}
  \label{u1}
  \Delta \equiv -u_+ = -e_\infty + \tfrac12
\end{equation}
so that $u_- = -\Delta +1$.  We shall talk about the parameters
$\Delta$ and $e_\infty$ interchangeably.

For certain singular limits of phase field models, $u$ is
approximately constant near the phase interface, and an asymptotic
analysis can be conducted to determine the wave profile and
propagation velocity \cite{caginalp88}.  When the rate of phase change
is rapid, however, this is not the case.  In particular,
both fields will vary simultaneously, so the problem must be treated
as a system rather than a single equation.  This gives rise to a
number of features not seen in the one component case, including
non-existence, non-uniqueness and non-monotone behavior.
We therefore adopt an approach different from that of other traveling
wave problems.

Several other authors have studied traveling waves in phase field
systems.   Caginalp and Nishiura \cite{caginalp91} prove existence when the
coupling between the two variables is weak, allowing for the use of
a perturbative argument.  In a more recent study, Bates {\it et al.}
\cite{bates97a, bates97b}
establish existence of waves under the hypothesis of
hypercooling, when $\Delta > 1$.  In contrast, we are principally
concerned with the case $\Delta < 1$ and where the coupling between
the variables is significant.

The model we discuss here is quite general, and may very well pertain
to other phase transition phenomena, such as solidification of
binary alloys \cite{wheeler92, caginalp93} and
superconductivity \cite{chapman92}.  In one dimension at least, all of
these models have a similar mathematical structure.

The layout of the paper is as follows.
In section \ref{sec:prelim} we establish some basic properties of
solutions.  Existence results appear in section \ref{sec:exist}.
We then prove a bound on propagation velocity in
section \ref{sec:bound}.
With certain restrictions, monotonicity is established
in section \ref{sec:structure}.  In section \ref{sec:nonexist}, it is
shown that in a critical region of parameter space, no solutions may
be obtained.  Finally, in section \ref{sec:compute}, some computational
examples of both monotone and non-monotone waves are shown.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\section{Preliminary Results}
\label{sec:prelim}

We can rewrite the system (\ref{ode1} - \ref{ode2}) as a third order
dynamical system by introducing $\psi = \phi_x$:
\begin{eqnarray}
   \label{system1}
\phi_x &=& \psi \\ 
   \label{system2}
\psi_x &=& -V\psi - f(\phi, u) \\
   \label{system3}
u_x &=& \frac{V}{D} ( -u + \tfrac12 p(\phi) + e_\infty) 
\end{eqnarray}
For convenience, we set $\eta = (\phi, \psi, u)$,
$\eta_- = (1, 0, u_-)$, $\eta_+ =(-1, 0, u_+)$, and write the system
compactly as $\eta_x = G(\eta)$.
The task of finding a traveling wave solution is the same as finding a
trajectory connecting $\eta_-$ to $\eta_+$.


Because of the gradient construction of the original model, there is a
natural Lyapunov function for the system (\ref{system1}-\ref{system3}):
\begin{lemma}
  \label{lemma:lyapunov}
\begin{equation}
   \label{lyapunov}
\frac{d}{dx} \left\{ F(\phi; e_\infty) - \tfrac12 \phi_x^2 - 
       \frac{\lambda}{2} \left(\frac{D u_x}{V}\right)^2 \right\} =
V \psi^2 + \frac{\lambda D}{V} u_x^2
\end{equation}
\end{lemma}
{\bf Proof.}  This is just a straightforward calculation.
\hfill$\Box$

We can now establish a necessary condition for existence.
\begin{proposition}
    \label{prop:necessary}
Any solution to (\ref{ode1} -\ref{bc2}) must satisfy
\begin{equation}
   \label{entropy_diss}
   F(-1, e_\infty) - F(1, e_\infty) = 
   V \int_{-\infty}^{\infty} \psi^2(x) dx +
   \frac{\lambda D}{V} \int_{-\infty}^{\infty} u_x^2(x) dx.
\end{equation}
In particular, forward moving solutions have
\begin{equation}
   \label{Fbigger}
   F(-1, e_\infty) > F(1, e_\infty). 
\end{equation}
\end{proposition}
{\bf Proof.}
This is simply obtained by integrating (\ref{lyapunov}).
\hfill$\Box$

The final result of this section pertains to estimates for derivatives
of $\phi, u$.
\begin{proposition}
Suppose $\phi, u$ solve equations (\ref{ode1}),(\ref{ode2}), and
(\ref{bc2}).
There exist positive constants $C_1, C_2$, depending only on $e_\infty$,
so that
$$
|\phi_x| < C_1, \quad |u_x| < C_2.
$$ 
\end{proposition}
{\bf Proof.}  Integrating (\ref{lyapunov}) from $-\infty$ to $x$,
we can get
$$
\tfrac12 \phi_x^2(x) < F(\phi(x), e_\infty) - F(1, e_\infty)
$$
which establishes the first bound.  For the second, we take the
derivative of (\ref{system3}), multiply by the integrating factor
$K(x) = V/D \exp(-Vx/D)$ and integrate, giving
\begin{equation}
  \label{uxint}
u_x(x) = \tfrac12 \int_{-\infty}^x K(x - x') p(\phi(x'))_x dx'.
\end{equation}
We can bound the term $p(\phi)_x$ by a constant, and the
remaining integral evaluates to exactly $1$.
\hfill$\Box$

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Existence of traveling waves}
  \label{sec:exist}

In a separate paper \cite{glasner99}, traveling wave solutions are
constructed by formal asymptotic expansions.
The conclusions which were drawn from this analysis are the following:
\begin{itemize}
\item Solutions may not exist for all parameters.  When the ratio
$D/\lambda$ is small, a saddle-node bifurcation in the non-hypercooled
($\Delta < 1$) regime gives rise to two monotone solution branches,
a stable one where $V$ is large, and an unstable one where $V$ is small.
\item  When $D/\lambda$ is large, $\phi$ is approximately the solution to
\begin{eqnarray}
  \label{ufixed}
\phi_{xx} + V \phi_x + f(\phi, u_-) &=& 0 \\
  \label{ufixedbc}
\phi(-\infty) = 1, \quad \phi(+\infty) &=& -1
\end{eqnarray}
This is a standard traveling wave problem, and much is known about
solutions (see \cite{fife88} for example).
For forward-moving ($V>0$) solutions to exist, it is required that
\begin{equation}
  \label{onecond}
\int_{-1}^1 f(\phi, u_-) \, d\phi > 0
\end{equation}
which is (provided $g(\phi)$ is an even double well potential)
the same as requiring $\Delta > 1$.
This is the hypercooled situation for which existence, and sometimes
uniqueness, is guaranteed \cite{bates97a}.
\item  When $D$ is small, the $u$ variable is essentially slaved to
the $\phi$ variable on the fast solution branch.  Then $\phi$ is
approximately the solution to
\begin{eqnarray}
  \label{slaved}
\phi_{xx} + V \phi_x + f(\phi, e_\infty + \tfrac12 p(\phi)) &=& 0 \\
  \label{slavedbc}
\phi(-\infty) = 1, \quad \phi(+\infty) &=& -1
\end{eqnarray}
This is the same traveling wave problem as (\ref{ufixed}), but with
a different source term.  The analog of condition (\ref{onecond})
is actually the same as the earlier condition (\ref{Fbigger}) in this case
since
$$
f(\phi, e_\infty + \tfrac12 p(\phi) ) =
  -\frac{\partial F(\phi; e_\infty)}{\partial \phi}
$$
Provided (\ref{Fbigger}) holds, there is a forward moving solution
to this problem \cite{fife77}:
\begin{proposition}
  \label{prop:existslaved}
The traveling wave problem (\ref{slaved} - \ref{slavedbc})
possesses a solution pair $(\phi, V) = (\Phi_s(x), V_s)$.
$\Phi_s$ is decreasing and unique up to translation.
$V_s$ is positive and uniquely determined.
\end{proposition}
\end{itemize}

We shall now specify two types of traveling waves, each corresponding
to different types of source terms $f(\phi,u)$.

\begin{figure}
\begin{center}
\epsfig{file=functionf.ps, height=2in}
\caption
{(a) Type I waves. The nonlinear function $f(\phi, u_-)$ is of
 bistable type for each fixed value of $u_-$.  (b)  the function
 $f(\phi, u_-)$ for waves of type II. }
\label{fig:funf}
\end{center}
\end{figure}

{\bf Waves of \uppercase{T}ype \uppercase{I}:}
The first case is where, for fixed $u$, the function $f(\phi, u)$
is of ``bistable'' type (see figure 1a).  This is the usual situation
where both phases $\phi = \pm 1$ are stable.
We will further assume that $g(\phi)$ is an even function, with
$$
g'(\phi) \begin{cases}
>0 & \phi<0 \\
<0 & \phi>0.
\end{cases}
$$
The results of sections \ref{sec:structure}-\ref{sec:compute} will pertain
specifically to this case.

{\bf Waves of \uppercase{T}ype \uppercase{II}:}
The second case is where $f(\phi, u_-)$ is of monostable type
(figure 1b).  The phase $\phi = 1$ is actually now unstable, but this
type of wave has been exhibited in numerical simulations
\cite{almgren96,zukerman96,glasner99a}.

Typically, traveling wave problems are viewed as eigenvalue
problems in the propagation velocity $V$.  In view of the first
conclusion above, however, solutions may not always exist when
all other parameters are fixed.
The approach we adopt instead is to regard $V$ as fixed.  We then
have two different existence results by regarding either $D$ or
$\Delta = -e_\infty + \tfrac12$  as the unknown parameter.

In the first of two existence theorems, we take $D$ to be unknown.
There are two properties which will be required:
\begin{equation}
  \label{prop1}  \tag{P1}
F(-1, e_\infty) > F(1, e_\infty)
\end{equation}
and
\begin{equation}
  \label{prop2}  \tag{P2}
\int_{-1}^1 f(\phi, u_-) \, d\phi < 0.
\end{equation}
Property P1 is just the necessary condition established in proposition
\ref{prop:necessary}.  For waves of type I, properties P1 and P2 will
hold when
$$\tfrac12 < \Delta <  1.$$
For type II waves, property P1 takes the form
$$
\lambda > \lambda_s > 0
$$
where $\lambda_s$ is the value of $\lambda$ making $F(-1,e_\infty)=
F(1,e_\infty)$.  In general, $\lambda_s$ will depend on $\Delta$.
Notice that property P2 is automatically satisfied, since
$f(\phi, u_-)<0$.  The theorem is
\begin{theorem}
  \label{thm:exist}
Suppose $V \in (0,V_s)$ and properties P1, P2
hold.  Then there exists a triple $(\phi(x), u(x), D)$ solving the
problem (\ref{ode1} - \ref{bc2}), with the property that
$-1 \le \phi \le 1$.
\end{theorem}
In other words, non-hypercooled type I waves always exist in a range
of velocities, provided we can adjust $D$, typically by making it small.
The same is true of type II waves, provided $\lambda$ is large.

The second existence theorem pertains only to waves of type I.  We
take the undercooling $\Delta$ to be unknown, giving the following
result:
\begin{theorem}
  \label{thm:exist2}
Suppose $V,D >0$.  Then there exists a triple $(\phi(x), u(x),
\Delta)$ solving the problem (\ref{ode1} - \ref{bc2}), with the
property that $-1 \le \phi \le 1$.
\end{theorem}
This means that any velocity is accessible, provided the
undercooling is chosen properly.

The proofs are similar, and are given in a series of steps
which we shall outline.
The idea is to construct a shooting method with $D$ (or $\Delta$) as the
shooting parameter.  The system (\ref{system1} - \ref{system3}) possesses
a one dimensional unstable manifold near the fixed point $\eta_-$.
When $D$ is very large (or $\Delta$ small),
the trajectory which is formed from the unstable manifold never
reaches a point where $\phi = -1$.  But for small enough $D$
(or large $\Delta$), the trajectory will ``overshoot'',
that is continue below $\phi = - 1$.  Consequently,
there should be some intermediate value which gives $\phi(+\infty) \to -1$.

%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Behavior at infinity}
  \label{sec:linear}

We begin by considering the linearization of the
third order system (\ref{system1} - \ref{system3}) near
the point $\eta_-$.  Setting
$\eta' = \eta - \eta_-$ we obtain the linear system
$\eta'_x = {\cal L} \eta'$ where
$$
{\cal L} = \left[ \begin{array}{ccc}
0 & 1 & 0 \\
\sigma_+ & -V & \rho_+ \\
\frac{V}{2D} p'(1) & 0 & -\frac{V}{D}
\end{array}
\right]
$$ 
and we define
$$
\sigma_{\pm} = -\frac{\partial f}{\partial \phi} (\pm 1, u_{\mp}),
\quad \rho_{\pm} = -\frac{\partial f}{\partial u} (\pm 1, u_{\mp}).
$$ 
Note that $\sigma, \rho$ and $\sigma^s$ are related by
$$
\sigma^s_{\pm} = \sigma_{\pm} + \tfrac12 p'(\pm 1) \rho_{\pm}.
$$

\begin{lemma}
  \label{lemma:eigenvalues}
Assume that condition (\ref{atmin}) holds.
Then ${\cal L}$ has one eigenvalue $\mu$ with positive real part and
two with negative real part and $\mu$ depends continuously on $D$ and
$\Delta$.
\end{lemma}
{\bf Proof.}  A straightforward calculation gives for an eigenvalue
$\mu$ the characteristic polynomial
\begin{equation}
  \label{char}
\mu^3 + (V + V/D) \mu^2 + (V^2/D - \sigma_+) \mu
- V/D(\sigma^s_+) =0.
\end{equation}
It follows that the product of the roots is positive and their sum
is negative by the requirement (\ref{atmin}).
The only possibility is that one of
the roots is real and positive, leaving the other two possibly complex
with negative real part.  Continuous dependence follows from the
formula for roots of a cubic equation.
\hfill$\Box$

We may conclude that for the linear system, there is a
one dimensional unstable manifold ${\cal M}$ which is the subspace
$$
{\cal M} = \{ \alpha \eta_u | \alpha \in \mathbb{R} \}
$$
where $\eta_u$ is the eigenvector corresponding to $\mu$:
\begin{equation}
   \label{eigenvect}
\eta_u = \left[ \begin{array}{c}
-1 \\
-\mu \\
-\frac{p'(1)}{2(1 + \mu D/V )}
\end{array}
\right].
\end{equation}
Notice that $\eta_u$ depends on $D$ and $\Delta$ in a continuous
manner.

Let ${\cal T}$ be the two dimensional subspace orthogonal to the
linear unstable manifold so that ${\cal T} \oplus {\cal M} = \mathbb{R}^3$. 
We now state the existence of the unstable manifold for the nonlinear
system.
\begin{proposition}
  \label{prop:unstable}
In a sufficiently small neighborhood of $\eta_-$, say
$$
B_{\alpha} = \{ |\eta'| < \alpha \},
$$
there is a unique $C^2$ map
$$
T(\eta';D,\Delta): {\cal M} \cap B_{\alpha} \to {\cal T}, 
$$
which has the following properties:\\
(a) $T(0) = 0$ \\
(b) $\nabla T(0) = 0$ \\
(c) $T$ continuously depends on $D$ and $\Delta$ \\
(d) If $\eta'(x)$ is a solution to the linear system lying on ${\cal
M}$, then $T(\eta'(x)) + \eta'(x) + \eta_-$ is a solution to
the nonlinear system.\\
(e) Upper bounds on $\alpha^{-1}$ and $\| T \|_{C^2}$
depend only on $\| G\|_{C^2}$.
\end{proposition}
The proof of this is a standard result of the theory of dynamical
systems.

Since we will be concerned about what happens when $D \to 0$, the
bounds on $\alpha$ and $\| T \|_{C^2}$ are not sufficient.  These
bounds are, however, somewhat artificial.

\begin{corollary}
For all $D \in (0, \infty)$ there are constants $C_1, C_2>0$ such
that $\alpha$ can be chosen so that
$$
\alpha > C_1, \quad \| T \|_{C^2} < C_2
$$ 
\end{corollary}
{\bf Proof.}
We can multiply the entire system by the factor
$$
\beta = \frac{D}{D+1}
$$
and introduce the variable $X = x/\beta$, giving a new system
$\eta_X = \beta G(\eta)$.  The unstable manifold of the new system
is also one for the old as well since only the scale of the
independent variable was changed.  But for the new system,
$\beta \| G \|_{C^2}$
has a uniform bound for all $D \in (0, \infty)$ as required.
\hfill$\Box$

A starting point for the shooting method can now be given, by
constructing a trajectory which sits on the unstable manifold near
the point $\eta_-$.

\begin{proposition}
  \label{prop:startingpoint}
There exists a solution $\eta(x):(-\infty, 0] \to \mathbb{R}^3$ to
(\ref{system1}-\ref{system3}) with the following properties:\\
(a) There is a $K>0$ such that
$$|\eta(x) - \eta_{-} - K e^{\mu x} \eta_u| \le C K^2 e^{2 \mu x}$$
for some constant $C>0$ independent of $D$.\\
(b) $\eta(0)$ continuously depends on $D$, $\Delta$. \\
(c) There is some fixed $\phi_0 < 1$, so that  $\phi(0) = \phi_0$.
\end{proposition}
{\bf Proof.}
For arbitrary but small $K$ and $x \le 0$, the function
$$
\eta(x; K, D, \Delta) = \eta_{-} + K e^{\mu x} \eta_u 
  + T( K e^{\mu x} \eta_u; D, \Delta)
$$
is well defined.
Choose $\phi_0 < 1$ so that it is in the range of $\eta_{\phi}$ for all
$D>0$, the subscript denoting the $\phi$-component.  For small $K$,
the function
$$
\phi(0; K, D, \Delta) = 1 - K + T_{\phi}( K \eta_u; D,\Delta)
$$
is decreasing in $K$, and therefore has a unique continuous inverse
$K(\phi(0); D, \Delta)$.  We can then set $K = K(\phi_0; D, \Delta)$, so that
properties (b) and (c) are satisfied.
The estimate (a) follows from Taylor's theorem and properties (a),(b)
and (e) in proposition \ref{prop:unstable}.
\hfill$\Box$

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{The limit as $D \to 0$ and overshooting}
\label{sec:slaved}

For proof of theorem \ref{thm:exist}, we will be concerned about what
happens in the limit $D \to 0$.
The goal of this section is to show that when $D$ is near zero, the
trajectory formed from the extension of the solution obtained
in proposition \ref{prop:startingpoint} will eventually decrease past
the value $\phi = -1$, provided $V<V_s$. 

Formally setting $D=0$, we have $u \equiv \tfrac12 p(\phi) + e_\infty$
and obtain the reduced system
\begin{eqnarray}
  \label{slaved1}
\phi_x &=& \psi \\
  \label{slaved2}
\psi_x &=& -V \psi + F_{\phi}(\phi, e_\infty) 
\end{eqnarray}
As long as $\phi$ is monotone decreasing, it will be useful to work in
phase space, viewing $\phi$ as the independent variable.
Then $\psi(\phi)$ solves the
non-autonomous equation
\begin{equation}
  \label{psieqn}
\frac{d \psi}{d \phi} = -V + \frac{F_{\phi}(\phi, e_\infty)}{\psi}.
\end{equation}
Given a solution to this equation, the traveling wave
profile may be obtained by inverting the one to one function
\begin{equation}
  \label{formula1}
x(\phi) = \int_0^x dx = \int_{\phi_0}^{\phi} \frac{d \phi}{\psi(\phi)}.
\end{equation}
The constant $\phi_0$ is the value $\phi$ takes at $x=0$; choosing
it removes the translation invariance of the original problem.

When $V = V_s$ there is a unique decreasing solution $\Phi_s$ given by
proposition \ref{prop:existslaved},  whose derivative $\Psi_s$ must
satisfy (\ref{psieqn}).  We will first construct a trajectory which
decreases ``faster'' than $\Phi_s$.
\begin{proposition}
  \label{prop:comparison}
When $V < V_s$ there is a solution $\Psi:(-1, 1) \to \mathbb{R}$
of equation (\ref{psieqn}) with the following properties:\\
(a) $\Psi(\phi) < \Psi_s(\phi) < 0$.\\
(b) $\Psi(\phi) \to 0$ as $\phi \to 1$.\\
(c) For any $\phi_0 \in (-1, 1)$,
$$ \int_{\phi_0}^{-1} \frac{d\phi}{\Psi(\phi)} \quad \text{is bounded}.$$
(d) For any $\phi_0 \in (-1, 1)$,
$$ \int_{\phi_0}^{1} \frac{d\phi}{\Psi(\phi)} = - \infty.$$
\end{proposition}
{\bf Proof.} 
Let $\Psi_n(\phi)$ be the backwards solution to the initial value
problem (\ref{psieqn}) with $\Psi_n(1) = - 1/n$, which exists
down to a point where $\Psi_n \to 0$.  We claim that
$\Psi_n(\phi) < \Psi_s(\phi)$.  This must be true near $\phi=1$ since 
$\Psi_s$ approaches zero there.  Suppose then that there is some
largest $\phi = \phi^*$ at which $\Psi_n(\phi^*) = \Psi_s(\phi^*)$.
At that point we have
\begin{equation}
  \label{compare}
\frac{d \Psi_n}{d \phi}(\phi^*) = -V +\frac{F_{\phi}(\phi^*, e_\infty)}{\Psi_n} 
             > -V_s + \frac{F_{\phi}(\phi^*, e_\infty)}{\Psi_s}=
\frac{d \Psi_s}{d \phi}(\phi^*)
\end{equation}
which is impossible since $\Psi_n < \Psi_s$ when $\phi> \phi^*$.
$\Psi_n$ therefore exists on the whole interval $(-1, 1)$.

We now pass to a limit as $n \to \infty$.  A uniform bound on
$\| \Psi_n \|_{C^1}$ is obtained by noticing that
$$
\left| \frac{d\Psi_n}{d\phi} \right| < |V| + \left|
       \frac{F_{\phi}(\phi, e_\infty)}{\Psi_n} \right| 
< |V| + \left|\frac{F_{\phi}(\phi, e_\infty)}{\Psi_s} \right|
< |V| + |V_s| + \left| \frac{d\Psi_s}{d\phi} \right|
$$ 
There exists some subsequence which converges uniformly in
$C^1$ to a limit we shall simply call $\Psi$, solving equation
(\ref{psieqn}).

Now to verify that $\Psi$ has the stated properties.  For (a),
clearly we have $\Psi \le \Psi_s$.
But if they are equal at some point, an argument similar to that
in equation (\ref{compare}) would give a contradiction.

For (b), we observe that
$$
0 \ge \Psi_n(\phi) \ge -1/n - C(1- \phi)
$$
where $C$ is a positive uniform bound on the derivatives of $\Psi_n$.
Taking $n \to \infty$ gives
\begin{equation}
  \label{limit}
0 \ge \Psi(\phi) \ge - C(1 -\phi)
\end{equation}
which proves (b).

For (d), the inequality (\ref{limit}) gives
$$
\int_{\phi_0}^{1} \frac{d\phi}{\Psi(\phi)} <
C^{-1} \int_{\phi_0}^{1} \frac{d\phi}{\phi - 1} = -\infty.
$$

For (c), suppose instead
$$
\int_{\phi_0}^{-1} \frac{d\phi}{\Psi(\phi)} = + \infty.
$$
Formula (\ref{formula1}) then may be used to obtain
a solution $\phi(x)$ to the traveling wave problem
(\ref{slaved}-\ref{slavedbc}) with speed $V \ne V_s$, which is impossible.
\hfill$\Box$

We can use formula (\ref{formula1}) to obtain a solution $x(\phi)$
from $\Psi$, and by property (d) above, $x \to -\infty$ as $\phi \to 1$.
The inverse of this function, $\Phi(x)$, is a solution to the system
(\ref{slaved1} -\ref{slaved2}) with
$$
\lim_{x \to -\infty} \Phi(x) = 1.
$$
Additionally, by virtue of property (c) in the proposition, there is
some $x$ at which both $\Phi(x) = -1$ and $\Psi(x) < 0$.
Consequentially there is some $\epsilon, x^*$ for which
$$
\Phi(x^*) = -1 - \epsilon.
$$

We next study the limit of the full system as $D \to 0$.  It will be
shown that the limit of solutions to the full system will be the
solution of (\ref{slaved1}-\ref{slaved2}).
The first result shows that $u$ can approximately be regarded as a function of
$\phi$.
\begin{proposition}
  \label{prop:slaved}
Let $(\phi, \psi, u)$ be a solution to the system
(\ref{system1}-\ref{system3}) satisfying (\ref{bc2}). Then
there exists a positive constant $k$, so that
$$
|u(x) - \tfrac12 p(\phi(x)) - e_\infty| \le  k D \overline{\psi}(x)
$$
where
$$
\overline{\psi}(x) = \sup_{(-\infty, x)} |\psi|.
$$
\end{proposition}
{\bf Proof.}
Multiplying (\ref{system3}) by the integrating factor
$$
K(x) = \frac{V}{D} \exp(Vx/D)
$$
and integrating gives
\begin{equation}
  \label{integralform}
u(x) = \int_{-\infty}^x K(x' - x)
        \left[ \tfrac12 p(\phi(x')) + e_\infty \right] dx'.
\end{equation}
An exact Taylor expansion of the $p(\phi(x'))$ term around $x$ gives
\begin{eqnarray*}
u(x) &=& \int_{-\infty}^x K(x' - x) 
      \left[ \tfrac12 p(\phi(x))+e_\infty \right] \, dx' \\
&& + \int_{-\infty}^x  K(x' - x) p(\phi(x^*(x')))_x (x' - x)  dx'
\end{eqnarray*}
where $x^*(x') \in (x', x)$.  The first integral explicitly integrates to
$\tfrac12 p(\phi(x)) + e_\infty$.  For the second integral, we may bound
the term involving $x^*$ by a constant times $\overline{\psi}(x)$, and
the remaining term integrates to exactly $D/V$.
\hfill$\Box$

Now let $(\phi, \psi, u)$ be the solution obtained in proposition
\ref{prop:startingpoint}.  Since $\phi$ is
monotone decreasing at least up to $x=0$, we may again regard it as
the independent variable, and $\psi(\phi), u(\phi)$ solve 
\begin{equation}
   \label{fulleqn}
\frac{d \psi}{d \phi} = -V + \frac{g'(\phi) + \tfrac12 \lambda u(\phi) p'(\phi)}
{\psi}
\end{equation}
We will also write $\overline{\psi}(\phi)$ for the function
defined in the previous proposition.
It is important to note that, at least when $\phi$ is near $1$,
property (a) in proposition \ref{prop:startingpoint} indicates that
$\psi$ is also monotone decreasing, and consequentially
$$\overline{\psi} = |\psi| = - \psi.$$
We may now show that a solution of this equation approaches the
expected limit as $D \to 0$.

\begin{proposition}
  \label{prop:startclose}
Let $\psi(\phi)$ be a solution to equation (\ref{fulleqn}) obtained
in proposition \ref{prop:startingpoint}, and let
$\Psi(\phi)$ be a solution to equation (\ref{psieqn}) as obtained in
proposition \ref{prop:comparison}.  There is a positive constant $k_1$
so that
$$
|\psi(\phi) - \Psi(\phi)| < k_1 D, \quad \phi>\phi_0.
$$
\end{proposition}
{\bf Proof.}
Set $\Theta = \psi - \Psi$.  Using proposition \ref{prop:slaved},
$\Theta$ solves the equation
$$
\frac{ d \Theta}{d \phi} + \frac{F_{\phi}(\phi, e_\infty)}{\Psi \psi} \Theta
= \frac{U(\phi)}{\psi}
$$
where
$$
U(\phi) = \frac{\lambda}{2} p'(\phi) [u(\phi) - \tfrac12 p(\phi) - e_\infty]
$$
For any $\phi_0 < 1$, we can define the integrating factor
$$
K_2(\phi) = \exp \left(  \int_{\phi_0}^{\phi}
  \frac{F_{\phi}(\phi', e_\infty)}{\Psi(\phi') \psi(\phi')} d\phi'
\right).
$$
Note that since $\Psi, \psi$ and $F_{\phi}$ are all negative near
$\phi=1$, $K_2 \le 1$ for $\phi \ge \phi_0$.
Multiplying by $K_2$ and integrating from $\phi_0$
to some $\tilde{\phi}$ yields
$$
K_2(\tilde{\phi}) \Theta(\tilde{\phi}) -  \Theta(\phi_0) =
\int_{\phi_0}^{\tilde{\phi}} K_2(\phi) \frac{U(\phi)}{\psi} d\phi.
$$
Taking the limit $\tilde{\phi} \to 1$, we obtain
$$
|\Theta(\phi_0)| = \left| \int_{\phi_0}^{1} \frac{U(\phi)}{\psi}
 d\phi \right| < k_1 D.
$$
\hfill$\Box$

We conclude this section by showing that the solution to the full
problem overshoots when $D$ is small precisely because the ``slaved''
($D \to 0$) limit is being approached.

\begin{proposition}
  \label{prop:overshoot}
Let $(\phi,\psi,u)$ be the solution obtained in proposition
\ref{prop:startingpoint}.  There exists $x^*$, so that if $D$ is
small enough, $\phi(x) = -1$ for some $x \le x^*$.
\end{proposition}
{\bf Proof.}
By an appropriate translation of the limit solution $\Phi(x)$,
we can make $\Phi(0) = \phi(0) = \phi_0$.  Using proposition
\ref{prop:startclose} we have $\Psi(0) = \psi(0) + {\cal O}(D)$.
Since $\psi$ has a uniform bound, $u(x) = \tfrac12 p(\phi(x)) + e_\infty
+ {\cal O}(D)$, therefore $(\phi, \psi)$ solve a system of the form
\begin{eqnarray*}
\phi_x &=& \psi \\
\psi_x &=& -V \psi + F_{\phi}(\phi, e_\infty) + R(x)
\end{eqnarray*}
where $R = {\cal O}(D)$.
Now suppose that $\Phi(x^*) = -1 - \epsilon$.  By elementary theory,
solutions are continuous both with respect to initial data and
perturbations of the equation.  Consequentially for $D$ small enough we have
$ \phi(x^*) < \Phi(x^*) + \epsilon = -1$.
\hfill$\Box$

            %%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Proof of Theorem 1}
  \label{sec:proof}
We now formally define the shooting procedure which is used to obtain
a solution.  For $M>0$ set
$$
\Sigma_M(D) = \phi(M)
$$
where $\eta = (\phi, \psi, u)$ is the solution to
(\ref{system1}-\ref{system3}) obtained in proposition
\ref{prop:startingpoint}.  Since $\phi$ varies continuously both
with respect to its initial data and parameters, it follows that
$\Sigma_M(D)$ is also continuous.
For each $M$, we will vary $D$ so that 
$\Sigma = -1$, and so obtain a solution $\eta_M$.  We will conclude by
passing to the limit $M \to \infty$ and showing that the limiting
solution has the correct asymptotic behavior.

\paragraph{\bf Proof of Theorem 1.}

\noindent{\it Step 1:} Overshooting\\
For large $M$, we have already shown that $\phi$ can attain the value
$-1$ when $D$ is small.  Past this point, it is easy to see that
$\phi < -1$ since $\psi$ converges to zero in an exponential fashion
because $f \equiv 0$.  This ensures $\Sigma < -1$.

\vspace{.1 in}
\noindent{\it Step 2:}  Undershooting\\
When $D = \infty$ (i.e. $D^{-1} = 0$) we claim that $\Sigma > -1$.
Suppose instead there exists some $x^*$ for which $\phi(x^*) = -1$.
$u(x)$ is constant in this case, taking the value $u_-$.  Multiplying
(\ref{system2}) by $\psi$ and integrating from
$-\infty$ to $x^*$ gives
$$
\tfrac12 \psi^2(x^*) =  \int_{-1}^1 f(\phi, u_-) \, d\phi
- V \int_{-\infty}^{x^*} \psi^2(x) dx <0
$$
by virtue of property P2, which is impossible.

\vspace{.1 in}
{\it Step 3:} Uniform bounds and passing to the limit\\
We now define
$$
D_M = \sup \{ D| \Sigma_M(D) = -1 \}
$$
which must exist by steps 1 and 2 and the fact that $\Sigma$ is
continuous.  Associated with each $D_M$ is a solution
$\eta_M = (\phi_M, \psi_M, u_M)$ of (\ref{system1} - \ref{system3})
with $D = D_M$.  Since $\phi_M$ may not decrease past $-1$, we have
$$
-1 \le \phi_M \le 1.
$$
We claim that $D_M$ is non-decreasing in $M$.  For $\epsilon$
small, let $M_1 = M + \epsilon$.  Then
$$
 \Sigma_{M_1}(D_M) = \phi_M(M + \epsilon) \le \phi_M(M) = -1
$$
so that $D_{M_1} \ge D_M$.  This uniformly bounds $D_M$ from below.
Using the uniform bounds on $\phi_x$ and $u_x$, 
there is a constant $C$ so that
$$ \| \eta_M \|_{C^1([-M, M])} < C.$$
We can find a subsequence $M_j \to \infty$ so that $D_{M_j} \to D$
and $\eta_{M_j} \to \eta = (\phi, \psi, u)$
locally in the $C^1$ norm, giving a solution to
(\ref{system1}-\ref{system3}) on $(-\infty, \infty)$.

By construction, the limiting solutions must satisfy the left 
hand far field conditions (\ref{bc1}).  We need only show that the right hand
conditions (\ref{bc2}) hold.

\vspace{.1 in}
\noindent{\it Step 4:}  Behavior as $x \to +\infty$\\
The existence of a limit follows from the gradient character of the
system by the following lemma:
\begin{lemma}
Suppose $\lim_{x \to \infty} I(x) = I^*$, and that there is some
positive constant $C$ so that $|I_{xx}(x)| < C$.  Then
$\lim_{x \to \infty} I_x(x) = 0$.
\end{lemma}
{\bf Proof.}  For $\epsilon >0$, let $x_1$ be so large that
$$
|I^* - I(x)| \le \epsilon^2, \quad x \ge x_1.
$$
For any $x \ge x_1$,
$$
I(x) = I(x_1) + I_x(x_1)(x - x_1) + \int_{x_1}^x \int_{x_1}^{x'}
I_{xx}(x'') dx'' \, dx'.
$$
Then using the bound on the second derivative, the triangle
inequality $|I(x) - I(x_1)| \le 2 \epsilon^2$, and setting
$x - x_1 = \epsilon$, we have
$$
\epsilon |I_x(x_1)| \le |I(x) - I(x_1)| + \frac{C}{2} \epsilon^2
\le \left(2 + \frac{C}{2}\right) \epsilon^2
$$
which means $|I_x(x_1)| \le (2 + \frac{C}{2}) \epsilon$.
\hfill$\Box$

To continue the proof, let $I(x)$ be the Lyapunov function in lemma
\ref{lemma:lyapunov}.  Since $I$ is increasing and bounded from above,
the limit $\lim_{x \to \infty} I(x)$ exists.
A bound on the second derivative of $I$ follows from the bounds on 
derivatives of $\phi$ and $ u$.  Therefore $\psi, u_x \to 0$ as
$x \to \infty$, and consequentially $\lim_{x \to \infty}
F(\phi; e_\infty)$ exists.  Since $\phi$ is continuous, it must
also approach a limit $\overline{\phi}$.  Using equation
(\ref{system3}) it follows that
$$
\lim_{x \to \infty}  u(x) = e_\infty + \tfrac12 p(\overline{\phi}) \equiv
\overline{u}.
$$
Then $(\overline{\phi}, 0, \overline{u})$ must be a fixed point of the system
(\ref{system1} - \ref{system3}), which is the same as saying that
$\overline{\phi}$ is a critical point of the function $F(\phi;
e_\infty)$.   Three possibilities exist:\\
(a) $\overline{\phi} = 1$.\\
(b) $\overline{\phi} = \hat{\phi}$, the intermediate maximum of $F(\phi; e_\infty)$.\\
(c) $\overline{\phi} = -1$ and $\overline{u} = -\Delta$.\\
We can show that the first two are impossible.

If (a) holds,  then equation (\ref{entropy_diss}) implies that
$\psi, u_x \equiv 0$, which is impossible.
Suppose instead that (b) holds.  Let $\epsilon$ be small enough that
\begin{equation}
  \label{sdiff}
F(\phi, e_\infty) - F(-1, e_\infty) \ge \epsilon
\end{equation}
when $\phi$ is in some small neighborhood of $\hat{\phi}$;  this is
always possible since $\hat{\phi}$ is a maximum of $F(\phi, e_\infty)$.
Since $\psi, u_x \to 0$ as $x \to \infty$, there must exist some point
$X$ for which (\ref{sdiff}) holds for $\phi = \phi_M(X)$ and
\begin{equation}
\label{bound}
\tfrac12 \psi_M^2(X) + \lambda \frac{D_M^2}{V^2} (u_M)_x^2(X) < \epsilon
\end{equation}
for large values of $M$.  By integrating (\ref{lyapunov}) from $X$ to $M$
for each $\eta_M$, we get
\begin{equation}
  \label{contra}
\tfrac12 \psi_M^2(X) + \lambda \frac{D_M^2}{V^2} (u_M)_x^2 (X) >  
       F(\phi_M(X); e_\infty) - F(-1; e_\infty) > \epsilon
\end{equation}
which is a contradiction of (\ref{bound}).\\
This concludes the proof of Theorem \ref{thm:exist}.

       %%%%%%%%%%%%%%%%%%%
\subsection{Proof of Theorem 2}
  \label{sec:proof2}

The proof of theorem 2 is very similar to the first existence theorem,
so we will only point out the differences.  We again construct the 
``shooting'' function
$$
\Sigma_M(\Delta) = \phi(M)
$$
which is continuous in $\Delta$.
The steps in completing the proof are the same as above:

\vspace{.1 in}
{\it Step 1:} Overshooting
\vspace{.1 in}

\noindent
We will first establish a comparison solution by an existence result
for the one-component equation.
\begin{proposition}
  \label{prop:onecomp}
Let $V$ be given.  There exists $\tilde{V}>V, U<0, \Phi$ solving
\begin{equation}
  \label{bistable}
\Phi_{xx} + \tilde{V} \Phi_x + f(\Phi, U) = 0
\end{equation}
with the boundary conditions $\Phi(\pm \infty) = \mp 1$, where
$\Phi$ is decreasing.
\end{proposition}
{\bf Proof.}
Notice that (since we are dealing only with waves of type I)
the term $f(\phi, U)$ is of bistable type for each $U$.
Therefore (\ref{bistable}) has a solution pair $(\Phi, \tilde{V})$ for each
fixed $U$, where $\Phi$ is decreasing.  We need only show
$$
\tilde{V} \to +\infty \quad \text{as} \quad U \to -\infty.
$$
Regarding $\Phi$ as the independent variable, the derivative of
$\Phi$, call it $\Psi(\Phi)$, will solve
\begin{equation}
  \label{1comp}
\frac{d \Psi}{d \Phi} = -\tilde{V} + \frac{g'(\Phi)
    + \frac{\lambda}{2} U p'(\Phi)}{\Psi}.
\end{equation}
Suppose that as $U \to -\infty$, $\tilde{V}$ remains bounded.  Multiplying
(\ref{1comp}) by $\Psi$ yields
\begin{eqnarray*}
\frac{d}{d \Phi} \left(\frac12 \Psi^2 \right) &=&
g'(\Phi) + \frac{\lambda}{2} U p'(\Phi) - \tilde{V} \Psi \\
&\le& C_1 \left( \frac12 \Psi^2 \right) +C_2 U + C_3
\end{eqnarray*}
where $C_1$, etc. will denote positive constants.  Applying the usual
Gronwall lemma gives
$$
|\Psi(\phi)| \le C_4 |U|^{\frac12} + C_5 \quad \text{for $\phi \in (-1,1)$}.
$$
Now multiplying (\ref{1comp}) by $\Psi$ and integrating from $\phi=-1$
to $\phi=+1$, we obtain
\begin{eqnarray}
   \label{Vformula}
\tilde{V} &=& \frac{\lambda U}{\int_{-1}^1 \Psi(\phi) d\phi} \\
  &\ge& C_6 |U|^{\frac12} - C_7.
\end{eqnarray}
But then $\tilde{V} \to \infty$ as $U \to -\infty$, a contradiction. \hfill$\Box$

With this comparison solution we can easily show, provided $\Delta$ is large
enough, that the solution to the full system will overshoot.
\begin{proposition}
  \label{prop:overshoot1}
Let $(\phi, \psi, u)$ be a solution obtained in proposition
\ref{prop:startingpoint}, with $\Delta \ge -U +1$.  Then
there exists $x^*$, so that $\phi(x^*) = -1$.
\end{proposition}
{\bf Proof.}
We will again work in phase space, so as before $\psi(\phi), u(\phi)$ solves
\begin{equation}
  \label{2comp}
\frac{d \psi}{d \phi} = -V + \frac{g'(\phi) + \frac{\lambda}{2}
u(\phi) p'(\phi)}{\psi}
\end{equation}
which is valid so long as $\phi$ is decreasing; in particular, it is
true near $\phi = 1$.

First we claim that if $\psi(\phi^*) = \Psi(\phi^*)$ at some point
$\phi^*$, then $\psi(\phi) > \Psi(\phi)$ when $\phi > \phi^*$.  This
follows from the inequality
\begin{eqnarray}
  \notag
\frac{d \Psi}{d \phi}(\phi^*) &=&
-\tilde{V}+\frac{g'(\phi^*) + \frac{\lambda}{2} U p'(\phi^*)}{\Psi}\\
  \label{compare1}
&<&- V +\frac{g'(\phi^*) + \frac{\lambda}{2} u(\phi^*) p'(\phi^*)}{\psi}
= \frac{d \psi}{d \phi}(\phi^*). 
\end{eqnarray}
As a consequence, there is some small neighborhood of $\phi=1$ where
$\Psi$ and $\psi$ do not cross.  Suppose that $\Psi < \psi$ there.
Then by virtue of $g'(\phi)<0$, just as in (\ref{compare1}) we obtain
that  
\begin{equation}
  \label{bigger}
  \frac{d \psi}{d \phi} > \frac{d \Psi}{d \phi}.
\end{equation} 
But integrating $d\psi / d\phi$ from $\phi_0$ up to $\phi = 1$ gives
$$
\lim_{\phi \to 1} \psi(\phi) > \lim_{\phi \to 1} \Psi(\phi) = 0,
$$
which is impossible since $\psi \to 0$ as $\phi \to 1$.

It follows that $\psi(\phi)$ exists on the whole interval $(-1,1)$,
and that $\psi(\phi) < \Psi(\phi)$.
Finally near $\phi = -1$, by virtue of $g'(\phi) >0$ we obtain
(\ref{bigger}) again, and consequentially
$$
\lim_{\phi \to -1} \psi(\phi) < \lim_{\phi \to -1} \Psi(\phi) = 0.
$$
This means, according to formula (\ref{formula1}) that there is some
$x^*$ for which $\phi(x^*) = -1$.
\hfill$\Box$

\vspace{.1 in}
{\it Step 2:}  Undershooting\\
\vspace{.1 in}

We claim, with $\Delta = \tfrac12$, that $\phi$ never reaches $-1$.
Suppose that it does, at $x = x^*$.  Then integrating (\ref{lyapunov})
from $-\infty$ to $x^*$ gives
$$
-\tfrac12 \psi^2(x^*) - \lambda \frac{D^2}{V^2} u(x^*) =
V \int_{-\infty}^{x^*} \psi^2(x) dx + \frac{\lambda D}{V}
\int_{-\infty}^{x^*} u^2_x(x) dx
$$
which is impossible.

\vspace{.1 in}
{\it Step 3:} Uniform bounds and passing to the limit\\
\vspace{.1 in}

By steps 1 and 2, we may find $\Delta_M$, which is some value
of $\Delta$ satisfying
$$
\tfrac12 < \Delta_M \le -U + 1
$$
giving $\Sigma(\Delta_M) = -1$.
The $C^1$ bounds on $\phi, u$ are as before, and we pass to a limit
via a subsequence $M_j$ in the same manner,
obtaining a solution on $(-\infty, \infty)$ which
we again call $\eta = (\phi, \psi, u)$ and set $\overline{\Delta} = \lim_{M_j \to
\infty} \Delta_{M_j}$.

\vspace{.1 in}
{\it Step 4:}  Behavior as $x \to +\infty$\\
\vspace{.1 in}

The existence of the limit is obtained as before, and the rest of the
proof is essentially unaltered, with one exception.  In equation
(\ref{sdiff}), the quantity $e_\infty$ is replaced by the limiting value
$\overline{e_\infty} = -\overline{\Delta} + 1$.  Then (\ref{contra}) is
still true as long as $M$ is large enough so that $(e_\infty)_M = -\Delta_M
+ 1$ is sufficiently close to $\overline{e_\infty}$.
\hfill$\Box$


             %%%%%%%%%%%%%%%%%%%%%%%%%
\section{A Bound on the Propagation Velocity}
  \label{sec:bound}

In theorem 1, it was required that the propagation velocity be less
than that of the slaved system, which has a unique velocity $V_s$.
In fact, we can show that $V_s$ is actually an upper bound, at least
for monotone waves.  The
proof of this relies on a comparison technique, commonly called
the ``sliding method'' \cite{berestycki92}.
The main result is the following:
\begin{theorem}
  \label{thm:bound}
Suppose $\Phi_s$ is the decreasing solution given by proposition
\ref{prop:existslaved} and suppose $\phi$ is a decreasing solution
to the problem (\ref{ode1} - \ref{bc2}).  If there is
some translate of $\phi$ such that $\phi(x) < \Phi_s(x)$, then $V < V_s$.
\end{theorem}
{\bf Proof.}
Suppose that $\phi(x) < \Phi_s(x)$ already holds.  By a suitable
translation, we can ensure that $\phi(x) \le \Phi_s(x)$ and that
there is at least one point $x^*$ where $\phi(x^*) = \Phi_s(x^*)$.  At
this point, $\Phi_x = \phi_x = -c < 0$ and the following holds:
\begin{equation}
  \label{comparison}
0 \le (\Phi_s - \phi)_{xx} = (V - V_s) (-c) 
 + \tfrac12 \lambda p'(\phi) \left( \tfrac12 p(\phi) + e_\infty - u \right).
\end{equation}
Since $p(\phi(x))$ is decreasing, formula (\ref{integralform}) implies
$$
u(x^*) > \int_{-\infty}^{x^*} K(x' - x^*) \left[ \tfrac12 p(\phi(x^*)) +e_\infty
\right] dx' = \tfrac12 p(\phi(x^*)) + e_\infty.
$$
Using this in (\ref{comparison}) gives $V - V_s < 0$ as required.
\hfill$\Box$

The fact that $\phi$ may be translated so that $\phi < \Phi_s$ depends
on the decay rates of each function at $\pm \infty$.  In particular,
as $x \to \pm \infty$, we have
$$
\phi \sim \mp (1 -  C \exp(\mu_{\pm} x))
$$  
and
$$
\Phi_s  \sim \mp (1- C \exp(\mu^s_{\pm} x))
$$ 
where $\mu_-, \mu^s_- > 0$ and  $\mu_+, \mu^s_+ < 0$ may be obtained
by linearizing about the fixed points $\phi_{\pm}$.  Rather than exhibiting
lengthy proofs, we merely describe the outcome of this analysis
in a brief, informal fashion.

As $x \to -\infty$, the decay rate $\mu_-$ is simply the positive
eigenvalue found in section \ref{sec:linear}.  We can rewrite the
characteristic polynomial (\ref{char}) as
$$
(\mu + V/D)( \mu^2 + V\mu - \sigma_s) = - \frac14 \lambda p'(\phi_-)^2
$$
so that
\begin{equation}
  \label{mu_minus}
\mu_-^2 + V \mu_-  - \sigma^s_- \le 0.
\end{equation}
It is easy to obtain a similar characteristic polynomial for the
slaved system, and the positive decay rate $\mu^s_-$ solves
$$
(\mu^s_-)^2 + V_s \mu^s_-  - \sigma^s_- = 0 
$$
Using (\ref{mu_minus}) we have
\begin{equation}
  \label{minusinf}
\mu_- (\mu_- + V) \le \mu^s_- (\mu^s_- + V_s).
\end{equation}

To analyze what happens as $x \to +\infty$ we will assume that $p'(\phi_+)
=0$ for the sake of simplicity.  Then $\sigma = \sigma_s$ and $\mu_+,
\mu^s_+$ solve
$$
(\mu_+)^2 + V \mu_+  - \sigma^s_+ = 0
$$
and
$$
(\mu^s_+)^2 + V_s \mu^s_+  - \sigma^s_+ = 0
$$
so that
\begin{equation}
  \label{plusinf}
\mu_+ (\mu_+ + V) = \mu^s_+ (\mu^s_+ + V_s) = \sigma^s_+.
\end{equation}

Suppose now that $V \ge V_s$.  From (\ref{minusinf}) and
(\ref{plusinf}) we obtain
$$
0 < \mu_- \le \mu^s_-, \quad   \mu_+ \le \mu^s_+ < 0. 
$$
This means that $\Phi_s$ decays faster than $\phi$ as $x \to -\infty$
and $\phi$ decays faster than $\Phi_s$ as $x \to +\infty$, ensuring
that a suitable translation of $\phi$ to the left will give $\phi <
\Phi_s$.  We may now employ the theorem, which of course contradicts
our hypothesis that $V \ge V_s$.

                %%%%%%%%%%%%%%%%%%%%
\section{The uniform structure of solutions}
  \label{sec:structure}

For the remainder of this paper,  we will only discuss type \uppercase{I}
waves.  The discussion will also be limited to $e_\infty \in E$, where
$E$ is some bounded interval of admissible values.  It follows that
traveling wave solutions have $u \in E + (-\tfrac12, \tfrac12)$.  With the
further assumption of $\lambda$ being small, we will show that solutions
are monotone and there are uniform bounds on the width of the
interface profile.

We begin by describing three different $\phi$ intervals.  The first
corresponds to the interface layer, and it is defined as
\begin{equation}
L = \{ \phi |F(\phi, e) - F(-1, e) \ge \gamma, \quad e \in E \}. 
\end{equation}
where we arbitrarily choose $\gamma = \tfrac12 g(0)$.
When $\lambda$ is small, $L$ is nearly
centered at zero and has a width bounded away from zero.
The other two intervals correspond to the ``tails'' of the phase
profile.  We define them to be
\begin{equation}
  \label{tplus}
T_+ = \{ \phi|  f(\phi, u) \ge \frac{\sigma^s_+}{2} (1 - \phi) \quad 
  \text{for} \quad u \in E + (-\tfrac12, \tfrac12)  \}
\end{equation}
and 
\begin{equation}
  \label{tminus}
T_- = \{ \phi|  f(\phi, u) \le - \frac{\sigma^s_-}{2} (1 + \phi) \quad 
  \text{for} \quad u \in E + (-\tfrac12, \tfrac12) \}.   
\end{equation} 
We will assume that these intervals overlap, that is
\begin{equation}
  \label{overlap}  
(-1, 1) \subset L \cup T_- \cup T_+.
\end{equation}
It is easy to show this happens when $\lambda$ is small (how small
depending of course on the interval $E$).
Corresponding to each of these intervals are the sets
\begin{eqnarray*}
L^x &=&   \{ x | \phi(x) \in L \} \\
T_-^x &=& \{ x | \phi(x) \in T_- \} \\
T_+^x &=& \{ x | \phi(x) \in T_+ \}
\end{eqnarray*}
so that $L^x \cup T_-^x \cup T_+^x = (-\infty,\infty)$.

We can prove the following about the structure of $\phi$:
\begin{theorem}
  \label{thm:structure}
Assume that (\ref{overlap}) holds.  Then \\
(a) $\phi$ and $u$ are both monotone decreasing \\
(b) There are constants $w, W$, depending only on the functions
 $g$ and $p$, for which $w \le |L^x| \le W$.
\end{theorem}
{\bf Proof.}
Suppose first that $x \in L^x$.  Integrating (\ref{lyapunov})
from $x$ to $\infty$ gives the bound
$$
\tfrac12 \psi^2(x) > F(\phi(x), e_\infty) - F(-1, e_\infty) - 
  \lambda \left( \frac{D u_x}{V} \right)^2.
$$
Notice that $Du_x/V$ has a uniform bound by using (\ref{system3}), so
for small $\lambda$ we can ensure that there is a constant $B_1$ with
\begin{equation}
  \label{lower}
|\psi(x)| > B_1
\end{equation}
Therefore $\phi$ is monotone on each connected component of $L^x$.
Also, multiplying (\ref{system2}) by the integrating factor $K_1 = \exp(Vx)$
and integrating from $-\infty$ to $x$ gives
$$
\psi(x) = - \int_{-\infty}^x K_1(x' - x) f(\phi(x'), u(x')) \, dx',
$$
which is negative provided $\phi$ is in the tail region $T_+$.  A
similar argument shows $\phi$ is also decreasing in the other tail
region $T_-$, therefore $\phi$ decreases everywhere.  Using the
formula (\ref{uxint}), it follows that $u$ is also decreasing.

For part (b), suppose the interval $L = (\phi_a, \phi_b)$.  Then
$$
\left| \int_{L^x} \phi_x dx \right| = \phi_b - \phi_a = |L| 
$$
Denote the lower and upper bounds on $|\phi_x|$ in the interval
$L^x$ by $B_1$ and $C_1$ respectively.  Then it follows that
$|L^x| < |L|/B_1$ and $|L^x| > |L|/C_1$.
\hfill$\Box$.

                %%%%%%%%%%%%%%%%%%%%%%%%%%

\section{Criticality and non-existence}
  \label{sec:nonexist}

When $\Delta >1$, traveling waves always exist, but this is not
necessarily true when $\Delta<1$ \cite{schofield91,lowen91,lowen92}.
In fact, there is a critical value of the ratio $\lambda / D$,
below which no solutions exist.  On the other hand, if $D$ is small,
solutions must exist by virtue of Theorem 1.

Two technical bounds are needed to prove the main result, which
are given in the following lemma.  They both depend on the
monotone structure of section \ref{sec:structure}, so we assume
that $\lambda$ is small enough so that theorem \ref{thm:structure}
holds.

\begin{lemma}
  \label{lemma:phi}
There exist positive constants $I, J$, depending only on $g$ and $p$, for
which
\begin{equation}
  \label{IJ}
\int_{-\infty}^{\infty} \phi_x^2 \, dx \ge I, \quad
\int_{-\infty}^{\infty} 1 - p(\phi)^2 \, dx \le J.
\end{equation}
\end{lemma}
{\bf Proof.}
Setting $B$ to be the lower bound on $\phi_x$ in the interval $L^x$,
we have
$$
\int_{-\infty}^{\infty} \phi_x^2 \, dx
\ge \int_{L^x} \phi_x^2 \, dx \ge w B^2.
$$
For the second bound, note that by the definition of the tail regions
(\ref{tplus}-\ref{tminus}) there must be some constant $C$ so that
when $\phi \in T_{\pm}$,
$$
|1 - p(\phi)^2| \le C |f(\phi, u)|.
$$
We can split the integral into integrals over the sets $L^x, T_-^x$
and $T_+^x$.  Then
$$
\int_{L^x} 1 - p(\phi)^2 \, dx < |L^x| < W
$$
and
$$
\int_{T_-^x} 1 - p(\phi)^2 \, dx < C \int_{T_-^x} |f(\phi, u)| \, dx
 = C \left| \int_{T_-^x} \phi_{xx} + V \phi_x \, dx \right|.
$$
The last integral is evidently bounded by some constant which
depends only on $V_s$.  A bound on the integral over $T_+^x$
is obtained in the same way.
\hfill$\Box$

\begin{theorem}
Suppose that $\Delta<1$.  If
$$
\frac{\lambda}{D} \le \frac{2I}{J}
$$
then there is no solution to the traveling wave problem
(\ref{ode1}-\ref{bc2}).

{\bf Proof.}  We will show that when ${\lambda}/{D}$ is small,
the equation (\ref{entropy_diss}) can't hold.  Suppose
that $(\phi, u)$ is some solution.  Then the right hand side of
(\ref{entropy_diss}) has the estimate
\begin{equation}
  \label{rhs}
\text{rhs} = \frac{\lambda}{2} (2 \Delta - 1) < \frac{\lambda}{2}.
\end{equation}
For the left hand side, we introduce the negative constant
$$
\delta = \Delta -1.
$$
Notice that the function $u + \delta$ is always negative, and has
the asymptotic behavior
\begin{equation}
u + \delta \to
\begin{cases}
0 \quad \text{as} \quad x \to -\infty \\
-1 \quad \text{as} \quad x \to \infty 
\end{cases}
\end{equation}
We can transform the integral of $u_x^2$ as follows:
\begin{eqnarray}
\int_{-\infty}^{\infty} u_x^2 \, dx
&=& - \int_{-\infty}^{\infty} u_{xx} (u + \delta) dx \\
&=& \frac{V}{D} \int_{-\infty}^{\infty}
u_x( u + \delta) - \tfrac12 p(\phi)_x (u + \delta) \, dx \\
  \label{ux2}
&=& \frac{V}{2D} - \frac{V}{2D} \int_{-\infty}^{\infty} p(\phi)_x (u + \delta) dx
\end{eqnarray}
where we have integrated by parts and used equation (\ref{ode2}).
By using formula (\ref{integralform}) we have
$$
(u + \delta)(x) = \tfrac12 \int_{-\infty}^x K(x' - x) [p(\phi(x')) -
1] dx'.
$$
Using (\ref{ux2}), we can therefore obtain the estimate
\begin{eqnarray*}
\frac{D \lambda}{V} \int_{-\infty}^{\infty} u_x^2 dx
&=& \frac{\lambda}{2} - \frac{\lambda V}{2D}
 \int_{-\infty}^{\infty} p(\phi(x))_x  \int_{-\infty}^x
e^{\frac{V}{D}(x' - x)} [p(\phi(x')) - 1] dx' \, dx \\
&\ge&
\frac{\lambda}{2} - \frac{\lambda V}{2D} \int_{-\infty}^{\infty} p(\phi(x))_x 
\int_{-\infty}^x [p(\phi(x')) - 1] dx' \, dx  \\
&=& \frac{\lambda}{2} - \frac{\lambda V}{2D} \int_{-\infty}^{\infty}
 [1 - p(\phi(x))^2] dx \\
&\ge& \frac{\lambda}{2} - \frac{\lambda V}{2D} J
\end{eqnarray*}
where integration by parts was used for the second equality.
Then an estimate for the left hand side of (\ref{entropy_diss}) is
\begin{equation}
  \label{lhs}
\text{lhs} \ge V I +  \frac{\lambda}{2} - \frac{\lambda V}{2D} J.
\end{equation}
Comparing this to the right hand side estimate (\ref{rhs}), it follows
that for (\ref{entropy_diss}) to hold, one needs
$$
\frac{\lambda}{D} > \frac{2I}{J}.
$$
\hfill$\Box$

{\bf Remark.}  Sharp values for $I$ and $J$ can be obtained by letting
$\phi$ solve
$$
\phi_{xx} - g'(\phi) = 0
$$
and setting $I = \int \phi_x^2 dx$ and $J = \int 1 - p(\phi)^2 dx$.


                %%%%%%%%%%%%%%%%%%%%%%
\begin{figure}
\begin{center}
\epsfig{file=zeroosc.ps, height=2in}
\caption
{A monotone traveling wave.  The phase variable $\phi$ is solid,
temperature $u$ is dashed.  In this case, $D = .0628$.}
\label{fig:zeroosc}
\end{center}
\end{figure}
 
\begin{figure}
\begin{center}
\epsfig{file=oneosc.ps, height=2in}
\caption
{An oscillatory wave, $D = .077$.}
\label{fig:oneosc}
\end{center}
\end{figure}

\begin{figure}
\begin{center}
\epsfig{file=twoosc.ps, height=2in}
\caption
{An oscillatory wave, $D = .124$.}
\label{fig:twoosc}
\end{center}
\end{figure}

\begin{figure}
\begin{center}
\epsfig{file=threeosc.ps, height=2in}
\caption
{An oscillatory wave, $D = .205$.}
\label{fig:threeosc}
\end{center}
\end{figure}

\begin{figure}
\begin{center}
\epsfig{file=potential.ps, height=2in}
\caption{A plot of the function $F(\phi; e_\infty)$ for $e_\infty = -\Delta +\tfrac12 =
-.4$}
\label{fig:energy}
\end{center}
\end{figure}

                
\section{Numerical examples}
  \label{sec:compute}

In this section, we discuss some numerical computations of both
monotone and non-monotone wave profiles.
The numerical method for obtaining these was, in fact, identical
to the proof of theorem 1.
All parameters but $D$ were fixed, and 
a trajectory of the system (\ref{system1}-\ref{system3}) was computed
forward for some fixed amount of time, starting at a point
near $\eta_-$.  Then $D$ was adjusted so that the trajectory tended
toward $\eta_+$.  There was always at least one value of $D$ which
gave this behavior, and sometimes several.  In any case, the
smallest value of $D$ corresponded to a monotone profile, where
larger values gave oscillatory wavefronts.

Figures \ref{fig:zeroosc} - \ref{fig:threeosc} show four different
wave profiles, each corresponding to a different value of $D$,
but otherwise with the same parameters
$\lambda = 2$, $\Delta = .9$, and $V = .02$.
The functions $g$ and $p$ were
$$
g(\phi) = \frac14 (1- \phi^2)^2, \quad p(\phi)
 = \frac{15}{8} (\phi - \frac23 \phi^3 + \frac15 \phi^5)
$$
which yield the energy function $F(\phi; e)$ whose graph is given in
figure \ref{fig:energy}.


The results of the numerical study suggest a number of open problems.
A fairly exhaustive search of parameter space was conducted,
and from this we conjecture the following:
\begin{itemize}

\item There is a unique monotone wave, in the sense that exactly one
value of $D$ gives such a wave profile.  As a consequence, when
$\lambda$ is small, there is a unique value of $D$ giving a solution.

\item As $\lambda$ is increased (equivalently as the wells of the
energy $s$ become more uneven), more solutions
appear, corresponding to higher values of $D$ and having more
oscillations.

\item For fixed $D$ and $\Delta > 1$, there is exactly one monotone
wave whose velocity increases with $\Delta$.
\end{itemize}


                %%%%%%%%%%%%%%%%

\section{Conclusion}

We have given an in-depth analysis of the traveling wave problem
for phase field models.  To some extent, the questions of existence,
uniqueness, monotonicity and non-existence have all been addressed.
Quantitative results regarding velocity bounds and non-existence
have also been provided.

We have made no attempt to address the dynamics of the waves
under consideration.  Some work in this direction is presented in
\cite{glasner99}.  As for the non-monotone solutions, our suspicion is
that they are unstable; this is frequently the case for oscillatory
traveling waves \cite{volpert94}.  The interested reader also may wish
to look at some of the numerical experiments in \cite{lowen92} for
some unusual dynamical features.

\begin{thebibliography}{10}

\bibitem{almgren96}
{\sc R.~Almgren and A.~Almgren}, {\em Phase field instabilities and adaptive
  mesh refinement}, TMS/SIAM, 1996, pp.~205--214.

\bibitem{bates97a}
{\sc P.~W. Bates, P.~C. Fife, R.~A. Gardner, and C.~K. R.~T. Jones}, {\em The
  existence of traveling wave solutions of a generalized phase field model},
  Siam J. Math. Anal., 28 (1997), pp.~60--93.

\bibitem{bates97b}
\leavevmode\vrule height 2pt depth -1.6pt width 23pt, {\em Phase field models
  for hypercooled solidification}, Physica D, 104 (1997), pp.~1--31.

\bibitem{berestycki92}
{\sc H.~Berestycki and L.~Nirenberg}, {\em Traveling fronts in cylinders}, Ann.
  Inst. Henri Poincar{\' e},  (1992), pp.~497--572.

\bibitem{caginalp88}
{\sc G.~Caginalp and P.~Fife}, {\em Dynamics of layered interfaces arising from
  phase boundaries}, SIAM J. Appl. Math., 48 (1988), pp.~506--518.

\bibitem{caginalp91}
{\sc G.~Caginalp and Y.~Nishiura}, {\em Examples of traveling wave solutions
  for phase field models and convergence to sharp interface models in the
  singular limit}, Quart. of Appl. Math., 49 (1991), pp.~147--162.

\bibitem{caginalp93}
{\sc G.~Caginalp and W.~Xie}, {\em Mathematical models of phase boundaries in
  alloys: phase field and sharp interface}, Phys. Rev. E, 48 (1993),
  pp.~1897--1909.

\bibitem{chapman92}
{\sc S.~J. Chapman, S.~D. Howison, and J.~R. Ockendon}, {\em Macroscopic models
  for superconductivity}, SIAM Review, 34 (1992), pp.~529--560.

\bibitem{collins85}
{\sc J.~B. Collins and H.~Levine}, {\em Diffusion interface model of diffusion
  limited crystal growth}, Phys. Rev. B, 31 (1985), p.~6118.

\bibitem{fife88}
{\sc P.~C. Fife}, {\em Dynamics of Internal Layers and Diffuse interfaces},
  SIAM, 1988.

\bibitem{fife77}
{\sc P.~C. Fife and J.~B. Mc\uppercase{L}eod}, {\em The approach of solutions
  to nonlinear diffusion equations to traveling front solutions}, Arch. Rat.
  Mech. Anal., 65 (1977), pp.~335--361.

\bibitem{glasner99}
{\sc K.~Glasner}, {\em Rapid growth and critical behavior in phase field models
  of solidification},  (1999).
\newblock In press.

\bibitem{glasner99a}
{\sc K.~Glasner and R.~Almgren}, {\em Dual fronts in phase field models},
  (1999).
\newblock Submitted.

\bibitem{karma96b}
{\sc A.~Karma and W.-J. Rappel}, {\em Phase-field method for computationally
  efficient modeling of solidification with arbitrary interface kinetics},
  Phys. Rev. E.,  (1996), pp.~3017--3020.

\bibitem{langer86b}
{\sc J.~S. Langer}, {\em Models of Pattern Formation in First order Phase
  Transitions}, World Scientific, 1986.

\bibitem{lowen91}
{\sc H.~Lowen and J.~Bechhoefer}, {\em Critical behavior of crystal growth
  velocity}, Europhys. Lett., 16 (1991), pp.~195--200.

\bibitem{lowen92}
{\sc H.~Lowen, J.~Bechhoefer, and L.~Tuckerman}, {\em Crystal growth at long
  times: Critical behavior at the crossover from diffusion to kinetics limited
  regimes}, Phys. Rev. A, 45 (1992), pp.~2399--2415.

\bibitem{penrose90}
{\sc O.~Penrose and P.~Fife}, {\em Thermodynamically consistent models for the
  kinetics of phase transitions}, Physica D, 43 (1990), pp.~44--62.

\bibitem{schofield91}
{\sc S.~Schofield and D.~Oxtoby}, {\em Diffusion disallowed crystal growth.
  \uppercase{I. L}andau-\uppercase{G}inzburg model}, J. Chem. Phys., 94 (1991),
  pp.~2176--2186.

\bibitem{volpert94}
{\sc A.~Vol'pert, V.~Volpert, and V.~Volpert}, {\em Traveling Wave Solutions of
  Parabolic Systems}, American Mathematical Society, 1994.

\bibitem{wang93}
{\sc S.~Wang, R.~Sekerka, A.~Wheeler, B.~Murray, C.~Coriell, R.~Braun, and
  G.~McFadden}, {\em Thermodynamically consistent phase-field models for
  solidification}, Physica D, 69 (1993), pp.~189--200.

\bibitem{wheeler92}
{\sc A.~Wheeler, W.~Boettinger, and G.~McFadden}, {\em Phase-field model for
  isothermal transitions in binary alloys}, Physical Review A, 45 (1992),
  pp.~7424--7439.

\bibitem{wheeler93b}
\leavevmode\vrule height 2pt depth -1.6pt width 23pt, {\em Phase-field model of
  solute trapping during solidification}, Physical Review E, 47 (1993),
  pp.~1893--1909.

\bibitem{zukerman96}
{\sc M.~Zukerman, R.~Kupferman, O.~Shochet, and E.~Ben-Jacob}, {\em Concentric
  decomposition during rapid compact growth}, Physica D, 90 (1996),
  pp.~293--305.

\end{thebibliography}


\noindent{\sc Karl Glasner} \\
 Department of Mathematics, University of Utah \\
 Salt Lake City, Utah 84112-0090 USA \\
 email: glasner@math.utah.edu
 
\end{document}




