\documentclass[reqno]{amsart}
\usepackage{hyperref} 
\usepackage{graphicx}
\usepackage{dsfont}
\usepackage{subfigure}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2012 (2012), No. 160, pp. 1--30.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu}
\thanks{\copyright 2012 Texas State University - San Marcos.}
\vspace{9mm}}

\begin{document}
\title[\hfilneg EJDE-2012/160\hfil Pattern formation]
{Pattern formation in a mixed local and nonlocal reaction-diffusion system}

\author[E. Sander, R. Tatum\hfil EJDE-2012/160\hfilneg]
{Evelyn Sander, Richard Tatum}  % in alphabetical order

\address{Evelyn Sander \newline
Department of Mathematical Sciences \\
George Mason University\\
4400 University Dr. \\
Fairfax, VA 22030, USA}
\email{esander@gmu.edu}

\address{Richard Tatum \newline
Naval Surface Warfare Center Dahlgren Division\\
18444 Frontage Road Suite 327\\
Dahlgren, VA 22448-5161, USA}
\email{rchrd.ttm@gmail.com}

\thanks{Submitted March 19, 2012. Published September 20, 2012.}
\subjclass[2000]{35B36, 35K57}
\keywords{Reaction-diffusion system; nonlocal equations; 
 Turing instability; \hfill\break\indent pattern formation}

\begin{abstract}
  Local and nonlocal reaction-diffusion models have been shown to
  demonstrate nontrivial steady state patterns known as Turing
  patterns. That is, solutions which are initially nearly homogeneous
  form non-homogeneous patterns. This paper examines the pattern
  selection mechanism in systems which contain nonlocal terms. In
  particular, we analyze a mixed reaction-diffusion system with Turing
  instabilities on rectangular domains with periodic boundary
  conditions.  This mixed system contains a homotopy parameter $\beta$
  to vary the effect of both local $(\beta = 1)$ and nonlocal $(\beta
  = 0)$ diffusion. The diffusion interaction length relative to the
  size of the domain is given by a parameter $\epsilon$.  We associate
  the nonlocal diffusion with a convolution kernel, such that the
  kernel is of order $\epsilon^{-\theta}$ in the limit as $\epsilon \to  0$.
  We prove that as long as $0 \le \theta<1$, in the singular limit as
  $\epsilon \to 0$, the selection of patterns is determined by the
  linearized equation.  In contrast, if $\theta = 1$ and $\beta$ is
  small, our numerics show that pattern selection is a fundamentally
  nonlinear process.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{remark}[theorem]{Remark}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{assumption}[theorem]{Assumption}
\allowdisplaybreaks

\newcommand{\norm}[1]{\|#1\|}
\newcommand{\abs}[1]{|#1|}

\section{Introduction}

Turing in 1952 first suggested a mechanism in which chemicals, through
the process of diffusion, could form highly developed patterns
\cite{Turing:52}.
Now referred to as Turing patterns, they have been
experimentally shown in several well-known reaction-diffusion systems
such as the chlorite-iodide-malonic acid (CIMA) reaction
\cite{LengyelEpstein:92}, and more recently, the Belousov-Zhabotinsky
(BZ) reaction using a water-in-oil aerosol micro-emulsion
\cite{VanagEpstein:01}.  Prior to this important discovery, Field and
Noyes devised the well-known Oregonator reaction-diffusion equation
for the Belousov-Zhabotinsky (BZ) reaction \cite{FieldNoyes:74}.
However, these models do not account for any nonlocal interactions.
Using a nonlocal feedback illuminating source, Hildebrand, Sk\o dt and
Showalter \cite{HildebrandSkodtShowalter:01} experimentally showed the
existence of novel spatiotemporal patterns in the BZ reaction.  This
system is similar to System~\eqref{system:scaledMixed}, the equation we
consider in this paper, except that the version we consider does not contain a thresholding
function.  In particular, we consider the following system of
equations subject to periodic boundary conditions:
\begin{equation}
\begin{gathered}
u_t  = \epsilon (\beta \Delta u + (1-\beta)(J*u - \hat{J}_{0} \cdot u))+ f(u,v), \\
v_t  = d \epsilon( \beta \Delta v + (1-\beta)(J*v - \hat{J}_{0} \cdot v)) + g(u,v) \end{gathered}\label{system:scaledMixed},
\end{equation}
%
where $\Omega \subset \mathbb{R}^{n}$ is a rectangular domain for $n \in \{1,2,3\}$ and $u$
and $v$ model concentrations
of activator and inhibitor populations, respectively.
This equation contains a homotopy
between pure local diffusion and a nonlocal counterpart with the homotopy
parameter $\beta
\in [0,1]$. The convolution is defined by
\begin{gather}
J*u(x,t) = \int_{\Omega} J(x-y) u(y,t) dy,\\
\hat{J}_{0} = \frac{1}{\abs{\Omega}}\int_{\Omega} J(x) dx,
\end{gather}
where the kernel $J:\mathbb{R}^{n} \to \mathbb{R}$ of the convolution is periodic.
The kernel  $J$ is assumed to be such that for some $ 0 \le \theta \le 1$,
$\epsilon^\theta
J(x)$ limits uniformly to a smooth $\epsilon$-independent function $K(x)$ as
$\epsilon \to 0$. For our simulations, we use a Gaussian kernel that is modified by a
smooth cut-off function similar to the kernel used in \cite{HartleyWanner:09}.  See
Appendix A for more details about the kernel choice.
In System~\eqref{system:scaledMixed},
diffusion is modeled by the local and nonlocal operators, while the nonlinearities model
the associated reaction kinetics.  System \eqref{system:scaledMixed} includes both local and
nonlocal operators to model both short and long range diffusion effects \cite{Murray:93}.
The inclusion of both operators in the model is important for those physical systems in
which both effects are present. Again, see \cite{HildebrandSkodtShowalter:01}.  The
parameter $d$ is the ratio of the diffusion coefficients of $u$ and $v$, in which higher
values of $d$ indicate higher diffusion rates for the inhibitor species.  The parameter
$\epsilon$ is a scale parameter that regulates the effects of the reaction kinetics over
the domain $\Omega$.

For a large range of nonlinear functions $f$ and $g$, the system above
has an unstable spatially homogeneous equilibrium
$(\bar{u}_0,\bar{v}_0)$ (See
Lemma~\ref{lemma:turingInstabilityConditions}).  This corresponds to
an experimental or naturally occurring setting in which the uniformly
mixed starting state is destabilized by small fluctuations.  In order
to study how these natural fluctuations impact the evolving mixture,
one studies the time evolution of solutions starting at initial
conditions close to the homogeneous equilibrium.  After a rather short time,
such solutions form patterns. However,  even for a
fixed set of parameters, every initial condition results in different
pattern formation.  Thus through the initial condition, randomness
enters an otherwise deterministic process of pattern
formation. Although the fine structure of these patterns
differ, the patterns exhibit common characteristic features and
similar wavelength scales.  In this paper, we concentrate on
understanding the key features of these patterns under nonlocal diffusion.

This paper focuses on short term pattern formation rather than
asymptotic behavior. See Figure \ref{fig:patternFormation}.  In most natural systems, not only the asymptotic
behavior but also the transient patterns that occur dynamically are
critically important for understanding the behavior of the
system.  For example, in cases of
metastability~\cite{alikakos:bronsard:fusco:98a}, the convergence to the global
minimizers is exponentially long, and thus from a practical point of
view not viable. More generally, many systems simply never reach equilibrium on the time scale of the
natural problems. To quote Neubert, Caswell, and
Murray~\cite{neubert:etal:02}: ``Transient dynamics have traditionally
received less attention than the asymptotic properties of dynamical
systems. This reflects the difficulty in studying transients, the
belief that dynamics are more important than history, and the mistaken
belief that asymptotic properties completely characterize the
solution. \dots  There is, however, a growing recognition that transient
dynamics deserve more study.''

\begin{figure}[ht]
\centering
\subfigure[Initial condition $t=0$]{\includegraphics[width=0.46\textwidth]{fig1a}}
% sol_0.png}}
\subfigure[Time $t_i$]{\includegraphics[width=0.46\textwidth]{fig1b}}
% sol_1.png}}
\subfigure[Time $2 t_i$]{\includegraphics[width=0.47\textwidth]{fig1c}}
%sol_2.png}}
\subfigure[Time $3 t_i$]{\includegraphics[width=0.44\textwidth]{fig1d}}
% sol_3.png}}\\
\subfigure[Time $4 t_i$]{\includegraphics[width=0.46\textwidth]{fig1e}}
% sol_4.png}}
\subfigure[Time $8 t_i$]{\includegraphics[width=0.46\textwidth]{fig1f}}
% sol_8.png}}
\caption[Pattern Formation]{Early and later pattern formation with $\beta = 0$. 
Starting with an initial random perturbation about the homogeneous 
equilibrium (a),
the system evolves to show pattern formation after $t_i=2.23 \times 10^{-3}$ 
time units.
The behavior seen in  (b)-(c) is the focus of our results.  
Further pattern formation development occurs in (d)-(f).
}\label{fig:patternFormation}
\end{figure}

We specifically consider the pattern formation occurring in the limit
as $\epsilon$ approaches zero.  The parameter $\epsilon$ is a measure
of the interaction length of diffusion on a fixed domain. A rescaling
of the time $\tilde{t} = \epsilon t$ and setting $\gamma = 1/\epsilon$,
where we drop the tilde after rescaling, results in the system of
equations: $u_ t = (\beta \Delta u + (1-\beta)(J*u - \hat{J}_{0}\cdot u))+
\gamma f(u,v)$, $v_t = d ( \beta \Delta v + (1-\beta)(J*v -
\hat{J}_{0} \cdot v)) + \gamma g(u,v)$. In this form, $\gamma$ is
viewed as a measure of the domain size. See for example
Murray~\cite{Murray:93}.

A standard heuristic explanation of the pattern formation starting
near the homogeneous equilibrium is to say that the patterns can be
fully explained by considering only the eigenfunction corresponding to
the most unstable eigenvalue of the linearization (which we will refer
to as the most unstable eigenfunction). For example, such an
explanation was given by Murray~\cite{Murray:93} for the above
equation in the case that $\beta=1$. The same explanation was given
for spinodal decomposition for the Cahn-Hilliard equation by
Grant~\cite{grant:93a}. However, this explanation does not explain the
patterns that are seen: most unstable eigenfunctions are regularly
spaced periodic patterns, whereas the patterns seen are irregular
snake-like patterns with a characteristic wavelength. This discrepancy
arises because the most unstable eigenfunction only describes pattern
formation for solutions that start exponentially close to the
homogeneous equilibrium, whereas both numerical and experimental
pattern formation can at best be considered as polynomially close to
the equilibrium.  Sander and Wanner~\cite{SanderWanner:03} gave an
explanation for the irregular patterns for solutions for the above
equation in the case of purely local diffusion (i.e. for $\beta=1$),
and in this paper, we have extended these results to the case of
nonlocal diffusion. See Fig.~\ref{fig:examplesOfTuringPatterns}. By
applying~\cite{MaierPaapeWanner:98,MaierPaapeWanner:00}, Sander and
Wanner showed that the observed patterns arise as random
superpositions of a finite set of the most unstable eigenfunctions on
the domain called the {\em dominating subspace}.  These results are
not merely a use of simple linearization techniques, which would give
only topological rather than quantitative information as to the degree
of agreement between linear and nonlinear solutions.  Using the ``most
nonlinear patterns'' approach of Maier-Paape and
Wanner~\cite{MaierPaapeWanner:98}, it is possible to show both the
dimension of the dominating subspace, and the degree to which linear
and nonlinear solutions agree.  In particular, the technique shows
there exists a finite-dimensional inertial manifold of the local
reaction-diffusion system which exponentially attracts all nearby
orbits.  The orbit can be projected onto this finite-dimensional
manifold.  In this paper, we extend their results to the mixed
local-nonlocal equation given in \eqref{system:scaledMixed}. Our
results are the first generalization of the results obtained in
\cite{SanderWanner:03} to nonlocal reaction-diffusion systems.

We now state our main theoretical result.
In order to compare solutions to the nonlinear
equation \eqref{system:scaledMixed} and of the linearization of this
equation linearized at the homogeneous equilibrium $(\bar{u}_{0},
\bar{v}_{0})$, let $(u,v)$ denote a solution to the full nonlinear
equation starting at initial condition $(u_0,v_0)$, and let
$(u_{\rm lin},v_{\rm lin})$ denote a solution to the linearized equation
starting at the same initial condition.  We consider initial
conditions which are a specified distance $r_\epsilon$ from the
homogeneous equilibrium depending only on $\epsilon$.  We refer to this value $r_\epsilon$ as the
{\it initial radius}. The subscript denotes the fact that the choice
of initial radius varies with $\epsilon$.  We compare the trajectories
of $(u,v)$ and $(u_{\rm lin},v_{\rm lin})$ until the distance between the
solution $(u,v)$ and the homogeneous equilibrium $(\bar{u}_{0}, \bar{v}_{0})$ reaches
the {\it exit radius} value $R_\epsilon$. Clearly we choose
$R_{\epsilon} > r_{\epsilon}$.  When the solution has reached the exit
radius, we measure the {\it relative distance}
$$ D_\epsilon := \frac{\norm{(u(t),v(t)) - (\bar{u}_{0}, \bar{v}_{0}) -
(u_{\rm lin},v_{\rm lin})}_{**}}{\norm{(u_{\rm lin}(t), v_{\rm lin}(t))}_{**}}.$$ The
$\norm{\cdot}_{**}$-norm is equivalent to the standard Sobolev norm. See
Lemma~\ref{lemma:normEq} of Section~\ref{section:linearizationResults}.
If it is possible to choose the initial radius such that $r_\epsilon
\to 0$ as $\epsilon \to 0$  and the exit radius such that $R_\epsilon
\to \infty$  as $\epsilon \to 0$, such that  $D_\epsilon \to 0$ as
$\epsilon \to 0$, this implies that as $\epsilon$ limits to zero, the
nonlinear behavior of solutions is progressively closer to linear as
$\epsilon \to 0$. We refer to this as   {\em almost linear behavior}.

Extending techniques  in~\cite{MaierPaapeWanner:98,MaierPaapeWanner:00, SanderWanner:03},
we give conditions such that  the mixed system given by System~\eqref{system:scaledMixed}
displays almost linear behavior.  Our main theoretical result is summarized in the
following theorem.

\begin{theorem} \label{theorem:firstResult}
Let $\epsilon < \epsilon_{0}$ and choose $\alpha$ such that 
$\dim \Omega / 4 < \alpha < 1$.
Assume that  System \eqref{system:scaledMixed} satisfies the following conditions:
\begin{enumerate}
\item $\Omega$ is a rectangular domain of $\mathbb{R}^{n}$, where $n \in \{1,2,3\}$.

\item The nonlinearities $f$ and $g$ are sufficiently smooth and
  satisfy Turing instability conditions with real eigenvalues. Namely,
  they satisfy conditions such that the eigenvalues of the linearized
  right hand side of System \eqref{system:scaledMixed} are real; in
  addition, $f$ and $g$ are assumed to be such that for $\epsilon=0$,
  the system is stable, and there exists an $\epsilon_{0}>0$ such that
  for all $0<\epsilon \le \epsilon_0$, the homogeneous equilibrium
  $(\bar{u}_{0}, \bar{v}_{0})$ is unstable.  (These conditions are
  given in Lemma~\ref{lemma:turingInstabilityConditions} and
  Assumption~\ref{assumption:realEigenvalues}).

\item For some constant $0 \le \theta \le 1$, the limit of the kernel function
  \[K(x) = \lim_{\epsilon \to 0} \epsilon^{\theta}J(x)\] is a uniform
  limit to a $C^1$ smooth $\epsilon-$independent function, which is
  smoothly periodic with respect to $\Omega$.

\item Define $\hat{K}_{0}=\int_\Omega K(x) \;dx$. For $\beta$
  satisfying $0 < \beta < 1$ and two constants $s_{\ell}<s_r$
  determined by the functions $f$ and $g$ (defined in
  \eqref{eqn:sellr}), we assume that $\hat{K}_0$ satisfies the
  condition \[s_r< \hat{K}_0< \frac{s_{\ell}}{\epsilon^{1-\theta}
    \cdot (1-\beta)}\] as $\epsilon \to 0$.
\end{enumerate}
We define the constant $\chi$ to be a measure of the
order of the nonlinearity of the functions $f$ and $g$ 
(defined in \ref{eqn:xidefined}).
Then there is almost linear behavior with the following values of the
constants $r_\epsilon, R_\epsilon, D_\epsilon$ defined above:
\begin{gather*}
0 < r_{\epsilon} \sim \min ( 1,( \epsilon^{-(\alpha - \dim \Omega /4 ) 
+ \alpha/ \chi + \xi} )^{1 / ( 1 - \xi)} ),
\\
0 < R_{\epsilon} \sim  \epsilon^{-(\alpha - \dim \Omega / 4) 
 + \alpha / \chi + \xi },
\\
D_\epsilon \sim \epsilon^{\alpha - \dim \Omega / 4}.
\end{gather*}
\end{theorem}

The results of the above theorem are schematically depicted in
Figure~\ref{fig:almostLinearDiagram}. The value $\theta$ describes the
asymptotic $\epsilon$-dependent relationship between $J(x)$ and an
$\epsilon$-independent kernel $K(x)$.  Hypothesis 4 of the theorem
states that for fixed $\hat{K}_0$, $f$, and $g$, if $0 \le \theta<1$ then
any $\beta$ value between 0 and 1 is sufficient for the results of the
theorem to hold. However, if $\theta=1$, then $\beta$ must be
sufficiently close to 1 for the results to follow. This
can be clearly seen numerically  in Figures~\ref{fig:noALBForNonlocal1}-\ref{fig:noALBForNonlocal3}.
The parameters of the nonlinearity featured in Figure \ref{fig:noALBForNonlocal1} can be
found in \cite{SanderWanner:03} and are known to give rise to Turing instability under the
appropriate choice for $\epsilon$ and $d$. See \cite{MurrayVol2:93}.  Figures
\ref{fig:noALBForNonlocal2}-\ref{fig:noALBForNonlocal3} use random perturbations of
the nonlocal parameters in Figure \ref{fig:noALBForNonlocal1} that also give rise to
Turing instability.
Since the results are asymptotic in $\epsilon$, the values of $r, R$, and $D$ are
independent of $\theta$. As $\epsilon \to 0$, the size of $\theta$ determines how
quickly the solutions display almost linear behavior.

This theorem does not mention the case in which $\theta > 1$. In this
case the homogeneous equilibrium is asymptotically stable independent
of any other parameter values. Therefore all random fluctuations
sufficiently close to the homogeneous equilibrium converge to the
homogeneous equilibrium, and there is no pattern formation. We
performed numerics to see what size of fluctuations are possible in
this case.  Our numerics show that for fluctuations of size $0.1$, the
solutions converge to the homogeneous equilibrium.  The details and
proof of this theorem are given in Section
\ref{section:linearizationResults} as a combination of Theorems
\ref{thm:earlyPatternResults} and \ref{thm:almostLinearBehavior}. The
case of $\beta = 1$ in the above theorem is analogous to the
homogeneous Neumann case considered in \cite{SanderWanner:03}.  For
$\beta < 1$, our results are new.

The numerical results in Figures~\ref{fig:noALBForNonlocal1}-\ref{fig:noALBForNonlocal3} as well as
our other numerical investigations (not shown here) indicate that the
estimates for $\theta \to 1$ of the above theorem remain true as long as $\beta$ remains in an
interval $[\beta_{0},1]$, where $\beta_{0} > 0$. Indeed, in the
numerics the nonlinear behavior of solutions becomes more and more
pronounced for small $\epsilon$ as $\theta \to 1$ outside of
$[\beta_{0},1]$. Our numerics indicate an additional conclusion for small
$\beta$ (cf. Figures~\ref{fig:noALBForNonlocal1}-\ref{fig:noALBForNonlocal3}).  Specifically, they
indicate that the results of the above theorem cannot be generalized
to include the case of purely nonlocal systems. For systems close to
purely nonlocal (i.e., $\beta<\beta_0$), the behavior becomes
fundamentally nonlinear.  The thesis of Hartley~\cite{Hartley:08}
included numerical observations of a similar distinction between local
and nonlocal behavior for a phase field model with a homotopy between
purely local and nonlocal terms.

\begin{figure}[ht]
\centering
\subfigure[$\beta = 1.0$]{\includegraphics[width=0.46\textwidth]{fig2a}} %fig1a.png}}
\subfigure[$\beta = 0.99$]{\includegraphics[width=0.46\textwidth]{fig2b}} %fig1b.png}}
\subfigure[$\beta = 0.98$]{\includegraphics[width=0.46\textwidth]{fig2c}} %fig1c.png}}
\subfigure[$\beta = 0.97$]{\includegraphics[width=0.46\textwidth]{fig2d}} %fig1d.png}}
\subfigure[$\beta = 0.96$]{\includegraphics[width=0.46\textwidth]{fig2e}} %fig1e.png}}
\caption[Patterns for various $\beta$ values]{Examples of the patterns produced using
various $\beta$ values and $\epsilon = 1\times10^{-5}$ over the domain $[0,1]^{2}$. These
patterns occur when the relative distance  between the nonlinear and linear solution reaches
a threshold value $D_\epsilon$ of 0.01.  As $\beta$ decreases, the characteristic
size of the patterns becomes larger.
  Note  that $(s_{\ell}, s_{r}) \approx (.0071, .8806)$. See Appendix
\ref{KernelNonlinearities} for a description of the
kernel.}\label{fig:examplesOfTuringPatterns}
\end{figure}


\begin{figure}[t]
\centering
\includegraphics[width=.6\textwidth]{fig3}
\caption{A summary of behavior in each parameter 
region given by Theorem~\ref{theorem:firstResult}.} 
 \label{fig:almostLinearDiagram}
\end{figure}

\begin{figure}[ht]
\centering
\subfigure[$\epsilon = .01$]{\includegraphics[width=.48\textwidth]{fig4a}}
\subfigure[$\epsilon = .001$]{\includegraphics[width=.48\textwidth]{fig4b}}\\
\subfigure[$\epsilon = .0001$]{\includegraphics[width=.48\textwidth]{fig4c}}
\subfigure[$\epsilon = .00001$]{\includegraphics[width=.48\textwidth]{fig4d}}
\caption[Demonstration of Increased Nonlinear Behavior for Nonlocal
System]{Exit radius $R_{\epsilon}$ for relative distance 0.01,
  varied $\beta$ and nonlinearity parameters  $a = 150.0$, $b = 100.0$, $\rho = 13.0$, $A = 1.5$, and $K = 0.050$. For each simulation, we used random initial
  conditions with initial radius $r_{\epsilon} < \epsilon^{1/4}$. As
  $\beta \to 0$, the measured values are smaller, meaning that the
  behavior of solutions is determined by nonlinear effects. This is
  more pronounced for smaller $\epsilon$ values.  For each $\beta$ and
  $\epsilon$ value depicted we performed 20 distinct simulations.
  Distances are measured in the $\norm{\cdot}_{**}$ norm, as defined in
  Section 4.  To capture the rapid change in the graph, a refined grid
  is used near $\beta=1$.  In all simulations, we used a Galerkin
  spectral method with a semi-implicit 2D integration scheme that used
  $128^{2}$ nodes. Note that $(s_{\ell}, s_{r}) \approx (.0071,
  .8806)$. See Appendix \ref{KernelNonlinearities} for a description
  of the kernel.} \label{fig:noALBForNonlocal1}
\end{figure}

\begin{figure}[ht]
\centering
\subfigure[$\epsilon = .01$]{\includegraphics[width=.48\textwidth]{fig5a}}
\subfigure[$\epsilon = .001$]{\includegraphics[width=.48\textwidth]{fig5b}}\\
\subfigure[$\epsilon = .0001$]{\includegraphics[width=.48\textwidth]{fig5c}}
\subfigure[$\epsilon = .00001$]{\includegraphics[width=.48\textwidth]{fig5d}}
\caption[Demonstration of Increased Nonlinear Behavior for Nonlocal
System]{Exit radius $R_{\epsilon}$ for relative distance 0.01,
  varied $\beta$ and nonlinearity parameters  $a = 127.0$, 
 $b = 81.0$, $\rho = 29.0$, $A = 1.5$, and $K = 0.040$. 
 For each simulation, we used random initial
  conditions with initial radius $r_{\epsilon} < \epsilon^{1/4}$. As
  with the nonlinearity parameters associated with Figure \ref{fig:noALBForNonlocal1}, we see that
  the solutions are dominated by nonlinearity effects as $\beta \to 0$. This is
  more pronounced for smaller $\epsilon$ values.  For each $\beta$ and
  $\epsilon$ value depicted we performed 20 distinct simulations.
  Distances are measured in the $\norm{\cdot}_{**}$ norm, as defined in
  Section 4.} \label{fig:noALBForNonlocal2}
\end{figure}

\begin{figure}[t]
\centering
\subfigure[$\epsilon = .01$]{\includegraphics[width=.48\textwidth]{fig6a}}
\subfigure[$\epsilon = .001$]{\includegraphics[width=.48\textwidth]{fig6b}}\\
\subfigure[$\epsilon = .0001$]{\includegraphics[width=.48\textwidth]{fig6c}}
\subfigure[$\epsilon = .00001$]{\includegraphics[width=.48\textwidth]{fig6d}}
\caption[Demonstration of Increased Nonlinear Behavior for Nonlocal
System]{Exit radius $R_{\epsilon}$ for relative distance 0.01,
  varied $\beta$ and nonlinearity parameters  $a = 125.5$, $b = 76.0$, 
  $\rho = 15.2$, $A = 1.68$, and $K = 0.053$. For each simulation, 
  we used random initial
  conditions with initial radius $r_{\epsilon} < \epsilon^{1/4}$. 
  Qualitatively, we again see
  that the results do not change with changing the parameters of the nonlinearities.  For each $\beta$ and  $\epsilon$ value depicted we performed 20 distinct simulations.
  Distances are measured in the $\norm{\cdot}_{**}$ norm, as defined in
  Section 4.} \label{fig:noALBForNonlocal3}
\end{figure}

Note that in the above theorem and numerics, we have used the
$**$-norm to study distances since it is the natural mathematical
choice. The natural physical choice is the $L^\infty$-norm, by which
measure our results are only polynomial in $\epsilon$ rather than
order one. See Sander and Wanner~\cite{SanderWanner:03} for a more
detailed discussion of theoretical and numerical measurements in the
two norms.

Mixed local and nonlocal equations have been considered
previously. The Fisher-KPP was shown to generate traveling
waves~\cite{BerestyckiNadinPerthameRyzhik:09}.  A similar model also
appears in the survey article of Fife~\cite{fife:03} and in Lederman
and Wolanski~\cite{LedermanWolanski06} in the context of the
propagation of flames.  Hartley and Wanner also studied pattern
formation for a mixed phase field model with a homotopy parameter like
Eqn. \eqref{system:scaledMixed}~\cite{HartleyWanner:09}.
Specifically, for the stochastic nonlocal phase-field model, they used
functional-analytic structure to prove the existence and uniqueness of
mild solutions \cite{HartleyWanner:09}. We use a related method here
to describe the early pattern selection for
Eqn. \eqref{system:scaledMixed}.



This paper is organized as follows.  Section \ref{section:assumptions}
contains our assumptions.  Section
\ref{section:linearizationDescription} describes the properties of the
linearization of the right hand side.  The full spectrum of the
linearization is given in Section \ref{section:spectrum}.  The almost
linear results for System \eqref{system:scaledMixed} are found in
Section
\ref{section:linearizationResults}.
The final section includes a summary with some conjectures.

\section{Preliminaries} \label{section:assumptions}

In this section, we describe in detail our assumptions for the domain, 
kernel type, smoothness of the nonlinearity, and type of instability exhibited 
by the homogeneous equilibrium.

\begin{assumption}[Rectangular domain] \label{assumption:rectangularDomain} \rm
Let $\Omega$ be a closed rectangular subset of $\mathbb{R}^{n}$ for $n \in \{1,2,3\}$.
\end{assumption}

\begin{definition}[Spectrum of $-\Delta$] \label{definition:LaplacianEigenvalues}\rm
Suppose that $\Omega$ satisfies Assumption \ref{assumption:rectangularDomain}.
Let $L^{2}_{\rm per}(\Omega)$ be the space of functions which are  periodic with respect to $\Omega$ and belong to $L^{2}(\Omega)$.  For $\Delta:L^{2}_{\rm per}(\Omega) \to L^{2}_{\rm per}(\Omega)$, denote the ordered sequence of eigenvalues of $-\Delta$ as $0 = \kappa_{0} < \kappa_1 \le \dots  \to \infty$ \cite[Section 1.3.1]{ArendtSchleich:09}.  Denote the corresponding real-valued $L^{2}$-orthonormalized eigenfunctions by $\psi_{k}$, for $k \in \mathbb{N}$.
\end{definition}

Assume that $K \in L^{2}_{\rm per}(\Omega)$. An important aspect of
 Definition \ref{definition:LaplacianEigenvalues} is that we can define
 the Fourier series for functions $J$ and  $K$ as
\begin{align}\label{eqn:series}
J_N(x) = \sum_{k = 0}^{N} \hat{J}_{k} \psi_{k}(x), \quad\text{and}\quad
K_N(x) = \sum_{k = 0}^{N} \hat{K}_{k} \psi_{k}(x),
\end{align}
where
\begin{align} \label{eqn:fourierCoefficientsOfJ}
\hat{J}_{k} = \int_{\Omega} J(x) \psi_{k}(x) dx, \quad\text{and}\quad
\hat{K}_{k} = \int_{\Omega} K(x) \psi_{k}(x) dx.
\end{align}
Note that if $J,K \in C^{1}(\bar{\Omega})$, then $J_N \to J$ and $K_N \to K$  
uniformly as $N \to \infty$.  See \cite{Katznelson:04}.  
Observe that $\hat{J}_{0} = \int_{\Omega}J(x)
dx / \abs{\Omega} $ since $\psi_{0} = 1/|\Omega|$ by Definition
\ref{definition:LaplacianEigenvalues}.

\begin{definition}[Smooth periodicity on $\Omega$] \label{definition:omegaPeriodic}\rm
Suppose that $\Omega$ satisfies Assumption \ref{assumption:rectangularDomain}.  A function
$f:\Omega \to \mathbb{R}$ is said to be smoothly periodic on $\Omega$ if it is periodic
with respect to the boundary $\partial \Omega$ and can be extended to a smooth function on
$\mathbb{R}^n$.
\end{definition}

\begin{assumption}[The kernel function $J$ and its limit $K$] 
\label{assumption:kernel} \rm
Suppose that $\Omega$ satisfies Assumption \ref{assumption:rectangularDomain}.
 Let the kernel $J \in C^{1}(\bar{\Omega})$ be such that for some 
$0 \le \theta \le 1$, there is an
$\epsilon$-independent  function $K(x)$ such that $K(x) = \lim_{\epsilon \to 0}
\epsilon^{\theta} \cdot J(x) $, where the limit is a uniform limit. 
Assume that  $J(x)$ and $K(x)$ are smoothly periodic on $\Omega$.  
Furthermore, assume the Fourier
coefficients are such that  $\hat{K}_{0} > \hat{K}_{k}$  for all $k>0$, and thus
$\hat{J}_{0} > \hat{J}_{k}$ for $\epsilon$ sufficiently small.
\end{assumption}

The meaning of the convolution operator on $\mathbb{R}^{n}$ is well established,
but convolution on $\Omega$ is not. The following
definition specifies what is meant here by convolution of functions on $\Omega$.

\begin{definition}[Convolution on $\Omega$] \label{definition:convolution} \rm
Suppose that $K$ and $J$ satisfy Assumption \ref{assumption:kernel} and that the periodic extension of $K$ and $J$ are given as $K_{\rm per}$ and $J_{\rm per}$, respectively.  The convolution of $K$ and $u$ is defined as
$$K_{c}(u) = K*u = \int_{\Omega} K_{\rm per}(x-y) u(y) dy,$$
where $K_{c}:L^{2}_{\rm per}(\Omega) \to L^{2}_{\rm per}(\Omega)$
and the convolution of $J$ and $u$ is defined as
$$J_{c}(u) = J*u = \int_{\Omega} J_{\rm per}(x-y) u(y) dy,$$
where $J_{c}:L^{2}_{\rm per}(\Omega) \to L^{2}_{\rm per}(\Omega)$.
\end{definition}

We now consider the adjoints of $K_{c}$ and $J_{c}$. In particular, the adjoint 
of $J_{c}$ will be used in Section \ref{section:linearizationDescription} 
to describe the spectrum of the
linearization of System \eqref{system:scaledMixed}, while the adjoint of 
$K_{c}$ will be used in Section \ref{section:linearizationResults} to describe 
the unstable interval for which our main results hold.   
Let $K_{\rm per}$ and $J_{\rm per}$ be the smooth periodic extensions of $K$ and $J$,
respectively. We begin by
defining $A^{K}_{\rm per}$ such that
\begin{equation}
A^{K}_{\rm per}(x) = K_{\rm per}(-x)
\end{equation}
and $A^{J}_{\rm per}$ such that
\begin{equation}
A^{J}_{\rm per}(x) = J_{\rm per}(-x)
\end{equation}
 The convolution of $A^{K}$ with $u$ and $A^{J}$ with $u$ are given by
\begin{gather}
A^{K}_{c}(u) = A^{K}*u = \int_{\Omega} A^{K}_{\rm per}(y-x)u(x) dx, \label{eqn:adjointConvLimit}
\\
A^{J}_{c}(u) = A^{J}*u = \int_{\Omega} A^{J}_{\rm per}(y-x)u(x) dx. \label{eqn:adjointConv}
\end{gather}

\begin{lemma} \label{lemma:adjointConv1}
Suppose that Assumptions \ref{assumption:rectangularDomain} - \ref{assumption:kernel} are
satisfied, with $A^{K}_{c}$ defined as in \eqref{eqn:adjointConvLimit} and $A^{J}_{c}$ defined as in \eqref{eqn:adjointConv}.  The
adjoint of $K_{c}$ is $A^{K}_{c}$ and the adjoint of $J_{c}$ is $A^{J}_{c}$.
\end{lemma}

\begin{proof}
As the computation of the adjoints of $K_{c}$ and $J_{c}$ are similar, 
we only show the computation of the adjoint of $K_{c}$. 
 Let $u,v \in L^{2}_{\rm per}(\Omega)$.  Computing the inner product directly gives
\begin{align*}
(K_{c}(u), v) & = \int_{\Omega} K_{c}(u)(y) \cdot v(y) \,dy, \\
& = \int_{\Omega} \int_{\Omega} K_{\rm per}(y-x) \cdot u(x) \cdot v(y) \,dx \,dy.
\end{align*}
Switching the order of integration, we have
\begin{align*}
(K_{c}(u), v) &  = \int_{\Omega} \int_{\Omega} K_{\rm per}(y-x) \cdot u(x) \cdot v(y) \,dy \,dx, \\
& =  \int_{\Omega}  u(x) \Big( \int_{\Omega} K_{\rm per}(y-x) \cdot  v(y) \,dy \Big)
\,dx, \\
& =  \int_{\Omega}  u(x) \Big( \int_{\Omega} A^{K}_{\rm per}(x-y) \cdot v(y) \,dy \Big)
 \,dx, \\
& = (u, A^{K}_{c}(v) ).
\end{align*}
\end{proof}

By Lemma \ref{lemma:adjointConv1}, in order to guarantee that $K_{c}$
is self-adjoint, we must use an even kernel function.

\begin{definition} \label{definition:even} \rm
Let $T:\mathbb{R}^{n} \to \mathbb{R}$ and $x = (x_1, x_2, \dots , x_n) \in \mathbb{R}^{n}$.
The function $T$ is even if for all $x \in \mathbb{R}^{n}$,
\begin{align*}
T(x_1, x_2, \dots , x_i, \dots ,  x_n ) = T(-x_1, -x_2, \dots ,-x_i, \dots ,  -x_n ).
\end{align*}
\end{definition}

\begin{assumption} \label{assumption:even} \rm
Suppose that $J_{\rm per}$ is even.
\end{assumption}

\begin{lemma} \label{lemma:selfAdjoint}
Suppose that Assumptions \ref{assumption:rectangularDomain} - \ref{assumption:even} are
satisfied, and $A^{K}_{c}$ and $A^{J}_{c}$ are defined as in \eqref{eqn:adjointConvLimit} and \eqref{eqn:adjointConv}, respectively.  Then $K_{c}$ and $J_{c}$
are self-adjoint operators.
\end{lemma}

\begin{proof}
By Lemma~\ref{lemma:adjointConv1}, $A^{K}_{c}$ is the adjoint operator of $K_{c}$.  
Since $J$ is such that $J_{\rm per}$ satisfies Assumption \ref{assumption:even} and $K$
is defined as the limit function of $\epsilon^{\theta}\cdot J$ in 
Assumption \ref{assumption:kernel},
$K_{\rm per}(x) = K_{\rm per}(-x)$.  
Thus $A^{K}_{c} = K_{c}$ and $K_{c}$ is self-adjoint.  Since
$J_{\rm per}$ is also even by Assumption \ref{assumption:even},
the same reasoning shows that
$J_{c}$ is also self-adjoint.
\end{proof}

As pointed out in~\cite{HartleyWanner:09}, the convolution of $K$ with $u$
 has the same eigenfunctions as $-\Delta$.

\begin{lemma}[Spectrum of $J_c$ and $K_{c}$]\label{lemma:kernelProperties}
Suppose that $\Omega$ satisfies Assumption \ref{assumption:rectangularDomain}, and that
$K$ satisfies Assumptions \ref{assumption:kernel} - \ref{assumption:even}.  Then the
following statements are true:
\begin{itemize}
\item[(1)] $\hat{K}_{k} \to 0$ as $k \to \infty$.
\item[(2)] The spectrum of $K_{c}$ contains only the $\hat{K}_{k}$ and $0$, where $0$ is a limit point of the $\hat{K}_{k}$.
\item[(3)] For each fixed $\epsilon$, the above statements hold for $J_c$ as well.
\end{itemize}
\end{lemma} 
%\label{lemma:convProps}

\begin{proof}
For part 1, $K \in C^{1}(\bar{\Omega})$ implies that $\hat{K}_{k} \to 0$ as $k
\to \infty$.  See  \cite[Chapter 1, Section 4.3]{Katznelson:04}.  We have that
$K_{c}$ is a compact operator on a Banach space \cite[Theorem 8.3]{RenardyRogers:04}.
Therefore, the spectrum of $K_{c}$ contains only the eigenvalues $\hat{K}_{k}$ and its limit
point $0$ \cite[Theorem 7.3]{AbramovichAliprantis:00}.  If we fix $\epsilon$, observe that
part (3) follows using the same reasoning of parts (1) and (2) since $J \in
C^{1}(\bar{\Omega})$ and $J_{c}$ is also compact.
\end{proof}

In what follows, we will use the fact that the spectra of $K_{c}-\hat{K}_0$ 
and $J_c-\hat{J}_0$ are just shifted versions of the spectra in the above lemma.

\begin{assumption} \label{assumption:smoothNonlinearity} \rm
(Smoothness of the nonlinearity and a homogeneous equilibrium).
Let $\chi \in \mathds{N}$ be arbitrary.  Assume that $f,g : \mathbb{R}^2 \to \mathds{R}$ are
$C^{1+\chi}$-functions, and that there exists a point $(\bar{u}_0, \bar{v}_0) \in \mathbb{R}^2$
with $f(\bar{u}_0, \bar{v}_0) = g(\bar{u}_0, \bar{v}_0) = 0$. 
That is, $(\bar{u}_0,\bar{v}_0)$
is a homogeneous equilibrium for System~\eqref{system:scaledMixed}.
 If $\chi \ge 2$,  assume
further that the partial derivatives of $f$ and $g$ of order $2,3,\dots ,\chi$ at the
$(\bar{u}_0, \bar{v}_0)$ vanish.
\end{assumption}

\begin{assumption}[Turing instability] \label{assumption:turingInstability} \rm
Assume that $f$ and $g$ satisfy the smoothness conditions of  Assumption \ref{assumption:smoothNonlinearity} and that the homogeneous equilibrium of
System \eqref{system:scaledMixed} exhibits Turing instability. 
That is, in the absence of nonlocal
and local diffusion terms, the homogeneous equilibrium is stable, 
but in the presence of
the nonlocal and local diffusion terms, it is unstable.
\end{assumption}

\begin{lemma}[Turing Instability Conditions] 
\label{lemma:turingInstabilityConditions}
The homogeneous equilibrium of System \eqref{system:scaledMixed} exhibits 
Turing instability if and only if there exists $d > 0$ such that
\begin{itemize}
\item[(1)] $f_{u} + g_{v} < 0$, \label{condition:stable1}
\item[(2)] $f_{u}g_{v} - f_{v}g_{u} > 0$, \label{condition:stable2}
\item[(3)] $d f_{u} + g_{v} > 0$, \label{condition:unstable1}
\item[(4)] $(df_{u} + g_{v})^{2} - 4d(f_{u}g_{v} - f_{v}g_{u}) > 0$ \label{condition:unstable2},
\end{itemize}
where the partials are evaluated at the homogeneous equilibrium
 $(\bar{u}_{0}, \bar{v}_{0})$.
\end{lemma}

For a proof of the above lemma, see~\cite{Murray:81}. In particular, the
first two conditions in this lemma  ensure the stability of the homogeneous 
equilibrium in the absence of diffusion.  The next two conditions ensure 
that the homogeneous equilibrium is unstable when diffusion is present. 
 Note that the first and third conditions show that $d > 1$.



\begin{assumption}[Real eigenvalues for the nonlinearity] 
\label{assumption:realEigenvalues} \rm
Suppose that $f$ and $g$ satisfy Assumption \ref{assumption:smoothNonlinearity}.
 Assume that the eigenvalues of the linearization are real.
\end{assumption}

This section is concluded with definitions of the  function spaces that 
provide the context for the results of this chapter.

\begin{definition}[Function Spaces]\label{definition:functionSpaces} \rm
Let $L^{2}_{\rm per}(\Omega)$ be the space of smoothly periodic functions 
on $\Omega$ that belong to $L^{2}(\Omega)$ as defined by  
Definition \ref{definition:LaplacianEigenvalues}.  Let
\begin{equation}
\mathbb{L}^{2}_{\rm per}(\Omega) = L^{2}_{\rm per}(\Omega) \times L^{2}_{\rm per}(\Omega).
\end{equation}
For $s > 0$, let $H^{s}(\Omega)$ be the standard fractional Sobolev space
for real-valued functions  and let $H^{s}_{\rm per} (\Omega)$ be the space
of periodic functions in $H^{s}(\Omega)$. Let
\begin{equation}
\mathbb{H}^{s}_{\rm per}(\Omega) = H^{s}_{\rm per}(\Omega) \times H^{s}_{\rm per}(\Omega).
\end{equation}
\end{definition}

\section{Properties of the linearization} \label{section:linearizationDescription}

In this section, we state and derive explicit representations for the eigenvalues and
eigenfunctions of the linearized right hand side of System \eqref{system:scaledMixed}. 
For
$0 < \beta \le 1$ and $0 \le \theta < 1$, we show that if Assumptions
\ref{assumption:rectangularDomain} - \ref{assumption:realEigenvalues} are satisfied, 
then there exists an $\epsilon_{0}$ such that for $0 < \epsilon \le \epsilon_{0}$, the
homogeneous equilibrium will be unstable.

The following system is the linearized form of System \eqref{system:scaledMixed}:
\begin{equation}
U' = D \mathcal{J}_\epsilon U + B U, \label{eqn:linear}
\end{equation}
 where
\begin{gather}
D = \begin{pmatrix} 1 & 0 \\
0 & d \end{pmatrix}, \label{eqn:D}
 \\
\mathcal{J}_\epsilon  = \epsilon \mathcal{J}_1 + \epsilon^{1-\theta} \mathcal{J}_2 \label{eqn:J}
\\
\mathcal{J}_1 = \beta \begin{pmatrix} \Delta & 0 \\
0 & \Delta \end{pmatrix} \label{eqn:J1}
\\
\mathcal{J}_2  = (1-\beta) \epsilon^\theta \begin{pmatrix} J_{c} - \hat{J}_{0} & 0 \\
0 & J_{c} - \hat{J}_{0}  \end{pmatrix}, \label{eqn:J2}
 \\
B = \begin{pmatrix} f_{u}( \bar{u}_0, \bar{v}_0 ) & f_{v}(\bar{u}_0, \bar{v}_0 ) \\
g_{u}( \bar{u}_0, \bar{v}_0 ) & g_{v}(\bar{u}_0, \bar{v}_0 ) \end{pmatrix}, \label{eqn:B}
\end{gather}
for $U = (u,v)^{T}$.  For the sake of notation, we shall denote this operator as
\begin{equation}
\mathcal{H}_{\epsilon}  =  D \mathcal{J}_\epsilon + B \label{eqn:hEpsilon},
\end{equation}
where $\mathcal{H}_{\epsilon}: \mathbb{L}^{2}_{\rm per}(\Omega) \to \mathbb{L}^{2}_{\rm per}(\Omega)$.
The domains for the local and nonlocal operators are given respectively as
$D(\Delta) =H^{2}_{\rm per}(\Omega)$ and $D(J_{c}) = L^{2}_{\rm per}(\Omega)$.
Thus, for $0 < \beta \le 1$,
the domain of $\mathcal{H}_{\epsilon}$ is given as
$D(\mathcal{H}_{\epsilon}) = \mathbb{H}^{2}_{\rm per}(\Omega)$ and for $\beta = 0$,
 $D(\mathcal{H}_{\epsilon}) = \mathbb{L}^{2}_{\rm per}(\Omega)$.

The asymptotic growth of the eigenvalues of the negative Laplacian and $J_{c}$ is
important for our results.  Since both the negative Laplacian and $J_{c}$ have the same
set of eigenfunctions, the eigenvalues of  $- \beta \Delta - (1 -\beta)
(J_{c}-\hat{J}_{0})$ are given as
\begin{equation}
\nu_{k,\epsilon} =  \beta \kappa_{k} +  (1-\beta)  ( \hat{J}_{0} - \hat{J}_{k} ), \label{eqn:nuk}
\end{equation}
where $k \in \mathds{N}$.  Here, the $\kappa_{k}$ are the eigenvalues of $-\Delta$ as
defined in Definition \ref{definition:LaplacianEigenvalues} and
the $\hat{J}_{k}$ are the eigenvalues
of $J_{c}$ as defined in \eqref{eqn:fourierCoefficientsOfJ}.  Note that
$\nu_{k,\epsilon}$ is real since $\kappa_{k}$ and $\hat{J}_{k}$ are real.
For rectangular domains, the growth of eigenvalues of the negative Laplacian
are given as
\begin{equation}
\lim_{k \to \infty} \frac{\kappa_{k}}{k^{2/n}} = C_{\Omega}, \label{rel:asymGrowth}
\end{equation}
where $n = \dim \Omega$ and $0<C_{\Omega}<\infty$ \cite{CourantHilbert:89}.
 Since $J \in C^{1}(\bar{\Omega})$, by Lemma \ref{lemma:kernelProperties},
 $\lim_{k \to \infty} (\hat{J}_{0} - \hat{J}_{k})  = \hat{J}_{0}$.
Thus, we see that for fixed $\epsilon$, if $\beta > 0$,
\begin{equation}
\lim_{k \to \infty} \frac{\nu_{k,\epsilon}}{k^{2/n}} = \beta \cdot C_{\Omega},
\label{eq:asymptoticDistOfMixedOp}
\end{equation}
whereas if $\beta = 0$, $\lim_{k \to \infty} \nu_{k,\epsilon} =\hat{J}_{0}$.
Note that $\hat{J}_0$ depends on $\epsilon$.

\begin{lemma}[Eigenvalues of $\mathcal{H}_{\epsilon}$] \label{lemma:eigOfHepsilon}
Suppose that Assumptions \ref{assumption:rectangularDomain} - \ref{assumption:realEigenvalues} are satisfied.  The eigenvalues of $\mathcal{H}_{\epsilon}$ are
\begin{equation}
\lambda_{k,\epsilon}^{\pm} = \lambda^{\pm}(\epsilon \nu_{k,\epsilon})
 = \frac{b(\epsilon \nu_{k,\epsilon}) \pm \sqrt{b(\epsilon \nu_{k,\epsilon})^2 
- 4 c(\epsilon \nu_{k,\epsilon})}}{2}, \label{eqn:eigOfHe}
\end{equation}
where $\lambda_{k,\epsilon}^{\pm} \in \mathds{R}$ and
\begin{gather}
b(s)  = (f_{u}+g_{v})-(d+1)s \label{eqn:beig} \\
c(s)  = (f_{u}g_{v} - g_{u}f_{v}) - (df_{u} + g_{v})s + ds^2 \label{eqn:ceig},
\end{gather}
and $\nu_{k,\epsilon}$ are the eigenvalues of $-\beta \Delta - (1 -\beta) (J_{c} -
\hat{J}_{0} )$ associated with $\lambda_{k,\epsilon}^{\pm}$.  
The normalized eigenfunctions of $\mathcal{H}_{\epsilon}$ are given as
$\Psi^{\pm}_{k,\epsilon} = E^{\pm}(\epsilon \nu_{k,\epsilon}) \cdot \psi_{k}$, where
$E^{\pm}(\epsilon \nu_{k,\epsilon})$ are eigenfunctions of $B - \epsilon \nu_{k,\epsilon}
D$.  If $\beta = 0$, then for each fixed $\epsilon$,  
$\lambda^{\pm}(\epsilon \cdot
\nu_{k,\epsilon}) \to \lambda^{\pm}( \epsilon \cdot \hat{J}_{0} )$ as $k
\to \infty$.
\end{lemma}

\begin{proof}
Fix $\epsilon>0$. We begin by showing that any eigenvalue of $\mathcal{H}_{\epsilon}$ is
expressible as $\lambda^{\pm}_{k,\epsilon}$ for some $k$.  Let  $\lambda$ and 
$U$ be an eigenvalue and corresponding eigenfunction of $\mathcal{H}_{\epsilon}$, 
respectively, where $U \in \mathbb{L}^{2}_{\rm per}(\Omega)$ and $U \neq (0,0)$.  
We can write $U \in
\mathbb{L}^{2}_{\rm per}(\Omega)$ as
$$
U = \sum_{j=0}^{\infty} \psi_{j} r_{j},
$$
where $r_{j} = (s_{j},t_{j})^{T}$ and $s_{j},t_{j} \in \mathds{R}$. 
 Since $U$ is nontrivial, there exists some $k$ such that $r_{k} \neq (0,0)^{T}$. 
 Since $\lambda$ is an eigenvalue of $\mathcal{H}_{\epsilon}$, and $U$ 
is the corresponding eigenfunction,
$$ \mathcal{H}_{\epsilon} U - \lambda U = 0.
$$  Using \eqref{eqn:hEpsilon}, we evaluate the left hand side as
\[
\mathcal{H}_{\epsilon} U - \lambda U
=  \sum_{j = 0}^{\infty}(  D \mathcal{J}_\epsilon + B - \lambda I)\psi_{j}r_{j} 
= \sum_{j = 0}^{\infty}( -\epsilon \nu_{j,\epsilon} D + B - \lambda I) \psi_{j} r_{j}.
\]
Since the $\psi_{j}$ are linearly independent,
$$ (-\epsilon \nu_{j,\epsilon} D + B - \lambda I) r_{j} = 0,$$
for all $j$.  For $j = k$, we see that $r_{k}$ is nontrivial, which implies that \\
$-\epsilon \nu_{k,\epsilon} D + B - \lambda I$ must be singular for some $k$. Therefore, we have that
$$
\abs{-\epsilon \nu_{k,\epsilon} D + B - \lambda I} = 0.
$$
Solving for $\lambda$ gives the result.

Let $\lambda^{\pm}_{k,\epsilon}$ be as given by \eqref{eqn:eigOfHe} 
and $E^{\pm}(\epsilon \nu_{k,\epsilon})$ be the associated eigenfunction of 
$B - \epsilon \nu_{k,\epsilon} D$.  To show that $\lambda^{\pm}_{k,\epsilon}$ 
is an eigenvalue of $\mathcal{H}_{\epsilon}$ and $\Psi^{\pm}_{k,\epsilon}$ is an
 eigenvector of $\mathcal{H}_{\epsilon}$, we compute
\begin{align*}
\mathcal{H}_{\epsilon} \Psi^{\pm}_{k,\epsilon} 
& =  D \mathcal{J}_\epsilon \Psi^{\pm}_{k,\epsilon} + B \Psi^{\pm}_{k,\epsilon} \\
& = \lambda^{\pm}_{k,\epsilon}E^{\pm}(\epsilon \nu_{k,\epsilon}) \psi_{k}\\
& = \lambda^{\pm}_{k,\epsilon} \Psi^{\pm}_{k,\epsilon}
\end{align*}
Since the $\lambda^{\pm}_{k,\epsilon}$ are distinct and the algebraic 
multiplicity is 1, the geometric multiplicity is also 1.  
Thus, each eigenvalue corresponds to one and only one eigenfunction. 
 As $k \to \infty$, Lemma \ref{lemma:kernelProperties} shows that 
$\hat{J}_{k} \to 0$.  If $\beta = 0$, 
$\lambda^{\pm}(\epsilon \nu_{k,\epsilon}) \to \lambda^{\pm}( \epsilon\hat{J}_{0} )$ 
as $k \to \infty$.  Assumption \ref{assumption:realEigenvalues} implies
that $\lambda^{\pm}_{k,\epsilon} \in \mathds{R}$.
\end{proof}

We now give a useful, sufficient condition that describes when the eigenvalues 
of the linearization are real.

\begin{lemma}
Suppose that Assumptions 
\ref{assumption:rectangularDomain} - \ref{assumption:turingInstability} 
are satisfied.  A sufficient condition on $f$ and $g$ for the eigenvalues 
of our system to be real is
\[
(f_{u}+g_{v})^{2} - 4(f_{u}g_{v} - f_{v}g_{u}) > 0.
\]
\end{lemma}

\begin{proof}
Suppose that $(f_{u}+g_{v})^{2} - 4(f_{u}g_{v} - f_{v}g_{u}) > 0$. 
Using Equations \eqref{eqn:eigOfHe}, \eqref{eqn:beig} and \eqref{eqn:ceig}, 
we see that the eigenvalues are real if and only if $b^{2}(s) - 4c(s) \ge 0$, 
where $s = \epsilon \nu_{k,\epsilon} >0$. Expanding the left hand side of
 the inequality, we have
$$
b^{2}(s) - 4c(s) = (f_{u} + g_{v})^{2} - 4(f_{u}g_{v} 
- f_{v}g_{u}) +(d-1)^{2}s^{2} - 2(d+1)(f_{u}+g_{v})s + 4(df_{u} + g_{v})s.
$$
By the Turing instability conditions in Lemma 
\ref{lemma:turingInstabilityConditions}, we have
$$
(d-1)^{2}s^{2} - 2(d+1)(f_{u}+g_{v})s + 4(df_{u} + g_{v})s \ge 0.
$$
Thus, the eigenvalues are real.
\end{proof}

Figure \ref{figure:increasinglyStableNonlocalEigenvalues} shows eigenvalues
$\lambda^{\pm}_{k,\epsilon}$ for fixed $\beta =0$ and $0 \le \theta < 1$. 
In particular, as $\epsilon
\to 0$, $\lim_{k\to\infty} \nu_{k,\epsilon} =  0$. The convergence to 0 
becomes slower as $\theta \to
1$, and the expression does not converge to zero for $\theta = 1$.  
In contrast, for all
$\beta >0$ and $0 \le \theta \le 1$,  $\nu_{k,\epsilon} \to \infty$ 
as $k \to \infty$. Thus
for  $0 \le \theta < 1$, the eigenvalues of the mixed diffusion operator as
 $\epsilon \to 0$
have the property that $\epsilon \nu_{k,\epsilon}$ behave asymptotically
 like $\epsilon \kappa_{k}$ for $0 < \beta \le 1$.

\begin{figure}[t]
\centering
\subfigure[Large $\epsilon$]{\includegraphics[width=0.45\textwidth]{fig7a}}
\subfigure[Medium $\epsilon$]{\includegraphics[width=0.45\textwidth]{fig7b}}
\subfigure[Small $\epsilon$]{\includegraphics[width=0.45\textwidth]{fig7c}}
\caption[Stable Nonlocal System for Arbitrarily Small $\epsilon$-
values]{The eigenvalue
dispersion curve for System \eqref{system:scaledMixed}, $\beta = 0$. 
This figure shows
a plot of the eigenvalues $\lambda^+(\epsilon \nu_{k,\epsilon})$
versus $\nu_{k,\epsilon}$, where the $\nu_{k,\epsilon}$
are the eigenvalues of the nonlocal diffusion operator. Parameters $\epsilon$ and $0 \le \theta < 1$, are fixed
(with $\theta$ defined in Assumption \ref{assumption:kernel}).  The points are plotted as black
asterisks, and $(\hat{J}_{0},\lambda^{+}(\epsilon \hat{J}_{0}))$ is given as a red asterisk.  In Part (a),
the eigenvalues are sparsely distributed on the curve when $\epsilon$ is large.  In Part (b),
as $\epsilon$ decreases, the eigenvalues are more closely spaced.  Since $\beta = 0$, the
plotted points limit on the point $(\hat{J}_{0},\lambda^{+}(\epsilon \cdot \hat{J}_{0}))$.
As $\epsilon \to 0$ in Subfigure (c), the eigenvalues lie on the leftmost part of
the curve where all of the eigenvalues are negative.
} \label{figure:increasinglyStableNonlocalEigenvalues}
\end{figure}

In the following lemma, we analyze the behavior of the eigenvalues
$\lambda^{\pm}_{k,\epsilon}= \lambda(\epsilon \nu_{k,\epsilon})$
by  replacing $\epsilon \nu_{k,\epsilon}$
in Eqn.~\eqref{eqn:eigOfHe} with the continuous real variable $s$.

\begin{lemma}\label{lemma:propertiesOfLambdaPlusMinus}
Under Assumptions \ref{assumption:turingInstability} and \ref{assumption:realEigenvalues},
the following properties of $\lambda^{\pm}(s)$ are true for $s \ge 0$:
\begin{itemize}
\item $\lambda^{-}(s) < \lambda^{+}(s)$.
\item $\lambda^{+}(0) < 0$.
\item $\lambda^{+}(s)$ has a unique maximum $\lambda^{+}_{\rm max}$.
\item $\lambda^{+}(s)$ has two real roots, $s_{\ell}$ and $s_{r}$.
\item $\lambda^{-}(s)$ is strictly decreasing with $\lambda^{-}(s) < 0$.
\item $\lim_{s\to \infty} (\lambda^{+}(s) / s ) = -1$.
\item $\lim_{s\to \infty} (\lambda^{-}(s) / s ) = -d$.
\end{itemize}
\end{lemma}

\begin{proof}
The proof follows exactly as that given in \cite[Lemma 3.4]{SanderWanner:03}.  
Application of Inequalities (1), (3) of Lemma \ref{lemma:turingInstabilityConditions} 
and  Assumption \ref{assumption:realEigenvalues} give that $b(s)^{2} - 4c(s) > 0$
for every $s \ge 0$.  Part (1) of Lemma \ref{lemma:turingInstabilityConditions} 
shows that $b(s) < 0$ for all $s \ge 0$, and therefore, $\lambda^{-}(s) < 0$.  
Consequently, we have that $\lambda^{-}(s) < \lambda^{+}(s)$ for all $s \ge 0$. 
We also have that $\lambda^{+}(0) < 0$. Note that $\lambda^{+}(s) > 0$ if and only if $c(s) < 0$. 
 Parts (2) -- (4) of Lemma \ref{lemma:turingInstabilityConditions} show that 
$c(s) < 0$ is equivalent to $s_{\ell} < s < s_{r}$, where
\begin{equation}
s_{l/r} = \frac{1}{2d} \left((df_{u} + g_{v}) \mp \sqrt{(df_{u} + g_{v})^{2} - 4d(f_{u}g_{v} - f_{v} g_{u}) } \right) . \label{eqn:sellr}
\end{equation}
Since $\lambda^{+}$ is continuous on $[s_{\ell}, s_{r}]$, it achieves 
a maximum value, denoted as $\lambda^{+}_{\rm max}$. Computing the asymptotic
limits for $\lambda^{\pm}(s)/s$ gives the final part of the lemma.
\end{proof}


\begin{lemma} \label{lemma:eigenfunctionsFormACompleteSet}
Suppose that Assumptions \ref{assumption:rectangularDomain} - 
\ref{assumption:realEigenvalues} are satisfied.  For $0 \le \beta \le 1$, 
the eigenfunctions of $\mathcal{H}_{\epsilon}$ form a complete set for $\mathbb{X}$.  
The angle between $E_{k, \epsilon}^{\pm}$ is bounded away from $\pi$ and 0.
\end{lemma}

\begin{proof}
The eigenfunctions are given by 
$\Psi^{\pm}_{k,\epsilon} = E^{\pm}_{k,\epsilon} \cdot \psi_{k}$, where
 $E^{\pm}_{k,\epsilon} = E^{\pm}(\epsilon \cdot \nu_{k,\epsilon})$ and 
$E^{\pm}(\cdot)$ is defined by Lemma \ref{lemma:eigOfHepsilon}.  
By Lemma \ref{lemma:propertiesOfLambdaPlusMinus}, we see that for each 
$s \ge 0$,  $\lambda^{-}(s) < \lambda^{+}(s)$.  Thus, the eigenvectors 
$E^{\pm}(s)$ are linearly independent for all $s \ge 0$.   However, we are 
only interested in the discrete points of $s$ in which 
$s = \epsilon \cdot \nu_{k,\epsilon}$. All that is left to show is that 
$ \epsilon \cdot \nu_{k,\epsilon} \ge 0$ for all $k \ge 0$.  
By Assumption \ref{assumption:kernel},
$\epsilon(1-\beta) ( \hat{J}_{0} - \hat{J}_{k} ) \ge 0$ for $0 \le \beta \le 1$. 
 Definition \ref{definition:LaplacianEigenvalues} shows that $\kappa_{k} \ge 0$ 
for all $k \ge 0$.  Since
 $\nu_{k,\epsilon} = \beta \kappa_{k} + (1-\beta)(\hat{J}_{0} - \hat{J}_{k}) \ge 0$,
 we have shown the first part of this lemma.  The $\Psi^{\pm}_{k,\epsilon}$ form a 
complete set in $\mathbb{X}$ since the $\psi_{k}$ form a complete set for $L^{2}(\Omega)$ 
and the $E^{\pm}_{k,\epsilon}$ are linearly independent.

For $\beta=0$, fix $\epsilon_{0} > 0$. As $k \to \infty$, we have that 
$\epsilon_{0} \nu_{k,\epsilon_{0}} \to \epsilon_{0} \hat{J}_{0} < \infty$. 
Thus, all $\nu_{k,\epsilon_{0}}$ are contained in some compact interval 
$[0,s^{*}_{r}]$. Since $0 \le \theta \le 1$, clearly 
$\epsilon \nu_{k,\epsilon} \in [0, s^{*}_{r}]$ for all $0 < \epsilon \le \epsilon_{0}$. On this compact interval, the eigenvectors $E^{\pm}_{k,\epsilon}$ are linearly independent, and the angle between the $E^{\pm}_{k,\epsilon}$ is bounded away from 0 and $\pi$. For $\beta>0$, we need to consider the limit as $s \to \infty$.
The eigenfunctions of $B - s D$ are the same as $s^{-1} B - D$, and we see that
 as $s\to \infty$, $s^{-1} B - D$ approaches a diagonal matrix. 
 Hence, the eigenfunctions become orthogonal as $s \to \infty$ and are bounded 
away from 0 and $\pi$.
\end{proof}


\begin{lemma} \label{lemma:unstableBetaGtrThan0}
Suppose that Assumptions \ref{assumption:kernel}, \ref{assumption:turingInstability} 
and \ref{assumption:realEigenvalues} are satisfied.  For $0 < \beta \le 1$, 
there exists $\epsilon_{0} > 0$, such that for all $\epsilon \le \epsilon_{0}$, 
the homogeneous equilibrium of System \eqref{system:scaledMixed} is unstable.
\end{lemma}

\begin{proof}
The details follow the proof given in \cite[Lemma 5.1]{SanderWanner:03}. 
Let $0 < \beta \le 1$, $0 \le \theta < 1$,  and choose 
$0 < c_1 < c_2 < \lambda^{+}_{\rm max}$, where
$\lambda^{+}_{\rm max}$ is given in Lemma \ref{lemma:propertiesOfLambdaPlusMinus}.
 By Lemma
\ref{lemma:propertiesOfLambdaPlusMinus} and Lemma
\ref{lemma:eigenfunctionsFormACompleteSet}, there exists a set of two compact intervals,
which we call  $I$, such that
$\lambda^{+}_{k,\epsilon} \in [c_1, c_2]$ if and only if $\epsilon \cdot \nu_{k,
\epsilon} \in I$.  Using the asymptotic
distribution of eigenvalues $\nu_{k,\epsilon}$ given in
\eqref{eq:asymptoticDistOfMixedOp}, we see that as $\epsilon \to 0$, the number of
eigenvalues of $\mathcal{H}_{\epsilon}$ in $[c_1, c_2]$ is of the order $\epsilon^{-\dim
\Omega / 2}$.  Thus, for some $\epsilon_{0}$, we have that the homogeneous equilibrium is
unstable for $0 < \epsilon \le \epsilon_{0}$.
\end{proof}

Note that the estimates in the proof of the above lemma are
more delicate for $\beta=0$ with $\theta=1$. Namely, the eigenvalues are
discretely spaced along a continuous dispersion curve, meaning that even if the dispersion curve goes
above zero, if the spacing of the eigenvalues is too large along the curve it is possible
to miss the unstable region altogether, resulting in no unstable eigenvalues.
The result is never true for $\beta=0$, with $0 \le \theta<1$ 
(cf. Fig.~\ref{figure:increasinglyStableNonlocalEigenvalues}.)

\subsection{Spectrum of the linear operator}\label{section:spectrum}

The results presented in the following sections depend upon the spectrum 
of $\mathcal{H}_{\epsilon}$ and its associated spectral gaps.   For this reason, 
we describe the full spectrum of $\mathcal{H}_{\epsilon}$ for all $0 \le \beta \le 1$. 
 We begin with a theorem describing the spectrum of $\mathcal{H}_{\epsilon}$, 
followed by useful lemmas used in proving the theorem and finally the proof.

\begin{theorem}[Spectrum of $\mathcal{H}_{\epsilon}$] \label{thm:spectrumOfJepsilon}
Suppose that Assumptions 
\ref{assumption:rectangularDomain} - \ref{assumption:realEigenvalues} are satisfied. 
 Let $\mathcal{H}_{\epsilon}$ be as defined in \eqref{eqn:hEpsilon}.  If $0 < \beta \le 1$, the spectrum contains only the eigenvalues of $\mathcal{H}_{\epsilon}$.  If $\beta = 0$, then the spectrum of $\mathcal{H}_{\epsilon}$ consists of the eigenvalues of $\mathcal{H}_{\epsilon}$ and the points $\lambda^{\pm}( \epsilon \hat{J}_{0} )$.
\end{theorem}


We introduce a norm that will be useful for the spectrum computation.  
As we show in the next lemma, the equivalence of the $\mathbb{L}^{2}$-norm and this 
new norm is possible since the angle between the $E^{\pm}_{k,\epsilon}$ is bounded 
away from both $0$ and $\pi$.

\begin{definition}\label{definition:scratchNorm} \rm
Let $\epsilon > 0$.  For $U \in \mathbb{L}_{\rm per}^{2}(\Omega)$, 
Lemma \ref{lemma:eigenfunctionsFormACompleteSet} implies that $U$ may be written as
\begin{align}
U = \sum_{k=0}^{\infty} \left( (\alpha^{+}_{k,\epsilon}) E^{+}_{k,\epsilon} 
+ (\alpha^{-}_{k,\epsilon})E^{-}_{k,\epsilon}\right)\cdot \psi_{k}.
\end{align}
When the following is finite, define the $\norm{\cdot}_{\#}$-norm as
\begin{align}
\norm{U}^{2}_{\#} = \sum_{k=0}^{\infty} \left( (\alpha^{+}_{k,\epsilon})^{2} 
+ (\alpha^{-}_{k,\epsilon})^{2}\right).
\end{align}
\end{definition}

\begin{lemma}\label{lemma:equivalentNorms}
Suppose that Assumptions \ref{assumption:rectangularDomain} - \ref{assumption:realEigenvalues} are satisfied.  Let $\norm{\cdot}_{\#}$ be as defined in Definition \ref{definition:scratchNorm}.  For $U \in \mathbb{L}^{2}_{\rm per}(\Omega)$,
$$ 
\sqrt{1-r}\norm{U}_{\#} \le \norm{U}_{\mathbb{L}^{2}_{\rm per}(\Omega)} 
\le \sqrt{1+r} \norm{U}_{\#},
$$
where $\abs{(E^{+}_{k,\epsilon}, E^{-}_{k,\epsilon})_{\mathbb{R}^{2}} } \le r < 1$ for all
$k \in \mathds{Z}$.
\end{lemma}

\begin{proof}
Let $\epsilon > 0$.  For $U \in \mathbb{L}^{2}_{\rm per}(\Omega)$, we write $U$ as
$$ 
U = \sum_{k=0}^{\infty} \left( (\alpha^{+}_{k,\epsilon})E^{+}_{k,\epsilon} 
+ (\alpha^{-}_{k,\epsilon})E^{-}_{k,\epsilon} \right) \cdot \psi_{k}. 
$$
Note that $r$ exists by Lemma \ref{lemma:eigenfunctionsFormACompleteSet}.  Computing the square of the $\mathbb{L}^{2}_{\rm per}(\Omega)$-norm of $U$ yields
\begin{align*}
\norm{U}^{2}_{\mathbb{L}^{2}_{\rm per}(\Omega)}
 & = \sum_{k=0}^{\infty} ( (\alpha^{+}_{k,\epsilon})^{2} + (\alpha^{-}_{k,\epsilon})^{2} + 2\alpha^{+}_{k,\epsilon} \alpha^{-}_{k,\epsilon} ( E^{+}_{k,\epsilon}, E^{-}_{k,\epsilon} ) ), \\
& \le \sum_{k=0}^{\infty} ( (\alpha^{+}_{k,\epsilon})^{2} 
+ (\alpha^{-}_{k,\epsilon})^{2}) + 2 \abs{ \alpha^{+}_{k,\epsilon} \alpha^{-}_{k,\epsilon}} r, \\
& \le \sum_{k=0}^{\infty} (\alpha^{+}_{k,\epsilon})^{2} 
+ (\alpha^{-}_{k,\epsilon})^{2} + r((\alpha^{+}_{k,\epsilon})^{2} 
+ (\alpha^{-}_{k,\epsilon})^{2} ) \\
& = (1+r) \sum_{k=0}^{\infty} (\alpha^{+}_{k,\epsilon})^{2}
 + (\alpha^{-}_{k,\epsilon})^{2}, \\
& = (1+r) \norm{U}^{2}_{\#}.
\end{align*}
Taking square roots gives the right hand inequality.  For the other direction,
 we compute
\begin{align*}
\norm{U}_{\mathbb{L}^{2}(\Omega)}^{2} 
& \ge \sum_{k=0}^{\infty} (\alpha^{+}_{k,\epsilon})^{2}
 + (\alpha^{-}_{k,\epsilon})^{2} - \left( (\alpha^{+}_{k,\epsilon})^{2}
 + (\alpha^{-}_{k,\epsilon})^{2}\right) \abs{ ( E^{+}_{k,\epsilon}, E^{-}_{k,\epsilon} ) }, \\
& \ge (1-r) \norm{U}^{2}_{\#}.
\end{align*}
Again, taking square roots gives the left hand inequality.
\end{proof}

The following lemma  allows us to describe the full spectrum of $\mathcal{H}_{\epsilon}$ 
for $\beta = 0$.

\begin{lemma}[Adjoint of $\mathcal{H}_{\epsilon}$]
Suppose that Assumptions \ref{assumption:rectangularDomain} - \ref{assumption:kernel} 
are satisfied and $\beta = 0$. Let $\mathcal{H}_{\epsilon}$ be as defined 
in \eqref{eqn:hEpsilon} and $\mathcal{J}_2$ be as defined in \eqref{eqn:J2}. 
 The adjoint of $\mathcal{H}_{\epsilon}$ is given as 
$\mathcal{H}_{\epsilon}^{*} = \epsilon^{1-\theta} D \mathcal{A} + B^{T}$, where
$$ 
\mathcal{A} = \begin{pmatrix} A^{J}_{c} - \hat{J}_{0} & 0 \\ 0 & A^{J}_{c} - \hat{J}_{0}
 \end{pmatrix},
$$
and $A^{J}_{c}$ is as defined in \eqref{eqn:adjointConv}.
If the periodic extension of $J$ satisfies Assumption \ref{assumption:even},
then the adjoint of $\mathcal{H}_{\epsilon}$ is given as 
$\mathcal{H}_{\epsilon}^{*} = \epsilon^{1-\theta} D \mathcal{J}_2 + B^{T}$.
\end{lemma}

\begin{proof}
Let $\epsilon > 0$.  Application of Lemma \ref{lemma:adjointConv1} shows that 
the adjoint of $\epsilon^{1-\theta} D \mathcal{J}_2$ is $\epsilon^{1-\theta} D \mathcal{A}$.  
Since the adjoint of $B$ is $B^{T}$, the adjoint of $\mathcal{H}_{\epsilon}$ is given 
as $\mathcal{H}_{\epsilon}^{*} = \epsilon^{1-\theta} D \mathcal{A} + B^{T}$.
On the other hand if $J_{\rm per}$ satisfies Assumption \ref{assumption:even},
then $J_{c}$ is self-adjoint by Lemma \ref{lemma:selfAdjoint} and the adjoint 
of $\mathcal{H}_{\epsilon}$ is given as 
$\mathcal{H}_{\epsilon}^{*} = \epsilon^{1-\theta} D \mathcal{J}_2 + B^{T}$.
\end{proof}

We are now ready to prove Theorem \ref{thm:spectrumOfJepsilon} that describes 
the full spectrum of $\mathcal{H}_{\epsilon}$ for all $0 \le \beta \le 1$.

\begin{proof}[Proof of Theorem \ref{thm:spectrumOfJepsilon}]
Let $0 < \beta \le 1$.  Recall that 
$\mathcal{J}_{\epsilon} = \epsilon\mathcal{J}_1 + \epsilon^{1-\theta}\mathcal{J}_2$ as defined 
in Equations \eqref{eqn:J} - \eqref{eqn:J2}.  Since $\epsilon D \mathcal{J}_1 + B$ 
has a compact resolvent, its spectrum contains only eigenvalues \cite{Pazy:83}.  
The operator $D \mathcal{J}_{\epsilon} + B$ also has a compact resolvent, since 
$\epsilon D \mathcal{J}_1+B$ has a compact resolvent and $\epsilon^{1-\theta} D \mathcal{J}_2$ 
is a bounded operator.  See \cite[pg. 120]{EngelNagel:00}.  Since the resolvent 
is compact, then for $0 < \beta \le 1$, the spectrum of  $\mathcal{H}_{\epsilon}$  
contains only eigenvalues \cite[pg. 187]{Kato:76}.  We now focus on the 
case $\beta = 0$.

In \cite{IfantisPanagopoulos:01}, a sufficient condition is given that states 
for certain self-adjoint operators defined on Hilbert spaces, all points of the
 spectrum are expressible as limit points of eigenvalues.  The remainder of 
the proof shows that in general, it is not necessary for an operator to be 
self-adjoint.

A value $\lambda$ in the spectrum of $\mathcal{H}_{\epsilon}$ is either in the point spectrum, continuous spectrum or residual spectrum.  We have already computed the eigenvalues of $\mathcal{H}_{\epsilon}$, which implies that the point spectrum of $\mathcal{H}_{\epsilon}$ is nonempty.  We now show that the residual spectrum must be empty.  Since $\mathcal{J}_2$ is self-adjoint, then by similar reasoning used in the proof of the eigenvalues of $\mathcal{H}_{\epsilon}$, we have that the eigenvalues of $\mathcal{H}^{*}_{\epsilon}$ are given as the roots of
\begin{equation}
\det(B^{T} - \epsilon ( \hat{J}_{0} - \hat{J}_{k} )D - \lambda^{*\pm}_{k}I) = 0.
\end{equation}
Since the determinant of a matrix is the same as the determinant of the transpose
 of that matrix, we have
\begin{align}
\det(B^{T} - \epsilon ( \hat{J}_{0} - \hat{J}_{k} )D - \lambda^{*\pm}_{k}I)
= \det(B - \epsilon ( \hat{J}_{0} - \hat{J}_{k} )D - \lambda^{\pm}_{k}I).
\label{eqn:sameChars}
\end{align}
Thus, the eigenvalues of $\mathcal{H}^{*}_{\epsilon}$ are the same as those
of $\mathcal{H}_{\epsilon}$.  By \cite[Theorem 8.7.1]{Suhubi:03}, we see that
if a point is in the residual spectrum of $\mathcal{H}_{\epsilon}$, then its
conjugate must also be an eigenvalue of its adjoint operator.  Since the
eigenvalues for both $\mathcal{H}_{\epsilon}$ and $\mathcal{H}^{*}_{\epsilon}$ are the same,
 the residual spectrum of $\mathcal{H}_{\epsilon}$ must be empty.

The last portion of the spectrum to check is the continuous spectrum.  
We now show that both $\lambda^{\pm}( \epsilon\hat{J}_{0} )$ are contained in 
the continuous spectrum.  The proof for $\lambda^{-}( \epsilon\hat{J}_{0} )$ 
follows in the same manner as the proof for $\lambda^{+}( \epsilon\hat{J}_{0} )$, 
so we only give proof for $\lambda^{+}( \epsilon\hat{J}_{0} )$.  
Consider $\lambda^{+}( \epsilon\hat{J}_{0} ) I - \mathcal{H}_{\epsilon}$ and 
let $f_{k} = \Psi^{+}_{k, \epsilon}/
\norm{\Psi^{+}_{k,\epsilon}}_{\mathbb{L}^{2}_{\rm per}(\Omega)}$ where the 
$\Psi^{+}_{k,\epsilon}$ are eigenfunctions of $\mathcal{H}_{\epsilon}$. 
Since $\lambda^{+}( \epsilon\hat{J}_{0} )$ is not an eigenvalue of 
$\mathcal{H}_{\epsilon}$, we have that 
$\lambda^{+}( \epsilon\hat{J}_{0} ) I - \mathcal{H}_{\epsilon}$ is one-to-one.  Thus,
\begin{align*}
\norm{ (\lambda^{+}( \epsilon \hat{J}_{0} ) I 
- \mathcal{H}_{\epsilon} )f_{k} }_{\mathbb{L}^{2}_{\rm per}(\Omega)} 
& = \norm{ ( \lambda^{+}( \epsilon\hat{J}_{0} ) 
 - \lambda^{+}_{k,\epsilon} ) f_{k} }_{\mathbb{L}^{2}_{\rm per}(\Omega)} \\
& \le \abs{ \lambda^{+}( \epsilon\hat{J}_{0} ) - \lambda^{+}_{k,\epsilon} }.
\end{align*}
As $k \to \infty$, $\lambda^{+}_{k,\epsilon} \to \lambda^{+}( \epsilon\hat{J}_{0} )$ 
and
$$
\norm{ (\lambda^{+}( \epsilon\hat{J}_{0} ) I 
- \mathcal{H}_{\epsilon} ) f_{k} }_{\mathbb{L}^{2}_{\rm per}(\Omega)} \to 0.
$$
Since $\norm{f_{k}}_{\mathbb{L}^{2}_{\rm per}(\Omega)} = 1$ for all $k$ and
 $\norm{ (\lambda^{+}( \epsilon\hat{J}_{0} ) I 
- \mathcal{H}_{\epsilon} ) f_{k} }_{\mathbb{L}^{2}_{\rm per}(\Omega)} \to 0$, 
we see that $( \lambda^{+}( \epsilon\hat{J}_{0} ) I - \mathcal{H}_{\epsilon})^{-1}$ 
is unbounded.  Thus, $\lambda^{\pm}( \epsilon\hat{J}_{0} )$ are in the continuous 
spectrum of $\mathcal{H}_{\epsilon}$.

For the continuous spectrum, we have shown that the limit points of the eigenvalues 
are elements of this set.  We now show that the points in the continuous spectrum 
must  be limit points of the eigenvalues.  
To do this, we will argue by contradiction.  Suppose
that $\lambda$ is in the continuous spectrum, but that it is not a limit point of
eigenvalues of $\mathcal{H}_{\epsilon}$.  Since the $\norm{\cdot}_{\#}$-norm is equivalent 
to the $\mathbb{L}^{2}$-norm by Lemma \ref{lemma:equivalentNorms}, we have that for 
some sequence of $f_n \in \mathbb{L}^{2}_{\rm per}(\Omega)$ with $\norm{f_n}_{\#} = 1$ 
for all $n$, $\norm{(\lambda I - \mathcal{H}_{\epsilon})f_n }_{\#} \to 0$ as 
$n \to \infty$.
Since $f_n \in \mathbb{L}^{2}_{\rm per}(\Omega)$, we can write $f_n$ as 
$$ 
f_n = \sum_{k=0}^{\infty} ( (\alpha^{+}_{n,k,\epsilon}) E^{+}_{k,\epsilon} +
(\alpha^{-}_{n,k,\epsilon}) E^{-}_{k,\epsilon} ) \cdot \psi_{k}. 
$$
 By definition of the continuous spectrum, $\lambda$ can not be an eigenvalue.  
Since we assumed that it is also
not a limit point of eigenvalues, there exists $M > 0$ such that 
$M \le \abs{ \lambda - \lambda^{\pm}_{k,\epsilon}}$ for all $k$.  Thus
\begin{align*}
\norm{ (\lambda I - \mathcal{H}_{\epsilon})f_n}^{2}_{\#} & = \sum_{k=0}^{\infty} (\lambda - \lambda^{+}_{k,\epsilon})^{2}(\alpha^{+}_{n,k,\epsilon})^{2} + (\lambda - \lambda^{-}_{k,\epsilon})^{2}(\alpha^{-}_{n,k,\epsilon})^{2}, \\
& \ge M^{2} \sum_{k=0}^{\infty} ((\alpha^{+}_{n,k,\epsilon})^{2} + (\alpha^{-}_{n,k,\epsilon})^{2}), \\
& = M^{2} \norm{f_n}^{2}_{\#} = M^{2} > 0.
\end{align*}
However, this is a contradiction, since 
$\norm{ (\lambda I - \mathcal{H}_{\epsilon})f_n }_{\#} \to 0$.  Therefore, the continuous 
spectrum of $\mathcal{H}_{\epsilon}$ contains only $\lambda^{\pm}( \epsilon\hat{J}_{0} )$.
\end{proof}

\section{Almost linear behavior} \label{section:linearizationResults}

\begin{figure}[ht]
\centering
\includegraphics[width=.7\textwidth]{fig8}
\caption[Schematic of Early Pattern formation]
{Schematic depicting early pattern formation as described in 
Theorem \ref{thm:earlyPatternResults}.  
The initial condition $(u_{0},v_{0})$ of the solution $(u,v)$
is within a parabolic region surrounding the unstable subspace 
spanned by the eigenfunctions of the most unstable eigenvalues.  
For most solutions with this type of initial conditions, 
the solutions remain close to the unstable space during the 
early stage of pattern formation.} \label{fig:stableUnstableSchematic}
\end{figure}


To prove our main results, we use the abstract theory and techniques developed 
for the Cahn-Hilliard equation found in 
\cite{MaierPaapeWanner:98,MaierPaapeWanner:00}.  The theory requires an 
abstract evolution equation of the form
\begin{align}
U_{t} = \mathcal{H}_{\epsilon} U + F(U), \label{eqn:evolEq}
\end{align}
on some appropriate function space $\mathbb{X}$ that satisfies the following assumptions.
\begin{itemize}
\item[(H1)] The operator $-\mathcal{H}_{\epsilon}$ is a sectorial operator on $\mathbb{X}$.
\item[(H2)] There exists a decomposition $\mathbb{X} = \mathbb{X}^{--} \oplus \mathbb{X}^{-}
 \oplus \mathbb{X}^{+} \oplus \mathbb{X}^{++}$, such that all of these subspaces are 
finite except $\mathbb{X}^{--}$, and such that the linear semigroup corresponding 
to $U_{t} = \mathcal{H}_{\epsilon} U$ satisfies several dichotomy estimates.

\item[(H3)] The nonlinearity $F: \mathbb{X}^{\alpha} \to \mathbb{X}$ is continuously 
differentiable, and satisfies both $F(\bar{u}_0, \bar{v}_0) = 0$ and 
$D F( \bar{u}_0, \bar{v}_0 ) = 0$.
\end{itemize}

In light of how $\mathcal{H}_{\epsilon}$ is defined in \eqref{eqn:hEpsilon}, we define the
nonlinearity of the evolution equation given by \eqref{eqn:evolEq} in the following way.
Define the function $h:\mathbb{R}^{2} \to \mathbb{R}^{2}$ to be the nonlinear part of $(f,g)$ of
System \eqref{system:scaledMixed} in the following sense. Let
$$\hat{h}(u,v) = (f(u,v), g(u,v))$$
and
\begin{equation}  \label{eqn:hPeriodic}
h(u,v) = \hat{h}(u,v) - \hat{h}_{u}(\bar{u}_{0},\bar{v}_{0})\cdot( u- \bar{u}_{0})
- \hat{h}_{v}(\bar{u}_{0}, \bar{v}_{0})\cdot(v-\bar{v}_{0}).
\end{equation}
Setting
\begin{equation}
F(U) = h(u,v) \quad\text{for $U=(u,v)$ } \label{eqn:F}
\end{equation}
gives the nonlinear portion of \eqref{eqn:evolEq}.

\begin{lemma} \label{lemma:sectorial}
For System \eqref{system:scaledMixed}, suppose that Assumptions
\ref{assumption:rectangularDomain} - \ref{assumption:realEigenvalues} 
are satisfied and that $0 < \beta \le 1$. 
 Let $\mathcal{H}_{\epsilon}$ be as defined in \eqref{eqn:hEpsilon}.
Then $\mathcal{H}_{\epsilon}$ is a sectorial operator.
\end{lemma}

\begin{proof}
For $0 < \beta \le 1$, again we note that the operator 
$\epsilon^{1-\theta} D \mathcal{J}_2$
is a bounded perturbation of $\epsilon D \mathcal{J}_1 + B$, 
which is a sectorial operator
\cite{Henry:81}.  Thus,  $\mathcal{H}_{\epsilon}$ is sectorial
 \cite{Pazy:83,HartleyWanner:09}.
\end{proof}


An important aspect of our analysis depends upon how the eigenfunctions of
$\mathcal{H}_{\epsilon}$ populate the unstable subspaces as $\epsilon \to 0$. Note that the
eigenvalues of $\mathcal{H}_{\epsilon}$ move arbitrarily close to $ \lambda^{+}(
\epsilon^{1-\theta}\cdot(1-\beta) \cdot \hat{K}_{0})$ as $\epsilon \to 0$. The position
of $\epsilon^{1-\theta}\cdot(1-\beta) \cdot \hat{K}_{0}$ relative to the unstable interval
$[s_{l}, s_{r}]$ is important for the following reasons.  For $\beta = 0$, if
$\epsilon^{1-\theta}\cdot \hat{K}_{0}$ is too far to the right of $s_{r}$, then the
nonlocal operator is stable.  Furthermore, if $\theta = 1$, and $ s_{\ell} <
(1-\beta)\cdot \hat{K}_{0} < s_{r}$, then there is a clustering of eigenvalues in the
unstable interval as $\epsilon \to 0 $.  The following two assumptions exclude these
cases.

\begin{assumption} \label{assumption:K0_sr} \rm
Suppose that $\hat{K}_{0} > s_{r}$ is such that only a finite nonzero number of the $\hat{K}_{0} - \hat{K}_{k} $ are contained within the unstable interval $[s_{\ell}, s_{r}]$.
\end{assumption}

\begin{assumption} \label{assumption:K0_sl} \rm
For  $\beta$ satisfying $0 < \beta \le 1$, $\hat{K}_{0}$  satisfies
$$\epsilon^{1-\theta}( 1- \beta )\hat{K}_{0} < s_{\ell}$$
as $\epsilon \to 0$.
\end{assumption}

We now provide a description of the decomposition of the phase space 
using the spectral gaps of $\mathcal{H}_{\epsilon}$. Select the following constants
\begin{equation}
\underline{c}^{--} < \bar{c}^{--} \ll 0 \ll \underline{c}^{-} < \bar{c}^{-}
< \underline{c}^{+} < \bar{c}^{+} < \lambda^{+}_{\rm max}, \label{constants:decomp}
\end{equation}
such that $\bar{c}^{--} - \underline{c}^{--}$, $\bar{c}^{-} - \underline{c}^{-}$,
 and $\bar{c}^{+} - \underline{c}^{+}$ are small.
With Assumptions \ref{assumption:K0_sr} - \ref{assumption:K0_sl}, the proofs
of \cite[Lemma 5.1,Corollary 5.2]{SanderWanner:03} show the existence of the
intervals
\begin{gather}
J^{--}_{\epsilon}  = [a^{--}_{\epsilon}, b^{--}_{\epsilon}]
 \subset [\underline{c}^{--}, \bar{c}^{--}], \label{set:Jmm} \\
J^{-}_{\epsilon}  = [a^{-}_{\epsilon}, b^{-}_{\epsilon}]
 \subset [\underline{c}^{-}, \bar{c}^{-}], \label{set:Jm} \\
J^{+}_{\epsilon}  = [a^{+}_{\epsilon}, b^{+}_{\epsilon}]
 \subset [\underline{c}^{+}, \bar{c}^{+}], \label{set:Jp}
\end{gather}
where $J^{--}_{\epsilon}$, $J^{-}_{\epsilon}$, and $J^{+}_{\epsilon}$
 are contained in the resolvent of $\mathcal{H}_{\epsilon}$ for sufficiently small
$\epsilon$.  Furthermore, the length of each of these intervals is at least
\begin{equation}
d \epsilon^{\dim \Omega / 2}
\end{equation}
for some $\epsilon-$independent constant $d > 0$.

\begin{definition}[Decomposition of the phase space] 
\label{definition:decomposition} \rm
Consider the intervals as defined by \eqref{set:Jmm} - \eqref{set:Jp}.  Define the
intervals $I^{--}_{\epsilon} = (-\infty, a^{--}_{\epsilon})$, $I^{-}_{\epsilon} =
(b^{--}_{\epsilon}, a^{-}_{\epsilon})$, $I^{+}_{\epsilon} = (b^{-}_{\epsilon},
a^{+}_{\epsilon})$ and $I^{++}_{\epsilon} = (b^{+}_{\epsilon}, \lambda^{+}_{\rm max}]$.
Denote  $\mathbb{X}^{-}_{\epsilon}$, $\mathbb{X}^{+}_{\epsilon}$,
$\mathbb{X}^{++}_{\epsilon}$ as the span of the eigenfunctions whose eigenvalues belong to
 $I^{-}_{\epsilon}$, $I^{+}_{\epsilon}$, and $I^{++}_{\epsilon}$,
respectively. Denote  $\mathbb{X}^{--}_{\epsilon}$ as the orthogonal complement 
of the union of these
three spaces (or equivalently, the space with Schauder basis  $I^{--}_{\epsilon}$).
\end{definition}

The theory that we are applying makes use of fractional power spaces
 of $\mathcal{H}_{\epsilon}$. Let $a > \lambda^{+}_{\rm max}$. The fractional 
power spaces are given as $\mathbb{X}^{\alpha} = D( ( aI - \mathcal{H}_{\epsilon} )^{\alpha})$ 
subject to the norm 
$\norm{U}_{\alpha} =\norm{ ( aI - \mathcal{H}_{\epsilon} )^{\alpha} U }_{\mathbb{L}^{2}(\Omega)}$
 for $U \in \mathbb{X}^{\alpha}$.  As pointed out in \cite{HartleyWanner:09}, 
the fractional power spaces of $\mathcal{H}_{\epsilon}$ are given as
\begin{equation}
\mathbb{X}^{\alpha} = \mathbb{H}^{2 \alpha}_{\rm per}(\Omega),
\end{equation}
where $\mathbb{H}^{2 \alpha}_{\rm per}(\Omega)$ are the Sobolev spaces of smoothly periodic
functions on $\Omega$ and  $0 < \alpha < 1$ as defined by
 Definition \ref{definition:functionSpaces}.
By Lemma \ref{lemma:eigenfunctionsFormACompleteSet},
$U \in \mathbb{L}^{2}_{\rm per}(\Omega)$ is written as
$$
U = \sum_{k=0}^{\infty}( \alpha^{+}_{k} E^{+}_{k, \epsilon}
+ \alpha^{-}_{k} E^{-}_{k, \epsilon} ) \psi_{k}.
$$
When the following is finite, define $\norm{\cdot}_{**}$ as
\begin{equation}
\norm{U}_{**}^{2} = \sum_{k=0}^{\infty}\left( 1 + \kappa_{k}\right)^{s}
\left( (\alpha^{+}_{k})^{2} + (\alpha^{-}_{k})^{2} \right). \label{eqn:starNorm}
\end{equation}

\begin{lemma} \label{lemma:normEq}
Assume that Assumptions \ref{assumption:rectangularDomain} and 
\ref{assumption:smoothNonlinearity} are satisfied.  The $\norm{\cdot}_{**}$-norm
 given by \eqref{eqn:starNorm} is equivalent to the $\norm{\cdot}_{*}$ considered 
in \cite{SanderWanner:03} when restricted to $\mathbb{L}^{2}_{\rm per}(\Omega)$.
\end{lemma}

\begin{proof}
By \cite[Lemma 4.2]{SanderWanner:03}, $\norm{\cdot}_{*}$ is equivalent 
to $\norm{\cdot}_{\mathbb{H}^{s}(\Omega)}$.  We now show equivalence of norms 
by showing that $\norm{\cdot}_{**}$ is equivalent to the standard norm 
defined for $\mathbb{H}^{s}_{\rm per}(\Omega)$. For $U \in \mathbb{L}^{2}_{\rm per}(\Omega)$, 
we have that
\[
\norm{U}^{2}_{\mathbb{H}^{s}_{\rm per}(\Omega)} 
= \sum_{k=0}^{\infty} (1 + \kappa_{k})^{s} \norm{ \alpha^{+}_{k} 
\cdot E^{+}_{k,\epsilon} + \alpha^{-}_{k} \cdot E^{-}_{k,\epsilon} }^{2}_{\mathbb{R}^{2}}.
\]
If we expand the terms in $\norm{\cdot}_{\mathbb{R}^{2}}$, use Lemma
\ref{lemma:eigenfunctionsFormACompleteSet} to note that the angle between 
$E^{+}_{k,\epsilon}$ and $E^{-}_{k,\epsilon}$ is bounded away from both 0 
and $\pi$ for all $k \in \mathds{N}$ and $\epsilon > 0$, and apply the Cauchy-Schwarz 
lemma, we get the equivalence to the standard Sobolev norm.
\end{proof}
We have now established a suitable decomposition of the phase space.  
The following lemma gives dichotomy estimates, as well as estimates 
for critical quantities that we shall use for the first major result.

\begin{lemma} \label{lemma:dichotomy}
Assume that Assumptions \ref{assumption:rectangularDomain}--\ref{assumption:realEigenvalues}, 
\ref{assumption:K0_sr} and \ref{assumption:K0_sl} are satisfied and 
let $\mathcal{H}_{\epsilon}$ be as defined in \eqref{eqn:hEpsilon}. 
 Let $S_{\epsilon}(t), t \ge 0$ denote the analytic semigroup on $\mathbb{X}$ 
generated by $\mathcal{H}_{\epsilon}$.  Consider the decomposition as given by 
Definition \ref{definition:decomposition} and let 
$\mathbb{X}^{\alpha} = \mathbb{H}^{2\alpha}_{\rm per}(\Omega)$ be the fractional power 
spaces of $\mathcal{H}_{\epsilon}$.
\begin{itemize}

\item[(a)] The spaces $\mathbb{X}^{-}_{\epsilon}$, $\mathbb{X}^{+}_{\epsilon}$, and $\mathbb{X}^{++}_{\epsilon}$ are finite-dimensional subspaces of $\mathbb{X}^{\alpha}$.  Furthermore, all of the spaces introduced in Definition \ref{definition:decomposition} are invariant under $S_{\epsilon}(t)$, and we denote the restrictions of the semigroup $S_{\epsilon}(t)$ to these spaces by the appropriate superscripts.  The dimensions of these subspaces are proportional to $\epsilon^{-\dim \Omega / 2}$.

\item[(b)] The following estimates are satisfied for arbitrary $U^{++}\in \mathbb{X}^{++}_{\epsilon}$, $U^{+} \in \mathbb{X}^{+}_{\epsilon}$, $U^{-} \in \mathbb{X}^{-}_{\epsilon}$, and $U^{--}_{**} \in \mathbb{X}^{--}_{\epsilon} \cap \mathbb{X}^{\alpha}$:
\begin{gather*}
\norm{S^{++}_{\epsilon}(t)U^{++}}_{**} \le e^{b^{+}_{\epsilon}t} 
\cdot \norm{U^{++}}_{**}, \quad\text{for $t \le 0$}, \\
\norm{S^{+}_{\epsilon}(t)U^{+}}_{**} \le e^{a^{+}_{\epsilon}t} 
\cdot \norm{U^{+}}_{**}, \quad\text{for $t \ge 0$}, \\
\norm{S^{+}_{\epsilon}(t)U^{+}}_{**} \le e^{b^{-}_{\epsilon}t} 
\cdot \norm{U^{+}}_{**}, \quad\text{for $t \le 0$}, \\
\norm{S^{-}_{\epsilon}(t)U^{-}}_{**} \le e^{a^{-}_{\epsilon}t} 
\cdot \norm{U^{-}}_{**}, \quad\text{for $t \ge 0$}, \\
\norm{S^{-}_{\epsilon}(t)U^{-}}_{**} \le e^{b^{--}_{\epsilon}t} 
\cdot \norm{U^{-}}_{**}, \quad\text{for $t \le 0$}, \\
\norm{S^{--}_{\epsilon}(t)U^{--}_{**}}_{**} \le e^{a^{--}_{\epsilon}t} 
\cdot \norm{U^{--}_{**}}_{**}, \quad\text{for $t \ge 0$}.
\end{gather*}
There exists a constant $M^{--}_{\epsilon} > 0$ such that for
 $U^{--} \in \mathbb{X}^{--}_{\epsilon}$,
\begin{equation}
\norm{S^{--}_{\epsilon}(t)U^{--}}_{**} \le M^{--}_{\epsilon}
\cdot t^{-\alpha} \cdot e^{a^{--}_{\epsilon}t} \cdot
\norm{U^{--}}_{\mathbb{L}^{2}(\Omega)} \quad\text{for $t > 0$,} \label{estimate:Me}
\end{equation}
where
$$
M^{--}_{\epsilon} \le C_1 \cdot  \epsilon^{ -\alpha( 2 + \dim \Omega )/2}
\quad\text{as $\epsilon \to 0$.}
$$

\item[(c)] There exists a constant $M_{\alpha,\epsilon} \ge 1$ 
which is proportional to $\epsilon^{- \alpha }$ as $\epsilon \to 0$, 
as well as an $\epsilon-$independent constant $C>0$ such that for all 
$U \in \mathbb{X}^{-}_{\epsilon} \oplus \mathbb{X}^{+}_{\epsilon} \oplus \mathbb{X}^{++}_{\epsilon}$ 
we have
$$
C \cdot \norm{U}_{\mathbb{L}^{2}(\Omega)} \le \norm{U}_{**} \le M_{\alpha, \epsilon} 
\cdot \norm{U}_{\mathbb{L}^{2}(\Omega)}.
$$
\end{itemize}
\end{lemma}

\begin{proof}
The result of the local case \cite[Proposition 5.4]{SanderWanner:03} 
contains the same estimates provided in this lemma.  Although the norm 
for the mixed case is not the same as the norm used in the local case, 
the norms are similar by Lemma \ref{lemma:normEq}.  Careful examination 
of the details of  \cite[Proposition 5.4]{SanderWanner:03} reveal an 
application to the mixed case considered here.  The estimates 
in \cite[Proposition 5.4]{SanderWanner:03} rely upon 
\cite[Lemma 3.4]{SanderWanner:03}, \cite[Corollary 5.2]{SanderWanner:03},
 $\norm{\cdot}_{*}-$norm \cite[(18)]{SanderWanner:03}, and a complete set 
of eigenfunctions of the linearization \cite[Proposition 3.7]{SanderWanner:03}. 
 Again, the $\norm{\cdot}_{**}-$norm considered here is similar to the 
$\norm{\cdot}_{*}$. Assumptions \ref{assumption:K0_sr} and \ref{assumption:K0_sl}, 
along with asymptotic growth rate of $\nu_{k,\epsilon}$ given by
 \eqref{eq:asymptoticDistOfMixedOp} yield the analogous forms 
of \cite[Corollary 5.2]{SanderWanner:03}.  Furthermore, the asymptotic growth 
of $\nu_{k,\epsilon}$ given in \eqref{eq:asymptoticDistOfMixedOp} shows that 
the asymptotic behavior of the eigenvalues of the local 
case \cite[Lemma 3.4]{SanderWanner:03} is the same as for the mixed case given
 by  Lemma \ref{lemma:propertiesOfLambdaPlusMinus}.  
Lemma \ref{lemma:eigenfunctionsFormACompleteSet} shows that the eigenfunctions 
for the mixed case are also a complete set for $\mathbb{X}$. Thus, the result holds.
\end{proof}

The final lemma shows that the nonlinearity of the evolution equation is 
differentiable in the Banach setting.  Furthermore, the Lipschitz constant 
is polynomially bounded.

\begin{lemma}[Properties of $F$]\label{lemma:diffF}
Suppose  that Assumptions 
\ref{assumption:rectangularDomain} - \ref{assumption:realEigenvalues}, 
\ref{assumption:K0_sr} and \ref{assumption:K0_sl} are satisfied.  
Let $h$ be defined as in \eqref{eqn:hPeriodic}.  Furthermore, for 
arbitrary $U=(u,v) \in \mathbb{X}^{\alpha}$ let $F(U) = h(u,v)$.  
Then for every $\alpha$ satisfying $\dim \Omega / 4 < \alpha < 1$ 
this defines a nonlinear mapping $F:\mathbb{X}^{\alpha} \to \mathbb{X}$ which 
is continuously Fr\'{e}chet differentiable.  Furthermore, there exist
 positive constants $C$ and $R_{0}$ such that for any $0 < R \le R_{0}$ 
the following holds.  For arbitrary $U, V \in \mathbb{X}^{\alpha}$ with
$$
\norm{ U - (\bar{u}_{0}, \bar{v}_{0})}_{**} \le R \quad\text{and}\quad 
\norm{V - (\bar{u}_{0}, \bar{v}_{0})}_{**} \le R,
$$
we have
\begin{equation}
\norm{F(U) - F(V)}_{\mathbb{X}} \le C \cdot R^{\chi} \cdot 
\norm{U - V}_{**}.\label{eqn:xidefined}
\end{equation}
\end{lemma}

\begin{proof}
The result follows directly from \cite[Lemma 5.5]{SanderWanner:03}.  
Note that $\chi$ describes the smoothness of $(f,g)$ as given by 
Assumption \ref{assumption:smoothNonlinearity}.
\end{proof}

We now have everything that we need to prove the result for early pattern formation, 
schematically
depicted in Fig.~\ref{fig:stableUnstableSchematic}.

\begin{theorem}[Early Pattern Formation] \label{thm:earlyPatternResults}
For System \eqref{system:scaledMixed}, suppose that Assumptions
\ref{assumption:rectangularDomain} - \ref{assumption:realEigenvalues},
\ref{assumption:K0_sr} and \ref{assumption:K0_sl} are satisfied.  
Choose $\alpha$ such
that $\dim \Omega / 4 < \alpha < 1$ where $\mathbb{X}^{\alpha} = \mathbb{H}^{2
\alpha}_{\rm per}(\Omega)$.  For every $0 < p \ll 1$ and $0 < d_{0} \ll 1$,
 there exist constants
$\epsilon_{0}, r_{\epsilon}$, and $R_{\epsilon}$ such that
\begin{itemize}
\item[(a)]  $0 < r_{\epsilon} < R_{\epsilon}$ and both constants are proportional to
$\epsilon^{(2\alpha + \dim\Omega)/(2\chi)}$ as $\epsilon \to 0$.
\item[(b)]  For all $\epsilon \le \epsilon_{0}$,   there exists an
invariant manifold $\mathcal{N}_{\epsilon}$ with nearly linear behavior. That is, with probability of $1-p$
the solutions with initial conditions contained in $\mathcal{N}_{\epsilon} \cap B_{r_{\epsilon}}(\bar{u}_{0},
\bar{v}_{0} )$  leave the ball $B_{R_{\epsilon}}$ at a
distance from $(\bar{u}_{0}, \bar{v}_{0}) + \mathbb{X}^{+}_{\epsilon} \oplus
\mathbb{X}^{++}_{\epsilon}$ no larger than $d_{0} R_{\epsilon}$.
\end{itemize}
\end{theorem} 

\begin{proof}[Proof of Theorem \ref{thm:earlyPatternResults}] Lemmas
\ref{lemma:sectorial} - \ref{lemma:diffF}, show that hypotheses (H1)--(H3) are valid. 
 As
pointed out in \cite[Theorem 5.7]{SanderWanner:03}, pairwise orthogonality 
is not
required to apply the theory in \cite{MaierPaapeWanner:00}, as long as the angle 
between any two spaces is bounded away from 0 and $\pi$.  Since Lemma
\ref{lemma:eigenfunctionsFormACompleteSet} shows this to be true, we have verified
everything except for the size of $r_{\epsilon}$ and $R_{\epsilon}$.

Using \cite[Remark 3.1, Lemma 3.6]{MaierPaapeWanner:00}, as $\epsilon \to 0$, 
we have that $r_{\epsilon}/L \to C_{r_{\epsilon}}$ and 
$R_{\epsilon}/L \to C_{R_{\epsilon}}$ where $L$ is a Lipschitz constant of the
 nonlinearity $F$.  By Lemma \ref{lemma:diffF}, we have that the Lipschitz 
constant is given as $L = C \cdot R^{\chi}$,
where $C, R$ are constants.  As $\epsilon \to 0$, \cite[Remark 2.5]{MaierPaapeWanner:00} gives
\begin{align}
C \cdot R^{\chi} \le \frac{ C^{--}_{\epsilon} C^{+}_{\epsilon} }{2 C^{+}_{\epsilon} 
+ M_{\alpha, \epsilon} C^{--}_{\epsilon}}, \label{ineq:lipEst}
\end{align}
where
\begin{gather*}
C^{+}_{\epsilon} = \frac{\min{(b^{-}_{\epsilon} - a^{-}_{\epsilon}, b^{+}_{\epsilon} 
- a^{+}_{\epsilon})}}{6 + \chi + 1/ \chi},
\\
C^{--}_{\epsilon} = \frac{b^{--}_{\epsilon} 
- a^{--}_{\epsilon}}{2 \cdot M_{\alpha, \epsilon} + 3\sqrt{2}
\cdot M^{--}_{\epsilon} \cdot (b^{--}_{\epsilon} - a^{--}_{\epsilon})^{\alpha}}.
\end{gather*}
Using Lemma \ref{lemma:dichotomy}, we have that as $\epsilon \to 0$, 
$M_{\alpha, \epsilon} = C_1 \cdot \epsilon^{-\alpha}$ and 
$M^{--}_{\epsilon} \le C_2 \cdot \epsilon^{(-\alpha - \alpha \dim \Omega / 2)}$. 
 This implies that
\begin{gather}
C^{--}_{\epsilon} \ge C_{3} \cdot \epsilon^{(2\alpha + \dim \Omega)/2}, 
\label{ineq:CmmEst}\\
C^{+}_{\epsilon} \ge d \cdot \epsilon^{(\dim \Omega / 2) }. \label{ineq:CpEst}
\end{gather}
Combining Estimates \eqref{ineq:CmmEst} and \eqref{ineq:CpEst} with
 Estimate \eqref{ineq:lipEst}, we have
\begin{equation}
R^{\chi}  \le K \cdot \epsilon^{(2\alpha + \dim \Omega) / 2}.
\end{equation}
Since $r_{\epsilon}, R_{\epsilon} \sim R^{\chi}$, we get that
$r_{\epsilon}, R_{\epsilon} \sim \epsilon^{(2\alpha + \dim \Omega)/(2\chi)}$.
\end{proof}

Theorem \ref{thm:earlyPatternResults} shows that the addition of the nonlocal 
term to local diffusion produces similar early pattern results when compared 
to the pure local case considered in \cite{SanderWanner:03}.  
\cite[Lemma 5.5]{SanderWanner:03} provides an initial estimate 
for the size of the nonlinearity $F$.  However, this bound is improved 
in \cite[Proposition 6.2]{SanderWanner:03} and we now discuss
 the improved estimate as it is essential for the almost linear result.  
Consider the regions that are given in terms of cones 
$(\bar{u}_{0}, \bar{v}_{0}) + \mathcal{K}_{\delta}$, where
\begin{gather*}
\mathcal{K}_{\delta} = \{ U \in \mathbb{X}^{\alpha} : \norm{U_{-}}_{**} 
\le \delta \norm{U_{+}}_{**}, U = U_{+} + U_{-} \in \mathcal{Y}^{+}_{\epsilon} 
\oplus \mathcal{Y}^{-}_{\epsilon} \},\\
\mathcal{Y}^{+}_{\epsilon} = \mathbb{X}^{+}_{\epsilon} \oplus \mathbb{X}^{++}_{\epsilon} \subset \mathbb{X}^{\alpha}, \mathcal{Y}^{-}_{\epsilon} = \left( \mathbb{X}^{--}_{\epsilon} \cap \mathbb{X}^{\alpha} \right) \oplus \mathbb{X}^{-}_{\epsilon} \subset \mathbb{X}^{\alpha}.
\end{gather*}
Using these cone regions, the improved bound is given by the following lemma 
that follows immediately from \cite[Proposition 6.2]{SanderWanner:03}.

\begin{lemma} \label{lemma:smallF}
Suppose that Assumptions 
\ref{assumption:rectangularDomain} - \ref{assumption:realEigenvalues}, 
\ref{assumption:K0_sr} and \ref{assumption:K0_sl} are satisfied and let $F$
 be as defined in \eqref{eqn:F}.  For $\dim \Omega / 4 < \alpha < 1$ and 
$\delta_{0} > 0$, denote
\begin{equation}
\delta_{\epsilon} = \delta_{0} \cdot \epsilon^{(\alpha - \dim \Omega/4)}.
\end{equation}
Then there exists $\epsilon-$independent constants $M_1$, $M_2>0$ such that
for every $0 < \epsilon \le 1$ and $U \in \mathcal{K}_{\delta_{\epsilon}}$, with
\begin{equation}
\norm{U}_{**} \le M_1 \cdot \epsilon^{(-\alpha + \dim \Omega / 4)},
\end{equation}
we have
\begin{equation}
\norm{F((\bar{u}_{0},\bar{v}_{0}) + U)}_{\mathbb{L}^{2}(\Omega)}
\le M_2 \cdot \epsilon^{(\alpha - \dim \Omega /4)\cdot(\chi+1)} \cdot \norm{U}^{\chi + 1}_{**}.
\end{equation}
The order of the zero $(\bar{u}_{0}, \bar{v}_{0})$ of $F$ is given by $\chi$
in Assumption \ref{assumption:smoothNonlinearity}.
\end{lemma}

 In other words, if a solution $U$ has initial condition 
$U_{0} \in (\bar{u}_{0}, \bar{v}_{0}) + \mathcal{K}_{\delta_{\epsilon}}$, 
with $\delta_{\epsilon} = \delta_{0} \cdot \epsilon^{(\alpha - \dim \Omega/4)}$, 
then it is possible for the solution to remain close to 
$\mathbb{X}^{+}_{\epsilon} \oplus \mathbb{X}^{++}_{\epsilon}$ for larger distances 
away from the homogeneous equilibrium compared to the early pattern results. 
 We now state and prove our final result.

\begin{theorem}[Later Pattern Formation] \label{thm:almostLinearBehavior}
Suppose that Assumptions 
\ref{assumption:rectangularDomain}--\ref{assumption:realEigenvalues}, 
\ref{assumption:K0_sr} and \ref{assumption:K0_sl} are satisfied and choose 
and fix $\delta_{0} \in (0,\frac{1}{2})$ and $0 < \xi \ll 1$.  
Let $\epsilon \in (0, 1]$. Choose $\alpha$ such that 
$\dim \Omega / 4 < \alpha < 1$ where 
$\mathbb{X}^{\alpha} = \mathbb{H}^{2 \alpha}_{\rm per}(\Omega)$.  
There exists a constant $D$ and a splitting of $\mathbb{X}^{\alpha}$ such that the following is true.  If the initial condition $U_{0} \in (\bar{u}_{0}, \bar{v}_{0}) + \mathcal{K}_{\delta_{\epsilon}}$, with $\delta_{\epsilon} = \delta_{0} \cdot \epsilon^{(\alpha - \dim \Omega/4)}$, satisfies
\begin{equation} \label{eqn:initialR}
0 < \norm{U_{0} - (\bar{u}_{0}, \bar{v}_{0})}_{**}
< \min ( 1,( D \epsilon^{-(\alpha - \dim \Omega /4 )
+ \alpha/ \chi + \xi} )^{1 / ( 1 - \xi)} ),
\end{equation}
then for
$$
\norm{U(t) - (\bar{u}_{0}, \bar{v}_{0})}_{**}
\le D \epsilon^{-(\alpha - \dim \Omega / 4) + \alpha / \chi + \xi }
\cdot \norm{U_{0} - (\bar{u}_{0}, \bar{v}_{0})}_{**}^{\xi},
$$
the relative distance between $(u,v)$ and $(u_{\rm lin}, v_{\rm lin})$ is bounded by
\begin{align} \label{eqn:tolerance}
\frac{\norm{U(t) - (\bar{u}_{0},\bar{v}_{0})
-U_{\rm lin}(t)}_{**}}{\norm{U_{\rm lin}(t)}_{**}}
\le \frac{\delta_{0}}{2} \cdot \epsilon^{(\alpha - \dim \Omega / 4)}.
\end{align}
\end{theorem}

\begin{proof}[Proof of Theorem \ref{thm:almostLinearBehavior}]
Fix $0 < \beta < 1$ such that Assumption \ref{assumption:K0_sl} is satisfied. 
 Lemmas \ref{lemma:sectorial} - \ref{lemma:diffF} are used to provide the 
early pattern results given by Theorem \ref{thm:earlyPatternResults} and 
show that solutions that are initially close to the unstable subspace 
$(\bar{u}_{0}, \bar{v}_{0}) + \mathbb{X}^{+}_{\epsilon} \oplus \mathbb{X}^{++}_{\epsilon}$ 
remain close to this space.   These lemmas show that the  decomposition of the
 phase space for the local case is also achievable for the mixed system. 
 Furthermore, the fractional power space used for the nonlocal case is a subset
 of the fractional power space used in the local case.   
\cite[Theorem 6.3]{SanderWanner:03} is directly applied, thus giving the result.
\end{proof}

\begin{remark} \rm
By Assumption \ref{assumption:K0_sl}, we have that $\epsilon_{0}$ must satisfy
$$
\epsilon_{0}^{1-\theta} < \Big(\frac{s_{\ell}}{(1-\beta)\hat{K}_{0}}\Big).
$$
If $\theta = 1$, then
$$
1 < \Big(\frac{s_{\ell}}{(1-\beta)\hat{K}_{0}}\Big),
$$
implying that our results hold around some small interval $[\beta_{0}, 1]$ 
that contains $\beta$. This is verified by our numerical results
 (cf. Figure~\ref{fig:noALBForNonlocal1}-\ref{fig:noALBForNonlocal3}).  
For $\beta$ values outside of this interval, only the first few finite 
eigenfunctions corresponding to the
eigenvalues of the spectrum are contained within the unstable subspaces.  
Although the eigenfunctions are not as dominant as the eigenfunctions associated 
with higher eigenmodes, the behavior of solutions cannot be explained by 
considering only a small number of the most dominant eigenfunctions.  
Again, this is consistent with our numerical results (cf.
Figures~\ref{fig:noALBForNonlocal1}-\ref{fig:noALBForNonlocal3}).
\end{remark}


\section{Concluding remarks}

In this paper, reaction-diffusion systems with mixed nonlocal and local 
diffusion terms are considered in which, as $\epsilon \to 0$,
 $\epsilon^{\theta} J(x) \to K(x)$, where $K(x)$ is an $\epsilon$-independent
 kernel.  For $0 \le \theta \le 1$, the initial pattern selection is 
dominated by linear behavior.  We believe our methods can be applied to 
other related mixed local-nonlocal models. For example, such behavior has
 previously been observed numerically for phase field models with local and 
nonlocal diffusion terms, and we expect that similar results can be obtained 
with only minor adjustments of the proofs presented here.  
For further results on the current model, we believe that it would be possible 
to apply the probabilistic methods found in \cite{Wanner:04,DesiSanderWanner:06} 
to show that later stages of pattern formation are governed by linear effects. 
 Furthermore, we conjecture that these results are attainable on certain
 non-rectangular domains, as long as it is possible to define the nonlocal 
kernel to have even symmetry.  For example, it should be possible to extend 
these results to the disk.

Bates and Chen \cite{BatesChen:01} point out that the Laplacian is considered 
as a first-order approximation for pure nonlocal systems for a single space dimension.  See also \cite{BatesFifeGardnerJones:97, BatesChenWang:97}.  Furthermore, it is possible to approximate the nonlocal heat equation subject to Dirichlet boundary conditions \cite{CortazarElguetaRossi:09} and the heat equation subject to Neumann boundary conditions \cite{CortazarElguetaRossiWalanski:09} with local diffusion.  In other words, solutions of the nonlocal system are close to solutions of a local system, using the same initial conditions.  Preliminary numerics suggest that the same may also be said for System \eqref{system:scaledMixed}.  That is, by parameterizing the kernel appropriately with respect to $\epsilon$, the nonlocal system displays almost linear behavior as $\epsilon \to 0$.  Thus for carefully chosen kernels,  the results developed for the mixed case would apply to the nonlocal case as well.



\section{Appendix: Kernel and nonlinearities}  %\label{section:appendix}
\label{KernelNonlinearities}

The numerical results in this paper all use the nonlinearities given in the 
system of Thomas for $f$ and $g$~\cite{Murray:93}.  The nonlinearities are given  as
\begin{equation}
\begin{gathered}
f(u,v)  = a - u - \frac{\rho u v}{1 + u + Ku^2},  \\
g(u,v)  = A( b - v ) - \frac{ \rho u v }{ 1 + u + K u^2 } ,
\end{gathered} \label{eqn:ThomasNonlinearities}
\end{equation}
where $a$, $b$, $\rho$, $A$, and $K$ are positive constants that depend
upon reaction kinetics.  We choose $a = 150$, $b = 100$, $\rho = 13$, $A = 1.5$,
and $K = .05$. These nonlinearities satisfy conditions for a
Turing instability with real eigenvalues.  Furthermore, with $\chi = 1$,
the Thomas system satisfies Assumption \ref{assumption:smoothNonlinearity}.

We consider a kernel that is similar to the kernel used in \cite{HartleyWanner:09}.  
Let the Gaussian kernel $\mathcal{G}$ be defined as
\begin{equation}
\mathcal{G}(x,y) = \exp\Big( \frac{-x^{2}-y^{2}}{ \sigma^{2}}\Big) \cdot \eta(x,y),
\end{equation}
 where $\eta(x,y)$ is a smooth cutoff function.  The function $\eta$ is 1 on
$B_{1/3}(0,0)$, but vanishes outside of $B_{1/2}(0,0)$.  On the domain
$\Omega = [0,1]^{2}$, the kernel $G$ is given as
\begin{equation}
G(x,y) = \frac{C}{\epsilon^{\theta}} \cdot
\left( \mathcal{G}(x,y) + \mathcal{G}(x+1,y) + \mathcal{G}(x,y+1) + \mathcal{G}(x+1,y+1) \right).
\label{eqn:hydra}
\end{equation}
Outside of $\Omega = [0,1]^{2}$, $J(x,y)$ is given as the smooth periodic
extension of $G(x,y)$, denoted as
\begin{equation}
J(x,y) = G_{\rm per}(x,y).
\end{equation}
Note that $\hat{J}_{0} = \frac{C}{\epsilon^{\theta}} \cdot \hat{\mathcal{G}}_{0}$.
We perform numerics for the cases $\theta \to 1$ and $\theta = 1$, although not
all numerics are shown.  For the following computation, we fix $\theta = 1$.
In this case, $\epsilon
\hat{J}_{0}$ lies just to the right of $(s_{\ell}, s_{r}) \approx (0.0071, 0.8806)$.
 For this  case, we choose $\frac{C}{\epsilon}$ so that the infinitely many
eigenvalues of the
linearized right hand side are not all positive.  The condition
\begin{equation}
\frac{ \epsilon \cdot \frac{C}{\epsilon} \hat{\mathcal{G}}_{0} }{ s_{r} } > 1,
\end{equation}
or equivalently
\begin{align}
\frac{ C \hat{\mathcal{G}}_{0}}{ s_{r} } > 1 \label{ineq:finitePosEigCond}
\end{align}
permits a finite number of eigenvalues.
Note that the integral of the kernel $J$ over $[0,1]^{2}$ is given as the
first Fourier coefficient denoted as $\hat{J}_{0}$.
For small $\sigma$, a good approximation for this integral is the volume
of $\mathcal{G}$ over all of $\mathbb{R}^{2}$.  To understand why, observe that most of the
support of $G$ occurs within $3\sigma$ of each corner of $[0,1]^{2}$
for $\sigma \ll 1$.  Thus, we can compute $\hat{\mathcal{G}}_{0}$ using
\[
\hat{\mathcal{G}}_{0}  = \int_{\Omega} \mathcal{G}(x,y) \,dx\,dy
 \approx \sigma^{2} \pi.
Thus for $\sigma \ll 1$, we choose
\begin{equation}
C = \frac{ 2 s_{r}}{ \sigma^{2} \pi}. \label{eqn:c}
\end{equation}
Choosing $\sigma = 0.1$ is sufficient for our purposes.  Thus from \eqref{eqn:c},
\begin{align}
C = 200 \cdot \frac{s_{r}}{\pi}.
\end{align}
Note that
\[
\lim_{\epsilon \to 0} \epsilon \cdot \hat{J}_{0} = 2s_{r} > 0.
\]

\subsection*{Acknowledgments}
We want to thank the anonymous referees, whose careful comments helped to
significantly improve this article.

E.~Sander was partially supported by NSF Grants DMS-0639300
and DMS-0907818, as well as NIH Grant R01-MH79502.
R.~Tatum was partially supported by the Office of Naval Research
 In-house Laboratory Independent Research Program.

\begin{thebibliography}{00}

\bibitem{AbramovichAliprantis:00}
Y.~A. Abramovich and C.~D. Aliprantis.
\newblock {\em {An Invitation to Operator Theory}}.
\newblock American Mathematical Society, Providence RI, 2002.

\bibitem{alikakos:bronsard:fusco:98a}
N.~D. Alikakos, L.~Bronsard, and G.~Fusco.
\newblock Slow motion in the gradient theory of phase transitions via energy
  and spectrum.
\newblock {\em Calculus of Variations and Partial Differential Equations},
  6:\penalty0 39--66, 1998.

\bibitem{ArendtSchleich:09}
W.~Arendt and W.~P. Schleich.
\newblock {\em {Mathematical Analysis of Evolution, Information and
  Complexity}}.
\newblock Wiley-VCH, Germany, 2009.

\bibitem{BatesChen:01}
P.~Bates and F.~Chen.
\newblock Spectral analysis and multidimensional stability of traveling waves
  for nonlocal {A}llen-{C}ahn equation.
\newblock {\em J. Math. Anal. Appl.}, 273:\penalty0 45--57, 2001.

\bibitem{BatesChenWang:97}
P.~Bates, F.~Chen, and J.~Wang.
\newblock {\em Global existence and uniqueness of solutions to a nonlocal
  phase-field system: in P.W. Bates, S.-N. Chow, K. Lu, X. Pan (Eds)}.
\newblock International Press, Cambridge, MA, 1997.
\newblock pp. 14--21.

\bibitem{BatesFifeGardnerJones:97}
P.~Bates, P.~Fife, R.~Gardner, and C.~Jones.
\newblock The existence of traveling wave solutions of a generalized
  phase-field model.
\newblock {\em SIAM J. Math. Anal.}, 28:\penalty0 60--93, 1997.

\bibitem{BerestyckiNadinPerthameRyzhik:09}
H.~Berestycki, G.~Nadin, B.~Perthame, and L.~Ryzhik.
\newblock The nonlocal {F}isher-{KPP} equation: traveling waves and steady
  states.
\newblock {\em Nonlinearity}, 22:\penalty0 2813--2844, 2009.

\bibitem{CortazarElguetaRossi:09}
C.~Cortazar, M.~Elgueta, and J.~Rossi.
\newblock Nonlocal diffusion problems that approximate the heat equation with
  {D}irichlet boundary conditions.
\newblock {\em Israel Journal of Mathematics}, 170\penalty0 (1):\penalty0
  53--60, 2009.

\bibitem{CortazarElguetaRossiWalanski:09}
C.~Cortazar, M.~Elgueta, J.~Rossi, and N.~Wolanski.
\newblock How to approximate the heat equation with {N}eumann boundary
  conditions by nonlocal diffusion problems.
\newblock {\em Archive for Rational Mechanics and Analysis}, 187\penalty0
  (1):\penalty0 137--156, 2008.

\bibitem{CourantHilbert:89}
R.~Courant and D.~Hilbert.
\newblock {\em {Methods of Mathematical Physics}}.
\newblock Interscience Publishers, Inc., New York, 1989.

\bibitem{DesiSanderWanner:06}
J.~P. Desi, E.~Sander, and T.~Wanner.
\newblock Complex transient patterns on the disk.
\newblock {\em Discrete and Continuous Dynamical Systems}, 15\penalty0
  (4):\penalty0 1049--1078, 2006.

\bibitem{EngelNagel:00}
K.~Engel and R.~Nagel.
\newblock {\em {One-Parameter Semigroups for Linear Evolution Equations}}.
\newblock Springer-Verlag, New York, 2000.

\bibitem{FieldNoyes:74}
R.~J. Field and R.~M. Noyes.
\newblock Oscillations in chemical systems. {IV}. {L}imit cycle behaviour in a
  model of a real chemical reaction.
\newblock {\em J. Chem. Phys.}, 60:\penalty0 1877--1884, 1974.

\bibitem{fife:03}
P.~Fife.
\newblock Some nonclassical trends in parabolic and parabolic-like evolutions.
\newblock In {\em Trends in nonlinear analysis}, pages 153--191. Springer,
  Berlin, 2003.

\bibitem{grant:93a}
C.~P. Grant.
\newblock Spinodal decomposition for the {C}ahn-{H}illiard equation.
\newblock {\em Communications in Partial Differential Equations}, 18:\penalty0
  453--490, 1993.

\bibitem{Hartley:08}
T.~Hartley.
\newblock An analysis of phase separation processes for stochastic and nonlocal
  extensions of the classical phase field model.
\newblock PhD thesis, George Mason University, 2008.

\bibitem{HartleyWanner:09}
T.~Hartley and T.~Wanner.
\newblock A semi-implicit spectral method for stochastic nonlocal phase-field
  models.
\newblock {\em Discrete and Continuous Dynamical Systems, A}, 25:\penalty0
  399--429, 2009.

\bibitem{Henry:81}
D.~Henry.
\newblock {\em {Geometric Theory of Semilinear Parabolic Equations}}.
\newblock Springer--Verlag, New York, 1981.

\bibitem{HildebrandSkodtShowalter:01}
M.~Hildebrand, H.~Sk\o{}dt, and K.~Showalter.
\newblock Spatial symmetry breaking in the {B}elousov-{Z}habotinsky reaction
  with light-induced remote communication.
\newblock {\em Phys. Rev. Lett.}, 87\penalty0 (8):\penalty0 1--4, 2001.

\bibitem{IfantisPanagopoulos:01}
E.~K. Ifantis and P.~N. Panagopoulos.
\newblock Limit points of eigenvalues of truncated tridiagonal operators.
\newblock {\em J. Comp. and Appl. Math.}, 133:\penalty0 413--422, 2001.

\bibitem{Kato:76}
T.~Kato.
\newblock {\em {Perturbation Theory for Linear Operators}}.
\newblock Springer-Verlag, Heidelberg, 1976.

\bibitem{Katznelson:04}
Y.~Katznelson.
\newblock {\em {Introduction to Harmonic Analysis}}.
\newblock Cambridge University Press, United Kingdom, 2004.

\bibitem{LedermanWolanski06}
C.~Lederman and N.~Wolanski.
\newblock Singular perturbation in a nonlocal diffusion problem.
\newblock {\em Communications in Partial Differential Equations}, 31\penalty0
  (1-3):\penalty0 195--241, 2006.

\bibitem{LengyelEpstein:92}
I.~Lengyel and I.~R. Epstein.
\newblock A chemical approach to designing {T}uring patterns in
  reaction-diffusion systems.
\newblock {\em Proc. Natl. Acad. Sci., USA}, 89:\penalty0 3977--3979, 1992.

\bibitem{MaierPaapeWanner:98}
S.~Maier-Paape and T.~Wanner.
\newblock Spinodal decomposition of the {C}ahn-{H}illiard equation in higher
  dimensions. part {I}: probability and wavelength estimate.
\newblock {\em Comm. Math. Phys.}, 195\penalty0 (2):\penalty0 435--464, 1998.

\bibitem{MaierPaapeWanner:00}
S.~Maier-Paape and T.~Wanner.
\newblock Spinodal decomposition for the {C}ahn-{H}illiard equations in higher
  dimensions: nonlinear dynamics.
\newblock {\em Arch. Rational Mech. Anal.}, 151\penalty0 (3):\penalty0
  187--219, 2000.

\bibitem{Murray:81}
J.~Murray.
\newblock A prepattern formation mechanism for animal coat markings.
\newblock {\em Journal of Theoretical Biology}, 88:\penalty0 161--199, 1981.

\bibitem{Murray:93}
J.~D. Murray.
\newblock {\em {Mathematical Biology, Vol. I, An Introduction}}.
\newblock Springer--Verlag, New York, third edition, 1993.

\bibitem{MurrayVol2:93}
J.~D. Murray.
\newblock {\em {Mathematical Biology, Vol. II, Spatial Models and Biomedical
  Applications}}.
\newblock Springer--Verlag, New York, third edition, 1993.

\bibitem{neubert:etal:02}
M.~G. Neubert, H.~Caswell, and J.~D. Murray.
\newblock Transient dynamics and pattern formation: reactivity is necessary for
  {T}uring instabilities.
\newblock {\em Math. Biosci.}, 175\penalty0 (1):\penalty0 1--11, 2002.

\bibitem{Pazy:83}
A.~Pazy.
\newblock {\em {Semigroups of Linear Operators and Applications to Partial
  Differential Equations. Applied Mathematical Sciences}}, volume~44.
\newblock Springer--Verlag, New York, 1983.

\bibitem{RenardyRogers:04}
M.~Renardy and R.~C. Rogers.
\newblock {\em {An Introduction to Partial Differential Equations}}.
\newblock Springer--Verlag, New York, second edition, 2004.

\bibitem{SanderWanner:03}
E.~Sander and T.~Wanner.
\newblock Pattern formation in a nonlinear model for animal coats.
\newblock {\em Journal of Differential Equations}, 191:\penalty0 143--174,
  2003.

\bibitem{Suhubi:03}
E.~S. Suhubi.
\newblock {\em {Functional Analysis}}.
\newblock Kluwer Academic Publishers, Norwell, Massachusetts, first edition,
  2003.

\bibitem{Turing:52}
A.~Turing.
\newblock The chemical basis of morphogenesis.
\newblock {\em Philosophical Transactions of the Royal Society of London.
  Series B, Biological Sciences}, 237:\penalty0 37--72, 1952.

\bibitem{VanagEpstein:01}
V.~K. Vanag and I.~R. Epstein.
\newblock Pattern formation in a tunable reaction-diffusion medium: the {BZ}
  reaction in an aerosol {OT} microemulsion.
\newblock {\em Phys. Rev. Lett.}, 87:\penalty0 1--4, 2001.

\bibitem{Wanner:04}
T.~Wanner.
\newblock Maximum norms of random sums and transient pattern formation.
\newblock {\em Transactions of the American Mathematical Society}, 356\penalty0
  (6):\penalty0 2251--2279, 2004.

\end{thebibliography}


\end{document}





