\documentclass[reqno]{amsart} \usepackage{hyperref} \usepackage{graphicx} \AtBeginDocument{{\noindent\small Sixth Mississippi State Conference on Differential Equations and Computational Simulations, {\em Electronic Journal of Differential Equations}, Conference 15 (2007), pp. 251--264.\newline ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu \newline ftp ejde.math.txstate.edu (login: ftp)} \thanks{\copyright 2007 Texas State University - San Marcos.} \vspace{9mm}} \begin{document} \setcounter{page}{251} \title[\hfilneg EJDE-2007/Conf/15\hfil Game-theoretic schemes for curvature flows] {Game-theoretic schemes for generalized curvature flows in the plane} \author[M. Rudd\hfil EJDE/Conf/15 \hfilneg] {Matthew Rudd} \address{Matthew Rudd \newline Department of Mathematics \\ University of Texas at Austin \\ Austin, TX 78712, USA} \email{rudd@math.utexas.edu} \dedicatory{Dedicated with gratitude and best wishes to Klaus Schmitt on his 65-th birthday} \thanks{Published February 28, 2007.} \subjclass[2000]{35K10} \keywords{Geometric level set equations; curvature flows; dynamic programming} \begin{abstract} Extending some recent work by Kohn and Serfaty, we discuss a class of two-player discrete-time games whose value functions approximate the solutions of related geometric partial differential equations. Of particular interest are the numerical implications of the resulting game-theoretic approximation schemes for geometric motions. \end{abstract} \maketitle \numberwithin{equation}{section} \newtheorem{theorem}{Theorem}[section] \begin{section}{Introduction} \label{intro} If a hypersurface $\Gamma_{0} \subset \mathbb{R}^{N}$ moves over time with the known normal velocity $V$, then its subsequent evolution yields a family $\{ \Gamma_{t} : t \geq 0 \}$ of subsets of $\mathbb{R}^{N}$. The subsets $\Gamma_{t}$ may continue to have codimension one, or they may undergo topological changes by breaking, merging, or fattening. 
Among the various techniques used to analyze moving interfaces in the presence of such possible singularities, the level set approach has proven very effective and leads to the geometric equations studied below. For the sake of completeness, let us recall the level set description of propagating fronts: we look for a function $u : \mathbb{R}^{N} \times \mathbb{R}^{+} \to \mathbb{R}$ such that $\Gamma_{t} = \{ x \in \mathbb{R}^{N} : u(x,t) = 0 \} \,, \quad \text{for } t > 0 \,,$ by observing (cf. the derivation in \cite{sethian:lsm99}) that $u$ solves the initial-value problem $$\label{ls-eqn} u_{t} + | Du | \, V = 0 \,, \quad u(\cdot,0) = u_{0}$$ in an appropriate sense, where the initial datum $u_{0} : \mathbb{R}^{N} \to \mathbb{R}$ is such that $\Gamma_{0} = \{ x \in \mathbb{R}^{N} : u_{0}(x) = 0 \} \,.$ Of course, the specific nature of the level-set equation in (\ref{ls-eqn}) depends on the form of the normal velocity function $V$. For the simple function $V = V(x,t)$, for example, we obtain the Hamilton-Jacobi problem $$\label{gen-eikonal} u_{t} + | Du | \, V(x,t) = 0 \,, \quad u(\cdot,0) = u_{0} \,,$$ and more general Hamilton-Jacobi equations arise for velocity functions of the form $V = V(n,x,t)$, where $$\label{normal} n = n(x,t) = \frac{ Du }{ | Du | }$$ is the unit normal to the front at the point $x$ at time $t$. There are well-known connections between such first-order initial-value problems and deterministic games involving either one player (i.e., optimal control problems) or two players (see, e.g., \cite{bardi:ocv97,elliott:vso87,evans:dgr84,soravia:gmf94}). When the normal velocity $V$ also depends on the spatial derivative of the normal vector, i.e., the tensor $$\label{tensor} Dn = Dn(x,t) = \frac{1}{ | Du | } \left( I - n \otimes n \right) D^{2}u \,,$$ with $n$ given by (\ref{normal}), equation (\ref{ls-eqn}) is second-order. 
We are particularly interested in normal velocities of the form $V = V(x,t,n,Dn) = -\mathop{\rm Tr}{ \left[ A(n,x,t) Dn \right] }$ for continuous maps $A : \mathbb{R}^{N} \times \mathbb{R}^{N} \times \mathbb{R} \to \left( \mathcal{S}^{N} \right)^{+}$, where $\left( \mathcal{S}^{N} \right)^{+}$ is the cone of positive-definite, symmetric $N \times N$ matrices. Such a map $A$ yields the initial-value problem $$\label{geom-ivp} u_{t} - | Du | \mathop{\rm Tr}{ \left[ A(n,x,t) Dn \right] } = 0 \,, \quad u(\cdot,0) = u_{0} \,.$$ The explicit expressions in (\ref{normal}) and (\ref{tensor}) expose the degeneracies of this equation more clearly than the shorthand of equation (\ref{geom-ivp}), but we write the equation in this form for simplicity. For the special case in which $A \equiv I$, equation (\ref{geom-ivp}) is the mean curvature equation, $$\label{mc-eqn} u_{t} - | Du | \, \kappa = 0 \,, \quad \text{where } \kappa := \operatorname{div}{ \Big( \frac{ Du }{ | Du | } \Big) }$$ is the mean curvature of the front. We therefore refer to the equation in (\ref{geom-ivp}) as the generalized curvature equation. 
Beginning with the independent efforts of Evans and Spruck \cite{evans:mls91}, who concentrated on the mean curvature equation, and Chen, Giga and Goto \cite{chen:uev91}, who studied equations of the form (\ref{gen-geom}), several authors have developed a thorough viscosity theory for geometric initial-value problems of the form $$\label{gen-geom} u_{t} + F(x,t,Du,D^{2}u) = 0 \,, \quad u(\cdot,0) = u_{0} \,,$$ where \textit{geometric} means that, for any vector $p$ and any symmetric matrix $X \in \mathcal{S}^{N}$, $$\label{geometric} F(x,t,\lambda p,\lambda X + \mu p \otimes p) = \lambda F(x,t,p,X) \quad \text{for } \lambda > 0 \,, \, \mu \in \mathbb{R} \,.$$ The theory for problem (\ref{gen-geom}) also requires that $F$ be \textit{degenerate elliptic}, i.e., nonincreasing in its matrix argument: $$\label{deg-elliptic} F(x,t,p,X) \geq F(x,t,p,Y) \quad \text{when } X \leq Y \,.$$ Our assumptions above about the map $A$ guarantee that the generalized curvature equation (\ref{geom-ivp}), when written in the form (\ref{gen-geom}), satisfies both (\ref{geometric}) and (\ref{deg-elliptic}). The basic punchline is that, for a uniformly continuous initial value $u_{0} \in UC(\mathbb{R}^{N})$, problem (\ref{gen-geom}) has a unique uniformly continuous viscosity solution; we refer to \cite{barles:fpp93}, \cite{barles:naf98}, \cite{souganidis:fpt97} and the references therein for relevant definitions and more details. Henceforth, we will concentrate on problems in the plane $\mathbb{R}^{2}$. \end{section} \begin{section} {Approximating curvature flows with discrete-time games} \label{approx} We begin by describing the basic game studied by Kohn and Serfaty in \cite{kohn:dcb}; Spencer introduced this game in \cite{spencer:bg77} as a tool for combinatorial problems. To set up the game, let the final maturity time $T > 0$ and objective function $u_{0} \in UC( \mathbb{R}^{2} )$ be given. 
Suppose that the game begins at time $t < T$ at the position $x \in \mathbb{R}^2$, and define the game's time step $\varepsilon > 0$ by $$\label{time-step} \varepsilon := \frac{T-t}{m} \,, \quad \text{for some integer } m \in \mathbb{N} \,.$$ The two players, Paul and Carol, now alternate their play as follows: \subsection*{Mean curvature game} \begin{itemize} \item[(i)] Paul chooses a unit vector $v \in S^1$. \item[(ii)] Carol accepts or reverses the direction $v$ chosen by Paul. \item[(iii)] Paul moves from $x$ to $x + \sqrt{2 \varepsilon} b v$, where $b = \pm 1$ according to Carol's previous play. \end{itemize} Paul and Carol play the next round of the game at time $t + \varepsilon$ at the new position $x + \sqrt{2 \varepsilon} \, b v$. By (\ref{time-step}), the game runs for precisely $m$ rounds, after which their final position is $x_{_{T}} \in \mathbb{R}^{2}$ and Paul pays Carol the amount $u_{0}( x_{_{T}} )$. Paul and Carol have the opposing goals of minimizing and maximizing this payoff, respectively. Paul's value function is therefore $$\label{u-eps} u^{\varepsilon}(x,t) = \min \{ u_{0}( x_{_{T}} ) \} ,$$ where the minimum is taken over all possible final states $x_{_{T}}$ that can be reached in $m$ plays when starting at $x$ at time $t$. Kohn and Serfaty have shown in \cite{kohn:dcb} that $$\label{convergence} \lim_{\varepsilon \to 0}{ u^{\varepsilon}(x,t) } = u(x,t) \,,$$ where $u$ solves the terminal-value problem $$\label{terminal-mc} u_{t} + | Du | \mathop{\rm Tr}{ [ Dn ] } = 0 \,, \quad u(\cdot,T) = u_{0} \,.$$ By reversing time, we see that $u$ solves the initial-value problem (\ref{geom-ivp}) in the special case in which $A \equiv I$, i.e., the mean curvature equation. Going backward in time, the level sets of $u$ therefore evolve by mean curvature and can be approximated by the level sets of $u^{\varepsilon}$. We can exploit this fact to obtain a very simple algorithm for mean curvature flow. 
For example, suppose that the simple closed curve $\Gamma_{0}$ and $u_{0} \in UC( \mathbb{R}^{2} )$ satisfy $$\label{gamma-0} \Gamma_{0} = \{ x \in \mathbb{R}^{2} : u_{0}(x) = 0 \} \,.$$ $\Gamma_{0}$'s evolution by mean curvature yields the sets $\Gamma_{t} = \{ x \in \mathbb{R}^{2} : u(x,T-t) = 0 \} \,, \quad \text{for} \quad t > 0 \,,$ which we can approximate with the sets $\Gamma^{\varepsilon}_{t} = \{ x \in \mathbb{R}^{2} : u^{\varepsilon}(x,T-t) = 0 \} \,, \quad \text{for } t > 0 \,.$ In particular, $u^{\varepsilon}(x,T-\varepsilon)$ will be $0$ if and only if Paul can reach $\Gamma_{0}$ in exactly one round of the game; this can only occur if $x$ is the midpoint of a segment of length $2 \sqrt{2\varepsilon}$ whose endpoints lie on $\Gamma_{0}$. Similarly, $u^{\varepsilon}(x,T-2\varepsilon)$ can only be zero if $x$ is the midpoint of a segment of length $2 \sqrt{2\varepsilon}$ with endpoints on $\Gamma^{\varepsilon}_{\varepsilon}$, and so on. This provides a simple, mesh-free algorithm for mean curvature flow which will be discussed in greater detail below. To handle the more general geometric initial-value problem (\ref{geom-ivp}), we must modify the basic game just described. Paul and Carol must now play as follows, where $S^{1}$ denotes the unit circle in $\mathbb{R}^{2}$ with its standard metric and $v^{\perp}$ denotes a vector perpendicular to $v$ relative to the standard Euclidean inner product, i.e., if $v = (v_{1}, v_{2})$, then $v^{\perp} := (-v_{2}, v_{1})$. \subsection*{Generalized curvature game} \begin{itemize} \item[(i)] Paul chooses a vector $w \in S^1$, thereby defining his displacement vector $$\label{v-from-w} v := A(w,x,T-t) w^{\perp}$$ as well as the pairing at $(x,t)$: $$\label{pairing} \langle y,z\rangle_A := z^{T} \left( A(w,x,T-t)^{T} \right)^{-1} y \,, \quad \text{for } y, z \in \mathbb{R}^{2} \,.$$ \item[(ii)] Carol accepts or reverses the vector $v$ defined by (\ref{v-from-w}). 
\item[(iii)] Paul moves from $x$ to $x + \sqrt{2 \varepsilon} \, b v$, where $b = \pm 1$ according to Carol's previous play. \end{itemize} Paul and Carol play the next round of the game at time $t + \varepsilon$ at the new position $x + \sqrt{2 \varepsilon} \, b v$, and the underlying pairing for the next turn will come from $A( \tilde{w}, x + \sqrt{2 \varepsilon} b v, T - t - \varepsilon)$, where $\tilde{w} \in S^{1}$ is Paul's next choice. As before, the game runs for precisely $m$ rounds, after which their final position is $x_{_{T}} \in \mathbb{R}^{2}$ and Paul must pay Carol the amount $u_{0}( x_{_{T}} )$. Since Paul and Carol again have the opposing goals of minimizing and maximizing this payoff, respectively, Paul's value function is still $$\label{u-eps1} u^{\varepsilon}(x,t) = \min\{ u_{0}( x_{_{T}} ) \} ,$$ where the minimum is taken over all possible final states $x_{_{T}}$ that can be reached in $m$ plays when starting at $x$ at time $t$. As with the mean curvature game analyzed in \cite{kohn:dcb}, we want to understand the behavior of the value functions $u^{\varepsilon}$, as $\varepsilon \to 0$, for this generalized game. To analyze their limit, we rely on the fact that each value function $u^{\varepsilon}$ satisfies the dynamic programming equation $$\label{dpp} u^{\varepsilon}(x,t) = \min_{ w_{1} \in S^{1} }{ \max_{ b_{1} = \pm 1 }{ \left\{ u^{\varepsilon}( x + \sqrt{2 \varepsilon} \, b_{1} v_{1}, t + \varepsilon ) \right\} } } \,,$$ where the vectors $v_{1}$ and $w_{1}$ are related by (\ref{v-from-w}); the subscripts here emphasize the fact that $b_{1}$, $w_{1}$ and $v_{1}$ correspond to Paul's first play. 
A similar equation holds for $u^{\varepsilon}( x + \sqrt{2 \varepsilon} \, b_{1} v_{1}, t + \varepsilon )$, thereby advancing from time $t + \varepsilon$ to time $t + 2\varepsilon$ \, : $u^{\varepsilon}(x,t) = \min_{ w_{1} \in S^{1} }{ \max_{ b_{1} = \pm 1 }{ \min_{w_{2} \in S^{1} }{ \max_{ b_{2} = \pm 1 }{ \left\{ u^{\varepsilon}( x + \sqrt{2 \varepsilon} \, b_{1} v_{1} + \sqrt{2 \varepsilon} b_{2} v_{2}, t + 2\varepsilon ) \right\} } } } } \,.$ Continuing in this manner $m$ times yields $$\label{dpp-m} u^{\varepsilon}(x,t) = \min_{ w_{1} \in S^{1}}{ \max_{ b_{1} = \pm 1 } { \cdots \min_{ w_{m} \in S^{1} }{ \max_{ b_{m} = \pm 1 }{ \Big\{ u^{\varepsilon}( x + \sqrt{2 \varepsilon} \sum_{i=1}^{m} b_{i} v_{i}, T ) \Big\} } } } } \,.$$ After $m$ rounds, however, we reach the end of the game, where $u^{\varepsilon}(\cdot,T)$ equals the known terminal value $u_{0}$. We may therefore rewrite (\ref{dpp-m}) in the form $$\label{soln-formula} u^{\varepsilon}(x,t) = \min_{w_{1} \in S^{1} }{ \max_{ b_{1} = \pm 1 }{ \cdots \min_{ w_{m} \in S^{1} }{ \max_{ b_{m} = \pm 1 }{ \Big\{ u_{0}( x + \sqrt{2 \varepsilon} \sum_{i=1}^{m} b_{i} v_{i} ) \Big\} } } } } \,,$$ obtaining a representation formula for $u^{\varepsilon}$ as a function of $u_{0}$. 
In terms of the operator $S(\varepsilon) : UC( \mathbb{R}^{2} ) \to UC( \mathbb{R}^{2} )$ defined by $$\left( S(\varepsilon)u \right)(x) := \min_{ w \in S^{1} }{ \max_{ b = \pm 1 }{ \big\{ u( x + \sqrt{2 \varepsilon} \, b v ) \big\} } } \,,$$ formula (\ref{soln-formula}) becomes $u^{\varepsilon} = \left( S(\varepsilon) \right)^{m} u_{0} \,,$ and we want to determine $$\label{pre-exp-formula} \lim_{m \to \infty}{ \left( S(\varepsilon) \right)^{m} u_{0} } = \lim_{\varepsilon \to 0}{ \left( S(\varepsilon) \right)^{m} u_{0} } \,.$$ We will see that this limit is the unique solution $u$ of the terminal-value problem $$\label{geom-tvp} u_{t} + | Du | \mathop{\rm Tr}{ \left[ \, A(n,x,T-t) Dn \, \right] } = 0 \,, \quad u(\cdot,T) = u_{0} \,,$$ where $A : \mathbb{R}^{N} \times \mathbb{R}^{N} \times \mathbb{R} \to \left( \mathcal{S}^{N} \right)^{+}$ is continuous and $\left( \mathcal{S}^{N} \right)^{+}$ denotes the cone of positive-definite, symmetric $N \times N$ matrices. Before proving this fact, let us briefly describe its numerical implications. Defining the set $\Gamma_{0}$ by (\ref{gamma-0}) as before, the sets $\Gamma_{t}$ correspond to the generalized curvature flow of $\Gamma_{0}$, and the sets $\Gamma^{\varepsilon}_{t}$ are their approximations via the value function $u^{\varepsilon}$. The point $x$ will belong to $\Gamma^{\varepsilon}_{\varepsilon}$ if and only if Paul can reach $\Gamma_{0}$ in one play, i.e., if and only if there exists $w \in S^{1}$ such that $x \pm \sqrt{2 \varepsilon} A(w,x,T-\varepsilon)w^{\perp} \in \Gamma_{0} \,.$ This leads to a conceptually simple algorithm for generalized curvature flow, but the nonlinear dependence on $w$ certainly complicates its implementation. We will come back to numerical issues below in Section \ref{numerics}. The following theorem analyzes the limit (\ref{pre-exp-formula}). 
\begin{theorem} \label{game-pde} Let $u_{0} \in UC( \mathbb{R}^{2} )$ be given, let $u$ be the unique viscosity solution of (\ref{geom-tvp}), and define $\varepsilon := \frac{T-t}{m}$ for $m \in \mathbb{N}$ and $t < T$. Then $$\label{exp-formula} u( \cdot, t ) = \lim_{m \to \infty}{ \left( S( \varepsilon ) \right)^{m} u_{0} } = \lim_{\varepsilon \to 0}{ \left( S( \varepsilon ) \right)^{m} u_{0} } \,.$$ \end{theorem} \begin{proof} According to the general convergence results of Barles and Souganidis (\cite{barles:cas91}; see also \cite{souganidis:asv85},\cite{souganidis:mmr85}), modified as in \cite{barles:spc95} to handle the degeneracy in equation (\ref{geom-tvp}), we must show that our approximation scheme is stable, monotone and consistent. In particular, we must verify that $S(\cdot)$ has the following four properties: \begin{itemize} \item[(i)] $S(0) = I$, \item[(ii)] $S(\varepsilon)\left( u + c \right) = S(\varepsilon)u + c$, for any constant $c$, \item [(iii)] $S(\varepsilon)u \leq S(\varepsilon) \tilde{u}$ whenever $u \leq \tilde{u}$, ~ and \item[(iv)] for any smooth $\phi$ and any $x \in \mathbb{R}^{2}$, $$\label{consistency-1} \lim_{\varepsilon \to 0}{ \Big( \frac{ \left( S(\varepsilon) \phi \right)(x) - \phi(x) }{ \varepsilon } \Big) } = | D \phi(x) | \mathop{\rm Tr}{ \left[ A(n,x,T-t) Dn \right] }$$ if $D \phi(x) \neq 0$, where $$\label{consistency-2} n = \frac{ D\phi(x) }{ | D \phi(x) | } \quad \text{and} \quad Dn = \frac{1}{ |D\phi(x)| } \left( I - n \otimes n \right) D^{2}\phi(x) \,,$$ while $$\label{consistency-3} \lambda \leq \liminf_{\varepsilon \to 0}{ \Big( \frac{ \left( S(\varepsilon) \phi \right)(x) - \phi(x) }{ \varepsilon } \Big) } \leq \limsup_{\varepsilon \to 0}{ \Big( \frac{ \left( S(\varepsilon) \phi \right)(x) - \phi(x) }{ \varepsilon } \Big) } \leq \Lambda$$ if $D \phi(x) = 0$, where $\lambda$ and $\Lambda$ are the extreme eigenvalues of $D^{2} \phi(x)$. \end{itemize} The first two of these properties are trivial. 
The third follows easily from the definition of $S(\varepsilon)$: for $u, \tilde{u} \in UC(\mathbb{R}^{2})$ with $u \leq \tilde{u}$, let $w \in S^{1}$ be optimal for $\tilde{u}$ with $v = A(w,x,T-t) w^{\perp}$. Then $\left( S(\varepsilon) u \right)(x) \leq \max_{ b = \pm 1 }{ u( x + \sqrt{2 \varepsilon}b v ) } \leq \max_{ b = \pm 1 }{ \tilde{u}( x + \sqrt{2\varepsilon} b v ) } = \left( S(\varepsilon) \tilde{u} \right)(x) \,,$ verifying the monotonicity of the scheme. To check the scheme's consistency (property (iv)), pick any smooth $\phi$ and a point $x \in \mathbb{R}^{2}$, and suppose, first of all, that $D \phi(x) \neq 0$. Recall the Riemannian structure on $\mathbb{R}^{2}$ defined by (\ref{pairing}) and Taylor expand $\phi$ to obtain \begin{align*} \left( S( \varepsilon )\phi \right)(x) & = { \min_{ w \in S^{1} }{ \max_{ b = \pm 1 } {\left\{ \phi( x + \sqrt{2 \varepsilon} bv ) \right\} } } } \\ & = { \min_{ w \in S^{1} }{ \max_{ b = \pm 1 } {\left\{ \phi( x ) + \sqrt{2 \varepsilon} b \langle D \phi(x) , v \rangle_A + \varepsilon \langle D^{2}\phi(x) v , v \rangle_A + o( \varepsilon^{3/2} ) \right\} } } } \,, \end{align*} from which we see that $\frac{ \left( S( \varepsilon )\phi \right)(x) - \phi(x) }{ \varepsilon } = \min_{ w \in S^{1} }{ \max_{ b = \pm 1 }{ \Big\{ \frac{ \sqrt{2} b }{ \sqrt{ \varepsilon } } \langle D \phi(x), v \rangle_A + \langle D^{2} \phi(x) v , v \rangle_A + o( \varepsilon^{1/2} ) \Big\} } } \,.$ Choosing $$\label{optimal-w} w = \frac{ D \phi(x) }{ | D \phi(x) | } \,,$$ the corresponding displacement vector $v$ is $$\label{opt-v} v = \frac{ 1 }{ | D \phi (x) | } A(w,x,T-t) \left( D \phi(x) \right)^{\perp}$$ and $\langle D \phi(x) , v \rangle_A = 0$; any choice of $w$ for which the latter does not hold will clearly not be optimal for small $\varepsilon$. 
By the definition of the underlying pairing (\ref{pairing}), we have $\langle D^{2} \phi(x) v , v \rangle_A = v^{T} \left( A( w,x,T-t)^{-1} \right)^{T} D^{2}\phi(x) v \,,$ and a direct calculation using (\ref{optimal-w}) and (\ref{opt-v}) reveals that $\langle D^{2} \phi(x) v , v \rangle_A = | D \phi(x) | \mathop{\rm Tr}{ \left[ A(n,x,T-t) Dn \right] } \,,$ with $n$ and $Dn$ given by (\ref{consistency-2}). Property (iv) follows directly in this case. If, instead, $D \phi(x) = 0$, consistency is even simpler, as the first-order term in the Taylor expansion vanishes and the remaining second-order term satisfies (\ref{consistency-3}). \end{proof} The Taylor expansion used in this proof is essentially the heuristic presented in \cite{kohn:dcb}, but the machinery developed by Barles and Souganidis \cite{barles:cas91} renders this argument perfectly rigorous. In addition to being short and simple, the resulting proof seems more natural from the point of view of standard viscosity techniques than the proof offered in \cite{kohn:dcb}. \end{section} \begin{section}{Algorithms and examples} \label{numerics} There are by now numerous papers devoted to algorithms for curvature-depen\-dent flows. Before discussing our game-theoretic algorithm in detail, we will briefly summarize some of the extant approaches, postponing a thorough comparison of these methods for a future publication. The first schemes for mean curvature and related motions relied on finite difference methods on uniform grids (\cite{alvarez:iss92},\cite{caselles:gma93},\cite{crandall:cds96}), and subsequent work focused on developing faster, more efficient algorithms. In \cite{merriman:mmf94}, Bence, Merriman and Osher introduced a very successful algorithm, diffusion-generated motion by mean curvature, which Evans \cite{evans:cam93} and Barles and Georgelin \cite{barles:spc95} subsequently analyzed. 
Ishii, Pires and Souganidis \cite{ishii:tdt99} then consolidated the work of Bence, Merriman and Osher with that of Gravner and Griffeath on models of cellular automaton dynamics \cite{gravner:tgd93}, resulting in the more general framework of threshold dynamics type approximation schemes. Ruuth and Merriman provide an interesting discussion of some of these developments in \cite{ruuth:cgm00}; they point out, for example, that diffusion-generated motion by mean curvature amounts to tracking the center of a circle as it traverses the curve in such a way that precisely half of its area is always inside the curve. Described this way, we see that the Bence-Merriman-Osher scheme has a similar geometric flavor to our game-theoretic scheme. It seems much simpler, however, to track the midpoint of a segment than to track the center of a circle while maintaining this area condition. A further simplifying feature of our scheme is that it does not require a grid for computations. Motivated by developments in mathematical morphology and image processing, Catt\'{e} et al. \cite{catte:msm95} developed a different approach which leads to the same min-max operator used here. Their point of view is quite different from ours and from that in \cite{kohn:dcb}, however, and, unlike ours, their implementation is grid-based. Following the earlier work by Evans \cite{evans:cam93}, Catt\'{e} et al. prove their analogue of Theorem \ref{game-pde} with nonlinear semigroup techniques; in semigroup parlance, we may interpret equation (\ref{exp-formula}) as an exponential formula of Crandall-Liggett type (\cite{crandall:gsn71},\cite{rudd:wss04}), with the scheme $S(\varepsilon)$ playing the role of the resolvent operator. In addition to their differing philosophy and implementation, we note that Catt\'{e} et al. do not consider the general geometric equation (\ref{geom-tvp}) to which the game-theoretic approach applies. 
There are at least two other noteworthy algorithms for curvature-dependent flows which have appeared recently. Oberman \cite{oberman:cmd04} has proposed and analyzed a grid-based scheme for mean curvature motion which, in the language of image processing, is a simple and efficient implementation of a median filter. Cao and Moisan \cite{cao:gcc01}, on the other hand, have developed an interesting algorithm which relies on the geometry of the curve and does not involve a computational grid; we refer to Cao's book \cite{cao:gce03} for more on this algorithm and the role of curve evolution in image processing. \subsection{Mean curvature flow} Having briefly surveyed these other techniques, we now describe the algorithms inspired by the two-player games above, beginning with the simpler case of mean curvature motion. To approximate the mean curvature flow of a planar curve $\Gamma_{0}$, we proceed as follows, retaining the notation $\Gamma_{t}$ and $\Gamma^{\varepsilon}_{t}$ used earlier. \subsection*{Idealized mean curvature algorithm} \begin{itemize} \item[(i)] Choose the time step $\varepsilon > 0$. \item[(ii)] Having computed the approximation $\Gamma^{\varepsilon}_{j \varepsilon}$ for some positive integer $j$, find all pairs $(x_{1}, x_{2}) \in \Gamma^{\varepsilon}_{j \varepsilon}$ such that $$\label{segment} \operatorname{dist}{ \left( x_{1}, x_{2} \right) } = 2 \sqrt{ 2 \varepsilon } \,,$$ and, for each pair, add the point $\bar{x} := \frac{ x_{1} + x_{2} }{ 2 }$ to the new set $\Gamma^{\varepsilon}_{(j+1) \varepsilon}$. \item[(iii)] Stop after $k$ rounds when there is no pair $(x_{1}, x_{2} ) \in \Gamma^{\varepsilon}_{k \varepsilon}$ satisfying (\ref{segment}). \end{itemize} This is an idealized scheme for several reasons. First, we must somehow discretize the original curve $\Gamma_{0}$ in order to perform computations, and one cannot expect pairs of points from the resulting discrete set to satisfy (\ref{segment}) exactly. 
Second, if $\Gamma_{0}$'s evolution by mean curvature results in fattening, it is not clear how to find all pairs satisfying (\ref{segment}) efficiently. Since a specific example of the latter situation may be useful, suppose for the moment that $\Gamma_{0}$ is the left-hand set in Figure 1. \begin{figure}[ht] \label{fatten} \begin{center} \includegraphics[width=0.7\textwidth]{figures/target} \end{center} \caption{The set on the left fattens as it evolves by its mean curvature, yielding sets with interior like the one on the right. The set on the right is solid, like a steering wheel with four holes.} \end{figure} It is well-known that the set on the left will fatten as it evolves by its mean curvature (it also appears, for instance, in \cite{evans:rfn97}); one way to see this, in fact, is to apply the idealized algorithm above by hand on a piece of scrap paper. The set on the right is typical of the resulting evolution. From the point of view of the mean curvature game, fattening occurs when the trajectory of the traversing segment's midpoint is not unique. Accommodating this possibility complicates the implementation of the algorithm, so we keep things simple in the present paper by assuming that $\Gamma_{0}$ is a simple closed curve. Future work will address a more robust implementation. Instead of discretizing with a grid of some sort, we will approximate the curves $\Gamma^{\varepsilon}_{j \varepsilon}$ directly with discrete sets, initializing computations with a discrete set $\Delta^{\varepsilon}_{0}$ of points belonging to the initial curve $\Gamma_{0}$. This leads to the following straightforward implementation of the scheme described above. \subsection*{Practical mean curvature algorithm} \begin{itemize} \item[(i)] Choose the time step $\varepsilon > 0$. \item[(ii)] Suppose that, for some integer $j \geq 0$, we have a discrete set $\Delta^{\varepsilon}_{j \varepsilon}$ which approximates the curve $\Gamma^{\varepsilon}_{j \varepsilon}$. 
We assume that the points in $\Delta^{\varepsilon}_{j \varepsilon}$ are sorted, corresponding to an oriented traversal of $\Gamma^{\varepsilon}_{j \varepsilon}$. \item[(iii)] Begin at the first point $x_{0} \in \Delta^{\varepsilon}_{j \varepsilon}$. Let $x_{1}$ and $x_{2}$ be the first consecutive pair of points in $\Delta^{\varepsilon}_{j \varepsilon}$ such that $$\label{over-under} \operatorname{dist}{ \left( x_{1}, x_{0} \right) } < 2 \sqrt{ 2 \varepsilon } \quad \text{and} \quad \operatorname{dist}{ \left( x_{2}, x_{0} \right) } > 2 \sqrt{ 2 \varepsilon } \,,$$ and define $\bar{x} := \frac{ 2 x_{0} + x_{1} + x_{2} }{ 4 } \,.$ The point $\bar{x}$ is the midpoint of the segment whose endpoints are the midpoints of the segments connecting $x_{0}$ and $x_{1}$ and $x_{0}$ and $x_{2}$, respectively. Add $\bar{x}$ to the discrete approximation $\Delta^{\varepsilon}_{(j+1) \varepsilon}$ of the curve $\Gamma^{\varepsilon}_{(j+1) \varepsilon}$. \item[(iv)] Advance to the second point in $\Delta^{\varepsilon}_{j \varepsilon}$ and repeat the preceding procedure, adding a second point to $\Delta^{\varepsilon}_{(j+1) \varepsilon}$. Continue in this way until all of $\Delta^{\varepsilon}_{j \varepsilon}$ has been exhausted, thereby completing $\Delta^{\varepsilon}_{(j+1)\varepsilon}$. (When dealing with points at the end of $\Delta^{\varepsilon}_{j \varepsilon}$, the points $x_{1}$ and $x_{2}$ will come from the beginning of $\Delta^{\varepsilon}_{j \varepsilon}$. This can be handled easily with a simple array data structure.) \item[(v)] Stop after $k$ rounds when (\ref{over-under}) cannot be satisfied by points in $\Delta^{\varepsilon}_{k \varepsilon}$. \end{itemize} Figures 2 and 3 illustrate the application of this mean curvature algorithm to two sets, a unit circle and a nonconvex curve built out of four half-circles of radius 1. 
Both computations used a segment of length $0.1$, corresponding to the time step $\varepsilon = 1/800$, and each snapshot indicates the corresponding number of rounds of the game. The initial discretization $\Delta^{\varepsilon}_{0}$ of the circle consisted of $48$ uniformly spaced points, and it took $0.48$ seconds to compute all $338$ rounds of its resulting evolution. For the nonconvex curve, $\Delta^{\varepsilon}_{0}$ consisted of $96$ points, its evolution to extinction lasted for $1163$ rounds, and it took $5.79$ seconds to compute its full evolution. It is well-known that a simple closed curve will convexify, become circular, and shrink to a point as it evolves by mean curvature (\cite{gage:hes86},\cite{grayson:hes87}), a fact borne out by these examples. (This provides a quick but crude check that the algorithm works as it should.) Although we have avoided the possibility of fattening, it is worth emphasizing that this algorithm does not require any convexity. \begin{figure}[ht] \label{circles-mc} \begin{center} \includegraphics[width=0.9\textwidth]{figures/circles} \end{center} \caption{Snapshots of the motion of a circle by its mean curvature.} \end{figure} \begin{figure}[ht] \label{nonconvex-mc} \begin{center} \includegraphics[width=0.9\textwidth]{figures/nonconvex} \end{center} \caption{Snapshots of the motion of a nonconvex curve by its mean curvature.} \end{figure} \subsection{Generalized curvature flow} To approximate the generalized curvature flow of $\Gamma_{0}$ according to equation (\ref{geom-tvp}), we cannot simply track the midpoint of a segment of length $2 \sqrt{2 \varepsilon}$ as it traverses the curves $\Gamma^{\varepsilon}_{j \varepsilon}$. Instead, we must modify the mean curvature algorithm as follows. \subsection*{Idealized generalized curvature algorithm} \begin{itemize} \item[(i)] Choose the time step $\varepsilon > 0$. 
\item[(ii)] Having computed the approximation $\Gamma^{\varepsilon}_{j \varepsilon}$ for some positive integer $j$, find all pairs $(x_{1}, x_{2}) \in \Gamma^{\varepsilon}_{j \varepsilon}$ such that the problem $$\label{gen-segment} \sqrt{ 2 \varepsilon} \, A(w, \bar{x}, T - (j+1)\varepsilon) w = v$$ has a solution $w \in S^{1}$, where $$v := x_{2} - x_{1} \quad \text{and} \quad \bar{x} := \frac{ x_{1} + x_{2} }{ 2 } \,.$$ For each such pair $( x_{1}, x_{2} )$, add the point $\bar{x}$ to the new set $\Gamma^{\varepsilon}_{(j+1) \varepsilon}$. \item[(iii)] Stop after $k$ rounds when (\ref{gen-segment}) has no solution $w \in S^{1}$ for any pair $(x_{1}, x_{2} ) \in \Gamma^{\varepsilon}_{k \varepsilon}$. \end{itemize} This is an idealized scheme for the same reasons given above in the case of mean curvature flow. A practical implementation in this case is much more challenging, however, since we must deal with the nonlinear problem (\ref{gen-segment}). At this preliminary stage, we only present results for the trivial generalized curvature flow problem in which $A(n,x,t) \equiv A$ for some constant nonsingular matrix $A$. In this very special case, we simply change step $(ii)$ of the practical mean curvature algorithm as follows: starting with the first point $x_{0} \in \Delta^{\varepsilon}_{j \varepsilon}$, find the first consecutive points $x_{1}$ and $x_{2}$ in $\Delta^{\varepsilon}_{j \varepsilon}$ such that $$\label{gen-over-under} \| A^{-1} \left( x_{1} - x_{0} \right) \| < 2 \sqrt{ 2 \varepsilon } \quad \text{and} \quad \| A^{-1} \left( x_{2} - x_{0} \right) \| > 2 \sqrt{ 2 \varepsilon } \,,$$ where $\| \cdot \|$ is the standard Euclidean norm. Add the point $\bar{x} := \frac{ 2 x_{0} + x_{1} + x_{2} }{ 4 } \;$ to $\Delta^{\varepsilon}_{(j+1) \varepsilon}$ and, continuing in this manner, complete the set $\Delta^{\varepsilon}_{(j+1) \varepsilon}$. 
Figures \ref{circles-aniso} and \ref{nonconvex-aniso} illustrate the application of this generalized curvature algorithm to the circle and nonconvex curve treated earlier, again using a segment of length $0.1$ in both examples. The matrix $A$ used in both cases is $A = \left[ \begin{array}{cc} 1 & 0 \\ 0 & 4 \end{array} \right] \,,$ and one sees clearly that both curves approach ``circles'' relative to the underlying pairing (\ref{pairing}) defined by $A$. \begin{figure}[ht] \centering \includegraphics[width=0.9\textwidth]{figures/aniso_circles} \caption{Snapshots of the motion of a circle by generalized curvature.} \label{circles-aniso} \end{figure} \begin{figure}[ht] \centering \includegraphics[width=0.9\textwidth]{figures/aniso_nonconvex} \caption{Snapshots of the motion of a nonconvex curve by generalized curvature.} \label{nonconvex-aniso} \end{figure} Future work will confront the more difficult problem (\ref{gen-segment}), as well as numerical analysis questions which we have ignored here (e.g., convergence rates and the interaction of the time step $\varepsilon$ with the resolution of the initial discretization $\Delta^{\varepsilon}_{0}$). The application of these ideas to other problems, such as the motion of curves in $\mathbb{R}^{3}$ and the active contour model in \cite{caselles:gma93}, will also be discussed elsewhere. \end{section} \subsection*{Acknowledgments} I discussed the main ideas presented here in an invited talk at the 6th UAB-MSU Conference on Differential Equations and Computational Simulations, held at Mississippi State University in May 2005. It is a pleasure to thank the conference organizers for all of their hospitality and hard work, and it was a tremendous honor to participate in the special session honoring Klaus Schmitt, a wonderful advisor and friend. Happy birthday, Klaus! \begin{thebibliography}{10} \bibitem{alvarez:iss92} {L.~Alvarez, P.-L. Lions, and J.-M. 
Morel}, \emph{Image selective smoothing and edge detection by nonlinear diffusion. {II}}, SIAM J. Numer. Anal., 29 (1992), pp.~845--866. \bibitem{bardi:ocv97} {M.~Bardi and I.~Capuzzo-Dolcetta}, \emph{Optimal Control and Viscosity Solutions of {H}amilton-{J}acobi-{B}ellman Equations}, Birkh\"auser Boston Inc., Boston, MA, 1997. \bibitem{barles:spc95} {G.~Barles and C.~Georgelin}, \emph{A simple proof of convergence for an approximation scheme for computing motions by mean curvature}, SIAM J. Numer. Anal., 32 (1995), pp.~484--500. \bibitem{barles:fpp93} {G.~Barles, H.~M. Soner, and P.~E. Souganidis}, \emph{Front propagation and phase field theory}, SIAM J. Control Optim., 31 (1993), pp.~439--469. \bibitem{barles:cas91} {G.~Barles and P.~E. Souganidis}, \emph{Convergence of approximation schemes for fully nonlinear second order equations}, Asymptotic Analysis, 4 (1991), pp.~271--283. \bibitem{barles:naf98} {G.~Barles and P.~E. Souganidis}, \emph{A new approach to front propagation problems: theory and applications}, Arch. Rational Mech. Anal., 141 (1998), pp.~237--296. \bibitem{cao:gce03} {F.~Cao}, \emph{Geometric Curve Evolution and Image Processing}, vol.~1805 of Lecture Notes in Mathematics, Springer-Verlag, Berlin, 2003. \bibitem{cao:gcc01} {F.~Cao and L.~Moisan}, \emph{Geometric computation of curvature driven plane curve evolutions}, SIAM J. Numer. Anal., 39 (2001), pp.~624--646 (electronic). \bibitem{caselles:gma93} {V.~Caselles, F.~Catt{\'e}, T.~Coll, and F.~Dibos}, \emph{A geometric model for active contours in image processing}, Numer. Math., 66 (1993), pp.~1--31. \bibitem{catte:msm95} {F.~Catt{\'e}, F.~Dibos, and G.~Koepfler}, \emph{A morphological scheme for mean curvature motion and applications to anisotropic diffusion and motion of level sets}, SIAM J. Numer. Anal., 32 (1995), pp.~1895--1909. \bibitem{chen:uev91} {Y.~G. Chen, Y.~Giga, and S.~Goto}, \emph{Uniqueness and existence of viscosity solutions of generalized mean curvature flow equations}, J. 
Differential Geom., 33 (1991), pp.~749--786. \bibitem{crandall:gsn71} {M.~G. Crandall and T.~M. Liggett}, \emph{Generation of semigroups of nonlinear transformations on general {B}anach spaces}, Amer. J. Math., 93 (1971), pp.~265--298. \bibitem{crandall:cds96} {M.~G. Crandall and P.-L. Lions}, \emph{Convergent difference schemes for nonlinear parabolic equations and mean curvature motion}, Numer. Math., 75 (1996), pp.~17--41. \bibitem{elliott:vso87} {R.~J. Elliott}, \emph{Viscosity Solutions and Optimal Control}, vol.~165 of Pitman Research Notes in Mathematics Series, Longman Scientific \& Technical, Harlow, 1987. \bibitem{evans:cam93} {L.~C. Evans}, \emph{Convergence of an algorithm for mean curvature motion}, Indiana Univ. Math. J., 42 (1993), pp.~533--557. \bibitem{evans:rfn97} {L.~C. Evans}, \emph{Regularity for fully nonlinear elliptic equations and motion by mean curvature}, in Viscosity Solutions and Applications (Montecatini Terme, 1995), vol.~1660 of Lecture Notes in Math., Springer, Berlin, 1997, pp.~98--133. \bibitem{evans:dgr84} {L.~C. Evans and P.~E. Souganidis}, \emph{Differential games and representation formulas for solutions of {H}amilton-{J}acobi-{I}saacs equations}, Indiana Univ. Math. J., 33 (1984), pp.~773--797. \bibitem{evans:mls91} {L.~C. Evans and J.~Spruck}, \emph{Motion of level sets by mean curvature. {I}}, J. Differential Geom., 33 (1991), pp.~635--681. \bibitem{gage:hes86} {M.~Gage and R.~Hamilton}, \emph{The heat equation shrinking convex plane curves}, J. Diff. Geom., 23 (1986), pp.~69--96. \bibitem{gravner:tgd93} {J.~Gravner and D.~Griffeath}, \emph{Threshold growth dynamics}, Trans. AMS, 340 (1993), pp.~837--870. \bibitem{grayson:hes87} {M.~Grayson}, \emph{The heat equation shrinks embedded plane curves to round points}, J. Diff. Geom., 26 (1987), pp.~285--314. \bibitem{ishii:tdt99} {H.~Ishii, G.~E. Pires, and P.~E. Souganidis}, \emph{Threshold dynamics type approximation schemes for propagating fronts}, J. Math. Soc. 
Japan, 51 (1999), pp.~267--308. \bibitem{kohn:dcb} {R.~V. Kohn and S.~Serfaty}, \emph{A deterministic-control-based approach to motion by curvature}, To appear, Comm. Pure Appl. Math. \bibitem{merriman:mmf94} {B.~Merriman, J.~K. Bence, and S.~J. Osher}, \emph{Motion of multiple junctions: a level set approach}, J. Comput. Phys., 112 (1994), pp.~334--363. \bibitem{oberman:cmd04} {A.~M. Oberman}, \emph{A convergent monotone difference scheme for motion of level sets by mean curvature}, Numer. Math., 99 (2004), pp.~365--379. \bibitem{rudd:wss04} {M.~Rudd}, \emph{Weak and strong solvability of parabolic variational inequalities in {B}anach spaces}, J. Evol. Equ., 4 (2004), pp.~497--517. \bibitem{ruuth:cgm00} {S.~J. Ruuth and B.~Merriman}, \emph{Convolution-generated motion and generalized {H}uygens' principles for interface motion}, SIAM J. Appl. Math., 60 (2000), pp.~868--890 (electronic). \bibitem{sethian:lsm99} {J.~A. Sethian}, \emph{Level Set Methods and Fast Marching Methods}, vol.~3 of Cambridge Monographs on Applied and Computational Mathematics, Cambridge University Press, 1999. \bibitem{soravia:gmf94} {P.~Soravia}, \emph{Generalized motion of a front propagating along its normal direction: a differential games approach}, Nonlinear Anal., 22 (1994), pp.~1247--1262. \bibitem{souganidis:asv85} {P.~E. Souganidis}, \emph{Approximation schemes for viscosity solutions of {H}amilton-{J}acobi equations}, J. Differential Equations, 59 (1985), pp.~1--43. \bibitem{souganidis:mmr85} {P.~E. Souganidis}, \emph{Max-min representations and product formulas for the viscosity solutions of {H}amilton-{J}acobi equations with applications to differential games}, Nonlinear Anal., 9 (1985), pp.~217--257. \bibitem{souganidis:fpt97} {P.~E. Souganidis}, \emph{Front propagation: theory and applications}, in Viscosity Solutions and Applications (Montecatini Terme, 1995), vol.~1660 of Lecture Notes in Math., Springer, Berlin, 1997, pp.~186--242. 
\bibitem{spencer:bg77} {J.~Spencer}, \emph{Balancing games}, J. Combinatorial Theory Ser. B, 23 (1977), pp.~68--74. \end{thebibliography} \end{document}