\documentclass[reqno]{amsart}
\usepackage{hyperref}
\usepackage[all]{xy}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2011 (2011), No. 114, pp. 1--11.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu  (login: ftp)}
\thanks{\copyright 2011 Texas State University - San Marcos.}
\vspace{9mm}}

\begin{document}
\title[\hfilneg EJDE-2011/114\hfil $p$-harmonious functions]
{$p$-harmonious functions with drift on graphs via games}

\author[A. P. Sviridov\hfil EJDE-2011/114\hfilneg]
{Alexander P. Sviridov}  % in alphabetical order

\address{Alexander P. Sviridov \newline
Department of Mathematics\\
University of Pittsburgh,
Pittsburgh, PA 15260, USA}
\email{aps14@pitt.edu}

\thanks{Submitted October 26, 2010. Published September 6, 2011.}
\subjclass[2000]{35Q91, 35B51, 34A12, 31C20}
\keywords{Dirichlet problem; comparison principle;
 mean-value property;  \hfill\break\indent
 stochastic games; unique continuation} 

\begin{abstract}
 In a connected finite graph $E$ with set of vertices
 $\mathfrak{X}$, choose a nonempty subset, not equal to 
 the whole set,  $Y\subset \mathfrak{X}$,
 and call it the  boundary  $Y=\partial\mathfrak{X}$.
 Given a real-valued function  $F: Y\to \mathbb{R}$, our objective 
 is to find a function $u$, such that   $u=F$ on $Y$, and 
 for all $x\in \mathfrak{X}\setminus Y$,
 $$
 u(x)=\alpha \max_{y \in S(x)}u(y)+\beta \min_{y \in S(x)}u(y)
 +\gamma \Big( \frac{\sum_{y \in S(x)}u(y)}{\#(S(x))}\Big).
 $$
 Here  $\alpha, \beta,  \gamma $ are non-negative constants such that
 $\alpha+\beta + \gamma =1$,  the set $S(x)$ is the collection of vertices 
 connected to $x$ by an edge, and $\#(S(x))$ denotes its cardinality. 
 We prove the existence and uniqueness of  a solution of the above 
 Dirichlet problem and study the qualitative properties of the solution.
\end{abstract}

\maketitle
\numberwithin{equation}{section}


\newtheorem{theorem}{Theorem}[section]
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{remark}[theorem]{Remark}
\newtheorem{example}[theorem]{Example}


%\newcommand{-\kern-1.1em\int\nolimits}{\kern 0.2em\vrule width 0.6em height 0.69678ex depth -0.58065ex \kern -1.1em \int \nolimits}
%\newcommand{-\kern-1.1em\int\nolimits}{-\kern-1.1em\int\nolimits}

\section{Introduction} \label{intro}

The goal of this paper is to study functions that satisfy
\begin{equation}\label{p-harm-drift}
u(x)=\alpha \max_{y \in S(x)}u(y)+\beta \min_{y \in S(x)}u(y)
+\gamma \Big( \frac{\sum_{y \in S(x)}u(y)}{\#(S(x))}\Big).
\end{equation}
We denote a graph by  $E$ and the collection of vertices
by $\mathfrak{X}$. We choose $Y$ to be a proper nonempty subset
of $\mathfrak{X}$ and call it the boundary. In equation
\eqref{p-harm-drift} the set $S(x)$ is the collection of
vertices connected to the given vertex $x$ by a single edge,
and $\alpha$, $\beta$ and $\gamma$ are predetermined non-negative
constants such that $\alpha + \beta + \gamma=1$.
The cardinality of $S(x)$ is denoted by $\# S(x)$.
A function satisfying \eqref{p-harm-drift} is called
$p$-harmonious with drift, by analogy with continuous case
studied  in \cite{MPR2}. 
Functions of this type arise as approximations
of $p$-harmonic functions. In particular, an approximating
sequence could be generated by running zero-sum stochastic
games on a graph of decreasing step-size. The value of the
game function satisfies a nonlinear equation, which is directly
linked to the existence and uniqueness of the solution of
the $p$-Laplacian as demonstrated in \cite{PSSW,PS,MPR1}.
We present the connections between equation
\eqref{p-harm-drift} and game theory in
Theorem \ref{MartingalePDETheor}.


We formally pose the Dirichlet problem: For a given
$F:Y\to \mathbb{R}$ find a function $u$ defined on $\mathfrak{X}$,
such that $u=F$ on $Y$ and $u$ satisfies \eqref{p-harm-drift}.
We address questions of existence and uniqueness of the solution
of this Dirichlet problem in Theorems \ref{existence} and
\ref{cmpr}. 
We state the strong comparison principle
in Theorem \ref{strcomppricip}. We also study the question of
unique continuation for $p$-harmonious functions with drift.
In particular we present an example of a $p$-harmonious function
which does not have the unique continuation property. The current
manuscript is based on the results obtained in \cite{APS}.

The equation \eqref{p-harm-drift} can be restated in a more
traditional notation with the help of the following definitions,
which we borrowed from \cite{KLW}.

\begin{definition} \label{def1.1} \rm
The Laplace operator on the graph is given by
$$
\Delta u(x)=-\kern-1.1em\int\nolimits_{S(x)}u-u(x).
$$
\end{definition}

\begin{definition} \label{def1.2} \rm
The infinity Laplacian on the graph is given by
$$
\Delta_{\infty}u(x)=\frac{1}{2}(\max_{S(x)} u+\min_{S(x)}u)-u(x).
$$
\end{definition}

\begin{definition} \label{def1.3} \rm
For $X=(x,y,z)\in \mathbb{R}^3$ we define the analog of the maximal
directional derivative
$$
\langle X\rangle_{\infty}=\max \{x,y,z\}.
$$
\end{definition}

With the above definitions we can restate \eqref{p-harm-drift} as
\begin{equation}
(\alpha-\beta)\langle \nabla u \rangle_{\infty}
+2\beta \Delta_{\infty}u+\gamma \Delta u=0.
\end{equation}

\section{Game setup and definitions} \label{sec:2}

Most of our results are proved using the following game.
We consider a connected graph $E$ with vertex set $\mathfrak{X}$.
The set $\mathfrak{X}$ is finite unless stated otherwise.
We equip $\mathfrak{X}$ with the $\sigma$-algebra $\mathcal{F}$
of all subsets of $\mathfrak{X}$. For an arbitrary vertex $x$
we define $S(x)$ the collection of vertices, which are connected
to the vertex $x$ by a single edge. In case $\mathfrak{X}$ is infinite,
we require that $\mathfrak{X}$ is at least locally finite; i.e.
the cardinality of $S(x)$ is finite.  At the beginning of the
game a token is placed at  some point $x_0 \in \mathfrak{X}$.
Then we toss a three-sided virtual coin. The side of a coin
labelled 1 comes out with probability $\alpha$ and in this
case player I chooses where to move the token among all
vertices in $S(x)$. The side of a coin labelled 2 comes out
with probability $\beta$ and in this case player II chooses
where to move the token among all vertices in $S(x)$.
Finally, the side of a coin labelled 3 comes out with
probability $\gamma$ and in this case we choose the next
point randomly (uniformly) among all vertices in $S(x)$.
This setup has been described in \cite{PSSW} and in \cite{PPS}
and is known as ``biased tug-of-war with noise''.
The game stops once we hit the boundary set $Y$. The set $Y$
is simply a predetermined non-empty set of vertices at which the game
terminates. In the game literature the set $Y$ is called the set
of absorbing states. Let $F:Y\to \mathbb{R}$ be the payoff
function defined on $Y$. If the game ends at some vertex $y \in Y$,
then player I receives from player II the sum of $F(y)$ dollars.

Let us define the value of the game for player I.
Firstly, we formalize the notion of a pure strategy.
We define a strategy $S_I$ for player I as a collection of maps
 $\{\sigma^{k}_I\}_{k\in \mathbb{N}}$, such that for each $k$,
\begin{gather*}
\sigma^{k}_I: \mathfrak{X}^k \to \mathfrak{X},\\
\sigma^{k}_I(x_0, \dots , x_{k-1})= x_k,
\end{gather*}
where
$$
\mathfrak{X}^k=\underbrace{\mathfrak{X}\times\mathfrak{X}
\times\dots \times\mathfrak{X}}_{k\, \text{times}}.
$$
Hence, $\sigma^{k}_I$ tells player I where to move given
$(x_0, \dots , x_{k-1})$---the history of the game up to the
step $k$---if he wins the toss. We call a strategy \emph{stationary}
if it depends only on the current position of the token.
Given two strategies for player I and II the transition probabilities
for $k\geq 1$ are given by
\[
\pi_{k}(x_0,\dots ,x_{k-1};y)= \alpha \delta_{\sigma_I^{k}(x_0,\dots
,x_{k-1})}(y)+\beta \delta_{\sigma_{II}^{k}(x_0,\dots ,x_{k-1})}(y)
 +\gamma U_{S(x_{k-1})}(y),
\]
where we have set
$$
U_{S(x_{k-1})} \text{ is a uniform distribution on }
S(x_{k-1}) \text{ and }  \pi_{0}(y)=\delta_{x_0}(y).
$$
We equip $\mathfrak{X}^k$ with product $\sigma$-algebra
$\mathcal{F}^k$,
$$
\mathcal{F}^k=\underbrace{\mathcal{F}\otimes\mathcal{F}\otimes\dots
\otimes\mathcal{F}}_{k\, \text{times}}
$$
and then we define a probability measure on
$(\mathfrak{X}^k, \mathcal{F}^k)$ in the following way:
\begin{gather*}
\mu_0= \pi_0=\delta_{x_0}, \\
\mu_k(A^{k-1}\times A)=\int_{A^{k-1}}\pi_{k}(x_0,\dots ,x_{k-1};A)
d\mu_{k-1},
\end{gather*}
where $A^{k-1}\times A$ is a rectangle in
$(\mathfrak{X}^k, \mathcal{F}^{k})$.
The space of infinite sequences with elements from $\mathfrak{X}$
is $\mathfrak{X}^{\infty}$. Let
$X_k:\mathfrak{X}^{\infty}\to \mathfrak{X}$ be the coordinate
process defined by
$$
X_k(h)=x_k, \quad \text{for } h=(x_0, x_1, x_2, x_3,\dots )
\in \mathfrak{X}^{\infty}.
$$
We equip $\mathfrak{X}^{\infty}$ with product $\sigma$-algebra
$\mathcal{F}^{\infty}$. For precise definition of
$\mathcal{F}^{\infty}$ see \cite{KALLENBERG}.

The family of $\{\mu_k\}_{k\geq 0}$ satisfies the conditions
of the Kolmogorov extension theorem \cite{VARADHAN}; therefore,
we can conclude that there exists a unique measure
$\mathbb{P}^{x_0}$ on $(\mathfrak{X}^{\infty}, \mathcal{F}^{\infty})$
with the following property:

\begin{equation} \mathbb{P}^{x_0}(B_k\times \mathfrak{X}\times
\mathfrak{X}\times \mathfrak{X}\times \dots )=\mu_k(B_k),
\quad \text{for } B_k \in \mathcal{F}^k
\end{equation}
and
\begin{equation}
\mathbb{P}^{x_0}[X_k\in A|X_0=x_0, X_1=x_1,\dots , X_{k-1}
=x_{k-1}]=\pi_{k}(x_0,\dots ,x_{k-1};A).
\end{equation}
We are now ready to define the value of the game for player I.
The boundary hitting time is given by
$$
\tau=\inf\{k : X_k \in Y\}.
$$
Consider strategies $S_I$ and $S_{II}$ for player I and player II
respectively. We define
\begin{equation}
F^x_{-}(S_I, S_{II}) =\begin{cases}
\mathbb{E}^x_{S_I,S_{II}}[F(X_{\tau})]
 & \text{if } \mathbb{P}^x_{S_I,S_{II}}(\tau<\infty)=1\\
 -\infty &\text{otherwise}
\end{cases}
\end{equation}
\begin{equation}
F^x_{+}(S_I, S_{II}) =\begin{cases}
\mathbb{E}^x_{S_I,S_{II}}[F(X_{\tau})]
 & \text{if } \mathbb{P}^x_{S_I,S_{II}}(\tau<\infty)=1\\
+\infty &\text{otherwise}
\end{cases}
\end{equation}
The value of the game for player I is
$$
u_I(x)=\sup_{S_I}\inf_{S_{II}} F^x_{-}(S_I,S_{II})
$$
and the value of the game for player II is
$$
u_{II}(x)=\inf_{S_{II}}\sup_{S_I} F^x_{+}(S_I,S_{II}).
$$
These definitions penalize players severely for not being able
to force the game to end. Whenever player I has a strategy to
finish the game almost surely, then we simplify notation by setting
$$
u_I(x)=\sup_{S_I}\inf_{S_{II}}E^x_{S_I,S_{II}}[F(X_{\tau})].
$$


Similarly, for player II we set
$$
u_{II}(x)=\inf_{S_{II}}\sup_{S_I}E^x_{S_I,S_{II}}[F(X_{\tau})].
$$
The following lemma states rigorously whether player I has a
strategy to finish the game almost surely:


\begin{lemma} \label{lem2.1}
If $\mathfrak{X}$ is a finite set, then player I (player II)
 has strategies to finish the game almost surely.
\end{lemma}

\begin{proof}
When $\gamma=0$, this result was already proven by
Peres, Schramm, Sheffield, and Wilson in \cite[Theorem 2.2]{PSSW}.
When $\gamma \neq 0$, the statement follows from the fact that
random walk on a finite graph is recurrent.
\end{proof}

We always have $u_I(x)\leq u_{II}(x)$. Whenever $u_I(x)= u_{II}(x)$
for all $x \in \mathfrak{X}$ we say that the game has a value.


\section{Existence} \label{sec:3}

Here is the first existence result for equation \eqref{p-harm-drift}.

\begin{theorem}
[Dynamic Programming Principle equals Mean Value Property]
\label{existence}
The value functions $u_I$ and $u_{II}$ satisfy the Dynamic
Programming Principle (DPP) or the Mean Value Property (MVP):
\begin{gather}\label{mvpdpp}
u_I(x)=\alpha \max_{y \in S(x)}u_I(y)
+\beta \min_{y \in S(x)}u_I(y)
+\gamma -\kern-1.1em\int\nolimits_{S(x)}u_I(y)dy,
\\
u_{II}(x)=\alpha \max_{y \in S(x)}u_{II}(y)
+\beta \min_{y \in S(x)}u_{II}(y)
+\gamma -\kern-1.1em\int\nolimits_{S(x)}u_{II}(y)dy.
\end{gather}
\end{theorem}

The above result is true in the general setting of discrete
stochastic games (see Maitra and Sudderth, \cite[chapter 7]{MS}).
Here we provide a simpler proof in the Markovian case.
It turns out that optimal strategies are Markovian
(see \cite[chapter 5]{MS}).
%
\begin{proposition}[The stationary case]\label{DPPMarkovian}
In a game with stationary strategies the value functions
$u_I$ and $u_{II}$ satisfy the Dynamic Programming Principle (DPP)
or the Mean Value Property (MVP):
\begin{gather}
u_I(x)=\alpha \max_{y \in S(x)}u_I(y)+\beta \min_{y \in S(x)}u_I(y)
+\gamma -\kern-1.1em\int\nolimits_{S(x)}u_I(y)dy,
\\
u_{II}(x)=\alpha \max_{y \in S(x)}u_{II}(y)
 +\beta \min_{y \in S(x)}u_{II}(y)
+\gamma -\kern-1.1em\int\nolimits_{S(x)}u_{II}(y)dy.
\end{gather}
\end{proposition}

\begin{proof}
We will provide a proof only for $u_I$;
the proof for $u_{II}$ follows by symmetry.
Take a set of vertices $\mathfrak{X}$, boundary $Y$ and adjoin
one vertex $y^*$ to the boundary. Denote new boundary by
$Y^*=Y\cup \{y^*\}$ and the new set of vertices by
$\mathfrak{X}^*= \mathfrak{X}\setminus\{y^*\}$ and define
\begin{equation}
F^*(y) =\begin{cases}
F(y) & \text{if } y \in Y\\
u_I(y^*)&\text{if } y= y^*.
\end{cases}
\end{equation}
Let $u_I(x)$ be the value of the game with $\mathfrak{X}$ and
$Y$, and $u^{*}_I(x)$ be the value of the game
with  $\mathfrak{X}^*$ and $Y^*$.
The goal is to show that
$$
u^{*}_I(x)=u_I(x).
$$
Once we prove the above, the main result follows by extending $F$
to the set $S(x)$.

\begin{remark} \label{rmk3.3} \rm
The idea of extending $F$ is used in \cite[Lemma 3.5]{PSSW}.
\end{remark}

Hence, we have to show $u^{*}_I(x)=u_I(x)$.
Since we consider only Markovian strategies we can think of them
as mappings $S_I:\mathfrak{X}\to \mathfrak{X}$.
For the game $\mathfrak{X}^*$ and $Y^*$,
we define $S^{*}_I$ as a restriction of $S_I$ to $\mathfrak{X}^*$.
Here are the steps in detail:
\begin{equation}
\begin{split}
u^{*}_I(x)
&= \sup_{S_I^{*}}\inf_{S_{II}^{*}}\big (E^x_{S_I^{*},
 S_{II}^{*}}F^*(X_{\tau^*})\big )\\
&=\sup_{S_I^{*}}\inf_{S_{II}^{*}}\big(E^x_{S_I^{*},
 S_{II}^{*}}F^*(X_{\tau^*})\chi_{\{X_{\tau^*}=y^*\}}
 +E^x_{S_I^{*},S_{II}^{*}}F^*(X_{\tau^*})\chi_{\{X_{\tau^*}=y^*\}^c}
 \big )\\
&=\sup_{S_I^{*}}\inf_{S_{II}^{*}}\big (E^x_{S_I^{*},
 S_{II}^{*}}u_I(y^*)\chi_{\{X_{\tau^*}=y^*\}}+E^x_{S_I^{*},
 S_{II}^{*}}F^*(X_{\tau^*})\chi_{\{X_{\tau^*}=y^*\}^c}\big )\\
&= \sup_{S_I^{*}}\inf_{S_{II}^{*}}\big (E^x_{S_I^{*},
 S_{II}^{*}}\sup_{S_I}\inf_{S_{II}}E^{y^*}_{S_I,
 S_{II}}F(X_{\tau})\chi_{\{X_{\tau^*}=y^*\}}\\
&\quad +E^x_{S_I^{*},S_{II}^{*}}F^*(X_{\tau^*})
 \chi_{\{X_{\tau^*}=y^*\}^c}\big )\\
&= \sup_{S_I^{*}}\inf_{S_{II}^{*}} \sup_{S_I}\inf_{S_{II}}
 \big (E^x_{S_I^{*},S_{II}^{*}}\big (E^{y^*}_{S_I,
 S_{II}}F(X_{\tau})\big )\chi_{\{X_{\tau^*}=y^*\}}\\
&\quad +E^x_{S_I^{*},S_{II}^{*}}F^*(X_{\tau^*})
 \chi_{\{X_{\tau^*}=y^*\}^c}\big ).
\end{split}
\end{equation}
If we can show that
\begin{equation} \label{stationaryproof}
\begin{split}
&\sup_{S_I^{*}}\inf_{S_{II}^{*}} \sup_{S_I}\inf_{S_{II}}
 \big (E^x_{S_I^{*},S_{II}^{*}}\big (E^{y^*}_{S_I, S_{II}}
 F(X_{\tau})\big )\chi_{\{X_{\tau^*}=y^*\}}\\
&+E^x_{S_I^{*},S_{II}^{*}}F^*(X_{\tau^*})\chi_{\{X_{\tau^*}=y^*\}^c}
 \big )\\
&= \sup_{S_I^{*}}\inf_{S_{II}^{*}} \sup_{S_I}\inf_{S_{II}}
 \big (E^x_{S_I,S_{II}} F(X_{\tau})\chi_{\{X_{\tau^*}=y^*\}}
 +E^x_{S_I,S_{II}}F(X_{\tau})\chi_{\{X_{\tau^*}=y^*\}^c}\big ),
\end{split}
\end{equation}
then we can complete the proof in the following way:
\begin{align*}
u^{*}_I(x)
&= \sup_{S_I^{*}}\inf_{S_{II}^{*}} \sup_{S_I}\inf_{S_{II}}
 \big (E^x_{S_I,S_{II}} F(X_{\tau})\chi_{\{X_{\tau^*}=y^*\}}
 +E^x_{S_I,S_{II}}F(X_{\tau})\chi_{\{X_{\tau^*}=y^*\}^c}\big )\\
& = \sup_{S_I}\inf_{S_{II}}\sup_{S_I^{*}}\inf_{S_{II}^{*}}
 \big (E^x_{S_I,S_{II}} F(X_{\tau})\chi_{\{X_{\tau^*}=y^*\}}
 +E^x_{S_I,S_{II}}F(X_{\tau})\chi_{\{X_{\tau^*}=y^*\}^c}\big )\\
&= \sup_{S_I}\inf_{S_{II}}\big (E^x_{S_I,S_{II}}
 F(X_{\tau})\chi_{\{X_{\tau^*}=y^*\}}+E^x_{S_I,S_{II}}
 F(X_{\tau})\chi_{\{X_{\tau^*}=y^*\}^c}\big )\\
&= \sup_{S_I}\inf_{S_{II}}E^x_{S_I,S_{II}} F(X_{\tau})=u_I(x).
\end{align*}
Let us clarify \eqref{stationaryproof}. Actually, we have the
following two equalities
\begin{gather}\label{mar11}
E^x_{S_I^{*},S_{II}^{*}}E^{y^*}_{S_I, S_{II}}
F(X_{\tau})\chi_{\{X_{\tau^*}=y^*\}}=E^x_{S_I,S_{II}}
F(X_{\tau})\chi_{\{X_{\tau^*}=y^*\}}, \\
\label{mar2}E^x_{S_I^{*},S_{II}^{*}}F^*(X_{\tau^*})
\chi_{\{X_{\tau^*}=y^*\}^c}=E^x_{S_I,S_{II}}F(X_{\tau})
\chi_{\{X_{\tau^*}=y^*\}^c}.
\end{gather}
Equation \eqref{mar11} could be thought of as the payoff computed
for the trajectories that travel through a point $y^*$.
Roughly speaking we first discount boundary points to the point
$y^*$ and then discount value at $y^*$ back to $x$ which is the
same as to discount boundary points to $x$ through trajectories
that contain $y^*$, keeping in mind that  $S^{*}_{i}$ is just
a restriction of $S_{i}$. Equation \eqref{mar2} is a payoff
computed for the trajectories that avoid $y^*$, and, therefore,
there is no difference between $S^{*}_{i}$ and $S_{i}$,
since $S^{*}_{i}$ is just a restriction of $S_{i}$ to
$\mathfrak{X}\setminus \{y^*\}$.
\end{proof}

The following proposition is an extension of the result stated
in \cite{MPR3}. It characterizes optimal strategies. By optimal
strategies we mean any pair of strategies $\hat{S}_I$ and
$\hat{S}_{II}$ such that
\begin{equation}
E^x_{\hat{S}_I,\hat{S}_{II}}F(X_{\tau})
=\sup_{S_I}\inf_{S_{II}}E^x_{S_I,S_{II}}F(X_{\tau})
=u_I=u_{II}.
\end{equation}

\begin{proposition}\label{dppsimple}
Consider a game on the graph $E$ with finite set of vertices
$\mathfrak{X}$. Then the strategy $\hat{S}_I$
($\hat{S}_{II}$) under which player I (player II) moves from
vertex $x$ to vertex $z$ with
$$
u(z)=\max_{y\in S(x)} u(y), \quad
(u(z)=\min_{y\in S(x)} u(y))
$$
is optimal.
\end{proposition}

\begin{proof}
Let us start the game at vertex $x$ ($X_0=x$).
We claim that under strategies $\hat{S}_I$ and
$\hat{S}_{II}$, the process $u_I(X_k)$ is a martingale due to
the following arguments:
\begin{equation}
\begin{split}
&\mathbb{E}^x_{ \hat{S}_I, \hat{S}_{II}}[u_I(X_k)|X_0,\dots ,X_{k-1}]\\
&=\alpha u_I(X^I_{k})+\beta u_I(X^{II}_{k})
 +\gamma -\kern-1.1em\int\nolimits_{S(X_{k-1})}u_I(y)dy  \\
& = \alpha \max_{y \in S(X_{k-1})}u_I(y)
 +\beta \min_{y \in S(X_{k-1})}u_I(y)
 +\gamma -\kern-1.1em\int\nolimits_{S(X_{k-1})}u_I(y)dy\\
&= u_I(X_{k-1}),
\end{split}
\end{equation}
where $u_I(X^{I}_{k})$ indicates the choice of player I and
 $u_I(X^{II}_{k})$ indicates the choice of player II.
Then
\[
u_I(X^{I}_{k})=\max_{y \in S(X_{k-1})}u_I(y),\quad
u_I(X^{II}_{k})=\min_{y \in S(X_{k-1})}u_I(y)
\]
by choice of strategies $\hat{S}_I$ and $\hat{S}_{II}$.
In addition, since $u_I$ is a bounded function, we conclude
that $u_I(X_k)$ is a uniformly integrable martingale.
Hence, by Doob's Optional Stopping Theorem
\begin{equation}
E^x_{\hat{S}_I,\hat{S}_{II}}F(X_{\tau})
=E^x_{\hat{S}_I,\hat{S}_{II}}u_I(X_{\tau})
=E^x_{\hat{S}_I,\hat{S}_{II}}u_I(X_{0})
=u_I(x).
\end{equation}
\end{proof}


\begin{example} \label{examp3.5} \rm
We would like to warn the reader that Proposition \ref{dppsimple}
does not claim that tugging towards the maximum of $F$ on the
boundary would be an optimal strategy for player I.
Figure \ref{fig1} shows a counterexample.

\begin{figure}[ht]
$$
\xymatrix{ -1  \\ e_0 \ar@{<->}[u] &e_1  \ar@{<->}[l] &3/2\ar@{<->}[l] \\
 1 \ar@{<->}[u] }
$$
\caption{Counterexample - tugging towards the boundary} \label{fig1}
\end{figure}

The boundary vertices are indicated by the numbers, which reflect
the value of $F$ at each vertex. We consider the game starting
at vertex $e_0$ and require player II to always pull towards
the vertex labelled $-1$. For player I we choose $S^{a}_I$ to be
the strategy of always tugging towards vertex $3/2$ and let
$S^{b}_I$ be the strategy of moving towards vertex 1. We see that
\begin{gather}
E^{e_0}_{S^{a}_I, S_{II}}F(X_{\tau})=-1\cdot 2/3+3/2\cdot 1/3
=-1/6, \\
E^{e_0}_{S^{b}_I, S_{II}}F(X_{\tau})=-1\cdot 1/2+1\cdot 1/2=0\,.
\end{gather}
\end{example}

\section{Uniqueness}\label{sec:4}

Uniqueness will follow from the comparison principle below proven
 by using Doob's Optional Sampling Theorem.

\begin{theorem}[via Martingales]\label{cmpr}
Let $v$ be a solution of
\begin{equation} \label{uniqueness1}
v(x)=\alpha \max_{y \in S(x)}v(y)+\beta \min_{y \in S(x)}v(y)
+\gamma -\kern-1.1em\int\nolimits_{S(x)}v(y)dy
\end{equation}
on a graph $E$ with a countable set of vertices $\mathfrak{X}$
and boundary $Y$.  Assume
\begin{itemize}
\item $F(y)=u_I(y)$ for all $y \in Y$,
\item $\inf_{Y}F>-\infty$,
\item $v$ is bounded from below, and
\item $v(y)\geq F(y)$ for all $y \in Y$.
\end{itemize}
Then $u_I$ is bounded from below on $\mathfrak{X}$  and
$v(x)\geq u_I(x)$, for $x \in \mathfrak{X}$.
\end{theorem}

\begin{proof}
Note that we only need ``$\leq$'' in equation \eqref{uniqueness1}.
The theorem says that $u_I$ is the smallest super-solution
with given boundary value $F$.
We proceed as in \cite[Lemma 2.1]{PSSW}.
Since the game ends almost surely,
$$
u_I\geq \inf_{Y}F >-\infty,
$$
which proves that $u_I$ is bounded from below.
Now we have to show that
$$
v(x)\geq \sup_{S_I}\inf_{S_{II}}F_{-}^x(S_I, S_{II})=u_I(x).
$$
If we fix an arbitrary strategy $S_I$, then we have to show that
\begin{equation} \label{compineq}
v(x)\geq \inf_{S_{II}}F_{-}^x(S_I, S_{II}).
\end{equation}
Consider a game that starts at vertex $x$ ($X_0=x$).
We have two cases.

\textbf{Case 1:}
If our fixed $S_I$ cannot force the game to end a.s.
(i.e. $\mathbb{P}^x_{S_I, S_{II}}(\tau < \infty)<1$),
then by the definition of $F_{-}$,
$\inf_{S_{II}}F_{-}^x(S_I, S_{II})=-\infty$ and the
inequality \eqref{compineq} holds.

\textbf{Case 2:}
Now assume that our fixed $S_I$ forces the game to end despite
all the efforts of the second player. Let player II choose a
strategy of moving to $\min_{y\in S(x)}v(y)$---denote such
a strategy by $\hat{S}_{II}$. If we prove that $v(X_k)$ is
a supermartingale bounded from below, then we can finish the
proof by applying Doob's Optional Stopping Theorem:
\begin{align*}
\inf_{S_{II}}\mathbb{E}^x_{S_I, S_{II}}F(X_\tau)
& \leq  \mathbb{E}^x_{S_I, \hat{S}_{II}}F(X_\tau)
 \leq \mathbb{E}^x_{S_I, \hat{S}_{II}}v(X_\tau)\\
& \leq  \mathbb{E}^x_{S_I, \hat{S}_{II}}v(X_0) = v(X_0)=v(x),
\end{align*}
where we have used Fatou's lemma. The result follows upon
taking $\sup_{S_I}$. Hence, we only need to prove that
 $v(X_k)$ is a supermartingale under measure
$\mathbb{P}^x_{S_I, \hat{S}_{II}}$:
\begin{align*}
&\mathbb{E}^x_{S_I, \hat{S}_{II}}[v(X_k)|X_0,\dots ,X_{k-1}]\\
&= \alpha v(X^I_{k})+\beta v(X^{II}_{k})
  +\gamma -\kern-1.1em\int\nolimits_{S(X_{k-1})}v(y)dy  \\
&\leq \alpha \max_{y \in S(X_{k-1})}v(y)
 +\beta \min_{y \in S(X_{k-1})}v(y) +\gamma -\kern-1.1em\int\nolimits_{S(X_{k-1})}v(y)dy
= v(X_{k-1}),
\end{align*}
where $v(X^{I}_{k})$ indicates the choice of player I and
 $v(X^{II}_{k})$ indicates the choice of player II.
Then $v(X^{II}_{k})=\min_{y \in S(X_{k-1})}v(y)$ by choice of
strategy for player II.
\end{proof}

In case $\min_{y \in S(X_{k-1})}v(y)$ is not achieved
(i.e. graph is not locally finite), we need to modify the above
proof by making player II move within $\epsilon $ neighborhood
of $\min_{y \in S(X_{k-1})}v(y)$.
We can prove similar result for $u_{II}$. The next theorem
is the extension of the result obtained in \cite{MPR2}.

\begin{theorem}\label{valueexists}
If graph $E$  is finite and $F$ is bounded below on $Y$,
then $u_I=u_{II}$, so the game has a value.
\end{theorem}

\begin{proof}
Clearly, finite $E$ implies that $F$ is bounded below. We included
this redundant statement to suggest future possible extensions to
an uncountable graph.
We know that $u_I\leq u_{II}$ always holds, so we only need to
show $u_I\geq u_{II}$. Assume $F$ is bounded below. Similar to the
proof of Theorem \ref{cmpr} we can show that $u_I$
is a supermartingale bounded below by letting player I to choose
an arbitrary strategy  $S_I$ and requiring player II always move
to $\min_{y\in S(x)}u_I(y)$ from $x$ - strategy $\hat{S}_{II}$.
For simplicity of the presentation we consider a case when
$\min_{y\in S(x)}u_I(y)$ is achievable, for the general case
we have to employ $\epsilon$, like in Theorem \ref{cmpr}.
We start the game at $x$, so $X_0=x$. Recall
$u_{II}(x)=\inf_{S_{II}}\sup_{S_I}F_{+}(S_I, S_{II})$:
\begin{align*}
u_{II}(x)
&  \leq  \sup_{S_I}\mathbb{E}^x_{S_I,\hat{S}_{II}}[F(X_{\tau})]
\quad  \text{(since E is finite)}\\
&=\sup_{S_I}\mathbb{E}^x_{S_I, \hat{S}_{II}}[u_I(X_{\tau})]\\
& \leq \sup_{S_I}\mathbb{E}^x_{S_I, \hat{S}_{II}}[u_I(X_{0})]
=u_I(x),
\end{align*}
where the last inequality is due to Doob's Optional Stopping Theorem.
\end{proof}

\section{Connections among games, partial differential equations
and DPP} \label{sec:5}

This section summarizes some previous results and presents new
perspectives on known issues.

\begin{theorem}\label{MartingalePDETheor}
Assume we are given a function $u$ on the set of vertices
$\mathfrak{X}$ and consider a strategy $\hat{S}_I$
($\hat{S}_{II}$) where player I (player II) moves from vertex $x$
to vertex $z$ with
$$u(z)=\max_{y\in S(x)} u(y) \quad
(u(z)=\min_{y\in S(x)} u(y)).
$$
Then the following two statements are equivalent:
\begin{itemize}
\item the process $u(X_n)$ is a martingale under the measure
 induced by strategies $\hat{S}_I$  and $\hat{S}_{II}$,
\item the function $u$ is a solution of Dirichlet problem
  \eqref{p-harm-drift}.
\end{itemize}
In addition, $u(X_n)$ is a martingale under the measure induced
by strategies $\hat{S}_I$  and $\hat{S}_{II}$ implies that
$\hat{S}_I$ and $\hat{S}_{II}$ are the optimal strategies.
\end{theorem}

\begin{proof}
Suppose that $u(X_n)$ is a martingale under measure induced
by strategies $\hat{S}_I$  and $\hat{S}_{II}$. Fix an arbitrary
point $x \in \mathfrak{X}$ and consider a game which starts
at $x=X_0$, then
\begin{equation} \label{MartingalePDE}
\begin{split}
E^x_{\hat{S}_I, \hat{S}_{II}}[u(X_1)|X_0]
&=\alpha u(X^{I}_1)+\beta u(X^{II}_1)+\gamma-\kern-1.1em\int\nolimits_{S(X_0)}u(y)dy  \\
&=\alpha \max_{y\in S(X_0)} u(y)+\beta \min_{y\in S(X_0)} u(y)
  +\gamma-\kern-1.1em\int\nolimits_{S(X_0)}u(y)dy  \\
&=u(X_0).
\end{split}
\end{equation}
Conversely, assume that $u$ solves Dirichlet problem
\eqref{p-harm-drift}, then \eqref{MartingalePDE}
implies that $u(X_n)$ is a martingale under measure induced by
strategies $\hat{S}_I$  and $\hat{S}_{II}$.

Let us show a final implication.
The result relies on the fact that our game has a value and value
of game function is the solution of the Dirichlet
problem \eqref{p-harm-drift}.
Since $u(X_n)$ is a martingale under measure induced by strategies
$\hat{S}_I$  and $\hat{S}_{II}$ we have
\begin{equation}
E^x_{\hat{S}_I,\hat{S}_{II}}F(X_{\tau})
=E^x_{\hat{S}_I,\hat{S}_{II}}u(X_{\tau})
=E^x_{\hat{S}_I,\hat{S}_{II}}u(X_0)=u(x).
\end{equation}
By the uniqueness result (Theorem \ref{cmpr})
\begin{equation}
u(x)=\sup_{S_I}\inf_{S_{II}}E^x_{S_I,S_{II}}F(X_{\tau}).
\end{equation}
\end{proof}

\section{Strong comparison principle}\label{sec:6}

\begin{theorem}\label{strcomppricip}
Assume that $u$ and $v$ are solutions of equation
\eqref{p-harm-drift} on $\mathfrak{X}\setminus Y$,
$\gamma \neq 0$, $u\leq v$ on the boundary $Y$,
and there exists $x\in \mathfrak{X}$ such that $u(x)=v(x)$;
then $u=v$ on the whole of $\mathfrak{X}$.
\end{theorem}

\begin{proof}
By  Theorem \ref{cmpr} from the fact that $u\leq v$ on the boundary
we know that $u\leq v$ on $\mathfrak{X}$. By definition of
$p$-harmonious function we have
\begin{gather}
v(x)=\alpha \max_{y \in S(x)}v(y)+\beta \min_{y \in S(x)}v(y)
 +\gamma -\kern-1.1em\int\nolimits_{S(x)}v(y)dy,
\\
u(x)=\alpha \max_{y \in S(x)}u(y)+\beta \min_{y \in S(x)}u(y)
 +\gamma -\kern-1.1em\int\nolimits_{S(x)}u(y)dy.
\end{gather}
Since $u\leq v$ on $\mathfrak{X}$ we know that
\begin{gather*}
\max_{y \in S(x)}u(y)\leq \max_{y \in S(x)}v(y),\\
\min_{y \in S(x)}u(y)\leq \min_{y \in S(x)}v(y),\\
-\kern-1.1em\int\nolimits_{S(x)}u(y)dy\leq -\kern-1.1em\int\nolimits_{S(x)}v(y)dy.
\end{gather*}
But since $u(x)=v(x)$, we actually have equalities
\begin{gather*}
\max_{y \in S(x)}v(y)= \max_{y \in S(x)}u(y),\\
\min_{y \in S(x)}v(y)= \min_{y \in S(x)}u(y),\quad
-\kern-1.1em\int\nolimits_{S(x)}v(y)dy= -\kern-1.1em\int\nolimits_{S(x)}u(y)dy.
\end{gather*}
From equality of average values and the fact that $u\leq v$
we conclude that $u=v$ on $S(x)$. Since our graph is connected,
we immediately get the result.
\end{proof}

\section{Remarks on unique continuation} \label{uniquecont}

We can pose the following question. Let $E$ be a finite graph
with the vertex set $\mathfrak{X}$ and let $B_R(x)$ be
the ball of radius $R$ contained within this graph.
Here we assign to every edge of the graph length one and let
$$
d(x,y)=\inf_{x\sim y}\{|x\sim y|\},
$$
where $x \sim y$ is the path connecting vertex $x$ to the vertex $y$
and $|x\sim y|$ is the number of edges in this path.
Assume that $u$ is a $p$-harmonious function on $\mathfrak{X}$
and $u=0$ on $B_R(x)$. Does this mean that $u=0$ on $\mathfrak{X}$?
It seems like the answer to this question depends on the values
of $u$ on the boundary $Y$, as well as properties of the graph $E$
itself.
Here we can provide simple examples for particular graph,
which shows that $u$ does not have to be zero through the
whole $\mathfrak{X}$. See tables \ref{table1} and \ref{table2}.


\begin{table}[ht!]
\caption{$p=2,\; 8$ neighbors} \label{table1}
\begin{center}
\begin{tabular}{|ccccccccccc|}
\hline\\
164 & $-349$ & 80 & 163 & 1& $-164$ &1 &163 & 96 & $-617$ &74 \\
$-349$ & $-52$ & $-19$ & 28 & 1& $-20$ & 1 & 28 & $-38$ & $-9$ & 596 \\
80 & $-19$ & $-4$ & 1 & 1 & $-2$ & 1 & 1 & $-1$ & 35 & $-217$ \\
163 & 28 & 1 & 0 & 0& 0 & 0 & 0 & 1 & $-26$ & $-26$ \\
1 & 1 & 1 & 0 & 0& 0 & 0 & 0 & $-2$ & 1 & 1 \\
$-164$ & $-20$ & $-2$ & 0 & 0& 0 & 0 & 0 & 1 & 7 & 52\\
1 & 1 & 1 & 0 & 0& 0 & 0 & 0 & 1 & 1 & 1\\
163 & 28 & 1 & 0 & 0& 0 & 0 & 0 & $-2$ & 1 & $-53$\\
80 & $-19$ & $-4$ & 1 & 1& $-2$ & 1 & 1 & $-1$ & $-19$ & 80\\
$-349$ & $-52$ & $-19$ & 28 & 1& $-20$ & 1 & 28 & $-19$ & 2 & $-160$\\
164 & $-349$ & 80 & 163 & 1 & $-164$ & 1 & 163 & 77 & 403 & 461 \\
\hline
\end{tabular}
\end{center}
\end{table}

\begin{table}[ht!]
\caption{$p=\infty,\, 8$ neighbors} \label{table2}
\begin{center}
\begin{tabular}{|ccccccccccc|}
\hline\\
$-31$ & 21 & $-11$ & $-5$ & 1& 3 &1 &$-5$ & 11 & $-21$ & 23 \\ 
21 & $-5$ & 5 & $-3$ & $-1$& 1 &$-1$ & 3 & $-5$ & 1 & 21 \\ 
$-11$ & 5 & 0 & 1 & $-1$ & 0 & 1 & $-1$ & 0 & 5 & $-11$ \\ 
$-5$ & $-3$ & 1 & 0 & 0& 0 & 0 & 0 & 1 & $-3$ & 5 \\ 
3 & $-1$ & $-1$ & 0 & 0& 0 & 0 & 0 & $-1$ & $-1$ & 3 \\
1 & 1 & 0 & 0 & 0& 0 & 0 & 0 & 0 & 1 & 1\\
3 & $-1$ & 1 & 0 & 0& 0 & 0 & 0 & 1 & $-1$ & 3\\ 
$-5$ & 3 & $-1$ & 0 & 0& 0 & 0 & 0 & $-1$ & 3 & $-5$\\ 
11 & $-5$ & 0 & 1 & $-1$& 0 & 1 & $-1$ & 0 &$-5$ & 11\\ 
$-21$ & 1 & 5 & $-3$ & $-1$& 1 & $-1$ & 3 & $-5$ & 5 & $-21$\\ 
23 & 21 & $-11$ & $-5$ & 1 & 3 & 1 & $-5$ & 11 & $-21$ & 31\\
\hline
\end{tabular}
\end{center}
\end{table}


\subsection*{Acknowledgements} 
The author would like to thank Dr.~Manfredi for his guidance,
inspiration and support that made this research possible.

\begin{thebibliography}{00}

\bibitem{KLW} R. Kaufman, J. G. Llorente, J.-M. Wu;
\emph{Nonlinear harmonic measures on trees},
Ann. Acad. Sci. Fenn. Math., 28, 279--302, (2003).

\bibitem{KALLENBERG} O. Kallenberg;
 \emph{Foundations of Modern Probability}, 2-5. Springer,  New York (1997)

\bibitem{MS} A. P. Maitra, W. D. Sudderth;
\emph{Discrete Gambling and Stochastic Games},
 171-226. Springer-Verlag, New York (1996).

\bibitem{MPR1} J. J. Manfredi, M. Parviainen, J. D. Rossi;
\emph{An asymptotic mean value characterization for $p$-harmonic functions},
 Proc. Amer. Math. Soc., 138(3), 881-889 (2010).

\bibitem{MPR2} J. J. Manfredi, M. Parviainen, J. D. Rossi;
\emph{On the definition and properties of $p$-harmonious functions},
preprint (2009).

\bibitem{MPR3} J. J. Manfredi, M. Parviainen, J. D. Rossi;
\emph{Dynamic programming principle for tug-of-war games with noise},
preprint (2009).

\bibitem{PPS} Yuval Peres, Gabor Pete, and Stephanie Somersille;
\emph{Biased tug-of-war, the biased infinity Laplacian, 
and comparison with exponential cones},
preprint (2009).

\bibitem{PS} Y. Peres, S. Sheffield;
 \emph{Tug-of-war with noise: A game-theoretic view of the $p$-Laplacian},
Duke Math. J., 145 (1), 91-120 (2008).

\bibitem{PSSW} Y. Peres, O. Schramm, S. Sheffield, D. Wilson;
\emph{Tug-of-war and the infinity Laplacian}, J. Amer. Math. Soc., 22, 
 167-210 (2009).

\bibitem{APS} Alexander P. Sviridov;
\emph{Elliptic equations in graphs via stochastic games},
 Dissertation (2010).

\bibitem{VARADHAN} S. R. S. Varadhan;
\emph{Probability Theory}, 41-42. Courant Institute of Mathematical Science, 
 New York (2001).

\end{thebibliography}

\end{document}
