\documentclass[reqno]{amsart}
\usepackage{graphicx}

\AtBeginDocument{{\noindent\small
{\em Electronic Journal of Differential Equations},
Vol. 2004(2004), No. 135, pp. 1--10.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu  (login: ftp)}
\thanks{\copyright 2004 Texas State University - San Marcos.}
\vspace{9mm}}

\begin{document}

\title[\hfilneg EJDE-2004/135\hfil A stochastic control problem]
{A stochastic control problem}

\author[William Margulies, Dean Zes\hfil EJDE-2004/135\hfilneg]
{William Margulies, Dean Zes} % in alphabetical order

\address{William Margulies \hfill\break
Department of Mathematics\\
CSULB, Long Beach, CA 90840, USA}
\email{wmarguls@csulb.edu}

\address{Dean Zes \hfill\break
B\&Z Engineering Consulting, 3134 Kallin Ave, Long Beach, CA 90808, USA}
\email{deanzes@charter.net}

\date{}
\thanks{Submitted May 28, 2004. Published Nov. 23, 2004.}
\subjclass[2000]{60H05, 60H07}
\keywords{Stochastic differential equations; control problems; Jacobi functions}

\begin{abstract}
 In this paper, we study a specific stochastic differential equation
 depending on a parameter and obtain a representation of its 
 probability density function in terms of Jacobi Functions.
 The equation arose in a control problem with a quadratic performance 
 criterion.  The quadratic performance is used to eliminate the control 
 in the standard  Hamilton-Jacobi variational technique.  
 The resulting stochastic differential  equation has a noise amplitude 
 which complicates the solution.  We then solve Kolmogorov's partial 
 differential equation for the probability  density function by using 
 Jacobi Functions. A particular value of the parameter makes the 
 solution a Martingale and in this case we prove that the solution goes
 to zero almost surely as time tends to infinity.
\end{abstract}

\maketitle
\numberwithin{equation}{section}

\newtheorem{theorem}{Theorem}[section]
\newtheorem{lem}[theorem]{Lemma}
\newtheorem{definition}[theorem]{Definition}

\section{Introduction}

The purpose of this paper is to solve a certain family of stochastic
optimal control problems.  Specifically, the stochastic optimal control
is defined by the stochastic differential equation with control
\begin{equation} \label{e1}
\begin{gathered}
dx_t=(ax+bu)dt +\sqrt{cx^2+du^2+e} d\beta_t \\
x(0)=x_0.
\end{gathered} \end{equation}
where $x_t$ is the state variable, $u$ the control, and $\beta_t$ represents
a Brownian motion.
The control is determined by the quadratic cost index
\[
I(x,t,u)=\frac12E_{x,t}\big[\int_t^{t_f}(qx^2(s)+ru^2(s))ds + p_fx_{t_f}^2\big],
\]
which is to be minimized over the controls $u$.

We solve this problem by using the Hamilton-Jacobi-Bellman equation and solve for
the minimum control.  Then we use the Kolmogorov equations to solve for
the probability distribution function of the state variable.
We get an explicit form of the solution in terms of Jacobi functions.

The equation arose from a linear control problem with a quadratic
performance criterion, see Zes \cite{Z} and the references there.
Equations like this arise in filtering theory, communications and finance.
The original problem was discrete and was analyzed by Jacobson \cite{J},
and this is a continuous version of that problem with a more general
noise term.  The control enters the noise term and complicates the solution.
We solve the equation explicitly in terms of Jacobi functions which leads
to a very complicated representation.  To show how the representation
works we solve the equation for a special value of the parameter where one
can obtain the solution in a simple closed form.  We can then analyze the
representation and see it gives the same result.  For another value of the
parameter the solution is a Martingale and in this case the solution is
stable and tends to zero almost surely (a.s.) as time tends to infinity.
We also use MATLAB to generate some sample traces of the state variable.

In section two, we develop the partial differential equation that the
probability density function of the state variable satisfies.
In section three we use separation of variables to get an ordinary
differential equation and eigenvalue problem that we can use to represent
our solution to the partial differential equation of section two.
In section four, we incorporate, using Koornwinder \cite{K}, all the necessary
 properties of the Jacobi Functions that we need to represent our solution.
In section five, we actually represent the solution in terms of the Jacobi
Functions.
In section six, we solve the equation for a value of the parameter for
which we can obtain a closed form solution.
In this case we then can see our formula gives the correct result.
In section seven, we show the long term behavior for a particular value
of the parameter and show the solution tends to $0$ a.s. as $t$
tends to infinity.
In section eight, we give some numerical results for certain parameter values.
 We use the simulation algorithms as in the article by Higham, see \cite{H}.

\section{The Optimal Control Problem}

        We begin with the stochastic differential equation (s.d.e),
        defined in the Ito sense,
\begin{equation} \label{eq1}
\begin{gathered}  dx_t = (ax + bu)dt + \sqrt{cx^2 + du^2+ e}\,d\beta_t,\\
 x(0) = x_0
\end{gathered}
\end{equation}
        where the beta process $\beta(t)$ is Brownian motion
        defined in the usual way, $E[\beta] = 0, E[(d\beta)^2] = dt$,
        and the noise amplification constants ($c,d,e$) must all be
        non-negative, for the process to be real.
        Next, we stipulate the familiar quadratic cost index to be
        minimized,
\begin{equation}
        I(x,t,u(\cdot)) = \frac{1}{2}
 E_{x,t} \big[\int_{t}^{t_f}(qx(s)^2 + ru(s)^2)ds   + p_fx_f^2\big].
\label{eq2}
\end{equation}
Then we want to find
\[
J(x,t)=\inf_{u}I(x,t,u)
\]
and to find that control, $u^*$ such that
\[
J(x,t)=I(x,t,u^*).
\]
        The problem which we pose can be solved in a standard way as seen in
Fleming and Rishel \cite[Theorem 4.1]{FR},  or Oksendal
 \cite[Theorem 11.2.1]{Ok}.   The infinitesimal generator,
 $\mathcal{A}$ of the process, $x_t$, is
\[
\mathcal{A}=\frac12(cx^2+du^2+e)\frac{\partial^2}{\partial x^2}+(ax+bu)
\frac{\partial}{\partial x}.
\]
Thus we have the following Hamilton-Jacobi-Bellman equation
\[
-\frac{\partial}{\partial t}J(x,t)=\inf_{u}\mathcal{A}J(x,t)+(qx^2+ru^2)/2.
\]
The infimum on the right is attained at
$u=-\frac{b\frac{\partial J}{\partial x}}{d\frac{\partial^2J}{\partial x^2}+r}$.
This gives
\[
-\frac{\partial}{\partial t}J(x,t)=\frac12(cx^2+e)\frac{\partial^2J}{\partial x^2}+ax\frac{\partial J}{\partial x}+\frac12 qx^2-\frac12\frac{b^2(\frac{\partial J}{\partial x})^2}{d\frac{\partial^2J}{\partial x^2}+r}.
\]
Now put $J=\frac12(p(t)x^2+l(t))$ and obtain the following equations for $p$ and $l$.
\begin{gather*}
p'=\frac{b^2p^2}{(dp+r)}-cp-2ap-q\;,\; p(t_f)=p_f\\
l'=-ep
\end{gather*}
If $p(t)$ is constant or $r=0$, the optimum control takes the form
$u=-kx$.  Whence the stochastic differential equation becomes
\[
dx_t =(ax-bkx)dt +\sqrt{cx^2+dk^2x^2+e}\; d\beta_t
=Axdt+\sqrt{Cx^2+e}\,d\beta_t
\]
Thus we have the above equation for the stochastic process $x_t$.

We want to solve for the probability density function $\rho(x,t)$ of the process
(we put $C=e=1$ for simplicity).  We put $\rho(x,0)=f(x)$ the probability density
function of the initial random variable $x_0$.  To solve this stochastic equation
we solve the Kolmogorov partial differential equation with initial value $f(x)$,
i.e. we solve
\begin{equation} \label{one}
\begin{gathered}
\begin{aligned}
\rho_t&=L(\rho)=\frac12((x^2+1)\rho)_{xx}-((Ax)\rho)_x \\
&=\frac12(x^2+1)\rho_{xx}+(2-A)x\rho_x+(1-A)\rho\quad   -\infty<x<\infty,\; 0\leq t
\end{aligned} \\
\rho(x,0)=f(x).
\end{gathered}
\end{equation}

\section{Separation of Variables}

In this section we use separation of variables to show the form of the
representation of our solution.
To this end we first consider a solution of the form
\[
\rho(x,t)=\exp(-p(y)t)V(x,y)
\]
We substitute into (\ref{one}) to get
\begin{align*}
-p(y)\exp(-p(y)t)V(x,y)&=\exp(-p(y)t)\big[\frac12(x^2+1)V^{\prime\prime}(x,y)\\
&\quad+(2-A)xV'(x,y)+(1-A)V(x,y)\big].
\end{align*}
Thus if $V$ satisfies
\begin{equation}
L(V)=\frac12(x^2+1)V^{\prime\prime}(x)+(2-A)xV'(x)+(1-A)V(x)=-p(y)V, \label{two}
\end{equation}
then $\rho$ satisfies (\ref{one}).
Hence for certain $H(y)$,
\[
\rho(x,t)=\int_0^\infty H(y)V(x,y)\exp(-p(y)t)dy\]
satisfies the partial differential equation (\ref{one}), but not
necessarily the initial condition.

\section{Jacobi Functions}\label{J}

In this section we present the properties of the Jacobi Functions we will need.
We follow Koornwinder \cite{K} for the presentation.

\begin{definition} \label{def4.1} \rm
Jacobi Functions. For $0<\tau<\infty, \alpha, \beta$ parameters we put
\[
\phi_y^{(\alpha,\beta)}(\tau)={}_2\!F_1(\frac12(\alpha+\beta+1-iy),
\frac12(\alpha+\beta+1+iy);\alpha+1;-\sinh^2\tau)
\]
where $_2\!F_1(a,b;c;z)$ is the standard hypergeometric function.
\end{definition}

\begin{definition} \label{def} \rm
Jacobi transform. Let $g$ be a smooth even function.  Then the Jacobi transform is
\[
\hat g(y)=\int_0^\infty g(\tau)\phi^{(\alpha,\beta)}_y(\tau)
\Delta_{(\alpha,\beta)}(\tau) d\tau
\]
where
$\Delta_{(\alpha,\beta)}(\tau)=(2\sinh\tau)^{2\alpha+1}(2\cosh\tau)^{2\beta+1}$.
\end{definition}

To state the inversion formula we need the following function and sets.
% \label{def4.3} \rm
\[
c_{(\alpha,\beta)}(y)=\frac{2^{\alpha+\beta+1-iy}\Gamma(\alpha+1)
\Gamma(iy)}{\Gamma(\frac12(\alpha+\beta+1+iy))\Gamma(\frac12(\alpha-\beta+1+iy))}.
\]
We also define the set
\[
D_{(\alpha,\beta)}=\{i(|\beta|-\alpha-1-2m)|m=0,1,2,\dots;|\beta|-\alpha-1-2m>0\}.
\]
Note that $D_{(\alpha,\beta)}$ is a finite set.
Also note that the set $D_{(\alpha,\beta)}$ contains the poles of
$(c_{(\alpha,\beta)}(-y))^{-1}$ with positive real part.

 For $y\in D_{(\alpha,\beta)}$ we put
\[
d(y)=-i\mbox{Res}_{\mu=y}(c_{(\alpha,\beta)}(\mu)c_{(\alpha,\beta)}(-\mu))^{-1}
\]
Now we can state the theorem for the inversion formula, see
\cite[Theorem 2.3]{K}.  Let $\mathcal D_e$ be the even $C^\infty$
functions with compact support on $\mathbb R$.  We will drop the superscripts
on $\phi$ and the subscripts on $c$.

\begin{theorem} \label{thm4.6}
 If $\alpha >-1$, $\beta\in \mathbb R$, $g\in\mathcal D_e$ then
\[
g(\tau)=\frac1{2\pi}\int_0^\infty \hat g(y)\phi_y(\tau)|c(y)|^{-2}dy
+\sum_{y\in D_{(\alpha,\beta)}}\hat g(y)\phi_y(\tau)d(y).
\]
\end{theorem}

Note that we need these formulas only for $\alpha=\pm \frac12$ and $\beta=1-A$.
Since our $A$ can be negative, $D$ may be nonempty.
Also note that the smoothness of $g$ can be relaxed but the evenness is crucial
and we will deal with that when we represent the solution.

\section{Representation of the Solution}

In this section we show how to use the results of Sections 3-4 to solve the
initial-value problem.  We show that the solution can be written as an integral
of the Jacobi Transform of the initial data.  As in section three put
\[
\rho(x,t)=V(x,y)\exp(-p(y)t)
\]
where we now put
\[
V(x,y)=\phi_y^{(-\frac12,1-A)}(\tau(x))= _2\!F_1(\frac12(\frac32-A-iy),\frac12(\frac32-A+iy);\frac12;-x^2)
\]
where $\tau(x)=\sinh^{-1}(x)$ and the polynomial $p(y)$ will be defined below.
Use the substitutions
\begin{align*}
&z=-x^2,\; F=\;_2\!F_1, V_x=-2xF',\\
& V_{xx}=(4x^2F^{\prime\prime}-2F')
\end{align*}
and compute $L(V)$ to obtain (prime denotes differentiation with respect to $z$)
\[
L(V)=-2[z(1-z)F^{\prime\prime}+(\frac12-(\frac52-A)z)F'-\frac12(1-A)F ].
\]
Since the function $F$ is a hypergeometric function with parameters
\begin{equation}
a=\frac12(\frac32-A-iy), b=\frac12(\frac32-A+iy), c=\frac12 \label{four}
\end{equation}
we have, with $p(y)=\frac12((A-\frac12)^2+y^2)$,
\[
L(V)=-p(y)V.
\]
Therefore, $\rho$ satisfies $\rho_t=L(\rho)$.

We now derive a second solution so that we can solve (\ref{one}) for all initial
data.
We have $F$ is a hypergeometric function with parameters as in (\ref{four}).
We then have, see Andrews \cite{A}, that $z^{1-c}\;_2F_1(1+a-c,1+b-c;2-c;z)$ is a
second independent solution to the same equation.  We use this to determine a
solution as follows.  We put
\[
W(x,y)=x\phi_y^{(\frac12,1-A)}(\tau(x))=x\;_2\!F_1(\frac12(\frac52-A-iy),\frac12(\frac52-A+iy);\frac32;-x^2)
\]
and with the substitutions
$z=-x^2$, $F=_2F_1$, $W_x=F-2x^2F'$,
$W_{xx}=4x^3F''-6xF'$,
we have
 \[
L(W)=-2x[(1-z)zF^{\prime\prime}+(\frac32-(\frac72-A)z)F'-(\frac32-A)F].
\]
The corresponding hypergeometric parameters for $F$ are
\begin{equation}
a=\frac12(\frac52-A-iy), b=\frac12(\frac52-A+iy), c=\frac32
\end{equation}
Now $F$ with its parameters satisfy
\[
(1-z)zF^{\prime\prime}+(\frac32-(\frac72-A)z)F'-\frac14((\frac52-A)^2+y^2)F=0.
\]
Thus we have
\begin{align*}
L(W)&=-2x[\frac14((\frac52-A)^2+y^2)F-(\frac32-A)F]\exp(-p(y)t)\\
&=-\frac12((A-\frac12)^2+y^2)W\\
&=-p(y)W.
\end{align*}
Thus we have $L(W)=-p(y)W$.  Again if $\rho=W\exp(-p(y)t)$ then $\rho_t=L(\rho)$.
We now have two solutions to the partial differential equation (\ref{one}):
\[
\rho_1(x,t)=\phi_y^{(-\frac12,1-A)}(\tau(x))\exp(-p(y)t) \mbox{ and }
\rho_2(x,t)=x\phi_y^{(\frac12,1-A)}(\tau(x))\exp(-p(y)t)
\]
thus $(\rho_i)_t=L(\rho_i)$ for $i=1,2$.

Let $f(x)$ be an arbitrary smooth probability density function.
Then $f(x)=f_e(x)+f_o(x)$ where these are the even and odd parts of $f$.  We put
\[
g_e(x)=\begin{cases}
\frac{f_o(x)}{x} &\mbox{for }x\ne 0\\
f'(0) &\mbox{for }x=0 \end{cases}
\]
The function $g_e(x)$ is an even function and is $C^\infty$ if $f$ is. We have $f(x)=f_e(x)+xg_e(x)$.
We can now write the solution as (recall $x=\sinh\tau$, $\tau(x)=\sinh^{-1}x$):
 \begin{equation} \label{five}
\begin{aligned}
\rho(x,t)&=\frac1{2\pi}\int_0^\infty \hat f_e(y)\phi_y^{(-\frac12,1-A)}
(\tau(x))|c_{(-\frac12,1-A)}(y)|^{-2}\exp(-p(y)t)dy\\
&\quad+\sum_{y\in D_{(-\frac12,1-A)}}\hat f_e(y)\phi_y^{(-\frac12,1-A)}
(\tau(x))d(y)\exp(-p(y)t)\\
&\quad+\frac1{2\pi}\int_0^\infty \hat g_e(y)x\phi_y^{(\frac12,1-A)}
(\tau(x))|c_{(\frac12,1-A)}(y)|^{-2}\exp(-p(y)t)dy\\
&\quad+\sum_{y\in D_{(\frac12,1-A)}}\hat g_e(y)x\phi_y^{(\frac12,1-A)}
(\tau(x))d(y)\exp(-p(y)t).
\end{aligned}
 \end{equation}
This function satisfies the partial differential equation and for the initial
condition we have, using Theorem \ref{thm4.6},
\[
\rho(x,0)=f_e(x)+xg_e(x)=f(x).
\]

\section{The case $A=1/2$}

For our initial data we take the delta function, $\delta_{x_0}$, based at $x_0$.
Put $A=1/2$ to get the equation
\[
         dx_t = \frac12 x_tdt + \sqrt{(x_t^2 +1)}\,d\beta_t,
\]
Put $V(x)=\sinh^{-1}x$ and use Ito's formula to obtain
\begin{align*}
dV(x_t)
&=\frac1{\sqrt{1+x_t^2}}dx_t-\frac12(1+x_t^2)^{-\frac32}x_tdx_t^2\\
&=\frac1{\sqrt{1+x_t^2}}dx_t-\frac12x_t(1+x_t^2)^{-\frac32}(1+x_t^2)dt\\
&=-\frac12\frac{x_t}{\sqrt{1+x_t^2}}dt+\frac12\frac{x_t}{\sqrt{1+x_t^2}}dt
+d\beta_t\\
&=d\beta_t.
\end{align*}
We can integrate it directly to obtain
\begin{equation}
      \sinh^{-1}(x(t)) -\sinh^{-1}(x_0) = [\beta(t) - \beta(0)]
\label{eq27}
\end{equation}
 and taking $\beta(0) = 0$, we have
\[
      \sinh^{-1}(x(t)) -\sinh^{-1}(x_0)
      = \beta(t).
\]
        Now we also have
\[
    \rho(x,t) = \frac{1}{\sqrt{2\pi t}}[\exp(-\frac{\beta^2}{2t})]\frac{d\beta}{dx}.
\]

        Thus the probability density function takes the form
\[
 \rho(x,t) = \frac{1}{\sqrt{2\pi t(x^2+1)}}\exp[-\frac{(\sinh^{-1}x - \sinh^{-1}x_0)^2}{2t}].
\]
Now we compute the solution given by the representation (\ref{five}).
We first note that the set $\mathcal D$ is empty.  See Koornwinder \cite{K}
for the following identities.
\begin{gather*}
\phi^{(-\frac12, \frac12)}_y(\tau)=(\cosh)^{-2(\frac12)}(\tau)\phi_y^{(-\frac12, -\frac12)}(\tau)\\
\phi^{(-\frac12, -\frac12)}_y(\tau)=\cos(y\tau),  \quad \phi_y^{(\frac12, \frac12)}(\tau)=\frac{2\sin(y\tau)}{y\sinh(2\tau)}\\
c_{(-\frac12, \frac12)}(y)\overline{c}_{(-\frac12, \frac12)}(y)=1\\
c_{(\frac12, \frac12)}(y)\overline{c}_{(\frac12, \frac12)}(y)=\frac4{y^2}
\end{gather*}
For the initial data, we have the following:
\begin{gather*}
f_e(x)=\frac12(\delta_{x_0}(x)+\delta_{x_0}(-x))\\
g_e(x)=\frac1{2x}(\delta_{x_0}(x)-\delta_{x_0}(-x))
\end{gather*}
Then for the Jacobi transforms we have
\[
\widehat{f}_e(y)=\int_0^\infty\frac12(\delta_{x_0}(x(\tau))
+\delta_{x_0}(-x(\tau)))(\cosh(\tau))^{-1}
\cos(y\tau)(2\sinh(\tau))^0(2\cosh(\tau))^2d\tau
\]
and after a change of variables to the $x$ variable, $\widehat{f}_e(y)= 2\cos(y\tau_0)$.
Also changing to the $x$ variable
\begin{align*}
\widehat{g}_e(y)
&=\int_0^\infty\frac1{2x}(\delta_{x_0}(x(\tau))-\delta_{x_0}(-x(\tau)))
\frac{2\sin(y\tau)}{y\sinh(2\tau)}(2\sinh(\tau))^2(2\cosh(\tau))^2d\tau\\
&=16\int_0^\infty(\delta_{x_0}(x)-\delta_{x_0}(-x))\frac{x(\sqrt{1+x^2})^2
\sin(y\tau(x))}{2y\sinh(\tau(x))\cosh(\tau(x))}\frac{dx}{\sqrt{1+x^2}}\\
&=\frac8y\sin(y\tau(x_0)).
\end{align*}
We now substitute these into the representation (\ref{five}) and obtain
\begin{align*}
\rho(x,t)&=\frac1{2\pi}\int_0^\infty 2\cos(y\tau_0)(\cosh(\tau(x)))^{-1}\cos(y\tau(x))\exp(-y^2t/2)dy\\
&\quad+\frac1{2\pi}\int_0^\infty\frac8y\sin(y\tau(x_0))x\frac{2\sin(y\tau(x))}{y\sinh(2\tau(x))}\frac{y^2}{4}\exp(-y^2t/2) dy\\
&=\frac1{\pi\sqrt{1+x^2}}\int_0^\infty\cos(y\tau_0)\cos(y\tau(x))\exp(-y^2t/2)dy\\
&\quad+\frac2{2\pi}\int_0^\infty 8\frac{\sin(y\tau(x_0))\sin(y\tau(x))}{2\cosh(\tau(x))}\frac14\exp(-y^2t/2) dy\\
&=\frac1{\pi\sqrt{1+x^2}}\int_0^\infty\cos(y\tau_0)\cos(y\tau(x))\exp(-y^2t/2)dy\\
&\quad+\frac1{\pi\sqrt{1+x^2}}\int_0^\infty\sin(y\tau(x_0))\sin(y\tau(x))
\exp(-y^2t/2) dy\\
&=\frac1{\pi\sqrt{1+x^2}}\int_0^\infty\cos y(\tau(x)-\tau(x_0))\exp(-y^2t/2)dy.
\end{align*}
Now we have the identity
\[
\int_0^\infty\cos(yz)\exp(-y^2/2)dy=\sqrt{\frac\pi{2}}\exp(-z^2/2).
\]
This gives
\[
\rho(x,t)=\frac{1}{\sqrt{2\pi t(1+x^2)}}\exp(-\frac{(\sinh^{-1}(x)-\sinh^{-1}(x_0))^2}{2t})
\]
and this checks with our explicit derivation.

\section{Long Term Behavior}

In this section we derive the long term behavior of the solution to the SDE
\[
dx_t=\sqrt{1+x_t^2}d\beta_t.
\]
This represents the situation with $A=0$.
We put $y_t=\exp(\gamma t)x_t$. Then
\begin{align*}
dy_t&=\gamma\exp(\gamma t)x_tdt+\exp(\gamma t)dx_t\\
&=\gamma y_tdt+\exp(\gamma t)\sqrt{1+\exp(-2\gamma t)y_t^2}d\beta_t.
\end{align*}
We use the Lyapunov function $v(y)=y^{2/3}$. One can see Kushner \cite{KU}
for a discussion of such techniques.  We apply the one dimensional Ito's formula
 \cite{Ok} to obtain
\begin{align*}
dv(y_t)&=\frac23y_t^{-1/3}dy_t+\frac12(-\frac13)(\frac23)y_t^{-\frac43}dy_t^2\\
&=(\frac23\gamma y^{2/3}_t-\frac19\exp(2\gamma t)y_t^{-\frac43}-\frac19y^{2/3}_t)dt\\
&\quad +\frac23 y^{-1/3}_t\exp(\gamma t)\sqrt{1+\exp(-2\gamma t)y_t^2}d\beta_t.
\end{align*}
We have therefore the following integral equation for $v(y_t)$:
\begin{align*}
v(y_t)&=v_0+\int_0^t(\frac23\gamma-\frac19)y^{2/3}_sds
-\int_0^t\frac19\exp(2\gamma s)y_s^{-4/3}ds \\
&\quad +\int_0^t\frac23\exp(\gamma s)y_s^{-1/3}\sqrt{1+\exp(-2\gamma s)
y_s^2}d\beta_s.
\end{align*}
We take expectation of both sides (note $E\int_0^tfd\beta_s=0$) to obtain
\begin{align*}
E(v(y_t))&=v_0+\int_0^t(\frac23\gamma-\frac19)E(v(y_s))ds
-\int_0^t\frac19\exp(2\gamma s)E(y_s^{-4/3})ds\\
&\leq v_0+\int_0^t|\frac23\gamma-\frac19|E(v(y_s))ds.
\end{align*}
It follows from the Gronwall-Bellman inequality that
\[
E(v(y_t))\leq v_0\exp(|\frac23\gamma-\frac19|t)
\]
Put $\gamma=1$ and use the relationship between $y$ and $x$ to obtain
\[
E(x_t^\frac23)\leq v_0\exp(-\frac19 t).
\]
 A standard Chebychev estimate, for any $\epsilon>0$, gives
\[
P(|x|>\epsilon)\leq \epsilon^{-2/3}v_0\exp(-\frac19 t).
\]
This shows that $|x_t|\to 0$ a.s. as $t\to\infty$.

\section{Numerical Results}

In this section, we use the algorithms as presented in Higham \cite{H} to
show the behavior of the solutions to the equation when $A=0$.
In Figure \ref{fone}, there are two solutions graphed with different
initial conditions.


\begin{figure}[ht]
\begin{center}
\includegraphics[width=0.9\textwidth]{fig1}
\end{center}
\caption{Solutions with initial values $10$ and $-10$.
\label{fone}}
\end{figure}

\begin{thebibliography}{0}

\bibitem{A}
L.~Andrews.
\newblock {\em Special Functions for Engineers and Applied Mathematicians}.
\newblock {Macmillan Publishing Company}, New York, 1985.

\bibitem{FR}
W.~H. Fleming and R.~W. Rishel.
\newblock {\em Deterministic and Stochastic Optimal Control}.
\newblock Springer-Verlag, New York, 1975.

\bibitem{H}
Desmond~J. Higham.
\newblock An algorithmic introduction to numerical simulation of stochastic
  differential equations.
\newblock {\em SIAM Rev.}, 43:525--546, 2001.

\bibitem{J}
D.~H. Jacobson.
\newblock A general result in stochastic optimal control of nonlinear discrete
  time systems with quadratic performance criteria.
\newblock {\em J. Math. Anal. Appl.}, 47, 1974.

\bibitem{K}
T.~H. Koornwinder.
\newblock {\em Special Functions: Group Theoretical Aspects and Applications}.
\newblock D. Reidel Publishing Company, Dordrecht, Holland, 1984.

\bibitem{KU}
Harold~Joseph Kushner.
\newblock {\em Stochastic Stability and Control}, volume~33 of {\em Mathematics
  in Science and Engineering}.
\newblock Academic Press, New York, 1967.

\bibitem{Ok}
B.~Oksendal.
\newblock {\em Stochastic Differential Equations}.
\newblock Springer-Verlag, New York, 1995.

\bibitem{Z}
Dean Zes.
\newblock Extended {LQR} model with noise amplification.
\newblock {\em Proceedings of the American Control Conference, Philadelphia},
  1998.

\end{thebibliography}

\end{document}
