\documentclass[reqno]{amsart}
\usepackage{hyperref}
\usepackage{graphicx}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2013 (2013), No. 99, pp. 1--13.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu}
\thanks{\copyright 2013 Texas State University - San Marcos.}
\vspace{8mm}}

\begin{document}
\title[\hfilneg EJDE-2013/99\hfil Persistence and extinction]
{Persistence and extinction of a non-autonomous logistic equation
 with random perturbation}

\author[M. Liu, K. Wang \hfil EJDE-2013/99\hfilneg]
{Meng Liu, Ke Wang}  % in alphabetical order

\address{Meng Liu \newline
School of Mathematical Science, Huaiyin Normal University, Huaian 223300,
China}
\email{liumeng0557@sina.com, Tel +86 0517 84183732}

\address{Ke Wang \newline
Department of Mathematics, Harbin Institute of Technology, Weihai 264209,
China}
\email{w\_k@hotmail.com}

\thanks{Submitted September 4, 2012. Published April 18, 2013.}
\subjclass[2000]{34F05, 92D25, 60H10, 60H20}
\keywords{Logistic model; random perturbation; persistence; extinction}

\begin{abstract}
 Persistence and extinction of a randomized non-autonomous logistic
 equation is studied. Sufficient conditions for extinction,
 non-persistence in the mean, weak persistence and stochastic permanence
 are established. The critical number between weak persistence and
 extinction is obtained. 
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{remark}[theorem]{Remark}
\allowdisplaybreaks


\section{Introduction}\label{sec:1}

The logistic system is one of the most important models in both ecology
and mathematical ecology. Persistence and extinction of this model
is an interesting and important topic owing to its theoretical and
practical significance. The deterministic Logistic equation is
usually denoted by:
\begin{equation}\label{1.1}
dx(t)/dt=x(t)[r-ax(t)]
\end{equation}
for $t\geq 0$ with initial value $x(0)=x_0>0$, 
and $x(t)$ is the population density at time $t$. $r$ stands for 
the growth rate and $a$ denotes the intraspecific competition coefficient;
i.e., $r/a$ is the carrying capacity. We refer the reader to
May \cite{May01} for a detailed model construction. Model \eqref{1.1} 
describes a single species whose members compete among themselves 
for a limited amount of food and living space.

Owing to its theoretical and practical significance, system
\eqref{1.1} and its generalization form have been extensively
studied and many important results on the global dynamics of
solutions have been founded, see e.g. Freedman and Wu
\cite{Freedman92}, Golpalsamy \cite{Golpalsamy} and
Lisena \cite{Lisena} and the references therein. Particularly, the book
by Golpalsamy \cite{Golpalsamy} is a very good
reference in this area.

On the other hand, in the real world, population dynamics is
inevitably affected by environmental noise which is an important
component in an ecosystem (see e.g. Gard
\cite{Gard84,Gard86,Gard88}). May \cite{May01} pointed out that due to
environmental noise, the birth rate in the population system should be stochastic. Therefore many
authors have introduced stochastic perturbations into deterministic models
to reveal the effect of environmental variability on the population
dynamics in mathematical ecology (see e.g.
\cite{Allen,Beddington,Braumann08}, 
\cite{Ji07}--\cite{Q.Luo}, \cite{Pang}--\cite{Xing12}).

In particular, under the assumption that
the growth rate $r$ in \eqref{1.1} is stochastically perturbed,
with
$$
r\to r+\sigma x^\theta\dot{B}(t),
$$
Ji, Jiang, Shi and O'Regan \cite{Ji07} studied the 
stochastic Logistic equation
\begin{equation}\label{111}
dx(t)=x(t)[(r-ax(t))dt+\sigma x^\theta dB(t)],
\end{equation}
where $\dot{B}(t)$ represents the white noise, namely $B(t)$ is a
Brownian motion defined on a complete probability space
$(\Omega,\mathcal {F},\mathcal {P})$; $\sigma^2$ denotes the intensity of the
white noise; $\theta\in (0,0.5)$. Ji et al. \cite{Ji07} showed the following
lemma.

\begin{lemma}\label{lem1}
 If $r>0$, $a>0$, $\theta\in (0,0.5)$ and
$\sigma>0$, then
\begin{itemize}
\item[(i)] Equation \eqref{111} has a unique
and positive solution on $t\geq0$ almost surely (a.s.) with any given
initial value $x_0>0$.

\item[(ii)] The solution of  \eqref{111} is stochastically
persistent; i.e., for any given $\varepsilon\in (0,1)$, there are
positive constants $H_1$ and $H_2$ such that
$$
\limsup_{t\to+\infty}\mathcal {P}\{x(t)\geq H_1\}\geq 1-\varepsilon,\quad
\limsup_{t\to+\infty}\mathcal {P}\{x(t)\leq H_2\}\geq 1-\varepsilon.
$$
\end{itemize}
\end{lemma}

Some important and interesting questions are as follows:
\begin{itemize}
\item[(Q1)] Since model \eqref{111} describes a population
dynamics, it is critical to find out when the population goes
extinct and when it is persistent. Furthermore, can we find out the
critical number between survival and extinction for model
\eqref{111}?

\item[(Q2)] The results of Lemma \ref{lem1} are based on the conditions
$\theta\in (0,0.5)$, $r>0$ and $\sigma>0$. What happens if
these conditions are not satisfied?

\item[(Q3)] Model \eqref{111} assumes that the growth rate $r$ 
and the carrying capacity $r/a$ are independent of time $t$. 
However, the natural growth rates of many populations vary with $t$ 
in real situation, for example, due to the seasonality. 
Thus, do the conclusions of Lemma \ref{lem1} still hold if all 
the coefficients vary with $t$?
\end{itemize}

In this paper, we shall study the  stochastic
non-autonomous Logistic model
\begin{equation}\label{1.2}
 dx(t)=x(t)[r(t)-a(t)x(t)]{dt}+\sigma(t)x^{1+\theta}(t){dB(t)},
\end{equation}
where $\theta\in (0,1)$; $r(t)$, $a(t)$ and $\sigma(t)$ are
continuous and bounded functions on $R_+:=[0,+\infty)$ and
$\min_{t\in R_+}a(t)>0$. We have the following results:
\begin{itemize}
\item[(R1)] Equation \eqref{1.2} has a unique
and positive solution on $t\geq0$ a.s. with any given initial value
$x_0>0$.

\item[(R2)] Define $\langle
r\rangle^\ast=\limsup_{t\to+\infty}t^{-1}\int_0^tr(s)ds$.
\begin{itemize}

\item[(R21)] If $\langle r\rangle^\ast<0$, then the species, $x(t)$,
represented by model \eqref{1.2} goes to extinction a.s., i.e.
$\lim_{t\to+\infty}x(t)=0$ a.s.

\item[(R22)] If $\langle r\rangle^\ast=0$, then $x(t)$ is
nonpersistent in the mean a.s., i.e.
\[
\lim_{t\to+\infty}\langle
x(t)\rangle=\lim_{t\to+\infty}t^{-1}\int_0^tx(s)ds=0, \quad
\text{a.s.}.
\]

\item[(R23)] If $\langle r\rangle^\ast>0$, then $x(t)$ is weakly
persistent (see e.g. \cite{Allen,Hallam86}) a.s.; i.e.,
$x^\ast=\limsup_{t\to+\infty}x(t)>0$, a.s. 
 \end{itemize}

\item[(R3)] Define $r_\ast=\liminf_{t\to+\infty}r(t)$.
If $r_\ast>0$, then $x(t)$ is stochastically permanent; i.e., 
for any given $\varepsilon\in (0,1)$, there are positive constants $H_1$ 
and $H_2$ such that
$$
\liminf_{t\to+\infty}\mathcal {P}\{x(t)\geq H_1\}\geq 1-\varepsilon,\quad
\liminf_{t\to+\infty}\mathcal {P}\{x(t)\leq H_2\}\geq 1-\varepsilon.
$$
\end{itemize}
The important contributions of this paper are therefore clear.

\begin{remark}\label{rmk1} \rm 
It is useful to point out that our definition of stochastic permanence 
is different from the definition of stochastic persistence given 
in \cite{Ji07}. It is easy to see that if $x(t)$ is stochastically 
permanent, then it is stochastically persistent. But the converse 
is not true.
\end{remark}

The rest of the paper is organized as follows. In Section 2, we
give the proofs of our main results. In Section 3, we present
numerical simulations to illustrate our main theorems. The last section
gives the conclusions.

\section{Proofs}

For the sake of convenience, we define the following symbols:
\begin{gather*}
\langle f(t)\rangle=t^{-1}\int_0^tf(s)ds,\quad
f^\ast=\limsup_{t\to+\infty}f(t),\quad
f_\ast=\liminf_{t\to+\infty}f(t),\\
\hat{\nu}=\max_{t\in R_+}\nu(t),\quad
\check{\nu}=\min_{t\in R_+}\nu(t).
\end{gather*}

\begin{theorem} Equation \eqref{1.2} has a unique and
positive solution on $t\geq0$ with any given initial value $x_0>0$.
\end{theorem}

\begin{proof}
Our proof is motivated by the works of Mao,
Marion and Renshaw \cite{Mao02}. Since the
coefficients of Eq. \eqref{1.2} are locally Lipschitz
continuous, then for any given initial value $x(0)\in R_+$, there is
a unique maximal local solution $x(t)$ on $t\in [0,\tau_e]$, where
$\tau_e$ is the explosion time (see e.g. \cite{RefM97}). To show
this solution is global, we only need to show that $\tau_e=\infty$.
To this end, let $n_0>0$ be so large that $x_0$ lies within the
interval $[1/n_0,n_0]$. For each integer $n>n_0$, define the
stopping times
$$
\tau_n=\inf\{t\in[0,\tau_e]:x(t)\notin (1/n,n)\}.
$$ 
Clearly, $\tau_n$ is increasing as $n\to \infty$. Let
$\tau_\infty=\lim_{n\to+\infty}\tau_n$, whence
$\tau_\infty\leq \tau_e$ a.s. Now, we only need to show
$\tau_\infty=\infty$. If this statement is false, there is a pair of
constants $T>0$ and $\varepsilon\in(0,1)$ such that
$$
\mathcal {P}\{\tau_\infty\leq T\}>\varepsilon.
$$
Consequently, there exists an integer $n_1\geq n_0$ such that
\begin{equation}\label{Equ8}
\mathcal {P}\{\tau_n<T\}>\varepsilon, \quad n>n_1.
\end{equation}
Define
$$
V(x)=\sqrt{x}-1-0.5\ln x.
$$
If $x(t)\in R_+$, in view of It\^{o}'s formula (see e.g.
\cite{RefM97}), we have
\begin{equation}\label{Equ9}
\begin{aligned}
dV(x)
&=V_xdx+0.5V_{xx}(dx)^2\\
&=0.5x^{-0.5}(1-x^{-0.5})\big[x(r(t)-a(t)x)dt
+\sigma(t)x^{1+\theta}dB(t)\big]\\
&\quad +0.5(-0.25x^{-1.5}+0.5x^{-2})\sigma^2(t)x^{2+2\theta}dt\\
&=\Big[-0.125\sigma^2(t)x^{0.5+2\theta}+0.25\sigma^2(t)x^{2\theta}-0.5a(t)x^{1.5}
+0.5a(t)x\\
&\quad +0.5r(t)x^{0.5}-0.5r(t)\Big]dt+0.5\sigma(t)x^\theta(x^{0.5}-1)dB(t).
\end{aligned}
\end{equation} 
Note that $\min_{t\in R_+}a(t)>0$, then
there is clearly a constant $G_1>0$ such that
$$
-0.25\sigma^2(t)x^{2\theta}(0.5x^{0.5}-1)-0.5a(t)x^{1.5}
+0.5a(t)x+0.5r(t)x^{0.5}-0.5r(t)<G_1.
$$
Substituting this inequality into  \eqref{Equ9}, we see that
$$
dV(x(t))\leq G_1dt+0.5\sigma(t)x^\theta(x^{0.5}-1)dB(t),
$$
which implies that 
$$
\int_{0}^{\tau_n\wedge T}dV(x(t))\leq
\int_{0}^{\tau_n\wedge T}G_1dt+\int_{0}^{\tau_n\wedge
T}0.5\sigma(s)x^\theta(s)(x^{0.5}(s)-1)dB(s),
$$  
where $\rho\wedge\varrho=\min\{\rho, \varrho\}$. Taking expectation on
both sides of the above inequality, we can derive that
\begin{equation}\label{Equ10}
EV(x(\tau_n\wedge T))\leq V(x_0)+G_1E(\tau_n\wedge T)\leq
V(x_0)+G_1T.
\end{equation}
Set $\Omega_n=\{\tau_n\leq T\}$, then by inequality \eqref{Equ8} we
have $\mathcal {P}(\Omega_n)\geq \varepsilon$. Note that for every
$\omega\in\Omega_n$, $x(\tau_n,\omega)$ equals either $n$ or $1/n$,
hence $V(x(\tau_n,\omega))$ is no less than
$$
\min\{\sqrt{n}-1-0.5\ln n, 1/\sqrt{n}-1+0.5\ln n\}.
$$
It then follows from \eqref{Equ10} that
$$
V(x_0)+G_1T\geq E[1_{\Omega_n}(\omega)V(x(\tau_n))]\geq
\varepsilon\min\{\sqrt{n}-1-0.5\ln n, 1/\sqrt{n}-1+0.5\ln n\}
$$
where $1_{\Omega_n}$ is the indicator function of $\Omega_n$.
Letting $n\to \infty$ leads to the contradiction
$$\infty>V(x_0)+G_1T=\infty,$$ which completes the proof.\end{proof}

\begin{theorem} \label{thm3}
If $\langle r\rangle^\ast<0$, then the
species, $x(t)$, represented by model \eqref{1.2} goes to
extinction a.s.
\end{theorem}

\begin{proof} 
Applying It\^{o}'s formula to \eqref{1.2}, it gives
$$
d\ln x=\frac{dx}{x}-\frac{(dx)^2}{2x^2}=[r(t)-a(t)x
-0.5\sigma^2(t)x^{2\theta}]{dt}+\sigma(t) x^\theta dB(t).
$$
In other words,
\begin{equation}\label{114}
\ln x(t)-\ln x_0=\int_0^t[r(s)-a(s)x(s)-0.5\sigma^2(s)x^{2\theta}(s)]ds+M(t),
\end{equation}
where $M(t)=\int_0^t\sigma(s)x^\theta(s)dB(s)$ is a local martingale,
whose quadratic variation is 
$$
\langle M(t),M(t)\rangle=\int_0^t\sigma^2(s)x^{2\theta}(s)ds.
$$ 
In view of the exponential martingale inequality (see e.g. \cite{RefM97}), 
for any positive constants $T,\alpha$ and $\beta$, we have
\begin{equation}\label{115}
\mathcal {P}\Big\{\sup_{0\leq t\leq T}\big[M(t)-\frac{\alpha}{2}\langle
M(t),M(t)\rangle\big]>\beta\Big\}\leq \exp\{-\alpha\beta\}.
\end{equation}
Choose $T=n$, $\alpha=1$, $\beta=2\ln n$, then it follows that
$$
\mathcal {P}\Big\{\sup_{0\leq t\leq
n}\big[M(t)-\frac{1}{2}\langle M(t),M(t)\rangle\big]>2\ln n\Big\}\leq 1/n^2.
$$
Applying the Borel--Cantelli lemma \cite{RefM97} shows that for almost
all $\omega\in\Omega$, there is a random
integer $n_0=n_0(\omega)$ such that for $n\geq n_0$,
$$
\sup_{0\leq t\leq
n}\big[M(t)-\frac{1}{2}\langle M(t),M(t)\rangle\big]\leq2\ln n.
$$ 
That is to say
$$
M(t)\leq2\ln n+\frac{1}{2}\langle M(t),M(t)\rangle
=2\ln n+0.5 \int_0^t\sigma^2(s)x^{2\theta}(s)ds
$$
for all $0\leq t\leq n$, $n\geq n_0$ almost surely. Substituting the
above inequality into \eqref{114}, it results in
\begin{equation}\label{119}
\ln x(t)-\ln x_0\leq
\int_0^tr(s)ds-\int_0^ta(s)x(s)ds+2\ln n\leq \int_0^tr(s)ds
+2\ln n
\end{equation} 
for all $0\leq t\leq n,~n\geq n_0$ almost surely. In
other words, we have shown that for $0<n-1\leq t\leq n$,
$$
t^{-1}\{\ln x(t)-\ln x_0\}\leq
\langle r(t)\rangle+\frac{2\ln n}{n-1},
$$ 
which means that
$[t^{-1}\ln x(t)]^\ast\leq \langle r\rangle^\ast$. That is to say,
if $\langle r\rangle^\ast<0$, one can see that
$\lim_{t\to +\infty} x(t)=0$.
\end{proof}

\begin{theorem} \label{thm4}
If $\langle r\rangle^\ast=0$, then $x(t)$ is nonpersistent in the 
mean a.s.
\end{theorem}

\begin{proof}
For any given $\varepsilon>0$, there exists a
$T_1$ such that 
$$
t^{-1}\int_{0}^tr(s)ds\leq \langle
r\rangle^\ast+\varepsilon/2=\varepsilon/2,\quad t\geq T_1.
$$
Substituting this inequality into \eqref{119}, one can see that
$$
\ln x(t)-\ln x_0\leq \int_0^tr(s)ds-\int_0^ta(s)x(s)ds+2\ln n\leq
\varepsilon t/2-\check{a}\int_0^tx(s)ds+2\ln n
$$ 
for all $T_1\leq t\leq n$, $n\geq n_0$ almost surely. 
Note that there exists a $T>T_1$
such that for all $T\leq n-1\leq t\leq n$ and $n\geq n_0$ we have
$(\ln n)/t\leq \varepsilon/4$. In other words, we have already shown
that
$$
\ln x(t)-\ln x_0\leq \varepsilon t-\check{a}\int_0^tx(s)ds
$$ 
for sufficiently large $t>T$. Let $g(t)=\int_{0}^{t}x(s)ds$, then we
obtain
$$
\ln(dg/dt)<\varepsilon t-\check{a}g(t)+\ln x_0,\quad t>T,
$$
which means that
$$
\exp(\check{a}g(t))(dg/dt)<x_0\exp(\varepsilon t),\quad t>T.
$$
Integrating this inequality from $T$ to $t$ gives
$$
\check{a}^{-1}\big[\exp(\check{a}g(t))-\exp(\check{a}g(T))\big]
<x_0\varepsilon^{-1}\big[\exp(\varepsilon t)-\exp(\varepsilon T)\big].
$$
Rewriting this inequality one then sees that
$$
\exp(\check{a}g(t))<\exp(\check{a}g(T))+x_0\check{a}
\varepsilon^{-1}\exp(\varepsilon t)-x_0\check{a}
\varepsilon ^{-1}\exp(\varepsilon T).
$$ 
Taking the logarithm of both sides leads to
$$
g(t)<\check{a}^{-1}\ln\Big\{x_0\check{a}\varepsilon^{-1}\exp(\varepsilon
t)+\exp(\check{a}g(T))-x_0\check{a}\varepsilon ^{-1}\exp(\varepsilon
T)\Big\}.
$$ 
In other words, we have already shown that
$$
\Big\{t^{-1}\int_{0}^{t}x(s)ds\Big\}^\ast
\leq\check{a}^{-1}\Big\{t^{-1}\ln\big[x_0\check{a}\varepsilon^{-1}
\exp(\varepsilon t)+\exp(\check{a}g(T))
-x_0\check{a}\varepsilon ^{-1}\exp(\varepsilon T)\big]\Big\}^\ast.
$$ 
Applying L'H\^{o}pital's rule, one can derive
$$
\langle x\rangle^\ast\leq \check{a}^{-1}
\Big\{t^{-1}\ln\big[x_0\check{a}\varepsilon^{-1}\exp(\varepsilon t)
\big]\Big\}^\ast=\varepsilon/\check{a}.
$$
Since $\varepsilon$ is arbitrary, we get $\langle x\rangle^\ast\leq
0$, which is the required assertion.
\end{proof}

\begin{theorem} \label{thm5}
If $\langle r\rangle^\ast>0$, then $x(t)$ is weakly persistent a.s.
\end{theorem}

\begin{proof} 
First, let us show that
\begin{equation}\label{116} 
[t^{-1}\ln
x(t)]^\ast\leq 0\quad\text{a.s.}
\end{equation} 
In fact, applying It\^{o}'s formula to \eqref{1.2}, it results in
\begin{align*}
d(\exp(t)\ln x)
&=\exp(t)\ln xdt+\exp(t)d\ln x\\
&=\exp(t)[\ln x+r(t)-a(t)x-0.5\sigma^2(t)x^{2\theta}]{dt}
+\exp(t)\sigma(t) x^\theta dB(t).
\end{align*}
Thus, we have shown that
\begin{equation}\label{117}
\exp(t)\ln x(t)-\ln x_0=\int_0^t\exp(s)[\ln
x(s)+r(s)-a(s)x(s)-0.5\sigma^2(s)x^{2\theta}(s)]{ds}+N(t),
\end{equation}
where $N(t)=\int_0^t\exp(s)\sigma(s)x^\theta(s)dB(s)$ is a
martingale with the quadratic form
$$
\langle N(t),N(t)\rangle=\int_0^t\exp(2s)\sigma^2(s) x^{2\theta}(s)ds.
$$ 
It then follows from the exponential martingale inequality \eqref{115},
by choosing $T=\gamma k$,
 $\alpha=\exp(-\gamma k)$ and $\beta=\theta\exp(\gamma k)\ln k$, that
$$
\mathcal {P}\Big\{\sup_{0\leq t\leq
\gamma k}\big[N(t)-0.5\exp(-\gamma k)\langle
N(t),N(t)\rangle\big]>\theta\exp(\gamma k)\ln k\Big\}\leq k^{-\theta},
$$
where $\theta>1$ and $\gamma>1$. By virtue of the famous Borel-Cantelli lemma,
for almost all $\omega\in\Omega$, there exists $k_0(\omega)$ such
that for every $k\geq k_0(\omega)$,
$$
N(t)\leq 0.5\exp(-\gamma k)\langle N(t),N(t)\rangle+\theta\exp(\gamma k)\ln k,
\quad 0\leq t\leq \gamma k.
$$ 
Substituting the above inequality into  \eqref{117}
yields 
\begin{align*}
&\exp(t)\ln x(t)-\ln x_0\\
&\leq\int_0^t\exp(s)\big[\ln
x(s)+r(s)-a(s)x(s)-0.5\sigma^2(s)x^{2\theta}(s)\big]{ds}\\
&\quad +0.5\exp(-\gamma k)\int_0^t\exp(2s)\sigma^2(s) 
x^{2\theta}(s)ds+\theta\exp(\gamma k)\ln k \\
&=\int_0^t\exp(s)\big[\ln x(s)+r(s)-a(s)x(s)\\
&\quad -0.5\sigma^2(s)x^{2\theta}(s)
[1-\exp(s-\gamma k)]\big]{ds}+\theta\exp(\gamma k)\ln k.
\end{align*}
It is easy to see that for any $0\leq s\leq \gamma k$ and $x>0$, since 
$\min_{t\in R_+}a(t)>0$, then there exists a constant $C$ independent of $k$
such that
$$
\ln x+r(s)-a(s)x-0.5\sigma^2(s)x^{2\theta}[1-\exp(s-\gamma k)]\leq C.
$$
In other words, for any $0\leq t\leq \gamma k$, we have
$$
\exp(t)\ln x(t)-\ln x_0\leq C[\exp(t)-1]+\theta\exp(\gamma k)\ln k.
$$ 
That is to say
$$
\ln x(t)\leq\exp(-t)\ln x_0+C[1-\exp(-t)]
 +\theta\exp(-t)\exp(\gamma k)\ln k.
$$ 
If $\gamma(k-1)\leq t\leq \gamma k$ and $k\geq k_0(\omega)$, we
have
$$
\ln x(t)/t\leq\exp(-t)\ln x_0/t+C[1-\exp(-t)]/t
+\theta\exp(-\gamma(k-1))\exp(\gamma k)\ln k/t,
$$ 
which becomes the desired assertion \eqref{116} by letting
$t\to +\infty$.

Now suppose that $\langle r\rangle^\ast>0$; we prove
that $x^\ast>0$ a.s. If this assertion is not true, let $S$ be the
set $S=\{x^\ast=0\}$, then $\mathcal {P}(S)>0$. It follows from
\eqref{114} that
\begin{equation}\label{118}
t^{-1}[\ln x(t)-\ln x(0)]
=\langle r(t)\rangle-\langle a(t)x(t)\rangle-0.5\langle
\sigma^2(t)x^{2\theta}(t)\rangle+M(t)/t.
\end{equation} 
On the other hand, for all $\omega\in S$, we have
$\lim_{t\to +\infty} x(t,\omega)=0$, then the law of large numbers for local
martingales  (see e.g. \cite{RefM97}) implies that
$\lim_{t\to +\infty}M(t)/t=0$. Substituting the above
inequality into \eqref{118} gives 
$$
[\ln x(t,\omega)/t]^\ast=\langle r(t)\rangle^\ast>0.
$$ 
Then $\mathcal {P}\Big([\ln x(t)/t]^\ast>0\Big)>0$, this
contradicts \eqref{116}.
\end{proof}

\begin{theorem} \label{thm6}
If $r_\ast>0$, then the species $x(t)$ represented by model \eqref{1.2}
 will be stochastically permanent.
\end{theorem}

\begin{proof} 
First we demonstrate that for any given 
$0<\varepsilon<1$, there exists a constant $H_1>0$ such that
$\mathcal{P}_\ast\{x(t)\geq H_1\}\geq 1-\varepsilon$. Define
$$
V_1(x)=1/x^{1+\theta}
$$ 
for $x\in R_+$. Applying It\^{o}'s formula to
equation \eqref{1.2} we can obtain 
\begin{align*}
dV_1(x(t))
&=-(1+\theta)x^{-2-\theta}dx+0.5(1+\theta)(2+\theta)x^{-3-\theta}(dx)^2\\
&=(1+\theta)V_1(x)[ a(t)x-r(t)]dt+0.5(1+\theta)(2+\theta)\sigma^2(t)
 x^{\theta-1}dt \\
&\quad -(1+\theta)\sigma(t)x^{-1}dB(t).
\end{align*}
Define 
$$
V_2(x)=(1+V_1(x))^\kappa,
$$ 
where $0<\kappa<1$. Applying It\^{o}'s formula again leads to 
\begin{align*}
dV_2(x(t))
&=\kappa(1+V_1(x(t)))^{\kappa-1}dV_1+0.5\kappa(\kappa-1)
(1+V_1(x(t)))^{\kappa-2}(dV_1)^2 \\
&=\kappa(1+V_1(x))^{\kappa-2}\Big\{(1+V_1(x))\Big[(1+\theta)V_1(x)
[a(t)x-r(t)]\\
&\quad +0.5(1+\theta)(2+\theta)\sigma^2(t)x^{\theta-1}\Big]
 +0.5(\kappa-1)(1+\theta)^2\sigma^2(t)x^{-2}\Big\}dt\\
&\quad -\kappa(1+V_1(x))^{\kappa-1}(1+\theta)\sigma(t)x^{-1}dB(t)\\
&=\kappa(1+\theta)(1+V_1(x))^{\kappa-2}\Big\{-r(t)V_1^2(x)-r(t)V_1(x)
+a(t)V_1(x)x^{-\theta}\\
&\quad +a(t)x^{-\theta}+0.5(2+\theta)\sigma^2(t)x^{\theta-1}\\
&\quad +0.5(2+\theta)\sigma^2(t)x^{-2}+0.5(\kappa-1)(1+\theta)
\sigma^2(t)x^{-2}\Big\}dt \\
&\quad -\kappa(1+V_1(x))^{\kappa-1}(1+\theta)\sigma(t)x^{-1}dB(t)\\
&\leq\kappa(1+\theta)(1+V_1(x))^{\kappa-2}
 \Big\{-\big(r_\ast-\varepsilon\big)V_1^2(x)\\
&\quad +\hat{a}V_1(x)x^{-\theta}+\hat{a}x^{-\theta}
 +1.5\hat{\sigma^2}x^{\theta-1} +1.5\hat{\sigma^2}x^{-2}\Big\}dt\\
&\quad -\kappa(1+V_1(x))^{\kappa-1}(1+\theta)\sigma(t)x^{-1}dB(t)
\end{align*}
for sufficiently large $t$. In the last inequality, we have used the
facts that $r_\ast>0$, $\theta<1$ and $\kappa<1$. Now, choose $\eta>0$
sufficiently small to satisfy
$$
0<\frac{\eta}{\kappa(1+\theta)}<r_\ast-\varepsilon.
$$
Define $V_3(x)=\exp\{\eta t\}V_2(x)$. By  It\^{o}'s formula,
\begin{align*}
&dV_3(x(t))\\
&=\eta\exp\{\eta t\}V_2(x)dt+\exp\{\eta t\}dV_2(x)\\
&\leq(1+\theta)\kappa\exp\{\eta t\}(1+V_1(x))^{\kappa-2}
\Big\{\frac{\eta(1+V_1(x))^2}{\kappa(1+\theta)}\\
&\quad -(r_\ast-\varepsilon)V_1^2(x)+\hat{a}V_1(x)x^{-\theta}
 +\hat{a}x^{-\theta}+1.5\hat{\sigma^2}x^{\theta-1}
 +1.5\hat{\sigma^2}x^{-2}\Big\}dt \\
&\quad -\exp\{\eta t\}\kappa(1+V_1(x))^{\kappa-1}(1+\theta)
 \sigma(t)x^{-1}dB(t)\\
&=(1+\theta)\kappa\exp\{\eta t\}(1+V_1(x))^{\kappa-2}\Big\{
-\Big(r_\ast-\varepsilon-\frac{\eta}{\kappa(1+\theta)}\Big)V_1^2(x)\\
&\quad +\frac{2\eta}{\kappa(1+\theta)}V_1(x)+\frac{\eta}{\kappa(1+\theta)}
 +\hat{a}V_1(x)x^{-\theta}+\hat{a}x^{-\theta}+1.5\hat{\sigma^2}x^{\theta-1}
 +1.5\hat{\sigma^2}x^{-2}\Big\}dt\\
&-\exp\{\eta t\}\kappa(1+V_1(x))^{\kappa-1}(1+\theta)\sigma(t)x^{-1}dB(t)\\
&=\exp\{\eta t\}J(x)dt-\exp\{\eta t\}\kappa(1+V_1(x))^{\kappa-1}
 (1+\theta)\sigma(t)x^{-1}dB(t)
\end{align*}
for sufficiently large $t$, where
\begin{equation}\label{11}
\begin{aligned}
J(x)&=(1+\theta)\kappa (1+V_1(x))^{\kappa-2}
\Big\{ -\Big(r_\ast-\varepsilon-\frac{\eta}{\kappa(1+\theta)}\Big)V_1^2(x)
 +\frac{2\eta}{\kappa(1+\theta)}V_1(x)\\
&\quad +\frac{\eta}{\kappa(1+\theta)}+\hat{a}V_1(x)x^{-\theta}
 +\hat{a}x^{-\theta}+1.5\hat{\sigma^2}x^{\theta-1}
 +1.5\hat{\sigma^2}x^{-2}\Big\}.
\end{aligned}
\end{equation}
 Now, let us show that $J(x)$ is upper bounded in $R_+$. 
To prove this, without loss of generality, let us suppose that 
$\hat{\sigma^2}>0$. Set
\[
K=\min\Big\{1,\Big(\frac{r_\ast-\varepsilon
-\eta/[\kappa(1+\theta)]}{3\hat{\sigma^2}}\Big)^{1/(2\theta)}\Big\}.
\]

(a) If $x\geq K$, then it follows from the definition of $V_1(x)$
that $J(x)$ is upper bounded, namely, there exists a positive number
$J_1$ such that $\sup_{x\geq K}J(x)<J_1$.

(b) If $x<K$, then making use of $x<1$ and $0<\theta<1$ lead to that
\begin{equation}\label{22}
x^{-\theta}\leq x^{-0.5-0.5\theta}=V_1^{0.5}(x),\quad
x^{\theta-1}=x^{2\theta}x^{-\theta-1}\leq V_1(x) .
\end{equation} 
At the same time, it follows from
$x<\Big(\frac{r_\ast-\varepsilon
-\eta/[\kappa(1+\theta)]}{3\hat{\sigma^2}}\Big)^{1/(2\theta)}$
that
\begin{equation}\label{33}
-0.5\Big(r_\ast-\varepsilon-\frac{\eta}{\kappa(1+\theta)}\Big)V_1^2(x)
+1.5\hat{\sigma^2}x^{-2}<0.
\end{equation}
Substituting \eqref{22} and \eqref{33} into \eqref{11} gives 
\begin{align*}
J(x)&\leq(1+\theta)\kappa(1+V_1(x))^{\kappa-2}\Big\{
 -0.5\Big(r_\ast-\varepsilon-\frac{\eta}{\kappa(1+\theta)}\Big)V_1^2(x)
 +\frac{2\eta}{\kappa(1+\theta)}V_1(x)\\
&\quad +\frac{\eta}{\kappa(1+\theta)}+\hat{a}V_1^{1.5}(x)+\hat{a}V_1^{0.5}(x)
 +1.5\hat{\sigma^2}V_1(x)\Big\}\\
&=(1+\theta)\kappa(1+V_1(x))^{\kappa-2}\Big\{
-0.5\Big(r_\ast-\varepsilon-\frac{\eta}{\kappa(1+\theta)}\Big)V_1^2(x)
 +\hat{a}V_1^{1.5}(x)\\
&\quad +[\frac{2\eta}{\kappa(1+\theta)}+1.5\hat{\sigma^2}]V_1(x)
+\hat{a}V_1^{0.5}(x)+\frac{\eta}{\kappa(1+\theta)}\Big\} \\
&=:(1+\theta)\kappa(1+V_1(x))^{\kappa-2}H(x).
\end{align*}
Note that $r_\ast-\varepsilon-\frac{\eta}{\kappa(1+\theta)}>0$, 
then there is a positive constant $x_0\leq K$ such that if $x\leq x_0$, 
then $H(x)\leq 0$. Therefore if $0<x\leq x_0$, then $J(x)\leq0$.
 On the other hand, if $x_0\leq x\leq K$, by the continuity of 
$(1+\theta)\kappa(1+V_1(x))^{\kappa-2}H(x)$, there is a positive number
 $J_2$ such that $\sup_{x_0\leq x\leq K}J(x)<J_2$. In other words, 
we have shown that if $x\leq K$, then $\sup_{x\leq K}J(x)<J_2$. Consequently,
$J(x)$ is upper bounded in $R_+$, namely $J_3:=\sup_{x\in
R_+}J(x)<+\infty$. Therefore,
$$
dV_3(x(t))\leq J_3\exp\{\eta t\}dt-\exp\{\eta
t\}\kappa(1+V_1(x))^{\kappa-1}(1+\theta)\sigma(t)x^{-1}dB(t)
$$ 
for sufficiently large $t$. Integrating both sides of the above
inequality and then taking expectations give
$$
E\Big[\exp\{\eta t\}\Big(1+V_1(x(t))\Big)^\kappa\Big]
\leq \Big(1+V_1(x(T))\Big)^\kappa
+J_3(\exp\{\eta t\}-\exp\{\eta T\})/\eta.
$$ 
That is to say 
$$
\limsup_{t\to+\infty}E[V_1^\kappa(x(t))]\leq
\limsup_{t\to+\infty}E[(1+V_1(x(t)))^\kappa]
\leq J_3/\eta.
$$ 
In other words, we have already shown that
$$
\limsup_{t\to+\infty}E[x^{-\kappa(1+\theta)}(t)]\leq J_3/\eta=:J_4.
$$
Thus for any given $\varepsilon>0$, let
$H_1=(\varepsilon/J_4)^{1/[\kappa(1+\theta)]}$; then by
Chebyshev's inequality, we can derive that
$$
\mathcal {P}\{x(t)<H_1\}=\mathcal {P}\{x^{-\kappa(1+\theta)}(t)
>H_1^{-\kappa(1+\theta)}\}
\leq H_1^{\kappa(1+\theta)}
E[x^{-\kappa(1+\theta)}(t)],
$$ 
that is to say
$\limsup_{t\to+\infty}\mathcal {P}\{x(t)< H_1\}\leq
H_1^{\kappa(1+\theta)} J_4=\varepsilon$. Consequently
$$
\liminf_{t\to+\infty}\mathcal {P}\{x(t)\geq
H_1\}\geq 1-\varepsilon.
$$
Next we show that for arbitrary fixed $\varepsilon>0$, there exists
$H_2>0$ such that 
$\mathcal {P}_\ast(x(t)\leq H_2)\geq 1-\varepsilon$. The following 
proof is motivated by the works of Luo and Mao \cite{Q.Luo}. 
Define 
$$
V(x)=x^q
$$ 
for $x\in R_+$, where $0<q<1$. Then it follows from It\^{o}'s formula that
\begin{align*}
dV(x)
&=qx^{q-1}dx+\frac{q(q-1)}{2}x^{q-2}(dx)^2\\
&=qx^{q-1}\Big\{x[r(t)-a(t)x]+\sigma(t)x^{1+\theta}dB(t)\Big\}
+\frac{q-1}{2}x^{q-2}\sigma^2(t)x^{2+2\theta}dt \\
&=qx^q\big[r(t)-a(t)x-\frac{1-q}{2}\sigma^2(t)x^{2\theta}\big]dt
+q\sigma(t)x^{q+\theta}dB(t).
\end{align*}
Let $k_0>0$ be so large that $x_0$ lies within the
interval $[1/k_0, k_0]$. For each integer $k\geq k_0$, define the
stopping time
$$
\tau_k=\inf\{t\geq 0: x(t)\notin(1/k,k)\}.
$$
Clearly $\tau_k\to \infty$ almost surely as $k\to \infty$. 
Applying It\^{o}'s formula again to $\exp\{t\}V(x)$ gives
\begin{align*}
&d(\exp\{t\}V(x))=\exp\{t\}V(x)dt+\exp\{t\}dV(x)\\
&=\exp\{t\}\big[x^q+qx^q(r(t)-a(t)x-\frac{1-q}{2}\sigma^2(t)
 x^{2\theta})\big]dt+\exp\{t\}q\sigma(t)x^{q+\theta}dB(t)\\
&\leq\exp\{t\}[x^q+qx^q(r(t)-a(t)x)]dt+\exp\{t\}q\sigma(t)
 x^{q+\theta}dB(t)\\
&\leq \exp\{t\}M_5+\exp\{t\}q\sigma(t)x^{q+\theta}dB(t),
\end{align*}
where $M_5$ is a positive constant.
Integrating this inequality and then taking expectations on both
sides, one can see that
$$
E\big[\exp\{t\wedge\tau_k\}x^q(t\wedge\tau_k)\big]-x^q_0
\leq E\int_0^{t\wedge\tau_k}\exp\{s\}M_5ds\leq M_5(\exp\{t\}-1).
$$
Letting $k\to \infty$ yields
$$
\exp\{t\}E[x^q(t)]\leq x^q_0+M_5(\exp\{t\}-1),
$$
which indicates that
$$
\limsup_{t\to+\infty}E[x^q(t)]\leq M_5.
$$
Then the desired assertion follows from Chebyshev's inequality.
\end{proof}

\section{Numerical simulations}
In this section we shall use the Milstein method mentioned in Higham
\cite{D.J.Higham} to illustrate the analytical results.
Consider the discretization equation
\begin{align*}
x_{k+1}&=x_k+x_k[r(k~\Delta t)-a(k~\Delta t)x_k]\Delta t
+\sigma(k\Delta t) x_k^{1+\theta}\sqrt{\Delta
t}\xi_{k}\\
&\quad +0.5\sigma^2(k~\Delta t)x_k^{2+2\theta}(\xi_{k}^2\Delta
t-\Delta t),
\end{align*}
where $\xi_k$, $k=1,2,\dots ,n$ are Gaussian random variables.

In Figure 1, we choose $\theta=0.8$, $a(t)=0.3+0.1\sin (2t)$ and
$\sigma^2(t)=8$. The only difference between conditions of Figure 1(a),
Figure 1(b), Figure 1(c) and Figure 1(d) is that the representation of $r(t)$
is different. In Figure 1(a), we choose $r(t)=-0.001+0.2\sin t$. Then
we have $\langle r(t)\rangle^\ast<0$. In view of Theorem \ref{thm3},
$x$ goes to extinction. Figure 1(a) confirms this. In
Figure 1(b), we choose $r(t)=0.2\sin t$. Then it is easy to obtain
$\langle r(t)\rangle^\ast=0$. It follows from Theorem \ref{thm4} that
$x$ is non-persistent in the mean. See Figure 1(b). In
Figure 1(c), we choose $r(t)=0.001+0.2\sin t$. Then
$\langle r(t)\rangle^\ast>0$. By virtue of Theorem \ref{thm5}, one
can obtain that $x$ is weakly persistent. This can be
seen from Figure 1(c). In Figure 1(d), we choose $r(t)=0.12+0.02\sin t$.
Then $\liminf_{t\to+\infty}r(t)>0$. By
Theorem \ref{thm6}, $x$ is stochastically
permanent. Figure 1(d) confirms this.

\begin{figure}[ht]
\begin{center}
\includegraphics[width=0.49\textwidth]{fig1a}
\includegraphics[width=0.49\textwidth]{fig1b}\\
(a)\hfil (b)\\
\includegraphics[width=0.49\textwidth]{fig1c}
\includegraphics[width=0.49\textwidth]{fig1d}\\
(c) \hfil (d)
\end{center}
\caption{Solutions of system \eqref{1.2} for $\theta=0.8$,
$a(t)=0.3+0.1\sin(2t)$, $\sigma^2(t)=8$, $x(0)=0.4$, step size 
$\Delta t=0.001$. The
horizontal axis represents the time $t$. (a) with
$r(t)=-0.001+0.2\sin t$; (b)  with $r(t)=0.2\sin t$; (c)  with
$r(t)=0.001+0.2\sin t$; (d) with $r(t)=0.12+0.02\sin t$.}
  \label{fig1}
\end{figure}

\section{Concluding remarks}

For a stochastic non-autonomous Logistic
equation we obtained sufficient conditions for extinction,
non-persistence in the mean, weak persistence and stochastic
permanence. The critical number between weak
persistence and extinction was obtained for the first time.
The behavior of the model for several coefficient cases was studied.
More precisely,
\begin{itemize}
\item[(I)] If $\langle r\rangle^\ast<0$, then $x(t)$
is extinctive with probability one.

\item[(II)] If $\langle r\rangle^\ast=0$, then $x(t)$ is non-persistent
in the mean with probability one.

\item[(III)] If $\langle r\rangle^\ast>0$, then $x(t)$ is weakly persistent
with probability one.

\item[(IV)] If $r_\ast>0$, then $x(t)$ is stochastically
permanent. 
\end{itemize}

Our key contributions in this article are:
\begin{itemize}
\item[(A)] We obtained the critical number between weak persistence and
extinction for the first time, which is neglected by all the
existing papers.

\item[(B)] Our conditions of Theorem \ref{thm6} are much weaker than
those of (ii) in Lemma \ref{lem1}, and our results are stronger than (ii) in
Lemma \ref{lem1} (see Remark \ref{rmk1} above).

\item[(C)] This article deals with the \emph{non-autonomous}
stochastic logistic model, while \cite{Ji07} considered the
\emph{autonomous} case.
\end{itemize}

\subsection*{Acknowledgements}
The authors thank the editor and reviewer for their important and valuable
comments. The authors were supported by grants 11171081 and 11171056 from
the NSFC of China.

\begin{thebibliography}{00}

\bibitem{Allen} Allen, L. J. S.;
\emph{Persistence and extinction in single-species reaction-
diffusion models}.  {Bull. Math. Biol.} 1983; 45: 209-227.

 \bibitem{Beddington} Beddington J. R.; May R.M.;
\emph{Harvesting natural populations in a randomly
fluctuating environment}.  {Science} 1977; 197: 463--465.

\bibitem{Braumann08} Braumann, C. A.;
\emph{Growth and extinction of populations in randomly
varying environments}.  {Comput. Math. Appl.} 2008; 56: 631-644.

\bibitem{Freedman92} Freedman, H. I.; Wu, J.;
\emph{Periodic solutions of single-species models
with periodic delay,} SIAM J. Math. Anal. 1992; 23: 689--701.

\bibitem{Gard84} Gard, T.C.;
\emph{Persistence in stochastic food web models.}  {Bull.
Math. Biol.} 1984; 46: 357--370.

\bibitem{Gard86} Gard, T. C.;
\emph{Stability for multispecies population models in random
environments.}  {Nonlinear Anal.} 1986; 10:1411--1419.

\bibitem{Gard88} Gard, T. C.;
\emph{Introduction to Stochastic Differential Equations},
Dekker: New York, 1988.


\bibitem{Golpalsamy} Golpalsamy, K.;
\emph{Stability and Oscillations in Delay Differential
Equations of Population Dynamics.} Kluwer Academic: Dordrecht, 1992.

\bibitem{Hallam86} Hallam, T. G.; Ma, Z.;
\emph{Persistence in population models with demographic fluctuations}.
 {J. Math. Biol.} 1986; 24: 327-339.

\bibitem{D.J.Higham} Higham, D. J.;
\emph{An algorithmic introduction to numerical simulation of
stochastic differential equations}. SIAM Rev. 2001; 43: 525--546.

\bibitem{Ji07} Ji, C. Y.; Jiang, D. Q.; Shi, N. Z.; O'Regan, D.;
\emph{Existence, uniqueness, stochastic persistence and global stability 
of positive solutions of the logistic equation with random perturbation}.
  {Math. Meth. Appl. Sci.} 2007; 30: 77--89.

\bibitem{Jiang05} Jiang, D. Q.; Shi, N. Z.;
\emph{A note on non-autonomous logistic equation with random perturbation}.
{J. Math. Anal. Appl.} 2005; 303: 164--172.

\bibitem{Jiang06} Jiang, D. Q.; Shi, N. Z.; Li, X. Y.;
\emph{Global stability and stochastic permanence of a non-autonomous 
logistic equation with random perturbation}.  {J. Math. Anal. Appl.} 
2008; 340: 588-597.

\bibitem{Li09} Li, X. Y.; Mao, X. R.;
\emph{Population dynamical behavior of non-autonomous
Lotka-Volterra competitive system with random perturbation}.
 {Discrete Contin. Dyn. Syst.} 2009; 24: 523-545.

\bibitem{Liu11c} Liu, M.; Wang, K.;
\emph{Persistence and extinction in stochastic non-autonomous logistic systems}. 
J. Math. Anal. Appl. 2011; 375: 443-457.

\bibitem{Liu11a} Liu, M.; Wang, K.; Wu, Q.;
\emph{Survival analysis of stochastic competitive models in a
polluted environment and stochastic competitive exclusion principle}.
 Bull. Math. Biol. 2011; 73: 1969-2012.

\bibitem{Liu12a} Liu, M.; Wang, K.;
\emph{On a stochastic logistic equation with impulsive perturbations}. 
Comput. Math. Appl. 2012; 63: 871-886.

\bibitem{Liu12b} Liu, M.; Wang, K.;
\emph{Stationary distribution, ergodicity and extinction of a stochastic
generalized logistic system}. Appl. Math. Lett. 2012; 25: 1980-1985.

\bibitem{Liu13a} Liu, M.; Wang, K.;
\emph{Population dynamical behavior of Lotka-Volterra cooperative systems
 with random perturbations}. Discrete Contin. Dyn. Syst. 2013; 33: 2495-2522.

\bibitem{Liu13b} Liu, M.; Wang, K.;
\emph{Analysis of a stochastic autonomous mutualism model}. 
J. Math. Anal. Appl. 2013; 402: 392-403.

\bibitem{Li} Li, X. Y.; Jiang, D. Q.; Mao, X. R.;
\emph{Population dynamical behavior of
Lotka-Volterra system under regime switching}.  {J. Comput. Appl.
Math.} 2009; 232: 427--448.

\bibitem{Lisena} Lisena, B.;
\emph{Global attractivity in nonautonomous logistic
equations with delay}.  {Nonlinear Anal. Real World Appl.} 2008;
9:53-63.

\bibitem{Q.Luo} Luo, Q.; Mao, X. R.;
\emph{Stochastic population dynamics under regime switching}.  
{J. Math. Anal. Appl.} 2007; 334: 69--84.

\bibitem{RefM97} Mao, X. R.;
\emph{Stochastic Differential Equations and
Applications}. Horwood Publishing: Chichester, 1997.

\bibitem{Mao02} Mao, X. R.; Marion, G.; Renshaw, E.;
\emph{Environmental Brownian noise suppresses explosions 
in populations dynamics}.  {Stochastic Process. Appl.} 2002; 97: 95-110.

\bibitem{May01} May, R. M.;
\emph{Stability and Complexity in Model Ecosystems},
Princeton University Press: NJ, 2001.

\bibitem{Pang} Pang, S.; Deng, F.; Mao, X. R.;
\emph{Asymptotic properties of stochastic population dynamics}. 
 {Dyn. Contin. Discrete Impuls. Syst. Ser. A Math. Anal.} 2008; 15: 603--620.

\bibitem{Rudnicki07} Rudnicki, R.; Pichor, K.;
\emph{Influence of stochastic perturbation on
prey-predator systems}.  {Math. Biosci.} 2007; 206: 108--119.

\bibitem{Wu12} Wu, Z.; Huang, H.; Wang, L.;
\emph{Stochastic delay Logistic model under regime switching}. 
Abstr. Appl. Anal. 2012; doi:10.1155/2012/241702.

\bibitem{Xing12} Xing, Z.; Peng, J.;
\emph{Boundedness, persistence and extinction of a stochastic non-autonomous 
logistic system with time delays}. Appl. Math. Model. 2012;  36: 3379-3386.

\end{thebibliography}

\end{document}

