\documentclass[reqno]{amsart}
\usepackage{hyperref}
\usepackage{graphicx}
\AtBeginDocument{{\noindent\small
Seventh Mississippi State - UAB Conference on Differential Equations and
Computational Simulations,
{\em Electronic Journal of Differential Equations},
Conf. 17 (2009), pp. 107--121.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu}
\thanks{\copyright 2009 Texas State University - San Marcos.}
\vspace{9mm}}
\begin{document} \setcounter{page}{107}
\title[\hfilneg EJDE-2009/Conf/17\hfil Fourth-order PDEs for image denoising]
{Fourth-order partial differential equations for effective
image denoising}
\author[S. Kim, H. Lim \hfil EJDE/Conf/17 \hfilneg]
{Seongjai Kim, Hyeona Lim} % in alphabetical order
\address{Seongjai Kim \newline
Department of Mathematics \& Statistics, Mississippi State
University, Mississippi State, MS 39762, USA}
\email{skim@math.msstate.edu}
\address{Hyeona Lim \newline
Department of Mathematics \& Statistics,
Mississippi State University,
Mississippi State, MS 39762, USA}
\email{hlim@math.msstate.edu}
\thanks{Published April 15, 2009.}
\subjclass[2000]{35K55, 65M06, 65M12}
\keywords{Variational approach;
PDE-based restoration model; \hfill\break\indent
fourth-order PDEs; piecewise planarity condition}
\begin{abstract}
This article concerns mathematical image denoising methods
incorporating fourth-order partial differential equations (PDEs).
We introduce and analyze \emph{piecewise planarity conditions} (PPCs)
with which unconstrained fourth-order variational models in continuum
converge to a piecewise planar image.
It has been observed that fourth-order variational models holding
PPCs can restore better images than models without PPCs and
second-order models.
Numerical schemes are presented in detail and various examples in image
denoising are provided to verify the claim.
\end{abstract}
\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\section{Introduction} \label{intro}
During the previous decade or so, mathematical frameworks
employing powerful tools of partial differential equations (PDEs)
and functional analysis have emerged and been successfully applied to
various image processing (IP) tasks, particularly for image
restoration
\cite{AlvarezLionsMorel:92,CatteLionsMorelColl:92,ChanOsherShen:TR,
Kim:PDE_Color,MarquinaOsher:00,NitzbergShiota:92,
PeronaMalik:90,RudinOsherFatemi:92,YouETAL:96}; see also
\cite{AngenentETAL:06,ChanShen:05}. Those PDE-based models have
allowed researchers and practitioners not only to introduce
effective models but also to improve traditional algorithms in
image denoising. However, these PDE-based models tend to either
converge to a piecewise constant image or introduce image blur
(undesired dissipation), partially because the models are derived
by minimizing a functional of the image gradient. As a
consequence, the conventional PDE-based models may lose
interesting fine structures during the denoising. In order to
reduce the artifact, researchers have studied various mathematical
and numerical techniques which either incorporate diffusion
modulation, constraint terms, and iterative refinement
\cite{KimLim:NC_Beta,MarquinaOsher:00,Meyer:01,OsherETAL:04TV} or
minimize a functional of second derivatives of the image
\cite{LysakerLundervoldTai:03,OsherSoleVese:03,YouKaveh:00}. These
new mathematical models may preserve fine structures better than
conventional ones; however, more advanced models and appropriate
numerical procedures are yet to be developed.
In this article, we will introduce and analyze \emph{piecewise planarity
conditions} (PPCs) with which unconstrained fourth-order variational
models in continuum converge to a piecewise planar image.
Although such a mathematical theory may not hold for discrete data
(images) in the same way as in continuum, it will provide a mathematical
background for the advancement of the state-of-the-art methods for
PDE-based image denoising.
It has been observed that fourth-order variational models holding
the PPCs can restore images better than second-order models and
other fourth-order ones.
Fourth-order models have proved to restore texture regions favorably,
while second-order models tend to restore edges of large contrasts
more clearly.
In the next section, we briefly review variational approaches
for second-order and fourth-order PDE-based models in image denoising.
Section~\ref{PPCs} considers the PPCs for fourth-order variational
models.
In Section~\ref{Difference_Schemes}, we present difference schemes for
fourth-order models, a variable constraint parameter, and a timestep
size for a stable simulation.
Section~\ref{Numerical} contains numerical experiments along with
comparisons among various fourth-order models and a second-order
model.
Our development and experiments are concluded in Section~\ref{Conclusions}.
\section{Preliminaries} \label{Preliminaries}
This section reviews briefly variational approaches
for second-order and fourth-order PDE-based models.
\subsection{Second-order PDEs} \label{ss-Second-Order}
Let the observed image ($u_0 $) be given as
$$
u_0 =u+v,
$$
where $u$ is the desired image and $v$ denotes a Gaussian noise of
mean zero.
Then, second-order denoising models
\cite{Kim:PDE_Color,KimLim:NC_Beta,Meyer:01,PeronaMalik:90,RudinOsherFatemi:92}
can be obtained by minimizing an energy functional of the image gradient,
\begin{equation}
u =\arg\min_{u}\Big[\int_{\Omega}\rho(|\nabla u|)\,d\mathbf{x}
+\frac{\lambda}{2}\int_{\Omega}(u_0 -u)^2\,d\mathbf{x}\Big],
\label{eqn_Eu}
\end{equation}
where $\Omega$ is the image domain, $\lambda\ge0$ denotes
the constraint parameter, and $\rho$ is a function having
the properties:
\begin{equation}
\rho(s)\ge0, \; s\ge0; \quad \rho'(s)>0, \; s>0.
\label{eqn_rho}
\end{equation}
In this article, we will call $\rho$ a \emph{modulator}.
It is often convenient to transform the minimization problem
\eqref{eqn_Eu} into a differential equation, called the
\emph{Euler-Lagrange equation}, by applying the variational calculus
\cite{Weinstock:74} and parameterizing the energy descent direction
by an artificial time $t$:
\begin{equation}
\frac{\partial u}{\partial t}=
\nabla\cdot\Big(\rho'(|\nabla u|)\frac{\nabla u}{|\nabla u|}\Big)
+\lambda\,(u_0 -u).
\label{eqn_Eu_Euler}
\end{equation}
For example, the model \eqref{eqn_Eu_Euler} is reduced to the \emph{total
variation} (TV) model \cite{RudinOsherFatemi:92} for $\rho(s)=s$ and the
\emph{Perona-Malik} (PM) model \cite{PeronaMalik:90} when
$\rho(s)=\frac12{K^2}\ln(1+s^2/K^2)$, for some $K>0$, and $\lambda=0$.
For the PM model, the modulator $\rho$ is strictly convex for $s<K$ and concave for $s>K$. ($K$ is a threshold.)
Thus the model can enhance image content having large gradient magnitudes
such as edges; however, it will flatten slow transitions.
For a later frequent use, we define
\begin{equation}
\rho_{_{PM}}(s):=\frac{K^2}2\,\ln\Big(1+\frac{s^2}{K^2}\Big), \quad K>0.
\label{eqn_rho_PM}
\end{equation}
Most of conventional variational restoration models have a tendency
to converge to a piecewise constant image.
Such a phenomenon is called the \emph{staircasing effect}.
Although these results are important for understanding of the current
diffusion-like models, the resultant signals may not be desired in
applications where the preservation of both slow transitions and fine
structures is important.
In order to suppress the staircasing effect and improve the restorability
of second-order PDE models, variants have been suggested incorporating
diffusion modulation, different constraint terms, and iterative refinement
\cite{KimLim:NC_Beta,MarquinaOsher:00,Meyer:01,OsherETAL:04TV}; see
also \cite{YouETAL:96}.
For example, for an effective suppression of staircasing, Marquina-Osher
\cite{MarquinaOsher:00} have suggested to scale the stationary part of
the TV model by $|\nabla u|$:
\begin{equation}
\frac{\partial u}{\partial t}=
|\nabla u|\,\nabla\cdot\Big(\frac{\nabla u}{|\nabla u|}\Big)
+\lambda\,|\nabla u|\,(u_0 -u),
\label{eqn_ITV}
\end{equation}
which is called the \emph{improved total variation} (ITV) model
\cite{OsherFedkiw:Book03}.
\subsection{Fourth-order PDEs} \label{ss_Fourth-Order}
An approach which derives a fourth-order PDE is to minimize
an energy functional of the image Laplacian \cite{YouKaveh:00}:
\begin{equation}
u=\arg\min_{u}\int_{\Omega}\rho(|\Delta u|)\,d\mathbf{x},
\label{eqn_Laplacian}
\end{equation}
where $\Delta$ denotes the Laplacian operator.
It is not difficult to check that the associated Euler-Lagrange equation reads
\begin{equation}
\frac{\partial u}{\partial t}=
-\Delta\,\Big(\rho'(|\Delta u|)\frac{\Delta u}{|\Delta u|}\Big).
\label{eqn_Laplacian_Euler}
\end{equation}
As in You-Kaveh \cite{YouKaveh:00}, one can prove that when the modulator
$\rho=\rho_{_{PM}}$, the solution of \eqref{eqn_Laplacian_Euler}
converges to a piecewise harmonic image, i.e., $\Delta u\to0$ locally,
as $t\to\infty$.
(Although the authors used the term ``piecewise planar'',
it must be understood as ``piecewise harmonic''.)
\begin{figure}[ht]
\begin{center}
\includegraphics[width=0.45\textwidth]{fig1} % FIG_PNG/locally_Laplacian.png
\end{center}
\caption{A locally smooth surface on the domain of $(4\times4)$
subdomains. Let the height on boundary subdomains be zero for
simplicity. A locally harmonic surface can be defined on the whole
domain for an arbitrary height at $\mathbf{x}_0$.}
\label{fig_locally_Laplacian}
\end{figure}
The You-Kaveh model \cite{YouKaveh:00} often introduces speckles in an
objectionable level.
The authors claimed it was due to a weaker \emph{masking capability} of
\eqref{eqn_Laplacian_Euler} than that of second-order models.
However, the local harmonicity is not sufficient for an effective denoising.
To see this, we consider a simple cartoon image as in
Figure~\ref{fig_locally_Laplacian}, where a locally harmonic surface
can be formed on the image domain for an arbitrary value at the point
$\mathbf{x}_0$.
Thus, for a given perturbation (due to noise), the You-Kaveh model
can result in a locally harmonic surface possibly keeping the noise
perturbation, instead of suppressing it.
This is why the You-Kaveh model tends to introduce speckles, as shown
in Figure~\ref{fig_ELaine_Two} below.
Other approaches incorporating fourth-order PDEs can be found in
\cite{LysakerLundervoldTai:03}:
\begin{equation}
u=\arg\min_{u}\Big\{\int_{\Omega}|\mathcal{D} u|\,d\mathbf{x}
+\frac{\lambda}2\int_{\Omega}(u_0 -u)^2\,d\mathbf{x}\Big\},
\label{eqn_Tai_Min}
\end{equation}
where $\lambda\ge0$ denotes a constraint parameter and
$$
|\mathcal{D} u|=\begin{cases}
|\Sigma u|:=|u_{xx}|+|u_{yy}|, \quad\text{or}\\
|\Theta u|:=(u_{xx}^2+u_{xy}^2+u_{yx}^2+u_{yy}^2)^{1/2}.
\end{cases}
$$
The corresponding Euler-Lagrange equations read respectively
\begin{equation}
\begin{gathered}
\frac{\partial u}{\partial t}
= -\Big(\frac{u_{xx}}{|u_{xx}|}\Big)_{xx}
-\Big(\frac{u_{yy}}{|u_{yy}|}\Big)_{yy}
+\lambda(u_0 -u), \\
\frac{\partial u}{\partial t}
= -\Big(\frac{u_{xx}}{|\Theta u|}\Big)_{xx}
-\Big(\frac{u_{xy}}{|\Theta u|}\Big)_{yx}
-\Big(\frac{u_{yx}}{|\Theta u|}\Big)_{xy}
-\Big(\frac{u_{yy}}{|\Theta u|}\Big)_{yy} +\lambda(u_0 -u).
\end{gathered}
\label{eqn_Tai}
\end{equation}
These fourth-order PDE-based models have proved to be superior to
\eqref{eqn_Laplacian_Euler} for an appropriate choice of $\lambda$.
Note that the functional in \eqref{eqn_Tai_Min} is convex ($\rho(s)=s$).
Thus, when $\lambda=0$, the minimization will converge to a
\emph{globally} planar image.
In this article, we will first find conditions for which non-constrained
fourth-order PDE-based restoration models converge to a \emph{piecewise}
planar image.
Then, a comparison study will be performed for various differential
operators ($\mathcal{D}$) and modulators ($\rho$).
Properties of a variational model are combined effects of the differential
operator, the modulator, and the constraint term.
\section{Piecewise Planarity Conditions} \label{PPCs}
We first define a vector-valued second-order differential operator
\begin{equation}
\Pi_{\zeta}=(\partial_{xx},\zeta\partial_{xy},
\zeta\partial_{yx},\partial_{yy})^T, \quad \zeta\ge0,
\label{eqn_Pi_zeta}
\end{equation}
where the superscript $T$ denotes the transpose.
Since
$
|\Pi_0 u| \le |\Sigma u| \le \sqrt{2}\,|\Pi_0 u|,
$
the minimization problem with $|\mathcal{D} u|=|\Sigma u|$ is equivalent to
the one with $|\mathcal{D} u|=|\Pi_0 u|$.
Thus in this section we will focus on second-order differential operators
$\Pi_{\zeta}$, $\zeta\ge0$.
Consider a minimization problem of the form
\begin{equation}
u=\arg\min_{u}\int_{\Omega}\rho(|\mathcal{D} u|)\,d\mathbf{x},
\quad \mathcal{D}=\Pi_{\zeta}, \; \zeta\ge0.
\label{eqn_D2_Min}
\end{equation}
Then, the associated evolutionary Euler-Lagrange equation reads
\begin{equation}
\frac{\partial u}{\partial t}
=-\mathcal{D}\cdot\Big(\rho'(|\mathcal{D} u|)\frac{\mathcal{D} u}{|\mathcal{D} u|}\Big).
\label{eqn_PDE4}
\end{equation}
Note that $|\Pi_{\zeta} u|=0$, $\zeta>0$, if and only if the
solution $u$ is globally planar.
On the other hand, $|\Pi_{0} u|=0$ when $u$ is globally bilinear.
Since
\begin{equation}
|\Delta u|\le\sqrt{2}\,|\Pi_{\zeta} u|, \quad \zeta\ge0,
\label{eqn_Del_Pi_Th}
\end{equation}
it is easy to see that a minimizer of \eqref{eqn_D2_Min}
is also a minimizer of \eqref{eqn_Laplacian}.
However, the reverse is not true; a minimizer of \eqref{eqn_Laplacian}
may not be a minimizer of \eqref{eqn_D2_Min}.
The following theorem proves that for selected modulators $\rho$,
piecewise planar images are the only minimizers of
\eqref{eqn_D2_Min} (equivalently, the only stationary states of
\eqref{eqn_PDE4}), when the restored image is to be continuous and
piecewise smooth.
\begin{theorem} \label{thm_Min_0}
Suppose that $u$ is continuous and piecewise smooth.
Let $\rho$ satisfy $\eqref{eqn_rho}$ and
\begin{equation}
\rho(0)=0, \quad \lim_{s\to\infty}\frac{\rho(s)}{s}=0.
\label{eqn_PPC2}
\end{equation}
Then $u$ is piecewise planar if and only if
\begin{equation}
\int_{\Omega}\rho(|\mathcal{D} u|)\,d\mathbf{x}=0, \quad \mathcal{D}=\Pi_{\zeta},\; \zeta>0.
\label{eqn_Min_0}
\end{equation}
\end{theorem}
\begin{proof} ($\Rightarrow$)
Let $u$ be piecewise planar.
Consider the corresponding partition of $\Omega$: $\{\Omega_j\}_{j=1}^M$,
for some $M>0$, on each of which the image is planar.
Let us denote the interior and the boundary of $\Omega_j$ by
$\Omega^{0}_j$ and $\partial\Omega_j$, respectively.
Define $\Gamma_{jk}=\partial\Omega_j\cap\partial\Omega_k$, the
edge between neighboring subregions $\Omega_j$ and $\Omega_k$.
Then, it is easy to see that $\Gamma_{jk}$ are straight line segments.
Let $u_j=u|_{\Omega_j}$.
Then, for $\mathcal{D}=\Pi_{\zeta}$, $\zeta>0$,
\begin{equation}
|\mathcal{D} u_j(\mathbf{x})|\equiv0, \quad \forall\,\mathbf{x}\in\Omega_j^0,
\; j=1,\dots ,M.
\label{eqn_Interior_pt}
\end{equation}
On the other hand, $\nabla u_j$ are constant vectors on each
$\Omega_j$, $j=1,\dots ,M$. Without loss of generality, we may
assume that
\begin{equation}
\nabla u_j \neq \nabla u_k, \quad \forall\,\mathbf{x}\in\Gamma_{jk},
\; j,k=1,\dots ,M.
\label{eqn_Edge_pt}
\end{equation}
So we have $|\mathcal{D} u(\mathbf{x})|=\infty, \;
\forall\,\mathbf{x}\in\cup_{jk}\Gamma_{jk}$.
In order to deal with the integral of the infinite on a set of measure
zero, let $\Omega_{jk}^{\varepsilon}$ be the $\varepsilon$-rectangle of $\Gamma_{jk}$
that has the same length as $\Gamma_{jk}$ and a width of $\varepsilon$ in a
normal direction of $\Gamma_{jk}$, with $\Gamma_{jk}$ being centered.
Define $S^{(\varepsilon)}_{jk}$ on $\Omega_{jk}^{\varepsilon}$ to have the property
that $\nabla S^{(\varepsilon)}_{jk}$ becomes a linear transition between $\nabla
u_j$ and $\nabla u_k$.
Then, $|\mathcal{D} S^{(\varepsilon)}_{jk}|$ is a constant on $\Omega_{jk}^{\varepsilon}$:
$|\mathcal{D} S^{(\varepsilon)}_{jk}|={c_1}/{\varepsilon}$, for some $c_1>0$.
Thus, by denoting the length of $\Gamma_{jk}$ by $c_{jk}$, we have
\begin{equation}
\int_{\Omega_{jk}^{\varepsilon}}\rho(|\mathcal{D} S^{(\varepsilon)}_{jk}|)\,d\mathbf{x}
=\rho(c_1/\varepsilon)\cdot c_{jk}\varepsilon.
\label{eqn_Integral}
\end{equation}
It therefore follows from \eqref{eqn_PPC2} and \eqref{eqn_Integral} that
\begin{equation}
\int_{\Gamma_{jk}}\rho(|\mathcal{D} u|)\,d\mathbf{x}
=\lim_{\varepsilon\to0}\int_{\Omega_{jk}^{\varepsilon}}\rho(|\mathcal{D} S^{(\varepsilon)}_{jk}|)\,d\mathbf{x}
=0.
\label{eqn_Gamma_ve}
\end{equation}
Note that the above holds for all $\Gamma_{jk}$, $j,k=1,\dots ,M$;
\eqref{eqn_Min_0} follows from \eqref{eqn_Interior_pt} and
\eqref{eqn_Gamma_ve}.
\noindent($\Leftarrow$) Let $u$ be continuous and piecewise smooth
and satisfy \eqref{eqn_Min_0}. Then, since $\rho(0)=0$ and
$\rho(s)>0$ for $s>0$, the quantity $|\mathcal{D} u|$ must be zero at
least at all interior points of the subregions in which $u$ is
smooth; which implies that $u$ is piecewise planar. This completes
the proof.
\end{proof}
\subsection*{Remarks}
From the above theorem we have a few remarks:
\begin{itemize}
\item
The PPCs are \eqref{eqn_PPC2} and $\mathcal{D}=\Pi_{\zeta}$ for $\zeta>0$.
The choice $\mathcal{D}=\Delta$ or $\mathcal{D}=\Pi_0$ is not a part of PPCs.
The model with \eqref{eqn_PPC2} and $\mathcal{D}=\Pi_0$ would
converge to a piecewise bilinear image.
\item
The conditions in \eqref{eqn_PPC2} imply that the modulator $\rho$
must be non-convex, at least for large values of $s$.
Non-convex modulators are, for example,
\begin{equation}
\rho(s)=\rho_{\alpha}(s):=\frac{1}{\alpha} s^{\alpha},\; 0<\alpha<1;
\quad \rho(s)=\rho_{_{PM}}(s).
\label{eqn_rho_2}
\end{equation}
\item
Minimizers of \eqref{eqn_Min_0} are not unique; every
piecewise planar image is a minimizer.
Thus it is a good idea to incorporate a constraint term in order for the
resulting model to converge to a \emph{desirable} piecewise planar image,
closer to the given image, that preserves fine structures satisfactorily.
\item
The diffusion magnitude $|\mathcal{D} u|$ never approaches the infinity for
(bounded) discrete data.
Thus the minimization process in \eqref{eqn_D2_Min} may not completely
avoid a tendency of converging to a \emph{globally} planar image.
However, the tendency will be weaker than the convex minimization
as in \eqref{eqn_Tai_Min}.
By incorporating a proper constraint term, non-convex fourth-order
PDEs can restore better images, as numerically verified in
Section~\ref{Numerical}.
\end{itemize}
\section{Difference Schemes} \label{Difference_Schemes}
We rewrite the models in \eqref{eqn_Laplacian_Euler} and
\eqref{eqn_PDE4}, incorporating a constraint term, as
\begin{equation}
\frac{\partial u}{\partial t} =-\mathcal{D}\cdot\Big(\frac{\mathcal{D} u}{\mathcal{P}(u)}\Big)
+\lambda\,(u_0 -u), \quad \mathcal{D}=\Delta,\,\Pi_{\zeta}, \; \zeta\ge0,
\label{eqn_General_Form}
\end{equation}
where
$$
\mathcal{P}(u)=\frac{|\mathcal{D} u|}{\rho'(|\mathcal{D} u|)}.
$$
Note that one can apply a (linearized) implicit time-stepping scheme
to the above model when the nonlinear part $\mathcal{P}(u)$ is evaluated from the
previous time level.
In that case, the models with $\mathcal{D}=\Pi_0$ are separable such that
they can be efficiently simulated by the alternating direction implicit
(ADI) method \cite{DouglasGunn:64,DouglasKim:01ADFS,Kim:PDE_Color}.
In this article, for simplicity, we will employ the explicit time-stepping
procedure for all models in \eqref{eqn_General_Form}.
Let $\Delta t^n$ be the $n$th timestep size,
$t^n=\sum_{k=0}^{n-1}\Delta t^k$, and $g^n=g(\cdot,t^n)$ for a function
of time (and the spatial variables). Given
$u_0 =u^0,u^1,\dots,u^n$, the explicit scheme computes $u^{n+1}$
\emph{explicitly} as
\begin{equation}
\begin{gathered}
\frac{u^{n+1}-u^n}{\Delta t^n}
=-A_{\mathcal{D}}^nu^n +\lambda^n\,(u_0 -u^n), \\
A_{\mathcal{D}}^nu^n:=\mathcal{D}_h\cdot\Big(\frac{\mathcal{D}_h u^n}{\mathcal{P}_h(u^n)}\Big),
\end{gathered}
\label{eqn_Explicit0}
\end{equation}
or equivalently
\begin{equation}
u^{n+1}
= \left[I-\Delta t^n(A_{\mathcal{D}}^n+\lambda^nI)\right]u^n
+\lambda^n\,\Delta t^nu_0 ,
\label{eqn_Explicit}
\end{equation}
where the subscript $h$ indicates spatial approximations, for which we
will consider difference schemes in the following.
\subsection{Spatial schemes for $\mathcal{D}=\Pi_{\zeta}$} \label{ss_Schemes_Pi}
Let $(ij)$ and $\mathbf{x}_{ij}$ denote the $(ij)$th pixel in the image
and $g_{ij}=g(\mathbf{x}_{ij},\cdot)$.
We will drop the superscript $n$ for a simpler presentation.
Then, a central finite difference scheme for the $x$-directional
diffusion operator can be formulated as
\begin{equation}
\begin{aligned}
\Big(\frac{u_{xx}}{\mathcal{P}(u)}\Big)_{xx}(\mathbf{x}_{ij})
&\approx
\Big(\frac{u_{xx}}{\mathcal{P}(u)}\Big)_{i-1,j}
-2\Big(\frac{u_{xx}}{\mathcal{P}(u)}\Big)_{i,j}
+\Big(\frac{u_{xx}}{\mathcal{P}(u)}\Big)_{i+1,j} \\
&\approx\frac{1}{\mathcal{P}(u)_{i-1,j}}\,(u_{i-2,j}-2u_{i-1,j}+u_{i,j}) \\
&\quad -\frac{2}{\mathcal{P}(u)_{i,j}}\,(u_{i-1,j}-2u_{i,j}+u_{i+1,j}) \\
&\quad +\frac{1}{\mathcal{P}(u)_{i+1,j}}\,(u_{i,j}-2u_{i+1,j}+u_{i+2,j}) \\
&=\frac{1}{\mathcal{P}(u)_{i-1,j}}\,u_{i-2,j}
-\Big(\frac{2}{\mathcal{P}(u)_{i-1,j}}+\frac{2}{\mathcal{P}(u)_{i,j}}\Big)\,u_{i-1,j} \\
&\quad +\Big(\frac{1}{\mathcal{P}(u)_{i-1,j}}+\frac{4}{\mathcal{P}(u)_{i,j}}
+\frac{1}{\mathcal{P}(u)_{i+1,j}}\Big)\,u_{i,j} \\
&\quad -\Big(\frac{2}{\mathcal{P}(u)_{i,j}}+\frac{2}{\mathcal{P}(u)_{i+1,j}}\Big)\,u_{i+1,j}
+\frac{1}{\mathcal{P}(u)_{i+1,j}}\,u_{i+2,j},
\end{aligned}
\label{eqn_FD_xx}
\end{equation}
where
$$
\mathcal{P}(u)_{ij}=\frac{|\mathcal{D}_h u_{ij}|}{\rho'(|\mathcal{D}_h u_{ij}|)}.
$$
For the boundary condition, we will simply consider the flat extension
of the boundary pixel values in the outer normal direction.
The $y$-directional diffusion operator can be approximated
\emph{analogously}:
\begin{equation}
\begin{aligned}
\Big(\frac{u_{yy}}{\mathcal{P}(u)}\Big)_{yy}(\mathbf{x}_{ij})
&\approx \frac{1}{\mathcal{P}(u)_{i,j-1}}\,u_{i,j-2}
-\Big(\frac{2}{\mathcal{P}(u)_{i,j-1}}+\frac{2}{\mathcal{P}(u)_{i,j}}\Big)\,u_{i,j-1}
\\
&\quad +\Big(\frac{1}{\mathcal{P}(u)_{i,j-1}}+\frac{4}{\mathcal{P}(u)_{i,j}}
+\frac{1}{\mathcal{P}(u)_{i,j+1}}\Big)\,u_{i,j} \\
&\quad -\Big(\frac{2}{\mathcal{P}(u)_{i,j}}+\frac{2}{\mathcal{P}(u)_{i,j+1}}\Big)\,u_{i,j+1}
+\frac{1}{\mathcal{P}(u)_{i,j+1}}\,u_{i,j+2}.
\end{aligned}
\label{eqn_FD_yy}
\end{equation}
The mixed-derivative can be approximated as
\begin{equation}
(u_{xy})_{ij} \approx
Q_hu_{ij} :=\frac{u_{i+1,j+1}+u_{i-1,j-1}
-u_{i-1,j+1}-u_{i+1,j-1}}4.
\label{eqn_uxy}
\end{equation}
Note that the approximation of $u_{xy}$ is the same as that of $u_{yx}$.
Thus, we have
\begin{equation}
\Big(\frac{u_{xy}}{\mathcal{P}(u)}\Big)_{yx}(\mathbf{x}_{ij})
\approx Q_h\Big(\frac{Q_h u}{\mathcal{P}_h(u)}\Big)_{ij}
\approx \Big(\frac{u_{yx}}{\mathcal{P}(u)}\Big)_{xy}(\mathbf{x}_{ij}).
\label{eqn_FD_xy}
\end{equation}
\subsection{Spatial schemes for $\mathcal{D}=\Delta$} \label{ss_Schemes_Delta}
Define the discrete Laplacian $\Delta_h$ as
\begin{equation}
\Delta_h u_{ij}=u_{i-1,j}+u_{i+1,j}+u_{i,j-1}+u_{i,j+1}-4u_{ij}.
\label{eqn_Delta_h}
\end{equation}
Let
\begin{equation}
R_{ij}=\frac{\Delta_h u_{ij}}{\mathcal{P}(u)_{ij}}, \quad
\mathcal{P}(u)_{ij}=\frac{|\Delta_h u_{ij}|}{\rho'(|\Delta_h u_{ij}|)}.
\label{eqn_R_ij}
\end{equation}
Then, the diffusion operator can be discretized as
\begin{equation}
\Delta\Big(\frac{\Delta u}{\mathcal{P}(u)}\Big)(\mathbf{x}_{ij})
\approx \Delta_h R_{ij},
\label{eqn_FD_Delta_PM}
\end{equation}
which is a difference scheme of 13-point stencil.
\subsection{A variable constraint parameter $\{\lambda^n_{ij}\}$}
\label{ss_Schemes_lambda}
Now, we will present a strategy for $\lambda$ which is variable.
Multiply the stationary part of \eqref{eqn_General_Form} by $(u_0 -u)$
and average the resulting equation \emph{locally} to obtain
\begin{equation}
\lambda(\mathbf{x}) \sigma_{\mathbf{x}}^2\approx
\frac{1}{|\Omega_{\mathbf{x}}|}\int_{\Omega_{\mathbf{x}}}(u_0 -u)\,
\mathcal{D}\cdot\Big(\frac{\mathcal{D} u}{\mathcal{P}(u)}\Big)\,d\mathbf{x},
\label{eqn_Local_Constraint}
\end{equation}
where $\Omega_{\mathbf{x}}$ is a neighborhood of $\mathbf{x}$ (e.g., the window of
$(3\times3)$ pixels centered at $\mathbf{x}$) and $\sigma_{\mathbf{x}}^2$ denotes
the local noise variance measured over $\Omega_{\mathbf{x}}$.
Then, the right side of the above equation can be approximated,
in the $n$th time level, as in
\begin{equation}
\lambda^n(\mathbf{x})\approx \frac{1}{\sigma_{\mathbf{x}}^2}\,
\|u_0 -u^n\|_{\mathbf{x}}\, \left\|A_{\mathcal{D}}^nu^n\right\|_{\mathbf{x}},
\label{eqn_PDE4_lambda}
\end{equation}
where $A_{\mathcal{D}}^nu^n$ is defined as in \eqref{eqn_Explicit0} and
$\|g\|_{\mathbf{x}}$ denotes a local average of $|g|$ over $\Omega_{\mathbf{x}}$.
Notice that the constraint parameter in \eqref{eqn_PDE4_lambda} is
proportional to both the absolute residual $|u_0 -u^n|$ and the
diffusion magnitude $|A_{\mathcal{D}}^nu^n|$, which can effectively suppress
undesired dissipation that may arise on fast transitions.
It also should be noticed that the approximation \eqref{eqn_PDE4_lambda}
of \eqref{eqn_Local_Constraint} is based on the assumption
that $|u_0 -u^n|$ and $|A_{\mathcal{D}}^nu^n|$ are relatively independent,
although they may not be.
In practice, one can find an effective constraint parameter as follows:
\begin{equation}
\lambda^n_{ij}=\frac{\beta_1}{\widetilde{\sigma}^2}\,
|u_{0,ij}-u^n_{ij}|\, \left|A_{\mathcal{D}}^nu^n_{ij}\right|,
\label{eqn_PDE4_lambda_ij}
\end{equation}
where $\widetilde{\sigma}^2$ denotes an estimation of $\sigma^2$ and
$\beta_1$ is a positive constant ($0<\beta_1\le1$).
In this article, we will set $\widetilde{\sigma}^2=\sigma^2$, the true
noise variance, when the fourth-order models are compared with the
second-order ones.
\subsection{Choice of $\Delta t^n$} \label{ss_Schemes_Dt}
The explicit scheme for fourth-order PDEs in \eqref{eqn_Explicit} is
numerically stable only if the timestep size is sufficiently small.
In the literature, the timestep size has often been chosen heuristically.
However, for efficiency reasons, it is desirable to set $\Delta t^n$ as
large as possible, provided that the algorithm is stable.
We will consider an effective strategy for the selection of the timestep
size in this subsection.
Note that some coefficients of neighboring values of $u^n_{ij}$ in the
right side of \eqref{eqn_Explicit} are always negative and the coefficient
of $u^n_{ij}$ itself is conditionally nonnegative; it is hard to analyze
mathematical stability for numerical schemes of fourth-order models.
It has been observed that the algorithm occasionally diverges when
the timestep size is set so as to make the coefficient of $u^n_{ij}$
nonnegative:
\begin{equation}
\Delta t^n \le \min_{ij}\frac{1}{A^n_{\mathcal{D},ij}+\lambda^n_{ij}},
\label{eqn_Dt_0}
\end{equation}
where $A^n_{\mathcal{D},ij}$ is the coefficient of $u^n_{ij}$ in
$A^n_{\mathcal{D}}u^n(\mathbf{x}_{ij})$.
Thus we will try to choose the timestep size slightly smaller than the one
in \eqref{eqn_Dt_0}:
\begin{equation}
\Delta t^n = \min_{ij}\frac{\gamma}{A^n_{\mathcal{D},ij}+\lambda^n_{ij}},
\quad 0<\gamma<1.
\label{eqn_Dt_Constant}
\end{equation}
(It has been verified that when $\gamma\ge0.6$, the resulting
algorithm becomes occasionally unstable.
We will set $\gamma=0.5$ in practice.)
For $\mathcal{D}=\Delta$, the coefficient $A^n_{\mathcal{D},ij}$ of $u^n_{ij}$ in
$A^n_{\mathcal{D}}u^n_{ij}$ can be found as
$$
A^n_{\Delta,ij}
=\frac{1}{\mathcal{P}(u)_{i-1,j}} +\frac{1}{\mathcal{P}(u)_{i+1,j}}
+\frac{1}{\mathcal{P}(u)_{i,j-1}}
+\frac{1}{\mathcal{P}(u)_{i,j+1}} +\frac{16}{\mathcal{P}(u)_{i,j}},
%\label{eqn_An_ij_D}
$$
while $A^n_{\Pi_{\zeta},ij}$, $\zeta\ge0$, reads
\begin{align*}
A^n_{\Pi_{\zeta},ij}
&= \frac{1}{\mathcal{P}(u)_{i-1,j}} +\frac{1}{\mathcal{P}(u)_{i+1,j}}
+\frac{1}{\mathcal{P}(u)_{i,j-1}}
+\frac{1}{\mathcal{P}(u)_{i,j+1}} +\frac{8}{\mathcal{P}(u)_{i,j}} \\
&\quad +\frac{\zeta}{8}
\Big(\frac{1}{\mathcal{P}(u)_{i+1,j+1}} +\frac{1}{\mathcal{P}(u)_{i-1,j-1}}
+\frac{1}{\mathcal{P}(u)_{i-1,j+1}} +\frac{1}{\mathcal{P}(u)_{i+1,j-1}}\Big).
\end{align*}
\section{Numerical Experiments} \label{Numerical}
In this section, we will verify effectiveness of fourth-order image
denoising models considered in previous sections. The computation is carried
out on a 3.20 GHz desktop having a Linux operating system.
The explicit scheme \eqref{eqn_Explicit} is implemented for 15 different
models: combinations of five differential operators ($\Delta,\ \Sigma,\
\Pi_0,\ \Pi_{1/4}$, and $\Pi_1$) and three modulators ($\rho_1$,
$\rho_{0.6}$, and $\rho_{_{PM}}$).
The input images are first scaled by $1/255$ to have real values between
0 and 1 and then, after denoising, they are scaled back for the 8-bit
display in which the pixel values are integers between 0 and 255.
When $\rho=\rho_{\alpha}$, $0<\alpha\le1$, we have
$\mathcal{P}(u)=|\mathcal{D} u|^{2-\alpha}$.
To prevent the denominators in \eqref{eqn_FD_xx}, \eqref{eqn_FD_yy},
\eqref{eqn_FD_xy}, and \eqref{eqn_R_ij} from approaching zero,
a relaxation parameter $\varepsilon$ is incorporated:
$$
\mathcal{P}(u) \approx (|\mathcal{D} u|^2+\varepsilon^2)^{(2-\alpha)/2}, \quad \varepsilon>0.
$$
Many of algorithm parameters are chosen heuristically;
we set $\varepsilon=0.01$, $K=0.03$ in \eqref{eqn_rho_PM}, and $\gamma=0.5$ in
\eqref{eqn_Dt_Constant}.
(When $\gamma=0.6\sim0.9$, the explicit scheme has occasionally diverged.)
The restoration quality is measured by the \emph{signal-to-noise ratio} (SNR)
and the \emph{peak signal-to-noise ratio} (PSNR), which are defined as
$$
\text{SNR}\equiv \frac{\sum_{ij}g_{ij}^2}{\sum_{ij}(g_{ij}-h_{ij})^2},\quad
\text{PSNR}\equiv 10\log_{10}\Big(\frac{\sum_{ij}255^2}
{\sum_{ij}(g_{ij}-h_{ij})^2}\Big),
%\label{eqn_PSNR}
$$
where $g$ is the original image, $h$ denotes the compared image,
and the unit of PSNR is decibel (dB).
\begin{figure}[ht]
\centerline{(a)\hskip0.265\textwidth (b) \hskip0.265\textwidth (c) }
\centerline{
\includegraphics[width=0.3\textwidth]{fig2a} % FIG/Lenna.jpg
\includegraphics[width=0.3\textwidth]{fig2b} % FIG/Lenna_MZ20.jpg
\includegraphics[width=0.3\textwidth]{fig2c} % FIG/Lenna_MZ60.jpg
}
\caption{Lenna: (a) The original image $g$ and noisy images
perturbed by an additive Gaussian noise of
(b) PSNR=24.77 (SNR=81.00) and (c) PSNR=15.23 (SNR=9.00).}
\label{fig_Lenna}
\end{figure}
First, we apply the 15 fourth-order models to the restoration of
the Lenna image shown in Figure~\ref{fig_Lenna}; the noisy images in
Figures~\ref{fig_Lenna}(b) and \ref{fig_Lenna}(c) are obtained by
perturbing the original image by an additive Gaussian noise of PSNR=24.77
(SNR=81.00) and PSNR=15.23 (SNR=9.00), respectively.
We have observed that it is difficult to set an appropriate stopping
criterion for the 15 different models to be compared fairly.
Thus the PSNR is computed after each iteration of the explicit scheme
\eqref{eqn_Explicit}; the models will be compared for the best PSNRs
they can achieve for the restored image.
\begin{table}[ht]
\caption{A PSNR analysis. Set $\beta_1=0$ (unconstrained).}
\label{tbl_Lenna_Unconstrained}
\begin{center}
\begin{tabular}{|c|c||cc|cc|cc|} \hline
\multicolumn{2}{|c||}{} & \multicolumn{2}{|c|}{$\rho=\rho_1$} & \multicolumn{2}{|c|}{$\rho=\rho_{0.6}$}
& \multicolumn{2}{|c|}{$\rho=\rho_{_{PM}}$} \\ \hline
PSNR & $\mathcal{D}$ & Iter & PSNR & Iter & PSNR & Iter & PSNR \\ \hline\hline
24.77 & $\Delta$ & 83 & 30.68 & 279 & 30.60 & 220 & 30.44 \\
& $\Sigma$ & 40 & 31.09 & 104 & 31.46 & 119 & 31.40 \\
& $\Pi_0$ & 46 & 31.08 & 129 & 31.49 & 78 & 31.45 \\
& $\Pi_{1/4}$ & 36 & 31.21 & 87 & 31.63 & 61 & 31.52 \\
& $\Pi_1$ & 28 & 31.07 & 71 & 31.44 & 50 & 31.34 \\ \hline\hline
15.23 & $\Delta$ & 502 & 25.45 & 2015 & 25.48 & 2492 & 25.39 \\
& $\Sigma$ & 205 & 25.58 & 526 & 26.01 & 802 & 26.05 \\
& $\Pi_0$ & 239 & 25.59 & 617 & 26.06 & 533 & 26.05 \\
& $\Pi_{1/4}$ & 151 & 25.62 & 434 & 26.07 & 336 & 25.90 \\
& $\Pi_1$ & 111 & 25.51 & 350 & 25.89 & 289 & 25.61 \\ \hline
\end{tabular}
\end{center}
\end{table}
Table~\ref{tbl_Lenna_Unconstrained} shows the best PSNRs and the
corresponding iteration counts for the 15 models with no constraint term
($\beta_1=0$).
For the unconstrained problem, the model with $\mathcal{D}=\Pi_{1/4}$ and
$\rho=\rho_{0.6}$ has performed the best in PSNR, for both noise levels.
The lowest PSNRs are observed for the models incorporating $\mathcal{D}=\Delta$.
Note that the models incorporating $\mathcal{D}=\Pi_1$ have not performed better
than the models having $\mathcal{D}=\Pi_0$ (and $\mathcal{D}=\Pi_{1/4}$).
When $\mathcal{D}=\Pi_1$, the involvement of cross derivatives into the models
is too strong.
In order for a non-convex model to hold the PPCs, the model should
incorporate $\mathcal{D}=\Pi_{\zeta}$, $\zeta>0$; however, $\zeta$ should not
be too large for an effective denoising.
(The models work reasonably well when $\zeta=0.1\sim0.35$.)
As one can see from the table (and also from constrained problems in
Tables~\ref{tbl_Lenna_Constrained} and \ref{tbl_ThreeFigure}), the models
with $\mathcal{D}=\Pi_{\zeta}$ converge faster, as $\zeta$ approaches 1.
Such a faster convergence of $\Pi_1$ seems to be due to a \emph{balanced}
incorporation of cross derivatives, which in turn makes the
finite difference scheme involve a larger number of pixels in various
directions in a balanced fashion.
For all differential operators ($\mathcal{D}$), the non-convex models
($\rho=\rho_{0.6}$ and $\rho=\rho_{_{PM}}$) have outperformed the
convex models ($\rho=\rho_{1}$).
\begin{table}[ht]
\caption{A PSNR analysis. Set $\beta_1=0.4$ (constrained).}
\label{tbl_Lenna_Constrained}
\begin{center}
\begin{tabular}{|c|c||cc|cc|cc|} \hline
\multicolumn{2}{|c||}{} & \multicolumn{2}{|c|}{$\rho=\rho_1$} & \multicolumn{2}{|c|}{$\rho=\rho_{0.6}$}
& \multicolumn{2}{|c|}{$\rho=\rho_{_{PM}}$} \\ \hline
PSNR & $\mathcal{D}$ & Iter & PSNR & Iter & PSNR & Iter & PSNR \\ \hline\hline
24.77 & $\Delta$ & 113 & 30.96 & 347 & 30.80 & 265 & 30.62 \\
& $\Sigma$ & 59 & 31.52 & 137 & 31.81 & 147 & 31.69 \\
& $\Pi_0$ & 65 & 31.45 & 166 & 31.78 & 97 & 31.74 \\
& $\Pi_{1/4}$ & 50 & 31.53 & 111 & 31.90 & 76 & 31.83 \\
& $\Pi_1$ & 38 & 31.38 & 91 & 31.73 & 63 & 31.67 \\ \hline\hline
15.23 & $\Delta$ & 944 & 26.75 & 3273 & 26.55 & 3709 & 26.34 \\
& $\Sigma$ & 477 & 27.32 & 969 & 27.44 & 1200 & 27.21 \\
& $\Pi_0$ & 521 & 27.22 & 1052 & 27.37 & 813 & 27.25 \\
& $\Pi_{1/4}$ & 331 & 27.25 & 745 & 27.41 & 514 & 27.26 \\
& $\Pi_1$ & 251 & 27.13 & 619 & 27.25 & 475 & 27.05 \\ \hline
\end{tabular}
\end{center}
\end{table}
In Table~\ref{tbl_Lenna_Constrained}, we perform the same experiment as
in Table~\ref{tbl_Lenna_Unconstrained} except for $\beta_1=0.4$ in
\eqref{eqn_PDE4_lambda_ij}.
For the constrained model, we have observed similar performances as in the
unconstrained one.
Here the PSNRs are slightly higher, and the PSNR differences between
non-convex models and convex ones are smaller than those for the
unconstrained models in Table~\ref{tbl_Lenna_Unconstrained}.
For the high noise level, the constrained model with $\mathcal{D}=\Sigma$
and $\rho=\rho_{0.6}$ has performed the best in PSNR.
\begin{figure}[ht]
\centerline{ (a) \hskip0.42\textwidth (b) }
\centerline{
\includegraphics[width=0.45\textwidth]{fig3a} %FIG/Lenna_MZ60_ITV_Cons_30.jpg
\includegraphics[width=0.45\textwidth]{fig3b} %FIG/Lenna_MZ60_Sigma_Al_969.jpg
}
\caption{Lenna: Restored images (best in PSNR) by (a) the ITV model
and (b) a fourth-order model, which show PSNR=26.88 (SNR=131.51, CPU
time=0.28 seconds) and
PSNR=27.44 (SNR=149.63, CPU time=42.88 seconds), respectively.}
\label{fig_Lenna_Sigma_ITV}
\end{figure}
In Figure~\ref{fig_Lenna_Sigma_ITV}, we present the best restored images
(measured in PSNR) from the ITV model \eqref{eqn_ITV} and the fourth-order
models, for the high noise level.
For the ITV model, the diffusion term is approximated as in
\cite{KimLim:NC_Beta} and the constraint parameter is set analogously
as in Section~\ref{ss_Schemes_lambda}; we select a spatially variable
timestep size defined as
\begin{equation}
\Delta t^n_{ij} = \frac{\gamma}{A^n_{ITV,ij}+\lambda^n_{ij}},
\label{eqn_Dt_Variable}
\end{equation}
where $A^n_{ITV,ij}$ is the coefficient of $u^{n}_{ij}$ in the
approximation
of the diffusion term of the ITV model, which equals 4 independently of $u^n$;
see \cite{KimLim:NC_Beta} for details.
The ITV model shows its best PSNR (=26.88) in 30 iterations, while the
fourth-order model with $\mathcal{D}=\Sigma$ and $\rho=\rho_{0.6}$ requires 969
iterations to reach its best PSNR (=27.44).
(When the fourth-order models employ the variable timestep size in
\eqref{eqn_Dt_Variable}, they converge 2-3 times faster, but their best
PSNRs are often lowered by an observable amount; fourth-order models have
shown higher sensitivity to numerical schemes and parameter choices.)
As one can see from the figure, the fourth-order model restores texture
regions better than the second-order model,
while edges of relatively big contrasts are preserved more clearly by
the ITV model.
Thus, for untextured images, the second-order model may restore better
images than the fourth-order models.
The second-order model converges fast without sacrificing the restoration
quality, when the variable timestep size \eqref{eqn_Dt_Variable} is
employed.
Also, for the second-order model, it is relatively easy to set an
appropriate stopping criterion.
However, all the fourth-order models except those involving $\mathcal{D}=\Delta$
have restored better images than the second-order model.
Another example is given in order to verify the effectiveness of
fourth-order models on textured images. We first perturb the original Texture
image (Figure~\ref{fig_Texture_Sigma_ITV}(a)) by an additive Gaussian noise of
PSNR=15.24 (SNR=11.51); see Figure~\ref{fig_Texture_Sigma_ITV}(b).
\begin{figure}[ht]
\centerline{ (a) \hskip0.36\textwidth (b) } \centerline{
\includegraphics[width=0.4\textwidth]{fig4a} %FIG/texture.jpg
\includegraphics[width=0.4\textwidth]{fig4b} %FIG/texture_MZ60.jpg
} \centerline{ (c) \hskip0.36\textwidth (d) } \centerline{
\includegraphics[width=0.4\textwidth]{fig4c} %FIG/texture_MZ60_ITV_3.jpg
\includegraphics[width=0.4\textwidth]{fig4d} % FIG/texture_MZ60_S_A_353.jpg
} \caption{Texture: (a) The original image $g$, (b) noisy image
perturbed by an additive Gaussian noise of PSNR=15.24 (SNR=11.51),
and the restored images by (c) the ITV model, PSNR=18.21 (SNR=22.80)
and (d) the fourth-order model, PSNR=19.22 (SNR=28.78).}
\label{fig_Texture_Sigma_ITV}
\end{figure}
Figure~\ref{fig_Texture_Sigma_ITV}(c) represents the image recovered using the
ITV model with the spatially variable timestep size \eqref{eqn_Dt_Variable} and
Figure~\ref{fig_Texture_Sigma_ITV}(d) is a denoised image using the
fourth-order model ($\mathcal{D}=\Pi_{1/4}$ and $\rho=\rho_{0.6}$).
Measured in PSNR and verified visually, the ITV model shows its
best recovered image after only 3 iterations (PSNR=18.21, CPU time=0.33
seconds). The fourth-order model reaches its best PSNR (=19.22) after 353
iterations (CPU time=65.05 seconds). As one can see from the figures,
the fourth-order model preserves textures better than the ITV model.
Although the ITV model gives results faster than the fourth-order model,
the difference is not significant.
\begin{figure}[ht]
\centerline{ (a) \hskip0.26\textwidth (b) \hskip0.3\textwidth (c) }
\centerline{
\includegraphics[width=0.30\textwidth]{fig5a} %FIG/Bamboo.jpg
\includegraphics[width=0.30\textwidth]{fig5b} % FIG/Elaine256.jpg
\includegraphics[width=0.35\textwidth]{fig5c} % FIG/PokerChips.jpg
}
\caption{Test images: (a) Bamboo, (b) Elaine, and (c) Poker Chips.}
\label{fig_ThreeFigure}
\end{figure}
We select a set of images as in Figure~\ref{fig_ThreeFigure},
for more tests and comparisons.
Table~\ref{tbl_ThreeFigure} presents a PSNR analysis, showing the best
PSNRs and the corresponding iteration counts, for the 15 different
constrained fourth-order models applied to the restoration of three images in
Figure~\ref{fig_ThreeFigure}.
As one can see from the table, we have again reached the same conclusions:
(a) the models incorporating $\mathcal{D}=\Pi_1$ converge fastest and
(b) non-convex models outperform convex models.
When the ITV model \eqref{eqn_ITV} is applied, the best PSNRs turn out
to be 32.44, 28.10, and 24.68 respectively for Bamboo, Elaine, and Poker
Chips; the fourth-order models holding the PPCs restore better
images than the second-order model for all cases.
\begin{table}[ht]
\caption{A PSNR analysis. Set $\beta_1=0.4$ (constrained).}
\label{tbl_ThreeFigure}
\begin{center}
\begin{tabular}{|c|c||cc|cc|cc|} \hline
\multicolumn{2}{|c||}{} & \multicolumn{2}{|c|}{$\rho=\rho_1$} & \multicolumn{2}{|c|}{$\rho=\rho_{0.6}$}
& \multicolumn{2}{|c|}{$\rho=\rho_{_{PM}}$} \\ \hline
Image (PSNR)
& $\mathcal{D}$ & Iter & PSNR & Iter & PSNR & Iter & PSNR \\ \hline\hline
%%---------------------------------------------------------------------
Bamboo & $\Delta$ & 1186 & 32.04 & 3597 & 31.71 & 3326 & 31.34 \\
(16.81) & $\Sigma$ & 778 & 32.66 & 1251 & 32.82 & 1126 & 32.32 \\
& $\Pi_0$ & 724 & 32.45 & 1186 & 32.60 & 787 & 32.33 \\
& $\Pi_{1/4}$ & 523 & 32.86 & 844 & 33.14 & 534 & 32.82 \\
& $\Pi_1$ & 406 & 33.29 & 678 & 33.56 & 491 & 33.29 \\ \hline\hline
%%---------------------------------------------------------------------
Elaine & $\Delta$ & 802 & 28.06 & 2627 & 27.85 & 2706 & 27.58 \\
(16.81) & $\Sigma$ & 416 & 28.46 & 748 & 28.54 & 912 & 28.48 \\
& $\Pi_0$ & 414 & 28.43 & 864 & 28.56 & 602 & 28.51 \\
& $\Pi_{1/4}$ & 282 & 28.45 & 570 & 28.59 & 407 & 28.50 \\
& $\Pi_1$ & 219 & 28.32 & 468 & 28.43 & 371 & 28.30 \\ \hline\hline
%%---------------------------------------------------------------------
Poker Chips&$\Delta$& 383 & 24.59 & 1581 & 24.57 & 1965 & 24.45 \\
(16.82) & $\Sigma$ & 184 & 25.15 & 491 & 25.55 & 776 & 25.44 \\
& $\Pi_0$ & 194 & 24.97 & 565 & 25.44 & 501 & 25.48 \\
& $\Pi_{1/4}$ & 144 & 25.16 & 418 & 25.66 & 319 & 25.64 \\
& $\Pi_1$ & 110 & 25.19 & 361 & 25.61 & 276 & 25.54 \\ \hline
\end{tabular}
\end{center}
\end{table}
\begin{figure}[ht]
\centerline{(a)\hskip0.265\textwidth (b) \hskip0.265\textwidth (c) }
\centerline{
\includegraphics[width=0.3\textwidth]{fig6a} % FIG/Elaine_PSNR16p81.jpg
\includegraphics[width=0.3\textwidth]{fig6b} % FIG/Elaine_Rho06_Laplacian.jpg
\includegraphics[width=0.3\textwidth]{fig6c} %FIG/Elaine_Rho06_Pi025_570.jpg
}
\caption{Elaine: (a) A noisy image of PSNR=16.81 (SNR=15.29) and restored
images by (b) $\mathcal{D}=\Delta$ and (c) $\mathcal{D}=\Pi_{1/4}$.
For both models, we set $\rho=\rho_{0.6}$ and $\beta_1=0.4$.
}
\label{fig_ELaine_Two}
\end{figure}
Figure~\ref{fig_ELaine_Two} depicts a noisy image of Elaine and restored
images by two constrained fourth-order models:
$\mathcal{D}=\Delta$ and $\mathcal{D}=\Pi_{1/4}$.
For both models, we set $\rho=\rho_{0.6}$ and $\beta_1=0.4$.
The restored image with $\mathcal{D}=\Delta$ incorporates speckles, which is
objectionable, as in Figure~\ref{fig_ELaine_Two}(b).
On the other hand, the model of $\mathcal{D}=\Pi_{1/4}$ has restored edges and fine
structures more clearly.
It is apparent that fourth-order models holding the PPCs can suppress
speckles effectively and result in better images than models without PPCs.
\section*{Conclusions} \label{Conclusions}
Properties of a variational model in image denoising are combined effects
of the differential operator, the modulator, and the constraint term.
In this article, we have introduced \emph{piecewise planarity conditions}
(PPCs) for fourth-order variational denoising models in continuum.
For an efficient simulation of fourth-order variational models, we have
considered effective numerical methods such as a variable constraint
parameter and a stable timestep size.
Although the PPCs may not be satisfied by discrete data (images) in
the same way as in continuum, they may serve as a valuable mathematical
tool for the advancement of the state-of-the-art methods for PDE-based
denoising.
Models with the PPCs have proved to be able to restore better images
measured in PSNR (and visual inspection) than models without PPCs.
Also, it has been observed that fourth-order variational models can
restore better images than second-order ones, particularly when they
hold the PPCs.
Fourth-order models have restored texture regions favorably, while
second-order models tend to restore edges of large contrasts more clearly.
\subsection*{Acknowledgment}
The work of the author is supported in part by NSF grants
DMS-0630798 and DMS-0609815.
The code utilized for this article has been implemented for the modelcode
library: \emph{Graduate Research and Applications for Differential Equations}
(GRADE), and is available at
\url{http://www.msstate.edu/~skim/GRADE} or by asking the author via email
(\texttt{skim@math.msstate.edu}).
\begin{thebibliography}{00}
\bibitem{AlvarezLionsMorel:92}
L.~Alvarez, P.~Lions, and M.~Morel;
Image selective smoothing and edge detection by nonlinear diffusion. {II},
\emph{SIAM J. Numer. Anal.}, vol.~29, pp. 845--866, 1992.
\bibitem{AngenentETAL:06}
S.~Angenent, E.~Pichon, and A.~Tannenbaum;
Mathematical methods in medical image processing,
\emph{Bulletin of the American Mathematical Society},
vol.~43, no.~3, pp. 365--396, 2006.
\bibitem{CatteLionsMorelColl:92}
F.~Catte, P.~Lions, M.~Morel, and T.~Coll;
Image selective smoothing and edge detection by nonlinear diffusion,
\emph{SIAM J. Numer. Anal.}, vol.~29, pp. 182--193, 1992.
\bibitem{ChanOsherShen:TR} T.~Chan, S.~Osher, and J.~Shen;
The digital {TV} filter and nonlinear denoising,
{Department of Mathematics, University of California, Los
Angeles, CA 90095-1555}, {Technical Report} \#99-34, October 1999.
\bibitem{ChanShen:05} T.~Chan and J.~Shen;
\emph{Image Processing and Analysis}. Philadelphia: SIAM, 2005.
\bibitem{DouglasGunn:64} J.~{Douglas, Jr.} and J.~Gunn;
A general formulation of alternating direction
methods {Part I}. {Parabolic} and hyperbolic problems, \emph{{Numer.
Math.}}, vol.~6, pp. 428--453, 1964.
\bibitem{DouglasKim:01ADFS} J.~{Douglas, Jr.} and S.~Kim;
Improved accuracy for locally one-dimensional
methods for parabolic equations, \emph{Mathematical Models and Methods in
Applied Sciences}, vol.~11, pp. 1563--1579, 2001.
\bibitem{Kim:PDE_Color} S.~Kim;
{PDE}-based image restoration: {A} hybrid model and color image
denoising, \emph{IEEE Trans. Image Processing}, vol.~15, no.~5, pp.
1163--1170, 2006.
\bibitem{KimLim:NC_Beta} S.~Kim and H.~Lim;
A non-convex diffusion model for simultaneous image
denoising and edge enhancement, \emph{Electronic Journal of Differential
Equations}, Conference 15, pp. 175--192, 2007.
\bibitem{LysakerLundervoldTai:03}
M.~Lysaker, A.~Lundervold, and X.-C. Tai;
Noise removal using fourth-order partial differential equation
with applications to medical magnetic resonance
images in space and time, \emph{IEEE Trans. Image Process.}, vol.~12,
no.~12, pp. 1579--1590, 2003.
\bibitem{MarquinaOsher:00} A.~Marquina and S.~Osher;
Explicit algorithms for a new time dependent model
based on level set motion for nonlinear deblurring and noise removal,
\emph{SIAM J. Sci. Comput.}, vol.~22, pp. 387--405, 2000.
\bibitem{Meyer:01} Y.~Meyer;
\emph{Oscillating Patterns in Image Processing and Nonlinear
Evolution Equations}, ser. University Lecture Series. Providence,
Rhode Island: American Mathematical Society,
2001, vol.~22.
\bibitem{NitzbergShiota:92} M.~Nitzberg and T.~Shiota;
Nonlinear image filtering with edge and corner
enhancement, \emph{IEEE Trans. on Pattern Anal. Mach. Intell.}, vol.~14,
pp. 826--833, 1992.
\bibitem{OsherETAL:04TV}
S.~Osher, M.~Burger, D.~Goldfarb, J.~Xu, and W.~Yin;
Using geometry and iterated refinement for inverse problems (1):
{Total} variation based image
restoration, Department of Mathematics, UCLA, LA, CA 90095, {CAM Report}
\#04-13, 2004.
\bibitem{OsherFedkiw:Book03} S.~Osher and R.~Fedkiw;
\emph{Level Set Methods and Dynamic Implicit
Surfaces}. New York: Springer-Verlag,
2003.
\bibitem{OsherSoleVese:03} S.~Osher, A.~{Sol\'e}, and L.~Vese;
Image decomposition and restoration using
total variation minimization and the {$H^{-1}$} norm, \emph{SIAM J.
Multiscale Model. Simul.}, vol.~1, no.~3, pp. 349--370, 2003.
\bibitem{PeronaMalik:90} P.~Perona and J.~Malik;
Scale-space and edge detection using anisotropic diffusion,
\emph{IEEE Trans. on Pattern Anal. Mach. Intell.}, vol.~12, pp.
629--639, 1990.
\bibitem{RudinOsherFatemi:92} L.~Rudin, S.~Osher, and E.~Fatemi;
Nonlinear total variation based noise
removal algorithms, \emph{Physica D}, vol.~60, pp. 259--268, 1992.
\bibitem{Weinstock:74} R.~Weinstock;
\emph{Calculus of Variations}. New York: Dover Publications, Inc., 1974.
\bibitem{YouKaveh:00} Y.-L. You and M.~Kaveh;
Fourth-order partial differential equations for noise
removal, \emph{IEEE Trans. Image Process.}, vol.~9, pp. 1723--1730, 2000.
\bibitem{YouETAL:96} Y.-L. You, W.~Xu, A.~Tannenbaum, and M.~Kaveh;
Behavioral analysis of anisotropic diffusion in image processing,
\emph{IEEE Trans. Image Process.}, vol.~5, pp. 1539--1553, 1996.
\end{thebibliography}
\end{document}