\documentclass[reqno]{amsart}
\usepackage{graphicx}
\usepackage{hyperref}
\AtBeginDocument{{\noindent\small
Sixth Mississippi State Conference on Differential Equations and
Computational Simulations,
{\em Electronic Journal of Differential Equations},
Conference 15 (2007), pp. 175--192.\newline
ISSN: 1072-6691. URL: \url{http://ejde.math.txstate.edu} or \url{http://ejde.math.unt.edu}
\newline ftp ejde.math.txstate.edu (login: ftp)}
\thanks{\copyright 2007 Texas State University - San Marcos.}
\vspace{9mm}}
\begin{document} \setcounter{page}{175}
\title[\hfilneg EJDE-2006/Conf/15\hfil Non-convex diffusion model]
{A non-convex diffusion model for simultaneous \\
image denoising and edge enhancement}
\author[S. Kim, H. Lim\hfil EJDE/Conf/15 \hfilneg]
{Seongjai Kim, Hyeona Lim} % in alphabetical order
\address{Seongjai Kim \newline
Department of Mathematics and Statistics,
Mississippi State University,
Mississippi State, MS 39762-5921, USA}
\email{skim@math.msstate.edu}
\address{Hyeona Lim \newline
Department of Mathematics and Statistics,
Mississippi State University,
Mississippi State, MS 39762-5921, USA}
\email{hlim@math.msstate.edu}
\thanks{Published February 28, 2007.}
\thanks{Partially supported by grants DMS-0312223 and DMS-0630798 from the NSF}
\subjclass[2000]{35K55, 65M06, 65M12}
\keywords{Fine structures; denoising; edge enhancement; \hfill\break\indent
nonphysical dissipation; total variation (TV) model;
non-convex (NC) diffusion model; \hfill\break\indent
texture-free residual (TFR) parameterization}
\begin{abstract}
Mathematical restoration models, in particular, total variation-based
models can easily lose fine structures during image denoising.
In order to overcome the drawback, this article introduces two
strategies: the non-convex (NC) diffusion and the texture-free residual
(TFR) parameterization.
A non-standard numerical procedure is suggested and its stability
is analyzed to effectively solve the NC diffusion model which is
mathematically unstable.
It has been numerically verified that the resulting algorithm,
incorporating the NC diffusion and TFR parameterization, is able
not only to reduce the noise satisfactorily but also to enhance
edges effectively at the same time.
Various numerical examples are shown to confirm the claim.
\end{abstract}
\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\section{Introduction} \label{Introduction}
Image restoration is an important image processing (IP) step for
various real-world image-related applications and is often necessary
as a pre-processing for other imaging techniques such as segmentation
and compression.
Thus image restoration methods have occupied a peculiar position
in IP, computer graphics, and their applications
\cite{GonzalezWoods:02Book,Meyer:01,MitraSicuranza:01,book_ms,Sapiro:01Book}.
There have been various partial differential equation (PDE)-based
models in image restoration such as the Perona-Malik model
\cite{PeronaMalik:90}, the motion by mean curvature
\cite{Kim:PDE_Color}, the total variation (TV) minimization
\cite{MarquinaOsher:00,RudinOsherFatemi:92}, and color restoration models
\cite{dn_cb1,dn_cs2,Kim:PDE_Color,dn_ks,dn_skm}.
These PDE-based models have been extensively studied to answer
fundamental questions in image restoration and have allowed
researchers and practitioners not only to introduce new mathematical
models but also to improve traditional algorithms
\cite{AlvarezLionsMorel:92,CatteLionsMorelColl:92,ChanOsherShen:TR,%
NitzbergShiota:92,YouETAL:96}.
Good references to work on them are e.g. Aubert-Kornprobst
\cite{AubertKornprobst:02}, Osher-Fedkiw \cite{OsherFedkiw:Book03},
and Sapiro \cite{Sapiro:01Book}.
However, most of these PDE-based restoration models and their numerical
realizations show a common drawback: {\it loss of fine structures}.
In particular, they often introduce an unnecessary numerical dissipation
on regions where the image content changes rapidly such as on edges and
textures.
Therefore it is very important to develop mathematical
models and/or numerical techniques which can effectively preserve fine
structures during the restoration.
The main objective in this article is to develop a model for simultaneous
image denoising and edge enhancement.
The objective is quite challenging, because image denoising filters out
high-frequency components of the image while the edges in principle show
high frequencies.
To overcome difficulties in conventional PDE-models,
we will consider a non-convex model incorporating an effective
constraint parameter which can both suppress noise and enhance edges
at the same time.
An outline of the paper is as follows.
In the next section, we briefly review TV-based restoration models
and recent studies for the reduction of nonphysical dissipation.
Section~\ref{Dissipation} analyzes sources of the nonphysical dissipation.
Based on the analysis, we suggest a non-convex (NC) diffusion model which
is unstable mathematically.
A non-standard numerical procedure is introduced for the new model and
analyzed for stability.
In Section~\ref{Constraint}, we study an effective strategy for the
choice of the constraint parameter, called the texture-free residual (TFR)
parameterization.
Section~\ref{Numerical} presents various numerical examples to show
effectiveness of the NC diffusion and the TFR parameterization.
It has been numerically verified that the resulting algorithm
can satisfactorily reduce the noise and enhance edges simultaneously.
\section{Preliminaries} \label{Preliminaries}
\subsection{TV-based models} \label{ss_TV-Based}
Let $f$ be the observed image of the form
\begin{equation}
f=K*u+v, \label{eqn_u0}
\end{equation}
where $K$ is a blurring operator, $*$ represents the convolution,
$v$ denotes the noise, and $u$ is the desired image to find.
In this article, our main concern is to remove noise.
Thus we assume that $K=I$, the identity operator.
A common denoising technique is to minimize a functional of
gradient, given as
\begin{equation}
\min_u F_p(u), \quad
F_p(u)=\int_{\Omega}|\nabla u|^p\,d\mathbf{x}
+\frac{\lambda}2\,\|f-u\|^2, \label{eqn_Min_Fp}
\end{equation}
where $\lambda\ge0$ and $\|\cdot\|$ denotes the $L^{2}$-norm.
When $p=1$, the first term in $F_1(u)$ is called the
{\it total variation} (TV).
It is often convenient to transform the minimization problem
\eqref{eqn_Min_Fp} into a differential equation, called the
{\it Euler-Lagrange equation}.
Recall that for the minimization problem in 2D of the form
$$
\min_{u}\int_{\Omega}G(\mathbf{x},u,\nabla u)d\mathbf{x},
%\label{eqn_Min_f}
$$
the Euler-Lagrange equation \cite[\S3.3]{Weinstock:74} reads
$$
\Big(-\frac{\partial}{\partial x} \frac{\partial}{\partial u_x}-
\frac{\partial}{\partial y} \frac{\partial}{\partial u_y}+
\frac{\partial}{\partial u}\Big)G =0.
$$
By applying the variational calculus, one can transform the minimization
problem \eqref{eqn_Min_Fp} into an equivalent differential equation:
\begin{equation}
-p\,\nabla\cdot\Big(\frac{\nabla u}{|\nabla u|^{2-p}}\Big)
-\lambda\,(f-u) =0.
\label{eqn_EL_p_0}
\end{equation}
For a convenient numerical simulation of \eqref{eqn_EL_p_0},
we may parameterize the energy descent direction by an artificial
time $t$; the resulting evolutionary Euler-Lagrange equation can
be formulated as:
\begin{equation}
\frac{\partial u}{\partial t}-\nabla\cdot\Big(\frac{\nabla u}{|\nabla u|^{2-p}}\Big)
=\frac{\lambda}{p}(f-u).
\label{eqn_EL_p}
\end{equation}
The restored image becomes closer to $f$ as $\lambda$ grows.
For the above equation, the given image is set for the initial value,
i.e., $u(t=0)=f$.
For the boundary condition, the no-flux condition has often been adopted
for simplicity.
Note that to prevent the denominator $|\nabla u|$ approaching zero,
it must be regularized as
\begin{equation}
|\nabla u| \,\approx\, |\nabla^{\varepsilon} u|\,:=\,(u_x^2+u_y^2+\varepsilon^2)^{1/2},
\label{eqn_Gradient_u}
\end{equation}
for some $\varepsilon>0$ small.
Such a regularization can be a significant source of nonphysical
dissipation to be discussed later in this article.
As with most other PDE-based models, the model \eqref{eqn_EL_p}, $1\le p\le2$,
can easily lose fine structures (due to nonphysical dissipation).
An interesting case in \eqref{eqn_EL_p} is when $p=1$, the {\it TV model}:
\begin{equation}
\frac{\partial u}{\partial t}-\nabla\cdot\Big(\frac{\nabla u}{|\nabla u|}\Big)
=\lambda(f-u).
\label{eqn_TV_Model}
\end{equation}
It is known that the TV model tends to transform the image into
a collection of locally constant portions, which is called the
{\it staircasing}.
The removal of staircasing has been an interesting research topic
\cite{dn_blo,dn_cb1,dn_cl}.
As an anti-staircasing approach, Marquina and Osher
\cite{MarquinaOsher:00} introduced the {\it improved TV} (ITV) model
\begin{equation}
\frac{\partial u}{\partial t}-{|\nabla u|}\,\nabla\cdot \Big(\frac{\nabla u}{|\nabla u|}\Big)
=\lambda\,{|\nabla u|}\,(f-u).
\label{eqn_ITVM}
\end{equation}
In order to obtain the above model, the authors have first scaled
the stationary TV model \eqref{eqn_EL_p_0} by a factor of
$|\nabla u|$ and then introduced the time parameterization.
The scaling can be beneficial in some aspects, as indicated in
\cite[\S11.3]{OsherFedkiw:Book03}.
However, as Cha and Kim have analyzed in their recent paper
\cite{ChaKim:MONTE}, such a scaling is hardly advantageous for the
reduction of nonphysical dissipation.
Note that since $|\nabla u|$ vanishes only in flat regions, the ITV
model must have the same steady states as the TV model \eqref{eqn_TV_Model}.
\subsection{Recent studies for the reduction of nonphysical dissipation}
\label{ss_Recent_Studies}
For the TV-based image restoration, the staircasing effect is now well
understood and relatively easy to handle compared with nonphysical
dissipation.
Here we briefly review three recent studies for the reduction of
nonphysical dissipation: employment of the Besov norm \cite{Meyer:01},
iterative refinement \cite{OsherETAL:04TV}, and
the method of nonflat time evolution (MONTE) \cite{ChaKim:MONTE}.
{\bf Employment of Besov norm}:
As Meyer \cite{Meyer:01} analyzed, the $L^2$-norm applied to the
residual $(f-u)$ in the TV minimization \eqref{eqn_Min_Fp} is not
sensitive enough to distinguish the noise from textures.
As a consequence, the residual can easily contain not only the noise
but also fine structures and therefore the restored image $u$ turns out
to be erroneous and blurry.
To reduce the blur associated with the TV model, Meyer
suggested the following modified variational problem:
\begin{equation}
u=\arg\min_{u\in BV(\Omega)}(|u|_{BV}+{\lambda}\|f-u\|_*),
\label{eqn_Meyer}
\end{equation}
where $|\cdot|_{BV}$ is
the bounded variation (BV) seminorm defined as
$$
|u|_{BV}=\int_{\Omega}|\nabla u|\,d\mathbf{x}
$$
and $\|\cdot\|_*$ denotes the norm in the Besov space
$B_{\infty}^{-1,\infty}$; see \cite[\S1.13--1.15]{Meyer:01} for details.
The Besov norm has shown a better ability in the distinction of different
textures and therefore it can preserve more of fine structures.
However, the model \eqref{eqn_Meyer} is difficult to minimize, in
particular, utilizing the Euler-Lagrange equation approach.
{\bf Iterative refinement}:
In order to effectively suppress nonphysical dissipation,
Osher {\it et al.} \cite{OsherETAL:04TV} recently suggested
an iterative refinement procedure:
\begin{itemize}
\itemsep=-2pt
\item {\it Initialize}: $u_0=e_0=0$.
\item {\it For $k=1,2,\dots$}: compute $u_{k}$ as the minimizer
of the following model
\begin{equation}
u_{k}=\arg\min_{u\in BV(\Omega)}
\Big(|u|_{BV}+\frac{\lambda}{2}\|f+e_{k-1}-u\|^2\Big)
\label{eqn_TV_Iter}
\end{equation}
and update
\begin{equation}
e_{k}=e_{k-1}+f-u_{k}.
\label{eqn_ek}
\end{equation}
\end{itemize}
The authors have proved that $u_k$ converges monotonically in $L^2$
to $f$, the noisy image, as $k\to\infty$.
However, it should be noticed that such a mathematical property is not
necessarily advantageous to a denoising algorithm, because the iterates
$u_k$ can get noisier as the iteration goes.
The iterative refinement recovers not only fine structures
but also the noise \cite{OsherETAL:04TV}.
{\bf The method of nonflat time evolution (MONTE) \cite{ChaKim:MONTE}}:
The MONTE is a numerical approach for an effective reduction
of nonphysical dissipation.
Consider a model of the form
\begin{equation}
\frac{\partial u}{\partial t}+\mathcal{L} u =\mathcal{R}(f-u),
\label{eqn_General_Denoising}
\end{equation}
where $\mathcal{L}$ is a nonlinear diffusion operator and $\mathcal{R}$ denotes a
nonnegative constraint term.
The above equation is a generalized restoration model which fits with
most of known PDE-based models including the TV model \eqref{eqn_TV_Model},
the ITV model \eqref{eqn_ITVM}, the Meyer model \eqref{eqn_Meyer},
the Perona-Malik model \cite{PeronaMalik:90}, and the
motion by mean curvature.
The MONTE begins with a keen observation:
Plugging $v=f-u$ into the model \eqref{eqn_General_Denoising},
we obtain the associated {\it residual equation}
\begin{equation}
\frac{\partial v}{\partial t}+\mathcal{R} v=\mathcal{L} u.
\label{eqn_VU}
\end{equation}
Then one can see from \eqref{eqn_VU} that the residual $v$ increases
as the diffusion term $\mathcal{L} u$ grows.
Note that the diffusion term is often larger in modulus on fine structures
than on smooth regions.
Thus the residual becomes extensive on fine structures; as a result,
the restored image $u$ must involve a lavish nonphysical dissipation
wherever the diffusion term is large in modulus.
In order to reduce such nonphysical dissipation effectively,
Cha and Kim \cite{ChaKim:MONTE} have introduced the MONTE, in which
the timestep size (in the numerical time integration) is determined as
\begin{equation}
\Delta t(\mathbf{x},t^n)=\tau\,F(\mathcal{L} u(\mathbf{x},t^{n-1})),
\label{eqn_Dynamic_Dt}
\end{equation}
where $\tau>0$ is a constant and, for some $\gamma,\,\eta>0$,
$$
F(s)=\frac{\gamma}{1+\eta\,|s|}.
$$
Thus the solution is computed on a nonflat surface of time evolution
and the corresponding residual becomes a standard numerical realization
of
\begin{equation}
\frac{\partial v}{\partial t}+F(\mathcal{L} u)\,\mathcal{R} v = F(\mathcal{L} u)\,\mathcal{L} u.
\label{eqn_General_F_Error}
\end{equation}
Note that when the ratio $\gamma/\eta$ is fixed,
the function $G(s)\,(:=F(s)s)$ becomes flatter on $\{s:|s|\ge s_0>0\}$,
approaching $\gamma/\eta$, as $\eta$ increases.
Thus with appropriately chosen parameters ($\gamma$ and $\eta$),
the MONTE allows the residual to involve {\it equalized diffusion}
except for very smooth regions.
As a consequence, the restored image can preserve fine structures more
effectively, which has also been numerically verified \cite{ChaKim:MONTE}.
Since the MONTE can be viewed as a time-stepping
numerical procedure, it is applicable to various restoration models.
The MONTE also improves computational efficiency for all
aforementioned PDE-based models; see \cite{ChaKim:MONTE} for details.
However, to further improve edge-preservation capability,
the operator $F(\mathcal{L} u)\,\mathcal{R}$ must be well understood,
because otherwise the diffusion equalization can be degraded
in \eqref{eqn_General_F_Error}.
The main objectives in this article are (1) to analyze sources of
nonphysical dissipation of TV-based models, (2) to introduce a non-convex
diffusion model as a remedy for nonphysical dissipation and as a method
of edge enhancement, and (3) to study the operator $\mathcal{R}$;
in order to remove noise, preserving fine structures more effectively,
and at the same time, to enhance edges.
\section{Control of Nonphysical Dissipation} \label{Dissipation}
This section begins with an analysis for sources of nonphysical
dissipation: regularization and convexity.
Then we will introduce a new non-convex diffusion model and
its non-standard numerical procedure which can control dissipation
satisfactorily and enhance edges effectively.
\subsection{Regularization and convexity} \label{ss_Regularization}
Due to the regularization in \eqref{eqn_Gradient_u},
the minimization problem in \eqref{eqn_Min_Fp} must be modified as
\begin{equation}
\min_u F_{\varepsilon,\,p}(u), \quad
F_{\varepsilon,\,p}(u)=\int_{\Omega}|\nabla^{\varepsilon} u|^{p}\,d\mathbf{x}
+\frac{\lambda}2\,\|f-u\|^2,
\label{eqn_Min_F1_ve}
\end{equation}
and its corresponding Euler-Lagrange equation becomes
\begin{equation}
-p\,\nabla\cdot\Big(\frac{\nabla u}{|\nabla^{\varepsilon} u|^{2-p}}\Big)
-\lambda\,(f-u) =0.
\label{eqn_TV_PDE}
\end{equation}
\begin{figure}
\centerline{(a)}
\centerline{ \includegraphics[width=.48\textwidth]{figures/tv_sharp} }
\vskip1mm
\centerline{(b)}
\centerline{ \includegraphics[width=.48\textwidth]{figures/tv_smooth}}
\caption{A cartoon image in one-dimensional space (a)
and its blurry version (b).
Gradient magnitudes are indicated in the bottom of each figure.}
\label{fig_TV_Blur}
\end{figure}
In the following, we will see that the regularization becomes
a source of numerical dissipation.
To understand the combined effects of $p$ and $\varepsilon$ in
\eqref{eqn_Min_F1_ve} (and therefore in \eqref{eqn_TV_PDE}), we will
first measure the functional $F_{\varepsilon,\,p}$, with $\lambda=0$, for the
cartoon images as in Figure~\ref{fig_TV_Blur}.
Table~\ref{tbl_TV_Blur} presents the values of $F_{\varepsilon,\,p}$
for various selections of $\varepsilon$ and $p$ (with $\lambda=0$).
Consider the first case $F_{0,\,2}$; the value is smaller for the
blurry image in Figure~\ref{fig_TV_Blur}(b), which implies that the
minimization with $(\varepsilon,p)=(0,2)$ invokes blur.
Applying the same logic, one can see that $F_{0,\,1}$ does not
introduce blur during the minimization.
But, when we set $(\varepsilon,p)=(0.1,1)$ so as to prevent the denominator
from approaching zero in the Euler-Lagrange equation, the minimization
algorithm will make the image blurry, because the blurry image reduces
the value of the functional.
Here an interesting case is when $p<1$.
As one can see from the table, when $p=0.9$
the functional has a smaller value for the sharper image.
Thus the functional $F_{\varepsilon,\,p}$, with appropriate $p<1$ and $\varepsilon>0$,
can be utilized for edge enhancement, at least for the cartoon image
in Figure~\ref{fig_TV_Blur}.
%%------------------------------------------------------------
\begin{table*}
\begin{center}
\caption{The values of the functional $F_{\varepsilon,\,p}$, with $\lambda=0$,
for images in Figure~\ref{fig_TV_Blur}.}
\label{tbl_TV_Blur}
\vskip1mm
\begin{tabular}{|l||l|l|} \hline
$F_{\varepsilon,\,p}$ & \multicolumn{1}{c|}{Figure~\ref{fig_TV_Blur}(a)}
& \multicolumn{1}{c|}{Figure~\ref{fig_TV_Blur}(b)} \\ \hline \hline
$F_{0,\,2}$ & $0+0+1^2+0=1$ & $ 0+0.5^2+0.5^2+0=0.5$ \\[1mm]
$F_{0,\,1}$ & $0+0+1+0=1$ & $ 0+0.5+0.5+0=1$ \\[1mm]
$F_{0.1,\,1}$ & $0.1+0.1+\sqrt{1.01}+0.1\approx1.31$
& $0.1+\sqrt{0.26}+\sqrt{0.26}+0.1 \approx1.22$\\[1mm]
$F_{0,\,0.9}$ & $0+0+1^{0.9}+0=1$ & $0+0.5^{0.9}+0.5^{0.9}+0 \approx1.07$
\\ \hline
\end{tabular}
\end{center}
\end{table*}
We summarize what we have seen with $F_{\varepsilon,\,p}$, as follows:
\begin{itemize}
\itemsep=-1pt
\item The strictly convex minimization ($p>1$) makes images blurrier.
\item The TV model itself ($p=1$ and $\varepsilon=0$) may not introduce ``blur''.
However, its regularization ($p=1$ and $\varepsilon>0$) does.
\item When $p<1$, the model can make the image sharper.
For the non-convex functional, there may not exist the minimizer;
however, this does not mean that the discrete version of the equation
is not interesting by itself.
This case has motivated one of the authors to develop an image
denoising model which can preserve fine structures satisfactorily
for color images \cite{Kim:PDE_Color}.
\end{itemize}
Based on the above observation, we introduce the following model:
\begin{equation}
\frac{\partial u}{\partial t}-|\nabla^{\varepsilon} u|^{1+\omega}
\nabla\cdot\Big(\frac{\nabla u}{|\nabla^{\varepsilon} u|^{1+\omega}}\Big)
=\beta\,(f-u), \quad \omega\in[-1,1),
\label{eqn_New_Model}
\end{equation}
where $\beta=\beta(\mathbf{x},t)\ge0$.
Following the idea of the ITV model \eqref{eqn_ITVM},
the parameter $\beta$ can be chosen as
$$
\beta=\frac{\lambda}{1-\omega}|\nabla^{\varepsilon} u|^{1+\omega},
$$
which is related to the minimization problem \eqref{eqn_Min_F1_ve} with
$p=1-\omega$.
There is no mathematical criterion for the choice of $\omega$;
we will set it heuristically between 0 and 1 (non-convex diffusion).
The constraint parameter can play a crucial role in the preservation of
fine structures.
In \S\ref{Constraint}, we will study an effective numerical procedure
for the determination of $\beta(\mathbf{x},t)$.
In order to understand characteristics of the new model
\eqref{eqn_New_Model}, we consider its one-dimensional formulation:
$$
\frac{\partial u}{\partial t}-(u_x^2+\varepsilon^2)^{(1+\omega)/2}
\Big(\frac{u_x}{(u_x^2+\varepsilon^2)^{(1+\omega)/2}}\Big)_x
=\beta\,(f-u),
$$
or equivalently,
\begin{equation}
\frac{\partial u}{\partial t}-\frac{\varepsilon^2-\omega u_x^2}{u_x^2+\varepsilon^2}\,u_{xx}
=\beta\,(f-u).
\label{eqn_New_Model_1D}
\end{equation}
Thus, when $\omega>0$, the equation \eqref{eqn_New_Model_1D} (and
therefore \eqref{eqn_New_Model}) acts as a diffusion model for small
derivatives ($|u_x|\le\varepsilon/\sqrt{\omega}$) and as a reverse-time
heat equation on regions where the slope is relatively large
($|u_x|>\varepsilon/\sqrt{\omega}$).
Such a property will play an important role for a simultaneous
denoising and edge enhancement, although the mathematical model is
unstable.
In the following subsection, we will introduce an efficient linearized
time-stepping procedure along with a non-standard spatial numerical
scheme, which makes the resulting algorithm stable for reasonable choices
of simulation parameters.
\subsection{Numerical procedures} \label{ss_Numerical}
Here we present numerical procedures for \eqref{eqn_New_Model}.
The spatial scheme can be viewed as a weighted average and the temporal
approximation is based on a linearized $\theta$-method and the
alternating direction implicit (ADI) perturbation;
the resulting algorithm is unconditionally stable for $\theta=1$
(fully implicit) and stable for a reasonably large timestep size
when $\theta=1/2$ (Crank-Nicolson).
Denote the timestep size by $\Delta t$.
Set $t^n=n\Delta t$ and $u^n=u(\cdot,t^n)$ for $n\ge0$.
Then, the problem \eqref{eqn_New_Model} can be linearized
by evaluating the nonlinear parts from the previous time level.
Consider the linearized $\theta$-method for \eqref{eqn_New_Model}
of the form:
\begin{equation}
\frac{u^n-u^{n-1}}{\Delta t}
+(\mathcal{A}^{n-1}+\beta^{n} I)\,[\theta u^n+(1-\theta)u^{n-1}]
=\beta^{n}\,f, \quad 0\le\theta\le1,
\label{eqn_theta_method}
\end{equation}
where $\mathcal{A}^{n-1}$ is a spatial approximation
of a linearized diffusion operator, i.e.,
$$
\mathcal{A}^{n-1}u^m\approx -|\nabla^{\varepsilon} u^{n-1}|^{1+\omega}
\nabla\cdot\Big(\frac{\nabla u^m}{|\nabla^{\varepsilon} u^{n-1}|^{1+\omega}}\Big),
\quad m=n-1,n.
$$
For an efficient computation of \eqref{eqn_theta_method},
the matrix $\mathcal{A}^{n-1}$ can be integrated to be separable, i.e.,
$\mathcal{A}^{n-1}=\mathcal{A}^{n-1}_1+\mathcal{A}^{n-1}_2$, with
\begin{equation}
\mathcal{A}^{n-1}_{\ell}u^m \approx -|\nabla^{\varepsilon} u^{n-1}|^{1+\omega}\,
\partial_{x_{\ell}}\Big(\frac{\partial_{x_{\ell}}u^m}
{|\nabla^{\varepsilon} u^{n-1}|^{1+\omega}}\Big),
\quad \ell=1,2,
\label{eqn_A_ell}
\end{equation}
where
$\nabla=(\partial_{x_{1}},\partial_{x_{2}})^T=(\partial_x,\partial_y)^T$.
Let
$$
\mathcal{B}_{\ell}^{n-1}=\mathcal{A}_{\ell}^{n-1}+\frac12\beta^{n} I, \quad \ell=1,2.
$$
Then, the {\it alternating direction implicit} (ADI) method
\cite{DouglasGunn:64,DouglasKim:01ADFS}
is a perturbation of \eqref{eqn_theta_method}, with
a splitting error of $\mathcal{O}(\Delta t^2)$:
\begin{equation}
\begin{aligned}
\big[1+\theta\Delta t \mathcal{B}_{1}^{n-1}\big]\,u^{*}
&= \big[1-(1-\theta)\Delta t \mathcal{B}_{1}^{n-1}
-\Delta t \mathcal{B}_{2}^{n-1}\big]\,u^{n-1} +\Delta t \beta^{n}\,f, \\
\big[1+\theta\Delta t \mathcal{B}_{2}^{n-1}\big]\,u^n
&= u^{*}+\theta\Delta t \mathcal{B}_{2}^{n-1}u^{n-1},
\end{aligned}
\label{eqn_theta_ADI2}
\end{equation}
where $u^{*}$ is an intermediate solution.
When the matrices $\mathcal{A}_{\ell}^{n-1}$ are composed with a 3-point stencil,
each sweep in \eqref{eqn_theta_ADI2} can be carried out by inverting a
series of tri-diagonal matrices; the ADI is efficient.
Now, we will construct the matrix $\mathcal{A}_1^{n-1}$; one can obtain
$\mathcal{A}_2^{n-1}$ similarly.
Let $\mathcal{D}\,u^{n-1}_{i-1/2,j}$ be a finite difference
approximation of $|\nabla u^{n-1}|$ evaluated at $\mathbf{x}_{i-1/2,j}$,
the mid point of $\mathbf{x}_{i-1,j}$ and $\mathbf{x}_{i,j}$.
For example, a second-order scheme reads
\begin{equation}
\mathcal{D} u^{n-1}_{i-1/2,j}
= \Big((u^{n-1}_{i,j}-u^{n-1}_{i-1,j})^2
+\Big[\frac12\Big(\frac{u^{n-1}_{i-1,j+1}+u^{n-1}_{i,j+1}}2
-\frac{u^{n-1}_{i-1,j-1}+u^{n-1}_{i,j-1}}2\Big)\Big]^2\Big)^{1/2}.
\label{eqn_DijW1}
\end{equation}
Define
\begin{equation}
d_{ij,W}^{n-1}=[(\mathcal{D}\,u^{n-1}_{i-1/2,j})^2+\varepsilon^2]^{(1+\omega)/2},
\quad d_{ij,E}^{n-1}=d_{i+1,j,W}^{n-1}.
\label{eqn_Dij}
\end{equation}
Then the differential operators in \eqref{eqn_A_ell} can be approximated as
\begin{equation}
\begin{gathered}
-\partial_{x_{1}}\Big(\frac{\partial_{x_{1}}u^m}
{|\nabla^{\varepsilon} u^{n-1}|^{1+\omega}}\Big)
\approx -\frac{1}{d_{ij,W}^{n-1}}u^m_{i-1,j}
+\Big(\frac{1}{d_{ij,W}^{n-1}}+\frac{1}{d_{ij,E}^{n-1}}\Big)u^m_{i,j}
-\frac{1}{d_{ij,E}^{n-1}}u^m_{i+1,j}, \\[.2in]
|\nabla^{\varepsilon} u^{n-1}|^{1+\omega}
\approx 2\frac{d_{ij,W}^{n-1}\cdot d_{ij,E}^{n-1}}{d_{ij,W}^{n-1}+d_{ij,E}^{n-1}}.
\end{gathered}
\label{eqn_A_ell_FDM}
\end{equation}
By combining the right sides of \eqref{eqn_A_ell_FDM}, we can obtain
the three consecutive non-zero elements of the matrix $\mathcal{A}_{1}^{n-1}$
corresponding to the pixel $\mathbf{x}_{ij}$:
\begin{equation}
[\mathcal{A}_{1}^{n-1}]_{ij}=(-a_{ij,W}^{n-1},\,2,\,-a_{ij,E}^{n-1}),
\label{eqn_A_ij}
\end{equation}
where
\begin{equation}
a_{ij,W}^{n-1}=\frac{2\,d_{ij,E}^{n-1}}{d_{ij,W}^{n-1}+d_{ij,E}^{n-1}}, \quad
a_{ij,E}^{n-1}=\frac{2\,d_{ij,W}^{n-1}}{d_{ij,W}^{n-1}+d_{ij,E}^{n-1}}.
\label{eqn_AijEAijW}
\end{equation}
Note that $a_{ij,W}^{n-1}+a_{ij,E}^{n-1}=2$.
The above non-standard numerical scheme has been successfully applied
as an edge-forming formula for image zooming of arbitrary magnification
factors \cite{ChaKim:Edge_Form_Color,ChaKim:Edge_Form}.
The following theorem analyzes stability for
the $\theta$-method \eqref{eqn_theta_method}.
\begin{theorem} \label{thm_Stability}
Define $\beta_0=\min_{i,j,n}\beta^n_{ij}$
and $\beta_1=\max_{i,j,n}\beta^n_{ij}$.
Suppose that the $\theta$-method $\eqref{eqn_theta_method}$ incorporates
the numerical scheme in $\eqref{eqn_DijW1}$--$\eqref{eqn_AijEAijW}$
and let
\begin{equation}
(4+\beta_1)\,(1-\theta)\Delta t\le 1.
\label{eqn_Max_Condition}
\end{equation}
Then $\eqref{eqn_theta_method}$ is stable, satisfies the maximum
principle, and its solution satisfies
\begin{equation}
\|u^n-f\|_{\infty} \le
\frac{4}{4+\beta_0}\,\|f\|_{\infty}, \quad n\ge0.
\label{eqn_Stab_Estimate}
\end{equation}
\end{theorem}
\begin{proof}
We will first prove the following inequality:
\begin{equation}
\min_{i,j}f_{ij} \leq u_{ij}^n
\leq \|f\|_{\infty}, \quad n\ge0,
\label{eqn_Stable}
\end{equation}
which implies the maximum principle and stability.
The equation \eqref{eqn_theta_method} at a point $\mathbf{x}_{ij}$
can be written as
\begin{equation}
\begin{aligned}
&[1+(4+\beta^{n}_{ij})\theta\Delta t]\,u_{ij}^n \\
& =\theta\Delta t
[a_{ij,W}^{n-1}\,u_{i-1,j}^n +a_{ij,E}^{n-1}\,u_{i+1,j}^n
+a_{ij,S}^{n-1}\,u_{i,j-1}^n +a_{ij,N}^{n-1}\,u_{i,j+1}^n] \\
&\quad +(1-\theta)\Delta t
[a_{ij,W}^{n-1}\,u_{i-1,j}^{n-1} +a_{ij,E}^{n-1}\,u_{i+1,j}^{n-1}
+a_{ij,S}^{n-1}\,u_{i,j-1}^{n-1} +a_{ij,N}^{n-1}\,u_{i,j+1}^{n-1}] \\
&\quad +[1-(4+\beta^{n}_{ij})(1-\theta)\Delta t]\,u_{ij}^{n-1}
+\Delta t\beta^{n}_{ij}\,f_{ij}.
\end{aligned}
\label{eqn_theta_method_Xij}
\end{equation}
Let $u_{ij}^n$ be a local minimum.
Then, it follows from \eqref{eqn_Max_Condition} and the identity
\begin{equation}
a_{ij,W}^{n-1}+a_{ij,E}^{n-1}+a_{ij,S}^{n-1}+a_{ij,N}^{n-1}=4
\label{eqn_Equal_4}
\end{equation}
that each of coefficients in the right side of
\eqref{eqn_theta_method_Xij}, including the term of $f_{ij}$,
is nonnegative and their sum becomes $1+(4+\beta^{n}_{ij})\theta\Delta t$.
Thus, since $u_{ij}^n$ is smaller than or equal to the neighboring values,
we must have
$$
f_{ij} \le u_{ij}^n.
$$
The inequality holds for all local minima, which proves
the first inequality in \eqref{eqn_Stable}.
The same argument can be applied for local maxima to verify
the other inequality.
Now, to prove \eqref{eqn_Stab_Estimate},
let $\delta_{ij}^{n}=u_{ij}^{n}-f_{ij}$, $n\ge0$.
Then it follows from \eqref{eqn_theta_method_Xij} that
\begin{equation}
\begin{aligned}
& [1+(4+\beta^{n}_{ij})\,\theta\Delta t]\delta_{ij}^n \\
& =\theta\Delta t
[a_{ij,W}^{n-1}\,u_{i-1,j}^n +a_{ij,E}^{n-1}\,u_{i+1,j}^n
+a_{ij,S}^{n-1}\,u_{i,j-1}^n +a_{ij,N}^{n-1}\,u_{i,j+1}^n] \\
&\quad +(1-\theta)\Delta t
[a_{ij,W}^{n-1}\,u_{i-1,j}^{n-1} +a_{ij,E}^{n-1}\,u_{i+1,j}^{n-1}
+a_{ij,S}^{n-1}\,u_{i,j-1}^{n-1} +a_{ij,N}^{n-1}\,u_{i,j+1}^{n-1}] \\
&\quad -4\Delta t f_{ij}
+[1-(4+\beta^{n}_{ij})(1-\theta)\Delta t]\,\delta_{ij}^{n-1}.
\end{aligned}
\label{eqn_BP4}
\end{equation}
Thus, utilizing \eqref{eqn_Stable} and \eqref{eqn_Equal_4}, we have
\begin{equation}
|\delta_{ij}^n| \leq
\frac{4\Delta t}{1+(4+\beta_0)\,\theta\Delta t}\,\|f\|_{\infty}
+\gamma_0 \,\|\delta^{n-1}\|_{\infty},
\label{eqn_BP6}
\end{equation}
where
$$
\gamma_0=\frac{1-(4+\beta_0)(1-\theta)\Delta t}{1+(4+\beta_0)\,\theta\Delta t}
=1-\frac{(4+\beta_0)\Delta t}{1+(4+\beta_0)\,\theta\Delta t}.
$$
The inequality \eqref{eqn_Max_Condition} also holds when
$\beta_1$ is replaced by $\beta_0$, i.e.,
$$
(4+\beta_0)\Delta t \leq 1+(4+\beta_0)\,\theta\Delta t,
$$
which implies $0\le\gamma_0<1$.
Since $\|\delta^{0}\|_{\infty}=0$, we obtain
\begin{equation}
\begin{aligned}
|\delta_{ij}^n|
&\le \frac{4\Delta t}{1+(4+\beta_0)\,\theta\Delta t}
\cdot\sum_{k=0}^{n-1}\gamma_0^k\cdot\|f\|_{\infty} \\[3mm]
&\le \frac{4\Delta t}{1+(4+\beta_0)\,\theta\Delta t}
\cdot\frac{1}{1-\gamma_0}\cdot\|f\|_{\infty}\\
&= \frac{4}{4+\beta_0}\,\|f\|_{\infty},
\end{aligned}
\label{eqn_BP8}
\end{equation}
which completes the proof.
\end{proof}
The above analysis deserves a few remarks.
\begin{enumerate}
\item The stability condition in Theorem~\ref{thm_Stability} holds
independently of $\omega\in[-1,1)$.
\item The $\theta$-method is unconditionally stable for $\theta=1$.
When $\theta=1/2$ (Crank-Nicolson), the stability condition reads
$$
\Delta t\le\frac{2}{4+\beta_1}.
$$
However, in practice, it is stable for reasonable choices
of the timestep size, say, $\Delta t\le2$.
The Crank-Nicolson scheme is preferred due to a smaller numerical error.
\item The main diagonal of the matrix $\mathcal{A}^{n-1}$ is 4 for all $n\ge1$,
independently of $\omega$ and of the image content.
The numerical scheme in \eqref{eqn_DijW1}-\eqref{eqn_AijEAijW},
producing such a diffusion matrix, plays important roles
in mathematical analysis and practical computation.
\item As one can see from \eqref{eqn_Stab_Estimate}, the residual
$(f-u)$ becomes smaller in modulus as $\beta$ increases.
A question may arise as to whether the estimate can be made sharper or not.
To answer the question, we consider an example.
Let $f$ be zero at all pixels except one point, say $\mathbf{x}_0$.
Then the desired image to recover must be zero: $u\equiv0$, which is
possible when $\beta=0$ in a neighborhood of $\mathbf{x}_0$.
In this case, $\|u-f\|_{\infty}=\|f\|_{\infty}=f(\mathbf{x}_0)$.
It seems to the authors that the estimate \eqref{eqn_Stab_Estimate}
may not be made sharper, as long as the maximum norm is the measure.
\end{enumerate}
We close this section by determining the stationary solution of
the $\theta$-method \eqref{eqn_theta_method} incorporating the numerical
scheme in \eqref{eqn_DijW1}--\eqref{eqn_AijEAijW}.
By removing the superscripts $n$ and $n-1$ from \eqref{eqn_theta_method_Xij},
one can obtain
$$
(4+\beta_{ij})u_{ij}=u_{i-1,j}+u_{i+1,j}+u_{i,j-1}+u_{i,j+1}
+\beta_{ij}f_{ij},
$$
which is also satisfied by a second-order finite difference
solution of the following elliptic equation
\begin{equation}
-\nabla\cdot\nabla u=\beta(f-u).
\label{eqn_Poisson}
\end{equation}
Hence we may conclude that the iterates of the $\theta$-method
\eqref{eqn_theta_method} approach the solution of \eqref{eqn_Poisson},
provided that the numerical scheme \eqref{eqn_DijW1}-\eqref{eqn_AijEAijW}
is incorporated and the simulation parameters satisfy
\eqref{eqn_Max_Condition}.
Note that the solution of \eqref{eqn_Poisson} can be controlled to some
degree by selecting $\beta$ appropriately.
In practice, the computation in image restoration stops much earlier
than reaching the stationary state; however, the restored image must
show some characteristics of the stationary solution.
In this point of view, the choice of the constraint parameter $\beta$
can be crucial for an effective image restoration.
\section{The Constraint Parameter} \label{Constraint}
The selection of the constraint parameter has been an interesting problem
for PDE-based models, in particular, for the TV model \eqref{eqn_TV_Model}
and its relatives.
The basic mechanism of the PDE-based denoising is diffusion.
Thus the parameter $\beta$ cannot be too large; it must be small enough
to introduce a sufficient amount of diffusion.
On the other hand, it should be large enough to keep the details in the
image.
However, in the literature, the parameter has been chosen constant for
most cases; the resulting models either smear out fine structures
more excessively than desired or leave a certain amount of noise in
the restored image.
In order to overcome the difficulty, the parameter must be variable;
it should become larger wherever dissipation is excessive, remaining small
elsewhere.
In the following, we will consider an automatic and effective numerical
method for the determination of the constraint function $\beta(\mathbf{x},t)$:
\begin{enumerate}
\item Set $\beta$ as a constant:
\begin{equation}
\beta(\mathbf{x},0)=\beta_0.
\label{eqn_TFR1}
\end{equation}
\item For $n=2,3,\dots$
\begin{enumerate}
\itemsep=-1pt
\item[(2a)] Compute the absolute residual and a quantity $G_{Res}^{n-1}$:
\begin{equation}
\begin{aligned}
R^{n-1} &= |f-u^{n-1}|, \\
G_{Res}^{n-1}&= \max\Big(0,\,\mathcal{S}_{m}(R^{n-1})-\overline{R^{n-1}}\Big),
\end{aligned}\label{eqn_TFR2}
\end{equation}
where $\mathcal{S}_{m}$ is a smoother and $\overline{R^{n-1}}$ denotes
the $L^2$-average of $R^{n-1}$.
\item[(2b)] Update:
\begin{equation}
\beta^n=\beta^{n-1}\,+\,\gamma^n\,G_{Res}^{n-1},
\label{eqn_TFR3}
\end{equation}
where $\gamma^n$ is a scaling factor having the property:
$\gamma^n\to0$ as $n\to\infty$.
\end{enumerate}
\end{enumerate}
The above procedure has been motivated from the following observation.
The PDE-based denoising algorithms tend to have a larger numerical
dissipation near fine structures.
The tendency in turn makes the residual have structural components
on regions where dissipation is excessive.
It is more apparent when the absolute residual is smoothed and
adjusted by an average.
Such structural components in the residual can be viewed as an indicator
for nonphysical dissipation.
By adding the components to the constraint parameter $\beta$, and by
allowing the parameter to grow at pixels wherever diffusion is inordinate,
we may return fine structures in the residual back to the restored image.
The objective is to restore images whose corresponding residual
is free of structures and textures.
We will call the above procedure, \eqref{eqn_TFR1}-\eqref{eqn_TFR3},
the {\it texture-free residual} (TFR) parameterization.
In practice, one may wish to limit the parameter in a prescribed interval,
i.e., $\beta(\mathbf{x},t)\in[\beta_0,\beta_1]$.
Also one may want to update the parameter a few times only.
Note that the $\theta$-method \eqref{eqn_theta_ADI2} converges fast, in no
more than 10 iterations, for most images having reasonable noise levels.
Thus we can update the TFR parameter $\beta$ in the first few iterations,
say, four.
In the case, the scaling factor $\gamma^n$ can be selected as follows:
\begin{equation}
\gamma^n\,\|G_{Res}^{n-1}\|_{\infty}=\eta\,(\beta_1-\beta_0),
\quad \eta=\begin{cases}
0.4,& n=2,\\
0.3,& n=3,\\
0.2,& n=4,\\
0.1,& n=5.
\end{cases}
\label{eqn_gamma}
\end{equation}
\noindent{\it Remark}.
One may compute the quantity $R^{n-1}$ in a different way:
\begin{equation}
R^{n-1} = |\nabla(f-u^{n-1})|.
\label{eqn_Rn2}
\end{equation}
The above and the one in \eqref{eqn_TFR2} have made no
significant differences in practice.
\section{Numerical Experiments} \label{Numerical}
\begin{figure*}[t]
\centerline{(a) \hskip0.35\textwidth (b)}
\centerline{
\includegraphics[width=0.38\textwidth]{figures/lenna_orig}
\includegraphics[width=0.38\textwidth]{figures/lenna_MZ20}
}
\centerline{(c) \hskip0.35\textwidth (d)}
\centerline{
\includegraphics[width=0.38\textwidth]{figures/lenna_itv}
\includegraphics[width=0.38\textwidth]{figures/lenna_itvbeta}
}
\caption{Lenna: (a) The original image $g$, (b) a noisy image $f$
with a Gaussian noise (PSNR=29.6), and restored images $u$ by
using (c) ITV and (d) ITV-TFR.}
\label{fig_Lenna_MZ20_Restore}
\end{figure*}
In this section, we present some numerical examples to show effectiveness
of the non-convex model, \eqref{eqn_New_Model} with $\omega>0$,
and the TFR parameterization.
For comparison purposes, we consider four different models:
the ITV model with constant $\beta(=\beta_c)$ (ITV),
the non-convex model with constant $\beta(=\beta_c)$ (NC),
the ITV model with variable $\beta$ (ITV-TFR), and
the non-convex model with variable $\beta$ (NC-TFR).
For all experiments, images are first scaled to have values in $[0,1]$
and, after denoising, they are scaled back for the 8-bit display.
For ITV-TFR and NC-TFR, the variable $\beta$ is chosen
as in \eqref{eqn_TFR1}-\eqref{eqn_gamma}.
We set the parameters as $\varepsilon=0.05$, $\beta_c=0.6$, $\beta_0=0.5$,
$\beta_1=5.0$, and $\Delta t=1$.
For non-convex models, $\omega =0.9$.
(Exceptionally, for the last example in Figure~\ref{fig_Brain_Restore},
we set $\omega=1.5$.)
The residual is measured in the {\it peak signal-to-noise ratio} (PSNR)
defined as
$$
\mbox{PSNR}\equiv 10\log_{10}\Big(\frac{\sum_{ij}255^2}
{\sum_{ij}(g_{ij}-u_{ij})^2}\Big)\mbox{dB},
$$
where $g$ denotes the original image and $u$ is the restored image
from a noisy image of $g$.
In Figure~\ref{fig_Lenna_MZ20_Restore}, we verify effectiveness of the
TFR parameterization presented in Section~\ref{Constraint}.
The Lenna image, Figure~\ref{fig_Lenna_MZ20_Restore}(a),
is contaminated by a Gaussian noise of PSNR=29.6 as in
Figure~\ref{fig_Lenna_MZ20_Restore}(b) and restored by the
ITV and ITV-TFR models.
As one can see from Figure~\ref{fig_Lenna_MZ20_Restore}(c), the ITV
model has introduced a great deal of nonphysical dissipation which in
turn makes the restored image blurry (PSNR=33.3).
On the other hand, the ITV-TFR model has restored an image as in
Figure~\ref{fig_Lenna_MZ20_Restore}(d), which shows much less nonphysical
dissipation (PSNR=35.0).
It is apparent from the example that the TFR parameterization
is effective in preserving fine structures.
It has been observed from various numerical examples that the NC model
improves the restoration quality over the ITV model.
However, for most cases, ITV-TFR outperforms the NC model and NC-TFR is
the best.
In the following example, we will compare performances of the four models.
\begin{figure*}%[htbH]
\centerline{(a) \hskip0.35\textwidth (b)}
\centerline{
\includegraphics[width=0.38\textwidth]{figures/clock_orig}
\includegraphics[width=0.38\textwidth]{figures/clock_MZ20}
}
\centerline{(c) \hskip0.35\textwidth (d)}
\centerline{
\includegraphics[width=0.38\textwidth]{figures/clock_itv}
\includegraphics[width=0.38\textwidth]{figures/clock_non}
}
\centerline{(e) \hskip0.35\textwidth (f)}
\centerline{
\includegraphics[width=0.38\textwidth]{figures/clock_itvbeta}
\includegraphics[width=0.38\textwidth]{figures/clock_nonbeta}
}
\caption{Clock: (a) The original image, (b) a noisy image
with a Gaussian noise (PSNR=29.6), and restored images by
using (c) ITV, (d) NC, (e) ITV-TFR, and (f) NC-TFR.}
\label{fig_Clock_MZ20_Restore}
\end{figure*}
Figure~\ref{fig_Clock_MZ20_Restore} presents restored images
for the Clock image carried out by four different models.
The noisy image in Figure~\ref{fig_Clock_MZ20_Restore}(b) contains
a Gaussian noise of PSNR=29.6.
Again for this example, the ITV model introduces a large
amount of nonphysical dissipation to lose fine structures as in
Figure~\ref{fig_Clock_MZ20_Restore}(c).
One can see from Figures~\ref{fig_Clock_MZ20_Restore}(d) and
\ref{fig_Clock_MZ20_Restore}(e) that the ITV-TFR outperforms the NC
model, which has been observed for most cases.
On the other hand, the NC-TFR model produces the best restored image
as shown in Figure~\ref{fig_Clock_MZ20_Restore}(f).
It preserves fine structures of the image effectively and therefore
most details are restored satisfactorily.
\begin{figure*}%[htbH]
\centerline{
\includegraphics[width=0.30\textwidth]{figures/tree_orig}
\includegraphics[width=0.30\textwidth]{figures/milk}
\includegraphics[width=0.30\textwidth]{figures/tank}
}
\caption{Additional images utilized for a PSNR analysis: Park, Milk, and Tank.}
\label{fig_TankMilk}
\end{figure*}
In the following, we present a PSNR analysis for a more systematic
comparison of the four models.
We select the three images in Figure~\ref{fig_TankMilk}, in
addition to the ones dealt with in Figures~\ref{fig_Lenna_MZ20_Restore}
and \ref{fig_Clock_MZ20_Restore}.
\begin{table*}%[htbH]
\begin{center}
\caption{PSNR analysis.}
\label{tbl_PSNR}
\begin{tabular}{|l|lllll|} \hline
& \multicolumn{1}{c}{$f$} & \multicolumn{1}{c}{ITV}
& \multicolumn{1}{c}{NC}
& \multicolumn{1}{c}{ITV-TFR}
& \multicolumn{1}{c|}{NC-TFR} \\
\hline\hline
Lenna & 29.6 & 33.3 (3) & 34.2 (3) & 35.0 (6) & 35.7 (8) \\
& 24.7 & 32.1 (3) & 32.6 (5) & 32.8 (5) & 33.0 (4) \\ \hline
Clock & 29.6 & 32.9 (3) & 33.8 (3) & 35.3 (7) & 35.8 (8) \\
& 24.7 & 31.6 (4) & 32.1 (4) & 32.5 (5) & 32.7 (4) \\ \hline
Park & 28.5 & 28.3 (3) & 28.7 (3) & 32.1 (26) & 32.2 (26)\\
& 24.8 & 27.9 (3) & 28.5 (3) & 30.1 (17) & 30.1 (17)\\ \hline
Milk & 28.5 & 33.8 (5) & 34.4 (4) & 35.9 (20) & 36.2 (18)\\
& 22.9 & 31.9 (6) & 32.3 (5) & 32.5 (5) & 32.8 (3) \\ \hline
Tank & 28.5 & 31.5 (4) & 31.8 (4) & 33.1 (23) & 33.2 (22)\\
& 23.9 & 30.7 (5) & 30.8 (4) & 31.1 (6) & 31.1 (5) \\ \hline
\end{tabular}
\end{center}
\end{table*}
Table~\ref{tbl_PSNR} shows PSNRs of noised images $f$ and restored
images $u$ by applying the four different models.
For each image, Gaussian noise is applied in two different levels.
The integers in parentheses represent the number of CN-ADI iterations
which can restore the best image, measured in PSNR and visual verification.
As shown in the table, the NC and ITV-TFR models have restored the
images better than the ITV model; the NC-TFR model has obtained the
best PSNR values for all cases.
The TFR parameterization (ITV-TFR and NC-TFR) takes in general
more iterations to reach its best restored images.
However, this observation does not imply that the TFR parameterization
makes the denoising procedure slow down.
Instead it is related to the capability that can continue improving image
quality for a longer time.
For example, for the Park image with PSNR=28.5 in the noisy image
$f$, three iterations of the ITV-TFR and NC-TFR models
produce restored images of PSNR=29.5 and PSNR=29.8, respectively.
These intermediate images have better PSNRs (and better in visual
verification) than those of the constant parameterization.
It is safe to say that the TFR parameterization does not slow down the
restoration procedure but runs more steps to continuously improve the
image quality at higher levels.
The inclusion of the NC diffusion ($\omega>0$) improves the performance
{\it only slightly} when compared by the PSNR.
For example, PSNRs of the NC model are slightly higher than those of
the ITV model and PSNRs of the NC-TFR model are improved by only a
small amount over the ITV-TFR model.
However, the idea of the NC diffusion should not be underestimated.
In the following example, we will explore characteristics of the NC
diffusion.
\begin{figure*}%[htbH]
\centerline{(a) \hskip0.46\textwidth (b)}
\centerline{
\includegraphics[width=0.49\textwidth]{figures/Brain_orig}
\includegraphics[width=0.49\textwidth]{figures/brain_itv5}
}
\vskip1mm
\centerline{(c) \hskip0.46\textwidth (d)}
\centerline{
\includegraphics[width=0.49\textwidth]{figures/brain_itvbeta6}
\includegraphics[width=0.49\textwidth]{figures/brain_nonbeta7}
}
\caption{Human Brain: (a) The original image and denoised images
by (b) ITV, (c) ITV-TFR, and (d) NC-TFR.}
\label{fig_Brain_Restore}
\end{figure*}
%------------------------------------------------------
\begin{figure*}%[htbH]
\centerline{
\includegraphics[width=1.0\textwidth]{figures/lincut_true4cavs48}
}
\caption{Horizontal line cuts of the Brain image and its restored images
in Figure~\ref{fig_Brain_Restore}.
Each of the curves depicts the first 48 values of the 200th row
of the corresponding image.}
\label{fig_Brain_Line}
\end{figure*}
Figure~\ref{fig_Brain_Restore} shows an MRI image for a human brain
and the restored images by ITV, ITV-TFR, and NC-TFR.
(No noise is applied.)
For this real-world example, we have reset $\omega=1.5$ for NC-TFR
to perform its best.
The other parameters ($\varepsilon$, $\beta_c$, $\beta_0$, $\beta_1$, and
$\Delta t$) are kept the same as in previous examples, because they
have been verified to be {\it little} sensitive to the image content.
The ITV model has reduced the noise reasonably well, although its restored
image looks somewhat blurry as in Figure~\ref{fig_Brain_Restore}(b).
However, the ITV-TFR and NC-TFR models produce better
images as shown in Figures~\ref{fig_Brain_Restore}(c) and
\ref{fig_Brain_Restore}(d), respectively in six and seven iterations.
These TFR-parameterized models can reduce the noise effectively,
preserving fine structures satisfactorily.
In order to show details of the restoration, Figure~\ref{fig_Brain_Line}
presents horizontal line segments of the original brain image and the
restored images.
Each of the curves depicts the first 48 values of the 200th row of the
corresponding image.
The solid, dot-dashed, dot-solid, and dashed curves are respectively
for the original Brain image and three restored images by the ITV,
ITV-TFR, and NC-TFR models in Figure~\ref{fig_Brain_Restore}.
The ITV model dissipates the image in some degree, while the
TFR-parameterized models represent the original image quite accurately.
It should be noticed that the restored image by the NC-TFR model
shows edges sharper than the original image, particularly
on pixels 13--20 and 32--40.
It is clear to see that the NC-TFR model has an ability not only to
reduce the noise but also to enhance edges.
Such simultaneous image denoising and edge enhancement (without an
apparent distortion of image content) are unique characteristics of the
NC-TFR model.
\section*{Conclusions} \label{Conclusions}
PDE-based models in image restoration show a common drawback,
loss of fine structures, due to a severe nonphysical dissipation.
After presenting preliminaries, we have analyzed sources of the
nonphysical dissipation focusing on the total variation-based models.
Based on the analysis, a non-convex (NC) diffusion model has been
introduced in order to enhance edges effectively by
compensating nonphysical dissipation.
Then a non-standard numerical procedure has been suggested and analyzed
for stability.
In order to further improve the edge-preserving capability, this
article has studied a strategy for the choice of an effective constraint
parameter, called the texture-free residual (TFR) parameterization.
It has been numerically verified that (1) the TFR parameterization
improves the image quality continuously and at higher levels, (2)
the NC diffusion shows an ability to make the edges sharper than in the
original real-world images of interest, and (3) the resulting algorithm
incorporating both the NC diffusion and the TFR parameterization can
simultaneously suppress the noise and enhance edges.
\begin{thebibliography}{00}
\bibitem{AlvarezLionsMorel:92}
L.~Alvarez, P.~Lions, and M.~Morel, ``Image selective smoothing and edge
detection by nonlinear diffusion. {II},'' \emph{SIAM J. Numer. Anal.},
vol.~29, pp. 845--866, 1992.
\bibitem{AubertKornprobst:02}
G.~Aubert and P.~Kornprobst, \emph{Mathematical Problems in Image Processing},
ser. Applied Mathematics Sciences. New
York: Springer-Verlag, 2002, no. 147.
\bibitem{dn_blo}
P.~V. Blomgren, ``{Total Variation Methods for Restoration of Vector Valued
Images, (Ph.D. thesis)},'' \emph{{UCLA Dept. of Math. CAM 98-30}}, 1998.
\bibitem{dn_cb1}
P.~V. Blomgren and T.~F. Chan, ``{Color TV: Total} variation methods for
restoration of vector valued images,'' \emph{{IEEE Trans. Image Processing}},
vol.~7, no.~3, pp. 304--309, 1998.
\bibitem{CatteLionsMorelColl:92}
F.~Catte, P.~Lions, M.~Morel, and T.~Coll, ``Image selective smoothing and edge
detection by nonlinear diffusion.'' \emph{SIAM J. Numer. Anal.}, vol.~29, pp.
182--193, 1992.
\bibitem{ChaKim:MONTE}
Y.~Cha and S.~Kim, ``{MONTE}: {The} method of nonflat time evolution in
{PDE}-based image restoration,'' (submitted).
\bibitem{ChaKim:Edge_Form_Color}
------, ``Edge-forming methods for color image zooming,'' \emph{IEEE Trans.
Image Process.}, vol.~15, no.~8, pp. 2315--2323, 2006.
\bibitem{ChaKim:Edge_Form}
------, ``Edge-forming methods for image zooming,'' \emph{J. Mathematical
Imaging and Vision}, vol.~25, no.~3, pp. 353--364, 2006.
\bibitem{dn_cl}
A.~Chambolle and P.~L. Lions, ``{Image recovery via Total Variational
minimization and related problems},'' \emph{Numer. Math.}, vol.~76, pp.
167--188, 1997.
\bibitem{ChanOsherShen:TR}
T.~Chan, S.~Osher, and J.~Shen, ``The digital {TV} filter and nonlinear
denoising,'' {Department of Mathematics, University of California, Los
Angeles, CA 90095-1555}, {Technical Report} \#99-34, October 1999.
\bibitem{dn_cs2}
T.~F. Chan and J.~Shen, ``Variational restoration of non-flat image features:
{Models} and algorithms,'' \emph{{SIAM Journal of Applied Mathematics}},
vol.~61, no.~4, pp. 1338--1361, 2000.
\bibitem{DouglasGunn:64}
J.~{Douglas, Jr.} and J.~Gunn, ``A general formulation of alternating direction
methods {Part I}. {Parabolic} and hyperbolic problems,'' \emph{{Numer.
Math.}}, vol.~6, pp. 428--453, 1964.
\bibitem{DouglasKim:01ADFS}
J.~{Douglas, Jr.} and S.~Kim, ``Improved accuracy for locally one-dimensional
methods for parabolic equations,'' \emph{Mathematical Models and Methods in
Applied Sciences}, vol.~11, pp. 1563--1579, 2001.
\bibitem{GonzalezWoods:02Book}
R.~Gonzalez and R.~Woods, \emph{Digital Image Processing, {\it 2nd Ed.}}\hskip
1em plus 0.5em minus 0.4em\relax Upper Saddle River, New Jersey:
Prentice-Hall, Inc., 2002.
\bibitem{Kim:PDE_Color}
S.~Kim, ``{PDE}-based image restoration: {A} hybrid model and color image
denoising,'' \emph{IEEE Trans. Image Processing},
vol.~15, no.~5, pp. 1163--1170, 2006.
\bibitem{dn_ks}
R.~Kimmel and N.~Sochen, ``Orientation diffusion or how to comb a porcupine?''
\emph{{Special issue on PDEs in Image Processing, Computer Vision, and
Computer Graphics, J. Visual Comm. Image Representation}}, vol.~13, pp.
238--248, 2002.
\bibitem{MarquinaOsher:00}
A.~Marquina and S.~Osher, ``Explicit algorithms for a new time dependent model
based on level set motion for nonlinear deblurring and noise removal,''
\emph{SIAM J. Sci. Comput.}, vol.~22, pp. 387--405, 2000.
\bibitem{Meyer:01}
Y.~Meyer, \emph{Oscillating Patterns in Image Processing and Nonlinear
Evolution Equations}, ser. University Lecture Series.\hskip 1em plus 0.5em
minus 0.4em\relax Providence, Rhode Island: American Mathematical Society,
2001, vol.~22.
\bibitem{MitraSicuranza:01}
S.~Mitra and G.~Sicuranza, \emph{Nonlinear Image Processing}.\hskip 1em plus
0.5em minus 0.4em\relax San Diego, San Francisco, New York, Boston, London,
Sydney, Tokyo: Academic Press, 2001.
\bibitem{book_ms}
J.-M. Morel and S.~Solimini, \emph{Variational Methods in Image Segmentation},
ser. Progress in Nonlinear Differential Equations and Their
Applications.\hskip 1em plus 0.5em minus 0.4em\relax Birkh\"auser, Boston,
1995, vol.~14.
\bibitem{NitzbergShiota:92}
M.~Nitzberg and T.~Shiota, ``Nonlinear image filtering with edge and corner
enhancement,'' \emph{IEEE Trans. on Pattern Anal. Mach. Intell.}, vol.~14,
pp. 826--833, 1992.
\bibitem{OsherETAL:04TV}
S.~Osher, M.~Burger, D.~Goldfarb, J.~Xu, and W.~Yin, ``Using geometry and
iterated refinement for inverse problems (1): {Total} variation based image
restoration,'' Department of Mathematics, UCLA, LA, CA 90095, {CAM Report}
\#04-13, 2004.
\bibitem{OsherFedkiw:Book03}
S.~Osher and R.~Fedkiw, \emph{Level Set Methods and Dynamic Implicit
Surfaces}.\hskip 1em plus 0.5em minus 0.4em\relax New York: Springer-Verlag,
2003.
\bibitem{PeronaMalik:90}
P.~Perona and J.~Malik, ``Scale-space and edge detection using anisotropic
diffusion,'' \emph{IEEE Trans. on Pattern Anal. Mach. Intell.}, vol.~12, pp.
629--639, 1990.
\bibitem{RudinOsherFatemi:92}
L.~Rudin, S.~Osher, and E.~Fatemi, ``Nonlinear total variation based noise
removal algorithms,'' \emph{Physica D}, vol.~60, pp. 259--268, 1992.
\bibitem{Sapiro:01Book}
G.~Sapiro, \emph{Geometric partial differential equations and image
analysis}.\hskip 1em plus 0.5em minus 0.4em\relax Cambridge: Cambridge
University Press, 2001.
\bibitem{dn_skm}
N.~Sochen, R.~Kimmel, and R.~Malladi, ``A general framework for low level
vision,'' \emph{{IEEE Trans. Image Processing}}, vol.~7, no.~3, pp. 310--318,
1998.
\bibitem{Weinstock:74}
R.~Weinstock, \emph{Calculus of Variations}.\hskip 1em plus 0.5em minus
0.4em\relax New York: Dover Publications, Inc., 1974.
\bibitem{YouETAL:96}
Y.~You, W.~Xu, A.~Tannenbaum, and M.~Kaveh, ``Behavioral analysis of
anisotropic diffusion in image processing,'' \emph{IEEE Trans. Image
Process.}, vol.~5, pp. 1539--1553, 1996.
\end{thebibliography}
\end{document}