\documentclass[reqno]{amsart}
\usepackage{hyperref}
\usepackage{graphicx}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2013 (2013), No. 133, pp. 1--11.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu}
\thanks{\copyright 2013 Texas State University - San Marcos.}
\vspace{9mm}}

\begin{document}
\title[\hfilneg EJDE-2013/133\hfil Numerical approach]
{Numerical approach for a relaxed  minimization problem
 arising in tomographic reconstruction}

\author[A. Srour,  M. Jradeh \hfil EJDE-2013/133\hfilneg]
{Ali Srour, Mouhammad Jradeh}  % in alphabetical order

\address{Ali Srour \newline
Department of Mathematics, Lebanese University,
Hawch Al-Oumara, Zahleh, Lebanon}
\email{ali.srour.lb@gmail.com}

\address{Mouhammad Jradeh \newline
 Department of Mathematics, Lebanese University,
Hawch Al-Oumara, Zahleh, Lebanon}
\email{mouhamad.jradeh@gmail.com}


\thanks{Submitted April 28, 2012. Published May 29, 2013.}
\subjclass[2000]{65K05, 65K15, 65K10}
\keywords{Tomographic reconstruction; variational method;
relaxation;  \hfill\break\indent Lagrange  multipliers;
mathematical programming;  Lagrangian method}

\begin{abstract}
 The purpose of this article is to develop a numerical scheme
 for a system of optimality conditions for a smooth minimization
 problem that arises in tomographic reconstruction of binary axially
 symmetric objects. The density function of the object with the
 Lagrange multipliers is seen as a saddle point of an associated Lagrangian,
 then we use the Uzawa scheme mixed with the descent gradient method to give
 the corresponding numerical scheme.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{remark}[theorem]{Remark}
\allowdisplaybreaks

\section{Introduction}

Applied mathematics nowadays dedicates great attention to
tomographic reconstruction, especially
through the most prominent strategy, known as the variational method.
In this article, we consider the  problem
\begin{equation} \label{eP}
\begin{gathered}
\text{minimize } F_o(u):= \frac{1}{2}\|Hu-g\|^2_{L^2(\Omega)}+\lambda J(u) , \\
u\in \mathcal{D},
\end{gathered}
\end{equation}
where
$$
\mathcal{D}=\{u\in BV(\Omega): u(u-1)=0\text{ a.e. in } \Omega \},
$$
and $BV(\Omega)$ denotes the space of the  functions of bounded variation
space  defined by
$$
BV(\Omega)=\{ u\in L^1(\Omega): J(u)<\infty\},
$$
with
$$
J(u)=\sup\Big\{\int_{\Omega}u(x)\operatorname{div} (\phi(x))dx:
\phi \in C^1_c(\Omega,\mathbb{R}^2),\|\phi\|_{\infty}\leq 1\Big\}.
$$
Here $C^1_c(\Omega,\mathbb{R}^2)$ denotes the space of the $C^1$
functions with compact support in $\Omega$ with value in $\mathbb{R}^2$.
In the following,  we shall denote $\|\cdot\| $ the $L^2(\Omega)$-norm.
In the same way, $\left(\cdot,\cdot\right)_2  $
denotes the $L^2(\Omega)$- scalar product, $(\cdot,\cdot)_{H^1 }$ denotes
 the $H^1(\Omega)$- scalar product and
$\langle \cdot , \cdot\rangle_{V',V}$,
the duality product between $V'$ and $V$,
where $V$ is a Banach space and $V'$ is the dual space of $V$.
$H$ is a projection operator which is detailed in the next section,
and $g$ is the density of the observed image.

 Problem \eqref{eP} is a non-regular minimization problem which has been
obtained by Abraham et al \cite{IR02}, in order to reconstruct the density
of a binary axially symmetric object by using the variational method.
The  problem \eqref{eP} has two main difficulties, the first one is the
lack of differentiability and the second is the non-convexity constraint.
 Considering these difficulties, we could not apply the classical
optimization methods to prove the optimality system condition associated
to \eqref{eP}.

In \cite{MA}, the first author and Bergounioux have relaxed the binary
constraint and regularized \eqref{eP}
to obtain a smooth minimization problem \eqref{ePalpha}  without a convexity
constraint and then prove the optimality system associated to \eqref{ePalpha}.
In this article, we will prove that through these modifications, we obtain
an appropriate numerical result for the reconstruction of our symmetrical object.

The outline of this article is as follows: in the second section,
we introduce and analyze the tomographic model; the setting of the problem is
stated in Section 3. In the fourth section, we use a Lagrangian method based
on the Uzawa method and the descent gradient method to develop a stable
numerical scheme. The fifth section is dedicated to presenting our numerical
scheme. Finally, we discuss it by using some numerical tests.

\section{Our model of tomographic reconstruction}

In \cite{IR02}, the authors  tried to use the tomographic technology
to reconstruct the volume of a three-dimensional-axially-symmetrical object
from a two-dimensional projection; that is, if an object is exposed to a beam
of electrons, every point of this object is characterized by its attenuation
coefficient.
The tomographic reconstruction method consists of gathering these
2D projections of an X-ray radiography through
an object and using these projections to rebuild the structure of this
object in 3D (Figure \ref{source}).

\begin{figure}
\begin{center}
\includegraphics[width=0.9\textwidth]{fig1} % source1.ps
\end{center}
\caption{Experimental step \cite{IR02}}
\label{source}
\end{figure}


Frequently, by using a sufficient number of projections, we can build the
density of the object
(see for instance \cite{H98}), however in \cite{IR02} the authors used a
single X-ray to rebuild the density of a binary
axially-symmetrical object composed of one homogeneous material
(drawn in black), and of some holes (in white) (see Figure \ref{cp}(a)).
With this hypothesis, it is sufficient to consider a plane section, and
after making a rotation around the axle of symmetry,
we obtained the 3D object.

Our work example, shown in Figure \ref{cp}(a) represents a symmetric object
containing all standard difficulties that may appear, such as:
\begin{itemize}
\item Several disconnected holes.

\item  Small holes located on the symmetry axis (where details are expected
 to be difficult to recover because the noise variance is maximal around
 the symmetric axis after reconstruction).

\item  Smaller details on the boundary of the top holes serve as a test
 for lower bound detection.
\end{itemize}

Assuming that $u$ is the density of the initial object,  and $g$ is the density
of the observed image. The problem of reconstruction consists of finding $u$
such that $Hu=g$ where $H$ is the projection operator from $L^2(\Omega)$
to $L^2(\Omega)$ given for a symmetric axial object by
\begin{equation}
 Hu(y,z) = \int^{+\infty}_{|y|}u(r,z)\frac{r}{\sqrt{r^2-y^2}}dr.
\end{equation}

Here, $\Omega$ is a regular bounded open set in $\mathbb{R}^2$ which represents
the domain of the image ($\Omega=[0,a]\times(-a,a)$, $a>0$).

In \cite[Lemma 1]{IR02}, the authors proved that $H$ is a continuous operator
from $L^2(\Omega)$ to $L^2(\Omega)$. Now,  to find $u$, we will use the
inverse operator $H^{-1}$ given in \cite{IR02} by:
\begin{equation}\label{1ohi}
u(r,z)=H^{-1}g(r,z)= -\frac{1}{\pi}\int^{a}_{r}\frac{\frac{\partial g}{\partial y}(y,z)}{\sqrt{y^2-r^2}}dy,
\end{equation}
for all $(r,z) \in \Omega$.
 Since the operator $H^{-1}$ contains a derivative term,
it cannot be extended as a continuous linear operator from $L^{p}(\Omega)$
to $L^{q}(\Omega)$. Then, applying $H^{-1}$ provides  a deficient and
imperfect reconstruction of the original image; see Figure \ref{cp}.

\begin{figure}
\begin{center}
\includegraphics[width=0.3\textwidth]{fig2a} \quad % image1.ps
\includegraphics[width=0.3\textwidth]{fig2b} \quad % a.ps
\includegraphics[width=0.3\textwidth]{fig2c} \\ % recp1.ps
(a) the real object $u$ \hfil
(b) the projection $g$ \hfil
(c) Reconstruction $H^{-1}(g)$
\end{center}
\caption{Instability of reconstruction by using $H^{-1}$}
\label{cp}
\end{figure}

Because of the experiment step, there is  an  additional  noise perturbation
to the given image $g$. This perturbation is assumed to be an addition of
 Gaussian white noise, denoted $\tau$, of zero mean, and of standard
deviation $\sigma_\tau$. Other perturbations, such as blur due to
 the detector response, and X-ray source spot size, or blur motion,
are not taken into account  in our study. With these assumptions,
the projection of an object $u$ is:
$$
g=Hu+\tau.
$$

 \section{Variational method: The relaxed problem}

To avoid  the instability of the inverse projection method, we will use
the variational method technique of finding the density
 $u$ of the initial object; it consists in finding $u$ as a minimum
of problem \eqref{eP}.


 Aubert and Kornprobst \cite{GK}, Acar and Vogel et al. \cite{VA,VO}
studied closely related problems. In our model, we face two supplementary difficulties,
the first one comes from the fact that, the domain $\mathcal{D}$ is not convex,
and its interior is empty for most usual topologies.
 The second difficulty is that, the total variation $J(u)$ of a function $u$
in $BV(\Omega)$ is not Fréchet differentiable. According to these difficulties,
we cannot apply the classical theory of optimization to obtain an optimality
system of \eqref{eP}.

To study numerically this problem,  Bergounioux and the first author have
modified problem \eqref{eP} through  numerically-accepted modifications \cite{MA}.
 Firstly,  to preserve the differentiability of $J$, we replace the total
variation $J(u)$  by the $L^2(\Omega)$-norm of the gradient of
$u\in H^1(\Omega)$ and considering the $H^1(\Omega)$ as an underlying space
instead of $BV(\Omega)$. For the second difficulty,
we relaxed the domain $\mathcal{D}$ by considering the domain
 \begin{equation}
 \label{Dalpha}
 \mathcal{D}_{\alpha}:=\{ u \in H^1(\Omega):
0\le u \le 1 ,\, (u,1-u)_2 \le \alpha \},
 \end{equation}
where $\alpha>0$, this relaxation of the binary constraint is motivated
and justified numerically, indeed, it is not possible to ensure $(u,1-u)_{2}=0$
during computation but rather $(u,1-u)_{2}\leq \alpha$ where  $\alpha$ may be
 chosen small as wanted.

 After these modifications, we consider the smooth relaxed problem
\begin{equation} \label{ePalpha}
\begin{gathered}
 \text{minimize }  F(u):={\frac{1}{2}\|Hu-g\|^2 +\frac{\lambda}{2}
\|\nabla u\|^2 }, \\
 u\in \mathcal{D}_{\alpha}.
 \end{gathered}
\end{equation}
In \cite{MA}, we proved that the problem \eqref{ePalpha} has at least
 one solution in $H^1(\Omega)$, however since the constraint
$\mathcal{D}_{\alpha}$ is also  not convex, it is not possible to find
the ``admissible'' directions to compute derivatives, then we use
general mathematical programming problems results and optimal control
in Banach spaces (Zowe and Kurcyusz \cite{ZK}, and Tröltzsch \cite{tr1,tr2})
to derive optimality conditions for \eqref{ePalpha}.

 To apply the method of mathematical programming to \eqref{ePalpha},
we introduce a virtual control variable, we can write  problem \eqref{ePalpha} as
\begin{equation} \label{ePalpha1}
 \begin{gathered}
 \text{minimize }  F(u) \\
 (u,v)\in \mathcal{C}_{\alpha},
 \end{gathered}
\end{equation}
where $(u_\alpha,v_\alpha=1-u_{\alpha})$ denotes the solution of
\eqref{ePalpha} in $H^1(\Omega)\times L^2(\Omega)$.
 Using the mathematical programming method and after a step of penalization,
we  derive the following optimality conditions.

 \begin{theorem} \label{thm3.1}
Assume  $ u_\alpha $ is a solution to \eqref{ePalpha} and
$ v_{\alpha}=1-u_\alpha$. There exists a Lagrange multiplier
$(q_\alpha,r_\alpha)\in \mathcal{M}(\Omega)\times\mathbb{R}^{+}$   such that
 for all $u\in H^1(\Omega)\cap L^{\infty}(\Omega)$ with $u\geq 0$,
\begin{gather}  \label{so1}
 \big( H^*(Hu_\alpha-g)+ r_\alpha v_\alpha, u-u_\alpha\big)_2
+\lambda\big( \nabla u_\alpha,\nabla (u-u_\alpha) \big)_2
+ \langle q_\alpha, u-u_\alpha\rangle_{\mathcal{M}, L^\infty}  \geq 0,\\
 \label{so2}
\langle q_\alpha, v-v_\alpha\rangle_{\mathcal{M}, L^\infty}  + r_\alpha
 ( u_\alpha, v-v_\alpha)_2 \geq 0,  \quad
\forall v\in \mathcal{V}_{\rm ad}\cap L^{\infty}(\Omega),
\\
 r_\alpha[( u_\alpha,v_\alpha)_2 -\alpha]=0,
 \end{gather}
where
 $$
\mathcal{V}_{\rm ad}=\{v\in H^1(\Omega) :  v\geq 0\; \text{a.e.}\}.
$$
\end{theorem}

Now, we will use the above optimality system to establish a numerical
scheme for \eqref{ePalpha} by considering the couple
(solution, Lagrangian multipliers) as a saddle-point of a Lagrangian
function associated to \eqref{ePalpha}.

 \section{Saddle-point formulation}

 Let $L^{\alpha}$ be the Lagrangian function associated with
\eqref{ePalpha}, defined on
$H^1(\Omega)\times L^2(\Omega)\times \mathcal{M}(\Omega)\times \mathbb{R}$ by
 $$
L^{\alpha}(u,v,q,r)=\frac{1}{2}\|Hu-g\|^2+\frac{\lambda}{2}
\|\nabla u\|^2+\langle q ,u+v-1\rangle_{\mathcal{M},L^{\infty}}
+r[(u,v)_{2}-\alpha].
$$
For the rest of this article, we denote
$$
F(u)=\frac{1}{2}\|Hu-g\|^2+\frac{\lambda}{2}\|\nabla u\|^2.
$$

 \begin{theorem} \label{sd}
Let $(u_\alpha,v_\alpha)$ be a solution of \eqref{ePalpha}, then
$(u_\alpha,v_\alpha,q_\alpha,r_\alpha)$ satisfies
 \begin{equation}\label{s1}
 L^{\alpha}(u_\alpha,v_\alpha,q_\alpha,r_\alpha)
\geq L^{\alpha}(u_\alpha,v_\alpha,q,r)\quad
\forall (q,r)\in \mathcal{M}\times\mathbb{R}^{+}.
 \end{equation}
and
 \begin{equation}\label{s2}
 \nabla_{u,v}L^{\alpha}(u_\alpha,v_\alpha,q_\alpha,r_\alpha)(u-u_\alpha,
v-v_\alpha)\geq 0,
\end{equation}
for all $(u,v)\in \mathcal{U}_{\rm ad}\cap L^{\infty}\times \mathcal{V}_{\rm ad}
\cap L^{\infty}$.
 \end{theorem}

 \begin{proof}
The first assertion comes from the fact that for all
$(q,r)\in \mathcal{M}\times \mathbb{R}^{+}$,
 $$
L^{\alpha}(u_\alpha,v_\alpha,q,r)=F(u_\alpha)+r[(u_\alpha,v_\alpha)_2-\alpha]
\leq F(u_\alpha),
$$
 and
 $$
F(u_\alpha)=L^{\alpha}(u_\alpha,v_\alpha,q_\alpha,r_\alpha).
$$
Moreover, adding \eqref{so1} and \eqref{so2}, we obtain exactly the second
part of the theorem.
 \end{proof}

 We denote
 $$
\mathcal{A}=\mathcal{U}_{\rm ad}\cap L^{\infty}\times \mathcal{V}_{\rm ad}
\cap L^{\infty}\times \mathcal{M}\times \mathbb{R}^{+}.
$$
 Because of the bilinear term $(u,v)_2-\alpha$, the Lagrangian $L^{\alpha}$
is not convex and Theorem \ref{sd} is not sufficient to ensure the
existence of a saddle point of $L^{\alpha}$. But, it is easy to see that
this theorem is still valid if we replace $L^{\alpha}$ by the linearized
Lagrangian function $\mathcal{L}^{\alpha}$,
 $$
\mathcal{L}^{\alpha}(u,v,q,r)=F(u)+\langle q,u+v-1\rangle_{\mathcal{M},L^{\infty}}
+r[(u,v_\alpha)_2+(u_\alpha,v)_2-2\alpha].
$$
 More precisely, we have the following statement.

 \begin{theorem} \label{thm4.2}
The couple $(u_\alpha,v_\alpha, q_\alpha,r_\alpha)$
(solution, Lagrange multiplier) is a saddle point of the linearized
Lagrangian function $\mathcal{L}^{\alpha}$ on $\mathcal{A}$:
 $$
\mathcal{L}^{\alpha}(u_\alpha,v_\alpha,q,r)
\leq\mathcal{L}^{\alpha}(u_\alpha,v_\alpha,q_\alpha,r_\alpha)
\leq\mathcal{L}^{\alpha}(u,v,q_\alpha,r_\alpha)\quad\text{for all }
 (u,v,q,r)\in \mathcal{A}.
$$
\end{theorem}

 \begin{proof}
We get first the left hand part of the above inequality since for any
$(q,r)\in \mathcal{M}\times\mathbb{R}^{+}$,
 $$
 \mathcal{L}^{\alpha}(u_\alpha,v_\alpha,q,r)
=F(u_\alpha)+2r[(u_\alpha,v_\alpha)_2-\alpha]
\leq F(u_\alpha)=\mathcal{L}^{\alpha}(u_\alpha,v_\alpha,q_\alpha,r_\alpha).
$$
 The right hand part comes from
 $$
\nabla_{u,v} \mathcal{L}^{\alpha}(u_\alpha,v_\alpha,q_\alpha,r_\alpha)
=\nabla_{u,v} L^{\alpha}(u_\alpha,v_\alpha,q_\alpha,r_\alpha),
$$
and the convexity of $\mathcal{L}^{\alpha}$.
\end{proof}

 In the case where the bilinear constraint is inactive,
$(u_\alpha,v_\alpha)_2<\alpha$, we obtain $r_\alpha=0$. In this case, it
is easy to see that
$\nabla_{u,v} \mathcal{L}^{\alpha}(u_\alpha,v_\alpha,q_\alpha,r_\alpha)$
is then equal to $\nabla_{u,v} L^{\alpha}(u_\alpha,v_\alpha,q_\alpha,r_\alpha)$
and the above theorem yields
$$
L^{\alpha}(u_\alpha,v_\alpha,q_\alpha,r_\alpha)
\leq L^{\alpha}(u,v,q_\alpha,r_\alpha),
$$
and then $(u_\alpha,v_\alpha,q_\alpha,r_\alpha)$ is a saddle point of
$L^{\alpha} $ on $\mathcal{A}$.

 \begin{remark} \label{rmk4.1} \rm
As in our initial problem \eqref{ePalpha}, we look for the couple
($u_\alpha$,$v_\alpha$) such that $u_\alpha\cdot v_\alpha=u_\alpha(1-u_\alpha)$
 is close to $0$, it will be interesting to study numerically the case
when the constraint $(u,v)_{2}$ is inactive, i.e.,
 $$
(u,v)_{2}<\alpha.
$$
 \end{remark}

 \section{Numerical scheme and discretization}

 The basic method to compute a saddle point is the Uzawa algorithm
and the descent gradient method \cite{GA}. In this method, we look
for the couple (solution, Lagrangian multiplier) in the following way.

 \subsection*{Algorithm ($\mathcal{A}$)}
\begin{itemize}
 \item[Step $0$] Initialization. Set $n=0$, choose
$u^{0}\in \mathcal{U}_{\rm ad},q^{0}\in L^2(\Omega), v^{-1}
\in \mathcal{V}_{\rm ad}$.

\item[Step $1$]  Compute:
\begin{gather*}
 u^{n}=\text{argmin } L^{\alpha}(u,v^{n-1},q^{n},r^{n})  \text{ on }
 \mathcal{U}_{\rm ad},
\\
 v^{n}=\text{argmin } L^{\alpha}(u^{n}, v,q^{n}, r^{n}) \text{ on }
 \mathcal{V}_{\rm ad}.
\end{gather*}

 \item[Step $2$] Compute $q^{n+1}$:
 $$
q^{n+1}=q^{n}+ \rho_{1}(u^{n}+v^{n}-1), \quad\rho_{1}\geq 0.
$$

 \item[Step $3$] Compute $r^{n+1}$:
 $$
r^{n+1}=r^{n}+\rho_{2} [(u^n,v^n)_{2}-\alpha]_{+},\quad \rho_{2}\geq 0.
$$

\item[Step $4$] Criterion of stopping: Stop the process as soon as
$\|u^{n+1}-u^{n}\|_{\infty}<{\rm tol}$
 and $\| q^{n+1}-q^{n}\|_{\infty}<{\rm tol}$,
where tol is a positive real number.
\end{itemize}

 To calculate the first step, we use the gradient descent method:
 we look for $u^n$ and $v^n$ as follows
\begin{gather*}
u^{n+1}=[u^{n}-\mu_n\nabla_{u}L^{\alpha}(u^n,v^n,r^n,q^n)]_{+},
\\
v^{n+1}=[v^{n}-\delta_n\nabla_{v}L^{\alpha}(u^n,v^n,r^n,q^n)]_{+},
\end{gather*}
 where $\mu_n$ and $\delta_{n}$ are two positive real numbers.

 In addition, as the study of a measure is too hard from a numerical point
of view, we consider in step 2, the term $ q_{\alpha}$ as a function
in $L^2(\Omega)$.  With  gradient descent method, the numerical scheme
becomes
 \begin{equation}\label{sh}
\begin{gathered}
 u^{n+1}=\big[u^{n}-\mu_{n}\big(H^{\star}(Hu^{n}-g)
-\lambda\Delta u^{n}+ q^{n}+r^{n} v^{n-1}\big)\big]_{+},
\\
v^{n}=\big[v^{n-1}-\delta_{n}(q^{n}+r^{n} u^{n})\big]_{+},\\
q^{n+1}=q^{n}+\rho_{1}(u^{n}+v^{n}-1),\\
r^{n+1}=r^{n}+\rho_{2}[(u^{n},v^{n})_{2}-\alpha]_{+},
\end{gathered}
\end{equation}
where  $\mu_{n}$, $\delta_{n}$, $\rho_{1}$, $\rho_{2}$ are positive real numbers.
 The discretization process is the  standard Finite Difference Method.
For the projection operator $H$, we use its explicit formula to find
the matrix associated to this operator as claimed in the following theorem.

 \begin{theorem} \label{thm5.1}
The matrix associated to the projection operator is given by
 \begin{equation}\label{H}
 h_{i,j}=\frac{2}{N}\big(\sqrt{j^2-(i-1)^2}-\sqrt{(j-1)^2-(i-1)^2}\big)
\quad \text{for } j\geq i
\end{equation}
and
\begin{equation}
 h_{i,j}=0 \quad\text{for }  j<i.
\end{equation}
\end{theorem}

\begin{proof}
For every function $u\in L^{\infty}(\Omega)$,  $\Omega =(-a,+a)\times(-a,+a)$
where $a$ is a positive real,  the projection operator $H$ is given by
 $$
Hu(y,z)=2\int_{|y|}^{a} u(r,z)\frac{r}{\sqrt{r^2-y^2}}dr.
$$
 In a discrete space, we suppose that for all $j=1:N$, and for all
$i=1:N$, $u(.,y_{j})$ is constant in the interval  $[ih,(i+1)h]$, then
 \begin{align*}
  Hu(ih,jh)
&=2\int_{ih}^{Nh}u(r,hj)\frac{r}{\sqrt{r^2-(ih)^2}}dr\\
&=2\sum_{k=i}^{N-1}\int_{kh}^{(k+1)h}u_{k,j}
 \frac{r}{\sqrt{r^2-(ih)^2}}dr \\
&\quad\text{(where $u_{k,j}$ is  the value  of $u$ on $[kh,(k+1)h]$)}\\
&=2\sum_{k=i}^{N-1}u_{k,j}\int_{kh}^{(k+1)h}
 \frac{r}{\sqrt{r^2-(ih)^2}}dr\\
&=2\sum_{k=i}^{N-1}u_{k,j}\big[\sqrt{r^2-(ih)^2}\big]^{(k+1)h}_{kh}\\
&=  2\sum_{k=i}^{N-1}u_{k,j}\big[\sqrt{(k+1)^2h^2-(ih)^2}
-\sqrt{(kh)^2-(ih)^2}\big]\\
& =2\sum_{k=i}^{N-1}u_{k,j}h\big[\sqrt{(k+1)^2-i^2}-\sqrt{k^2-i^2}\big].
 \end{align*}
By taking $h=1/N$ and using the previous formula, we conclude the proof.
\end{proof}


The Laplacian term can be discretized as
\begin{equation}\label{delta}
 \Delta u^{n}(x_{i}, y_{j})
=\frac{ u^{n}_{i+1,j}+u^{n}_{i-1,j}-2u^{n}_{i,j}}{h^2}
+\frac{ u^{n}_{i,j+1}+u^{n}_{i,j-1}-2u^{n}_{i,j}}{h^2}.
 \end{equation}
Replacing \eqref{delta} in \eqref{sh}, we obtain
\begin{gather*}
  \begin{aligned}
 u^{n+1}_{i,j}
&=\Big[ u^{n}_{i,j}-\mu_{n}\Big(H^{\star}Hu^{n}_{i,j}- H^{\star} g_{i,j}
- \lambda \Big(\frac{ u^{n}_{i+1,j}+u^{n}_{i-1,j}-2u^{n}_{i,j}}{h^2}\\
&\quad +\frac{ u^{n}_{i,j+1}+u^{n}_{i,j-1}-2u^{n}_{i,j}}{h^2}\Big)
 +q^{n}_{i,j}+r^{n} v^{n}_{i,j}\Big)\Big]_{+},
 \end{aligned}\\
v^{n}_{i,j}= [v^{n-1}_{i,j}-\delta_{n}(q^{n}_{i,j}+r^{n}u^{n}_{i,j})]_{+},\\
q^{n+1}_{i,j}= q^{n}_{i,j}+\rho_{1}(u^{n}_{i,j}+v^{n}_{i,j}-1),\\
r^{n+1}=r^{n}+\rho_{2} [(u^n,v^n)_{2,i,j}-\alpha]_{+},
\end{gather*}
 where   $H^{\star}Hu^{n}_{i,j}$ is the discretization of
$H^{\star}Hu^{n}$ and $H^{\star} g_{i,j}$ is that of $H^{\star} g$.


 The discretized image is represented by an $N\times N$ array identified
with an $N^2$ vector. Due to the symmetry, it suffices to deal with a half
image of size $N\times N/2$. In our study, the projected image (observed data)
is perturbed with a Gaussian noise $\tau$ with standard deviation
$\sigma_{\tau}$,
 $$
\tau(x)=\frac{1}{\sqrt{2\pi}\sigma_{\tau}}
e^{-\frac{|x|^2}{2\sigma^2_{\tau}}}.
$$
 With this perturbation, the observed image is given by:
 \begin{equation}\label{ii}
  g= Hu_{\rm orig}+  \tau.
  \end{equation}


\begin{figure}[ht]
\begin{center}
\includegraphics[width=0.3\textwidth]{fig2a} \quad %image1.ps %fig3
\includegraphics[width=0.3\textwidth]{fig3b} \quad %initialef.ps
\includegraphics[width=0.3\textwidth]{fig3c} \\ % 1000.eps
(a) real object \hfil  (b) observed image $g$ \hfil (c) $\lambda=10^{-3}$
\\
\includegraphics[width=0.3\textwidth]{fig3d} \quad % 900.eps
\includegraphics[width=0.3\textwidth]{fig3e} \quad % 500.eps
\includegraphics[width=0.3\textwidth]{fig3f} \\ % 400.eps
(d) $\lambda=\frac{1}{900}$ \hfil (e) $\lambda=2\times 10^{-3}$ \hfil (f) $\lambda=2.5\times 10^{-3}$
 \end{center}
 \caption{Reconstruction for some small values of $\lambda$ with
$\alpha=10^{-2}$ and tol$=0.001$}
\label{lambda-small}
\end{figure}


\begin{figure}[ht]
\begin{center}
\includegraphics[width=0.3\textwidth]{fig2a} \quad % image1.ps fig4
\includegraphics[width=0.3\textwidth]{fig3b} \quad % initialef.ps
\includegraphics[width=0.3\textwidth]{fig4c} \\ % 1.eps
(a) real object \hfil (b) observed image $g$ \hfil (c) $\lambda=1$
\\
\includegraphics[width=0.3\textwidth]{fig4d} \quad % 5.eps
\includegraphics[width=0.3\textwidth]{fig4e} \quad % 10.eps
\includegraphics[width=0.3\textwidth]{fig4f} \\ % 15.eps
(d) $\lambda=0.2$ \hfil (e) $\lambda=0.1$ \hfil (f) $\lambda=0.066$
\\
\includegraphics[width=0.3\textwidth]{fig4g} \quad % 100.eps
\includegraphics[width=0.3\textwidth]{fig4h} \quad % 50.eps
\includegraphics[width=0.3\textwidth]{fig4i} \\ % 75.eps}\\
(g) $\lambda=0.01$ \hfil (h) $\lambda=0.02$ \hfil (i) $\lambda=0.0133$
 \end{center}
 \caption{Reconstruction for different values of $\lambda$
 with $\alpha=10^{-2}$ and tol$=0.001$}
\label{lambda-big}
 \end{figure}

\begin{figure}[ht]
\begin{center}
\includegraphics[width=0.3\textwidth]{fig2a} \quad % image1.ps fig5
\includegraphics[width=0.3\textwidth]{fig3b} \\ % initialef.ps
(a) real object\hfil (b) observed image $g$
\\
\includegraphics[width=0.3\textwidth]{fig5c} \quad % lambda10alfa100.eps
\includegraphics[width=0.3\textwidth]{fig5d} \quad % lambda10alfa500.eps
\includegraphics[width=0.3\textwidth]{fig5e} \\ % lambda10alfa1000.eps 
(c) $\alpha=0.01$ \hfil (d) $\alpha=0.002$ \hfil (e) $\alpha=0.001$
\end{center}
\caption{Reconstruction for different values of $\alpha$ and with $\lambda=0.1$}
\label{alpha-moving}
\end{figure}

 \section{Interpretation  and sensitivity of numerical results}

The algorithm ($\mathcal{A}$) depends on two parameters  $\lambda$ and $\alpha$.
Next we show that there is a strong sensitivity
of the results with respect to $\lambda$ and a weak one with respect to $\alpha$.


\subsection{Sensitivity with respect to $\lambda$}
The aim of the constant $\lambda$ in front of the
regularization term $\|\nabla u\|^2$ is to increase or decrease the smoothness
of the function $u$. More precisely, if $\lambda$ is too
small,  we recover some information on the original image,
but we do not succeed in abolishing efficiently the Gaussian noise
with the standard deviation $\sigma_{\tau}$. These theoretical
predictions are confirmed numerically as shown in Figure \ref{lambda-small}.

However, when  $\lambda$ is large, the solution becomes regular and this
may imply an important loss of information (the image of the mouth
for example turns to an arc of circle) (see Figure \ref{lambda-big}).



So the best $\lambda$ should be not too small, in order to ``erase'' the
noise, and not too large, to keep all the important parts of the original
image.


\subsection{Sensitivity with respect to $\alpha$}
The algorithm ($\mathcal{A}$) has a low sensitivity with respect to the
 parameter $\alpha$; this weak dependence is due to the fact that
the integral $(u,v)_{2}$ converges rapidly to zero, and consequently,
the inactivity assumption $(u,v)_{2}<\alpha$ is always satisfied after a
small number of iterations. Figure \ref{alpha-moving} confirms this claim.



\begin{thebibliography}{10}

\bibitem{IR02}
I.~Abraham, R.~Abraham, and M.~Bergounioux.
\newblock An active curve approach for tomographic reconstruction of binary
  radially symmetric objects.
\newblock {\em Numerical Mathematics and advanced Application, Kinisch~ K, of
  G, Steinbach, O(Ed), pp 663-670,2008 Springe.}

\bibitem{VA}
R.~Acar and C.~R. Vogel.
\newblock Analysis of bounded variation penalty methods for ill-posed problems.
\newblock {\em Inverse Problems}, 10(6):1217--1229, 1994.

\bibitem{GA}
G.~Allaire.
\newblock Analyse numérique et optimisation.
\newblock {\em Les éditions de l'école polytechnique}, 2005.

\bibitem{GK}
G.~Aubert and P.~Kornprobst.
\newblock {\em Mathematical problems in image processing}, volume 147 of {\em
  Applied Mathematical Sciences}.
\newblock Springer, New York, second edition, 2006.
\newblock Partial differential equations and the calculus of variations,.

\bibitem{MA}
M.~Bergounioux and A.~Srour.
\newblock A relaxation method for smooth tomographic reconstruction of binary
  axially symmetric objects.
\newblock {\em Pacific Journal of Optimisation}.

\bibitem{H98}
Gabor~T. Herman.
\newblock {\em Image reconstruction from projections}.
\newblock Academic Press Inc. [Harcourt Brace Jovanovich Publishers], New York,
  1980.
\newblock The fundamentals of computerized tomography, Computer Science and
  Applied Mathematics.

\bibitem{ZK}
J.~Zowe~S. Kurcyusz.
\newblock Regularity and stability for the mathematical programming problem in
  banach spaces.
\newblock {\em Applied mathematics and Optimization}, 42, pp.49-62(5), 1979.

\bibitem{tr2}
F.~Tr\"oltzsch.
\newblock A modification of the zowe and kurcyusz regularity condition with
  application to the optimal control of noether operator equations with
  constraints on the control and the state.
\newblock {\em Math. Operationforsch. Statist., Ser. Optimization}, 14,.

\bibitem{tr1}
F.~Tr\"oltzsch.
\newblock Optimality conditions for parabolic control problems and
  applications.
\newblock {\em Teubner Texte, Leipzig,}, (1984).

\bibitem{VO}
C.~R. Vogel and M.~E. Oman.
\newblock Iterative methods for total variation denoising.
\newblock {\em SIAM J. Sci. Comput.}, 17(1):227--238, 1996.
\newblock Special issue on iterative methods in numerical linear algebra
  (Breckenridge, CO, 1994).

\end{thebibliography}


\end{document} 
