\documentclass[reqno]{amsart}
\usepackage{graphicx,subfigure}
\usepackage{hyperref}
\AtBeginDocument{{\noindent\small
Sixth Mississippi State Conference on Differential Equations and
Computational Simulations,
{\em Electronic Journal of Differential Equations},
Conference 15 (2007), pp. 141--151.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu (login: ftp)}
\thanks{\copyright 2007 Texas State University - San Marcos.}
\vspace{9mm}}
\begin{document} \setcounter{page}{141}
\title[\hfilneg EJDE-2006/Conf/15/\hfil Issues in Adaptive Mesh Refinement]
{Issues in adaptive mesh refinement implementation}
\author[N. Hannoun, V. Alexiades\hfil EJDE/Conf/15 \hfilneg]
{Noureddine Hannoun, Vasilios Alexiades}
\address{Noureddine Hannoun \newline
Department of Mathematics, University of Tennessee, Knoxville, TN
37996-1300, USA}
\email{hannoun@math.utk.edu}
\address{Vasilios Alexiades \newline
Department of Mathematics, University of Tennessee,
Knoxville, TN 37996-1300 USA, \newline
and
Oak Ridge National Laboratory, Oak Ridge, TN 37831, USA}
\email{alexiades@utk.edu}
\thanks{Published February 28, 2007.}
\subjclass[2000]{68U99, 65M50, 65Y20}
\keywords{Adaptive mesh refinement; data structure; computational method;
\hfill\break\indent object oriented programming; conservation laws}
\begin{abstract}
Physical phenomena often involve discontinuities and/or localized
high-gradient areas. The numerical simulation of these problems and
conventional techniques (Finite Elements, Finite Volumes, Finite
Differences, and Spectral Methods) with a uniform grid is inefficient
when high accuracy is required. Adaptive Mesh Refinement (AMR) is a
technique that allows local refinement of the grid. In this
presentation, we describe a typical AMR technique and address
implementation and algorithmic issues. Triangular unstructured grids
and a regular 1 to 4 refinement are considered.
\end{abstract}
\maketitle
\section{Introduction}
The numerical solution of problems in Science and Engineering usually
requires a grid that covers the computational domain. Discretization
of the model equations is performed at the grid element level.
Solution accuracy may be increased through the use of higher order
discretization, or uniform refinement. Both methods have drawbacks:
higher order schemes are prone to producing oscillations (nonmonotone),
while uniform refinement is too expensive.
Adaptivity of the mesh is necessary to cluster grid points in the regions where
resolution is most needed, while keeping the grid coarse elsewhere. There are two main adaptive
strategies~\cite{kn:C02}: adaptive mesh redistribution, also called r-refinement,
and adaptive mesh refinement, also called h-refinement. The first type of
refinement, r-refinement, continuously repositions a fixed number of cells to
improve resolution in selected areas. Although easier to implement, it has serious
drawbacks: it does not allow for a change of topology in the solution discontinuity,
and the grid may get severely distorted. The second type of mesh adaption,
h-refinement, or more commonly AMR~\cite{kn:BO84}, adds new cells and deletes other
cells that are no longer required. The AMR technique is extremely useful for
applications involving discontinuities, shock waves~\cite{kn:SA04}, phase
change~\cite{kn:PGD98} and/or combustion~\cite{kn:MR04} interfaces, or any localized
large-gradient regions. However, it is more complex to implement and program.
The present work aims at presenting the steps involved in a typical
AMR technique for unstructured triangular grids. Emphasis will be on
some of the problems that may arise during the implementation.
\section{Choosing the data structure}
\subsection{Types of triangular mesh refinement}
For grids with triangular elements, there are three common refinement
types, which are illustrated in Figure~\ref{fig:types-of-refinement}.
The regular 1:4 refinement, (c), has the advantage of maximizing
children angles, thus keeping the element aspect ratio close to one.
This refinement type is adopted in the present work.
\begin{figure}[!htb]
\includegraphics[width=3.9in]{figures/types-of-refinement}
\caption{Triangle refinement: (a) longest-edge bisection,
(b) Intersection of bisectors,
(c) Edge midpoints or regular 1:4 refinement.}
\label{fig:types-of-refinement}
\end{figure}
\subsection{Elements}
An element is a basic grid entity. Each element is a triangle.
Elements are generated upon refinement or deactivated upon coarsening.
Therefore, it is necessary to keep in memory both active and inactive
elements. A suitable data structure for this type of problems is the
so-called k-way tree~\cite{kn:MS05}. Figure~\ref{fig:tree} displays
the tree associated with a given AMR grid. The number of elements may
increase or decrease with time (calculations).
This change requires making use of dynamic memory allocation and of pointers,
two features which are readily available in Fortran 95~\cite{kn:A03}
and C++. Since an element is one node of the k-way tree, it should
have two pointers, one pointer to its parent and an array of four
pointers to its four children. Additional data may include the
refinement level, the type of element if multiple geometries are
allowed, whether the element is a boundary element or not, and all
the data related to the type of numerical method being used (such as
the coefficients of the expansion of the solution w.r.t. the basis
when using a finite element method).
\begin{figure}[!htb]
\includegraphics[width=4.9 in]{figures/tree}
\caption{k-way tree for the grid elements}
\label{fig:tree}
\end{figure}
\subsection{Nodes/Vertices}
There are two approaches to the storage of vertices (the three vertices
of a triangular element). In the first approach
(Figure~\ref{fig:node-queue}), each vertex is a unique entity and
the vertices are stored in a queue. In the second approach,
a vertex is part of the data structure for the element to which it
belongs. Advantages of the second approach include faster access
to vertices and simpler algorithms, while there are two drawbacks.
First, if a vertex belongs to several elements, it will be stored
in each element and this results in waste of memory. Second, it is
necessary to identify which vertex of one element corresponds
to the same physical vertex in an adjacent element. This may be
accomplished by comparing coordinates of the two vertices although
comparison between real numbers may be tricky. When using the
second approach, it seems difficult to assign a global number to a
vertex. The second approach is adopted in this work and the
corresponding data structure is described next.
One very important issue about the data structure of an element is the
storage of neighbors and edges, which are needed for both refinement/coarsening
and for flux evaluation for the numerical simulation.
The problem is complicated by the occurrence of dangling nodes
(nodes in the middle of an edge).
Figure~\ref{fig:neighbor-edge-numbering} shows a numbering that
results in a one-to-one neighbor-edge-vertex mapping. This is
achieved by adding dangling vertices to the list of actual vertices
of the element while splitting edges into smaller ones as appropriate.
A maximum of six vertices (edges or neighbors) is thus allowed. If
the actual vertices (the original three of the triangle) are needed
for refinement or solver algorithms, these can be stored in an array
of three integers ``$\mathsf{ind}$'' corresponding to the indices of
the actual vertices in the list of all vertices of the element. For
example, if there are 5 vertices and the actual vertices are $1$, $3$,
and $4$, then $\mathsf{ind}(1)=1$, $\mathsf{ind}(2)=3$, and
$\mathsf{ind}(3)=4$.
\begin{figure}[!hbt]
\includegraphics[width=2.5in]{figures/node-queue}
\caption{Storage of vertices in queue}
\label{fig:node-queue}
\end{figure}
\begin{figure}[!htb]
\centering
\includegraphics[width=1.5in]{figures/element-edge-vertex-neighbor} \\
\caption{Numbering of neighbors, edges, and vertices}
\label{fig:neighbor-edge-numbering}
\end{figure}
Neighbors of an element may be accessed through a pointer to an array
of pointers (Figure~\ref{fig:neighbor-pointers}-a).
This allows for easy accounting of the change of number of neighbors
upon grid refinement/coarsening. Null pointers are used for boundary
edges with no neighbors, thus preserving the one-to-one
edge-neighbor-vertex mapping (Figure~\ref{fig:neighbor-pointers}-b).
Vertices may be stored either in an allocatable array or an array of
pointers. When dealing with loops over indices of vertices, it is
advantageous to avoid modulo arithmetic by the use of an integer
array having twice as many elements as the number of vertices
(edges, neighbors); e.g. if there are four vertices, define an
array $\mathsf{perm} = [1\; 2\; 3\; 4\; 1\; 2\; 3\; 4\;]$
and, for example, use neighbor
$\mathsf{perm}(i+2)$ to refer to neighbor $(i+2)$.
\begin{figure}[!htb]
\centering
\subfigure[Neighbor data structure]%
{\includegraphics[width=2in]{figures/array-of-pointer}} \hspace{0.1in}
\subfigure[Boundary neighbor pointer]%
{\includegraphics[width=2in]{figures/boundary-neighbor}} \\%
\caption{An element's neighbors} \label{fig:neighbor-pointers}
\end{figure}
\section{Before the refinement}
\subsection{When should the grid be refined?}
Refinement of the grid is performed locally according to some criterion
which depends on the type of problem being solved. When the solution is
known in advance, a suitable criterion is
$\|\nabla f \| h > \varepsilon$, where $f$ is the solution, $h$ the
size of the mesh, and $\varepsilon$ the threshold value for refinement.
If the solution has a discontinuity, an element would be refined if it
is intersected by the discontinuity. If the solution is not known in
advance, the refinement criterion is often based on the residual of the
model equation. There is extensive research on this type of criterion,
usually referred to as Error Estimates for AMR~\cite{kn:S03,kn:W03}.
\subsection{Refinement constraints}
Two fundamental issues arise when dealing with the
refinement process. First, it is necessary to choose a
maximum level of refinement, otherwise the algorithm may result in an
infinite loop or simply exceed available computer memory. A second
constraint, that two neighboring elements should not differ by more
than one refinement level~\cite{kn:SA04}, may be imposed to simplify
both book-keeping and algorithm, as well as ensure accurate
inter-element flux calculations.
Figure~\ref{fig:refine-constraints} shows the three possible neighbor
configurations (a), and an example of a not-allowed configuration (b).
\begin{figure}[!htb]
\centering
\subfigure[Possible neighbor configurations]%
{\includegraphics[width=2in]{figures/refinement-constraint}}%
\hspace{0.1in} \subfigure[Impossible configuration]%
{\includegraphics[width=2in]{figures/refinement-constraint-bookkeeping}} \\ %
\caption{Refinement/coarsening constraints}
\label{fig:refine-constraints}
\end{figure}
\subsection{Refinement strategy (order)}
The process of refinement of the grid is complex and involves
recursion. It is not enough for an element to satisfy the refinement
criterion in order to be refined. Indeed, the constraints mentioned
in the previous section may result in the following scenario: if any
of the element's neighbors are larger than the element itself, they
must be refined before the element is refined. Therefore, recursive
programming features need to be used.
\section{Performing the refinement of an element}
The process of refining an element, once it qualifies for refinement,
is described next.
\subsection{Creating the children}
First, the four children need to be created and an appropriate
numbering scheme should be adopted. Figure~\ref{fig:children-numbering}
displays a possible numbering choice. Edge nodes are numbered according
to their location vis-\`a-vis the element vertices and similarly for
the children. Children vertices may be numbered as follows: the
first vertex of a child corresponds to the vertex of the parent
element where the child is located, while for the fourth child
(central one) the first vertex would correspond to the first edge
of the parent element.
\begin{figure}[!htb]
\centering
\includegraphics[width=2in]{figures/child-numbering-bis} \\
\caption{Numbering of children} \label{fig:children-numbering}
\end{figure}
Data also needs to be transferred from the parent element to the
children in a consistent way. Accuracy, conservation, monotonicity,
and many other considerations may need to be taken into account
during the transfer operation.
\subsection{Finding neighbors of children}
Once the children are created, it is necessary to create the list
of vertices as well as find their neighbors. Vertices are easy to
determine. For neighbors, this can be accomplished using an
edge-based analysis.
Figure~\ref{fig:children-neighbors} shows the two possible
configurations as well as appropriate nomenclature for child $iv$
corresponding to main vertex $iv$. Keeping in mind that there is
a one-to-one mapping between vertices, edges, and neighbors of
an element, it is possible to determine the three new neighbors
$N1$, $N2$, and $N3$ of a child $iv$ corresponding to the vertex
$i=\mathsf{ind}(iv)$. As Figure~\ref{fig:children-neighbors} shows, the first neighbor
$N1$ is always the $i$th neighbor of the parent element,
the second neighbor $N2$ is always the 4th child, while the
third neighbor $N3$ is either the $\mathsf{ind}(iv+2)$ neighbor
of the parent element or the next one, depending on whether or not
neighbor $\mathsf{ind}(iv+2)$ of the parent element has
the same refinement level as the parent element.
\begin{figure}[!htb]
\centering
\includegraphics[width=4in]{figures/find-children-neighbors-after-refine} \\
\caption{Finding children's neighbors after refinement}
\label{fig:children-neighbors}
\end{figure}
\subsection{Updating neighbors}
Proceeding edge-wise, the neighbor(s) of the parent element on edge
$iv$ need(s) an update of its (their) vertices and list of neighbors
after the refinement process is performed.
Figure~\ref{fig:update-neighbors} shows the two possible
configurations. When the neighbor has the same level of refinement
as the element being refined, there is only one neighbor along the
edge. The refinement adds a vertex for that neighbor. Moreover, the
neighbor element $i$ no longer has the element being refined as its
neighbor since it is deactivated. The two children $iv$ and $iv+1$
are now the new neighbors. This can be accomplished by changing the
target of one neighbor pointer as well as inserting a new neighbor
in the list (Figure~\ref{fig:update-neighbors}).
\begin{figure}[!htb]
\centering
\includegraphics[width=4in]{figures/refine-update-neighbors-bis} \\
\caption{Updating vertices and neighbors of the refined element neighbors.}
\label{fig:update-neighbors}
\end{figure}
On the other hand, if the neighbors are smaller than the element being
refined, the only change to make is to change the pointer assignment of
the two neighbors $i$ and $i+1$ on that edge by making them point to
the appropriate children $iv$ and $iv+1$.
\subsection{Deallocating data storage of the parent element}
After refinement, the parent element is no longer in the list of
active elements and is not updated during the numerical simulation.
It may be useful to free the space allocated to store information
about its vertices, neighbors, as well as data values. If this element
is reactivated upon coarsening, reallocation of adequate space will
be needed.
\section{The coarsening process}
\subsection{Why coarsening?}
For unsteady problems, the regions in need of a finer mesh move with
time. In some regions, a refined grid may become useless. To avoid
wasting memory and computation, it is necessary to coarsen the grid
in those regions.
\subsection{Coarsening strategy (order)}
As for refinement, recursive programming is required for coarsening.
Coarsening involves an additional difficulty as an element may be
coarsened if and only if the neighbors that are finer can be coarsened
recursively. However, it may turn out that at the end of a coarsening
cascade, one neighbor may not be coarsened because of accuracy
requirements. Hence, it is first necessary to check that all the
elements in the pyramid can be coarsened before actually proceeding
to the coarsening of an element. It may be advantageous to proceed
with coarsening level-wise from the finest levels to the coarsest
levels. This trick avoids the need for a recursive procedure.
\subsection{Coarsening of an element procedure}
Reactivation of a cell is equivalent to the removal of its four
children, which must all meet the coarsening criteria. The
coarsening process proceeds with the reallocation of the parent
vertices and neighbors and their update. Afterwards, data can be
transferred from the children to the parents and the children may be deleted.
\subsection{The refinement-coarsening conflict}
Visiting all grid elements once is usually not enough to complete
the operations of refinement and coarsening. Visiting the elements
several times, on the other hand, raises a new problem. Elements
that are refined in a sweep may turn out to qualify for coarsening
during the next sweep. Moreover, some elements are refined/coarsened
due to constraints (e.g. two neighbors should not differ by more than
one refinement level) and not because they meet the
refinement/coarsening criterion. This leads to the possibility
of ``chatter'' between the refinement and coarsening operations
and could result in infinite loops. A way around this is to tag
the cells that are refined during the refinement process so as
to prevent their coarsening subsequently.
\section{Maintaining an active list of elements (and output list)}
Rather than having to search the tree each time an operation is to
be performed on active elements, it may be useful to create an array
of pointers to the active elements in the tree. This array could be
used for calculations, output, and refinement/coarsening operations.
\section{Pointers and associated problems}
Dealing with pointers may be a nightmare.
Figure~\ref{fig:pitfal-allocated-memory} displays a typical setup
for a common problem. The allocated memory is created via a
pointer {\sf a}. Later, it is accessed via pointer {\sf b} which
modifies its content. If pointer {\sf a} is accidentally deallocated
(not nullified), then pointer {\sf b} will point to a location that
is no longer used for the purpose it was initially set for. The
dangerous outcome is this error can go undetected for long if that
particular location in memory happens not to be reused right away
by the computer's operating system.
One day, the memory configuration will be different, the
operating system will decide to assign that memory location to
another purpose, and the program will abort.
\begin{figure}[!htb]
\centering
\includegraphics[width=2in]{figures/pitfal-allocated-memory} \\
\caption{Using two pointers to access allocated memory.}
\label{fig:pitfal-allocated-memory}
\end{figure}
A typical remedy is the following. If a subroutine is called
with a pointer argument pointing to a location to be removed,
then it is best to use an additional temporary pointer that points
to that location to be removed as a calling argument.
\section{{\rm DO} loops and recursive refinement/coarsening}
The recursive nature of the refinement/coarsening processes results
in some unusual problems for the programmer. Consider the element
shown in Figure~\ref{fig:do-loop-over-neighbors}, along with the
data structure used for its neighbors (Fortran 95 is considered here).
Before coarsening the element, it is necessary to coarsen any of the
neighbors that are already finer than the element.
Assuming a {\sf DO} loop is used for that purpose, the process
will go as follows:
for $i=1,5$, if Neighbor $\mathrm{N_i}$ is too small, coarsen it.
When the program gets to $i=2$, $\mathrm{N_2}$ will be coarsened
and the number of pointers will decrease to 4 while the allocated
pointer array will be deallocated, then reallocated to meet the
number of neighbors. This means the {\sf DO} loop will not
terminate!
\begin{figure}[!htb]
\centering
\subfigure[Neighbor data structure]%
{\includegraphics[width=2.2in]{figures/pitfal-loop-recursive}}%
\hspace{0.5in} \subfigure[Element with neighbor pointers]%
{\includegraphics[width=1.3in]{figures/pitfal-loop-recursive-2}} \\ %
\caption{Neighbor data structure}
\label{fig:do-loop-over-neighbors}
\end{figure}
A similar problem arises when considering recursive coarsening
of active elements. If an element is to be coarsened, this means
its three brothers/sisters are to be coarsened simultaneously. Hence
a {\sf DO} loop over the children will abort after the coarsening of
the first child.
\section{Output and visualization problems}
Using a standard visualization software package, such as Tecplot, to
draw two-dimensional contour plots may return surprising results. A
nonlinear field will generally display gaps near the dangling nodes
(Figure~\ref{fig:discontinuous-2D-plot}). This is due to the fact
that the contours are drawn element-wise when the Finite-Element
data structure is being used with Tecplot (version 9). Using the FE
data structure is necessary with unstructured grids such as AMR
grids.
\begin{figure}[!htb]
\centering
\includegraphics[width=2in]{figures/disc-contour-amr-grid} \\ \vspace{0.2in}
\includegraphics[width=2in]{figures/cont-contour-amr-grid} \hspace{0.4in}
\includegraphics[width=2in]{figures/cont-contour-output-grid} \\
\caption{Contour plots with AMR and dual grids: top (discontinuous
with AMR grid), bottom (continuous with both AMR and dual grids)}
\label{fig:discontinuous-2D-plot}
\end{figure}
A way around this problem is to use a dual grid obtained by
partitioning an element having a dangling node into smaller
triangles with no dangling nodes. This is shown in
Figure~\ref{fig:dual-grid}, where three possible configurations (1,
2, or 3 dangling nodes) are considered. The dual grid is used only
for output purposes. Figure~\ref{fig:discontinuous-2D-plot} shows
the discontinuous contours (top) as well as the corrected continuous
contour field along with two grids, the AMR grid and the dual grid.
\begin{figure}[!htb]
\centering
\includegraphics[width=4in]{figures/altering-mesh-for-contours} \\
\caption{Generating the dual grid from the AMR grid.} \label{fig:dual-grid}
\end{figure}
\section{Conclusion}
Although there is abundant literature on AMR grids, it seems difficult
to find a detailed presentation of the problems related to their
implementation. This paper examined the process of
implementing a typical AMR procedure and discussed some of the issues
that arise during this process. A particular choice of data structure
was selected and problems related to that choice were presented and
discussed.
\begin{thebibliography}{00} \frenchspacing
\bibitem{kn:A03} Akin, E.
{\em Object-Oriented Programming via Fortran 90/95.} {Cambridge University Press,
2003.}
\bibitem{kn:BO84} Berger, M. and Oliger, J.
{\em Adaptive Mesh Refinement for Hyperbolic Partial Differential Equations.}
{Journal of Computational Physics, Vol 53 (1984) pp. 484--512.}
\bibitem{kn:C02} Chung, T. J.
{\em Computational Fluid Dynamics.} {Cambridge University Press, 2002.}
\bibitem{kn:MS05} Mehta, D. P. and Sahni, S. Editors,
{\em Handbook of Data Structures and Applications.} {Chapman and Hall/CRC, 2005.}
\bibitem{kn:MR04} Michaelis, B. and Regg, B.
{\em FEM-Simulation of Laminar Flame Propagation I: Two-Dimensional Flames.}
{Journal of Computational Physics, Vol 196 (2004) pp. 417--447.}
\bibitem{kn:PGD98} Provatas, N., Goldenfeld, N., and Dantzig, J.
{\em Adaptive Mesh Refinement Computation of Solidification Microstructures Using
Dynamic Data Structures.} {Journal of Computational Physics, Vol 148 (1998) pp.
265--290.}
\bibitem{kn:SA04} Scalabrin, L. C. and Azevedo, J. L. F.
{\em Adaptive Mesh Refinement and Coarsening for Aerodynamic Flow Simulation.}
{International Journal for Numerical Methods in Fluids, Vol 45 (2004) pp.
1107--1122.}
\bibitem{kn:S03} Stein, E. (Editor),
{\em Error-Controlled Adaptive Finite Elements in Solid Mechanics.} {John Wiley and
Sons, 2003.}
\bibitem{kn:W03}
{\em Adaptive Mesh Refinement - Theory and Application.} {Proceedings of the Chicago
Workshop on Adaptive Mesh Refinement methods, Sept. 3--5, 2003. Lecture Notes in
Computational Science and Engineering, Springer 2005.}
\end{thebibliography}
\end{document}