\documentclass[reqno]{amsart}
\usepackage{graphicx}

\AtBeginDocument{{\noindent\small
2003 Colloquium on Differential Equations and Applications, Maracaibo, Venezuela.\newline
{\em Electronic Journal of Differential Equations},
Conference 13, 2005, pp. 1--11.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or
http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu  (login: ftp)}
\thanks{\copyright 2005 Texas State University - San Marcos.}
\vspace{9mm}}
\setcounter{page}{1}

\begin{document}

\title[\hfilneg EJDE/Conf/13 \hfil Controllability, applications, and simulations]
{Controllability, applications, and numerical simulations
of cellular neural networks}

\author[W. Aziz, T. Lara \hfil EJDE/Conf/13 \hfilneg]
{Wadie Aziz, Teodoro Lara}  % in alphabetical order

\address{Wadie Aziz \hfill\break
Departamento de F\'{i}sica y Matem\'{a}ticas, N\'{u}cleo
Universitario ``Rafael Rangel'', Universidad de los Andes,
Trujillo, Venezuela} 
\email{wadie@ula.ve}

\address{Teodoro Lara \hfill\break
Departamento de F\'{i}sica y Matem\'{a}ticas, N\'{u}cleo
Universitario ``Rafael Rangel'', Universidad de los Andes,
Trujillo, Venezuela} 
\email{tlara@ula.ve \quad  teodorolara@cantv.net}


\date{}
\thanks{Published May 30, 2005.}
\subjclass[2000]{37N25, 34K20, 68T05}
\keywords{Cellular Neural Networks; circulant matrix;
tridiagonal matrix; \hfill\break\indent Controllability}

\begin{abstract}
 In this work we consider the model of cellular neural network
 (CNN) introduced by Chua and Yang in 1988. We impose the
 Von-Neumann boundary conditions and study the controllability
 of corresponding system, then these results are used in image
 detection by means of numerical simulations.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
 \newtheorem{thm}{Theorem}[section]
 \newtheorem{cor}[thm]{Corollary}
 \newtheorem{lem}[thm]{Lemma}
 \newtheorem{prop}[thm]{Proposition}
 \newtheorem{defn}[thm]{Definition}
 \newtheorem{rem}[thm]{Remark}
 \newtheorem{example}{Example}
\allowdisplaybreaks


\section{Introduction}

Since their introduction (\cite{chuya:cnnap,chuya:cnnth}) cellular
neural networks (CNN) have been used in numerous problems.
Among them we have: Chua's circuit (\cite{aren:circhu}), Hopf
bifurcation model (\cite{zoussek:hopf}), Cellular Automata and
systolic arrays (\cite{automata:roskchua}), image detection
(\cite{tmchhs:cnndet}), population growth model
(\cite{cruchu1:cacn2}). In none of these works have the Von-Neumann
boundary conditions been imposed; only in (\cite{tlp:cacn})
periodic boundary conditions were considered.

The system obtained, after some changes
(\cite{chuya:cnnap,chuya:cnnth}) is
\begin{equation}\label{intro:eq2}
\dot{v}=-v+AG(v)+Bu+f(u,v)
\end{equation}
where, $u,v\in \mathbb{R}^{mn\times 1}$, are column vectors;
$A, B$ are matrices in $\mathbb{R}^{mn\times mn}$, $f(u,v)$
is a nonlinear perturbation, and $G(v)$ is a function which can be
either linear or non-linear. In this paper we set the
Von-Neumann boundary conditions, consider $G(v)=v$ and study the
controllability of the resulting system which is
\begin{equation}\label{intro:eq3}
 \dot{v} = (A-I)v+Bu+I
\end{equation}
where $A , B\in \mathbb{R}^{mn\times mn}$, after using
the boundary conditions, are tridiagonal matrices,
$I$ is the identity matrix in $\mathbb{R}^{mn\times mn}$.

Also, we implement some numerical simulations of these results
to show image detection; specifically Chinese characters.



\begin{figure}[ht]
 \centering
\setlength{\unitlength}{0.01in}
\begin{picture}(480,236)(51,595)
\thinlines \put( 80,700){\circle{42}} \put(112,800){\circle{16}}
\put(140,700){\circle{40}} \put(140,620){\circle*{10}}
\put(200,620){\circle*{10}} \put(260,620){\circle*{10}}
\put(320,620){\circle*{10}} \put(380,620){\circle*{10}}
\put(440,620){\circle*{10}} \put(200,800){\circle*{10}}
\put(260,800){\circle*{10}} \put(320,800){\circle*{10}}
\put(80,800){\line( 1, 0){ 25}} \put( 80,800){\line( 0,-1){ 60}}
\put( 80,620){\line( 0, 1){ 60}} \put( 80,740){\line( 0,-1){ 20}}
\put(140,800){\line( 0,-1){ 80}} \put(140,620){\line( 0, 1){ 60}}
\put(140,800){\line( 1, 0){160}} \put(300,800){\line( 1, 0){ 40}}
\put(350,800){\line( 1, 0){ 5}}
\put(360,800){\makebox(0.1111,0.7778){.}}
\put(360,800){\line( 1, 0){  5}} \put(370,800){\line( 1, 0){ 10}}
\put(380,800){\line( 0,-1){ 80}} \put(380,620){\line( 0, 1){ 60}}
\put(380,720){\line(-1,-1){ 20}} \put(360,700){\line( 1,-1){ 20}}
\put(380,680){\line( 1, 1){ 20}} \put(400,700){\line(-1, 1){ 20}}
\put(320,800){\line( 0,-1){ 80}} \put(320,620){\line( 0, 1){ 40}}
\put(320,660){\line( 0, 1){ 20}} \put(320,680){\line( 1, 1){ 20}}
\put(340,700){\line(-1, 1){ 20}} \put(320,720){\line(-1,-1){ 20}}
\put(300,700){\line( 1,-1){ 20}} \put(260,800){\line( 0,-1){ 80}}
\put(260,680){\line( 0,-1){ 60}} \put(200,615){\line( 0, 1){ 70}}
\put(200,800){\line( 0,-1){ 90}} \put(185,690){\line( 1, 0){ 30}}
\put(185,710){\line( 1, 0){ 30}} \put( 80,620){\line( 1, 0){420}}
\put(500,620){\line( 0, 1){ 60}} \put(500,720){\line( 0, 1){ 80}}
\put(500,800){\line(-1, 0){ 60}} \put(440,800){\line( 0,-1){ 80}}
\put(440,620){\line( 0, 1){ 60}} \put(440,720){\line(-1,-1){ 20}}
\put(420,700){\line( 1,-1){ 20}} \put(440,680){\line( 1, 1){ 20}}
\put(460,700){\line(-1, 1){ 20}} \put(320,620){\line( 0,-1){ 15}}
\put(305,605){\line( 1, 0){ 30}} \put(315,595){\line( 1, 0){ 10}}
\put(380,690){\vector( 0, 1){ 15}} \put(440,690){\vector(0,1){15}}
\put(320,690){\vector( 0, 1){ 15}} \put(140,690){\vector(0,1){15}}
\put(250,720){\line( 1, 0){ 20}} \put(250,680){\line( 1, 0){ 20}}
\put(270,720){\line( 0,-1){ 40}} \put(250,720){\line( 0,-1){ 40}}
\put(490,720){\line( 1, 0){ 20}} \put(490,680){\line( 1, 0){ 20}}
\put(510,720){\line( 0,-1){ 40}} \put(490,720){\line( 0,-1){ 40}}
\put(105,725){\makebox(0,0)[lb]{\smash{ij}}}
\put(240,730){\makebox(0,0)[lb]{\smash{v}}}
\put(225,805){\makebox(0,0)[lb]{\smash{ij}}}
\put(285,635){\makebox(0,0)[lb]{\smash{vu}}}
\put(345,735){\makebox(0,0)[lb]{\smash{vy}}}
\put(465,805){\makebox(0,0)[lb]{\smash{ij}}}
\put(410,725){\makebox(0,0)[lb]{\smash{yv}}}
\put(475,730){\makebox(0,0)[lb]{\smash{y}}}
\put(75,700){\makebox(0,0)[lb]{\smash{+}}}
\put(150,730){\makebox(0,0)[lb]{\smash{I}}}
\put(185,730){\makebox(0,0)[lb]{\smash{C}}}
\put(280,640){\makebox(0,0)[lb]{\smash{I}}}
\put(340,740){\makebox(0,0)[lb]{\smash{I}}}
\put(405,730){\makebox(0,0)[lb]{\smash{I}}}
\put(470,735){\makebox(0,0)[lb]{\smash{R}}}
\put(460,815){\makebox(0,0)[lb]{\smash{y}}}
\put(85,810){\makebox(0,0)[lb]{\smash{u}}}
\put(90,805){\makebox(0,0)[lb]{\smash{ij}}}
\put(220,810){\makebox(0,0)[lb]{\smash{v}}}
\put(95,730){\makebox(0,0)[lb]{\smash{E}}}
\put(230,735){\makebox(0,0)[lb]{\smash{R}}}
\put(75,690){\makebox(0,0)[lb]{\smash{--}}}
\end{picture}
 \caption{Typical circuit of CNN $ij$-position.}
 \label{fig:neurona}
\end{figure}

\section{Cellular Neural Networks}

A CNN consists, basically, of a collection of nonlinear circuits
displayed in a 2-dimensional array. The basic circuit of a CNN is
called a cell. A cell is made of linear and non-linear circuit
elements, which usually are linear capacitors, linear resistors,
linear and non-linear controlled sources, and independent
sources. Each cell receives external signals through its input.
The state voltage of a given cell is influenced not only by its own
input through a feedback, its output; but also by the input and
output of the neighboring cells. These interactions are
implemented by voltage-controlled current sources. In the initial
papers (\cite{chuya:cnnap,chuya:cnnth}) any cell in CNN is
connected only to its neighbor cells; this is accomplished by
using the so called 1-neighborhood or simply neighborhood and
consequently $3 \times 3$-cloning templates. The adjacent cells
can interact directly with each other, since the network is made
of a massive aggregate of regularly spaced cells which
communicate with each other directly only through their nearest
neighbors. In Figure~\ref{fig:neurona} the basic circuit of a
CNN of a cell (located at, say, position $ij$ of the array) is
depicted. Here $v_{ij}$ is the voltage across the cell (state of
the cell) with its initial condition satisfying
$| v_{ij}(0)| \leq 1$. $E_{ij}$ is an independent voltage source,
and $u_{ij}=E_{ij}$ is called the input or control, also assumed
to satisfy $| u_{ij}| \leq 1$. $I$ is an independent current
source, $C$ is a linear capacitor, $R_{v}$ and $R_{y}$ are linear
resistors. $I_{vu}$, $I_{vy}$ are linear voltage-controlled
current sources such that at each neighbor cell, say $kl$,
$I_{vy}=(I_{vy})_{kl}=a_{kl}g(v_{kl})$ is a current source and
$I_{vu}=(I_{vu})_{kl}=b_{kl}u_{kl}$; the nonlinear
voltage-controlled source is given by $I_{vy}=\frac{1}{R_y}g(v_{ij})$,
where $a_{kl}, \, b_{kl} \in \mathbb{R}$ and $g$ is an output
sigmoid function.


\section{Dynamics of CNN}

\begin{defn}[$r$-neighborhood] \rm %3.1
The $r$-neighborhood of a cell $c_{ij}$, in a cellular neural
network is defined by
\begin{equation}\label{arantxa1:eq:vecindad}
N^{ij}=\{c_{i_1 j_1}: \max\{|i-i_{1}|;|j-j_{1}|\} \leq r ; \;
1\leq i_{1} \leq m, \; 1 \leq j_{1} \leq n\}
\end{equation}
where $r$ is a positive integer.
\end{defn}

We consider the case $r=1$ which produces a couple of $3 \times 3$-matrices
(cloning templates); the feedback and control operator, given as
\begin{equation}\label{arantxa1:eq:marrano}
\widetilde{A} = \begin{pmatrix}
a_{11} & a_{12} & a_{13} \\
a_{21} & a_{22} & a_{23} \\
a_{31} & a_{32} & a_{33}
\end{pmatrix},
\quad \widetilde{B} = \begin{pmatrix}
b_{11} & b_{12} & b_{13} \\
b_{21} & b_{22} & b_{23} \\
b_{31} & b_{32} & b_{33}
\end{pmatrix},
\end{equation}
The output feedback depends on the interactive parameters $a_{ij}$
and the input control depends on parameters $b_{ij}$, $v\in
\mathbb{R}^{mn}$ is the voltage and represents the state vector,
and $u=(u_{11},u_{12}, \dots,u_{mn})^{T} \in \mathbb{R}^{mn}$ is
the control (input), and the output $y=G(v)$
\begin{equation}\label{arantxa1:eq:defineg}
G:\mathbb{R}^{mn}\to \mathbb{R}^{mn};\quad \quad G(v)=
(g(v_{11}),g(v_{12}), \dots, g(v_{mn}))^{T}
\end{equation}
$g$ is differentiable, bounded and $\| g \|\leq 1$ (in the most
general case $\| g \|\leq K$) and non decreasing ($g'\geq 0$); that is
a sigmoid function. We also assume $\| u\|\leq 1$, $\| u(0) \|\leq 1$.

\begin{defn} \rm %3.2
Let $K$ and $L$ be two square matrices of the same size and
elements $k_{ij}$, $l_{ij}$ respectively; we define $\odot$
product
\begin{equation}\label{arantxa1:eq:productoo}
K \odot L =\sum_{i,j}k_{ij}l_{ij}.
\end{equation}
\end{defn}

By imposing the Von-Neumann boundary conditions
\begin{equation}
\begin{array}{cc}
\left. \begin{array}{c}
 v_{ik} = v_{ik+1} \\
 v_{ik-1} = v_{ik+2}
\end{array}\right\}
 & i=-1, \dots,n+2, \quad k = 0,m \\
 &\\
\left. \begin{array}{c}
v_{kj} = v_{k+1j} \\
v_{k-1j} =  v_{k+2j}
\end{array}\right\}
 & j=-1,\dots,m+2, \quad k = 0,n;
\end{array}
\end{equation}
and applying the Kirchhoff Law of Voltage and Current, we obtain the equation
at cell $c_{ij}$,
\begin{equation}
 \dot{v}_{ij}=-v_{ij} +
\widetilde{A} \odot \widehat{G}(v_{ij}) + \widetilde{B} \odot
\widehat{u}_{ij} + I ,
\end{equation}
and in its vector form, by taking the row order in this vector,
that is, the first $n$-elements are formed by the first row of
matrix and so on, the resulting system is
\begin{equation}\label{problem}
\dot{v}=-v + AG(v) + B u + I ,
\end{equation}
where
\begin{gather*}
AG(v) = (\widetilde{A} \odot \widehat{G}(v_{11}), \dots ,
 \widetilde{A} \odot \widehat{G}(v_{mn}))^{T}, \\
Bu = (\widetilde{B} \odot \widehat{u}_{11}, \dots ,
 \widetilde{B} \odot \widehat{u}_{mn})^{T}, \quad
I = (I,\dots, I)^{T}
\end{gather*}
 matrices $A$, $B$ are block tridiagonal,
$AG(v)=(\widehat{A}+\overset{\circ}{A})v$ and
$Bu=(\widehat{B}+\overset{\circ}{B})u$ with

$$\widehat{A} = \begin{pmatrix}
 A_2 & A_3 & 0 & \dots & 0 & 0 \\
 A_1 & A_2 & A_3 & 0 & \dots & 0 \\
 \vdots & \ddots & \ddots & \ddots & 0 & 0 \\
 0 & \dots & A_1 & A_2 & A_3 & 0 \\
 0 & 0 & \dots & A_1 & A_2 & A_3 \\
 0 & 0 & 0 & \dots & A_1 & A_2
 \end{pmatrix}, \quad \widehat{B} =\begin{pmatrix}
 B_2 & B_3 & 0 & \dots & 0 & 0 \\
 B_1 & B_2 & B_3 & 0 & \dots & 0 \\
 \vdots & \ddots & \ddots & \ddots & 0 & 0 \\
 0 & \dots & B_1 & B_2 & B_3 & 0 \\
 0 & 0 & \dots & B_1 & B_2 & B_3 \\
 0 & 0 & 0 & \dots & B_1 & B_2
 \end{pmatrix}, $$
$$
A_i= \begin{pmatrix}
 a_{i2} & a_{i3} & 0 & \dots & 0 \\
 a_{i1} & \ddots & \ddots & &\vdots \\
 \vdots & \ddots & \ddots & \ddots & \vdots \\
 \vdots & & \ddots &\ddots & a_{i3} \\
 0 & \dots & \dots & a_{i1} & a_{i2}
\end{pmatrix}, \quad i=1,2,3.$$
The matrix $\widehat{B}$ has the same blocks. The perturbation matrices
look like,
$$\overset{\circ}{A} = \begin{pmatrix}
 L_1 + \Gamma_2 & \Gamma_3 & 0 & \dots & 0 & 0 \\
 \Gamma_1 & \Gamma_2 & \Gamma_3 & 0 & \dots & 0 \\
 0 & \ddots & \ddots & \ddots & & 0 \\
 \vdots & \ddots & \Gamma_1 & \Gamma_2 & \Gamma_3 & 0 \\
 \vdots &\dots & \ddots & \Gamma_1 & \Gamma_2 & \Gamma_3 \\
 0 &\dots & \dots & L_2 & \Gamma_1 & \Gamma_2
 \end{pmatrix},\quad \begin{cases}
 L_1 = A_1 + \Gamma_1 \\
 L_2 = A_3 + \Gamma_3
 \end{cases}.
$$
$$
\Gamma_i=\begin{pmatrix}
 a_{i1} & 0 & &\dots & 0 \\
 0 & 0 & & & \\
 0 &\ddots &\ddots & & \\
 & \ddots & \ddots & \ddots & \\
 0 & \dots & a_{i3} & 0 & 0
\end{pmatrix}\quad i=1,2,3.$$
The matrix $\overset{\circ}{B}$ is defined similarly.

\begin{rem} \rm %3.3
Other types of order were tested but they produce the same type of
matrix, block tridiagonal.
\end{rem}

\begin{lem} %3.4
If $A$, $B$ are two arbitrary square matrices of size $l \times l$ and real
entries, then $(A \otimes B)^{n}= A^{n} \otimes B^{n},$ for all
$n \in \mathbb{N}.$
\end{lem}

\begin{cor} %3.5
If $A$ is a matrix of order $n \times n$ and
$\Pi = \mathop{\rm circ}(0,1,0, \dots,0)$ is a circulant matrix,
 then $(A \otimes \Pi)^k =A^{k} \otimes \Pi^{k}$; for
$k=1, \dots, m$.
\end{cor}


\section{CNN and Controllability}

In this section we study the controllability of the general system
(\ref{problem}) by means of the properties of block tridiagonal matrices.
Instead of (\ref{problem}) we study the linear case
\begin{equation}\label{ctte1}
 \dot{v} = (A - I)v + Bu + I\,.
\end{equation}
The study of the controllability
of (\ref{ctte1}) is equivalent to study the controllability of
\begin{equation}\label{ctte}
 \dot{v} = (A - I)v + Bu.
\end{equation}
Note that $A-I$ is a tridiagonal matrix of the same type as $A$.

\begin{lem} %4.1
Any block tridiagonal matrix
$$ A =\begin{pmatrix}
 A_2 & A_3 & 0 & \dots & 0 & 0 \\
 A_1 & A_2 & A_3 & 0 & \dots & 0 \\
 \vdots & \ddots & \ddots & \ddots & 0 & 0 \\
 0 & \dots & A_1 & A_2 & A_3 & 0 \\
 0 & 0 & \dots & A_1 & A_2 & A_3 \\
 0 & 0 & 0 & \dots & A_1 & A_2
 \end{pmatrix}
$$
can be written as $A=A_{3}\otimes \Pi + A_{1} \otimes \Pi ^{n-1} +
A_{2} \otimes \Pi^{n}$.
\end{lem}

\begin{lem} %4.2
For every block tridiagonal matrix $A$, the following takes place
\[
A^{k} = \sum_{i=0}^{k} \sum_{j=0}^{k-i}\begin{pmatrix}
 k \\
 i
 \end{pmatrix} \begin{pmatrix}
 k-i \\
 j
 \end{pmatrix}
A_{3}^{k-i-j}A_{1}^{j}A_2^{i} \otimes
\Pi^{k-i-2j}; \quad k \in \mathbb{N}.
\]
\end{lem}

\begin{proof} For $l \in \mathbb{N}$ fixed
\begin{align*}
A^{l} & = [A_{3}\otimes \Pi + A_{1} \otimes \Pi ^{n-1} + A_{2}
\otimes \Pi^{n}]^{l} \\
 & = \sum_{i=0}^{l} \sum_{j=0}^{l-i} \begin{pmatrix}
 l \\
 i
 \end{pmatrix} \begin{pmatrix}
 l-i \\
 j
 \end{pmatrix} (A_{3} \otimes \Pi)^{l-i-j}(A_{1} \otimes
 \Pi^{n-1})^{j}(A_2 \otimes \Pi^{n})^{i} \\
 & = \sum_{i=0}^{l} \sum_{j=0}^{l-i} \begin{pmatrix}
 l \\
 i
 \end{pmatrix}\begin{pmatrix}
 l-i \\
 j
 \end{pmatrix}
A_{3}^{l-i-j}A_{1}^{j}A_2^{i} \otimes \Pi^{l-i-2j}.
\end{align*}
\end{proof}

\begin{thm} %4.3
Let $A$ and $B$ be two $n \times n$ block tridiagonal matrices.
Then
\begin{align*}
 A^k B =& \sum_{i=0}^{k} \sum_{j=0}^{k-i}
\begin{pmatrix}
 k \\
 i \end{pmatrix} \begin{pmatrix}
 k-i \\
 j \end{pmatrix}
(A_3^{k-i-j}A_1^{j}A_2^{i})\\
&\times [B_3 \otimes \Pi + B_1 \otimes
\Pi^{n-1} + B_2 \otimes \Pi^{n}] \Pi^{k-(i+2j)}
\end{align*}
 for $k \in \mathbb{N}$.
\end{thm}

\begin{proof} By induction: for $k=1$,
$$AB=\sum_{i=0}^{1} \sum_{j=0}^{1-i}
\begin{pmatrix}
 1 \\
 i \end{pmatrix} \begin{pmatrix}
 1-i \\
 j \end{pmatrix}
(A_3^{1-i-j}A_1^{j}A_2^{i}) [B_3 \otimes \Pi + B_1 \otimes
\Pi^{n-1} + B_2 \otimes \Pi^{n}] \Pi^{1-(i+2j)}.
$$
Assume the statement of the theorem is true for $k=m$.
Then for $k=m+1$, we have
\begin{align*}
 A^{m+1} B & = AA^m B \\
 & = \sum_{i=0}^{m+1} \sum_{j=0}^{m+1-i} \begin{pmatrix}
 m+1 \\
 i \end{pmatrix}\begin{pmatrix}
 m+1-i \\
 j \end{pmatrix}
(A_3^{m+1-i-j}A_1^{j}A_2^{i}) \\
& \times [B_3 \otimes \Pi + B_1 \otimes
\Pi^{n-1} + B_2 \otimes \Pi^{n}] \Pi^{m+1-(i+2j)}
\end{align*}
\end{proof}

According to \cite[Theorem 3]{sontag:control}, the controllability
of (\ref{ctte}) depends on the rank of $(A,B)$. However,
\begin{align*}
\mathbf{Rg}[\mathbb{R}(A,B)] & =
\mathbf{Rg}([ B, AB, \dots, A^{n-1}B ]) \\
 & =
 \mathbf{Rg}\left[\left( \begin{array}{ll}
 \begin{bmatrix}
 C_1 &C_2 &\dots &C_{n-1} \\
 C_1 &C_2 &\dots &C_{n-1} \\
 \vdots &\vdots & & \vdots \\
 C_1 &C_2 &\dots &C_{n-1}
 \end{bmatrix}  \otimes \mathbf{B}
 \end{array} \right)
 \mathbf{D}\right],
\end{align*}
where
\[
\mathbf{C}= \begin{bmatrix}
C_1 &C_2 &\dots &C_{n-1} \\
C_1 &C_2 &\dots &C_{n-1} \\
\vdots &\vdots &\vdots & \vdots \\
C_1 &C_2 &\dots &C_{n-1}
\end{bmatrix},
\]
$\mathbf{B}=B_3 \otimes \Pi + B_1 \otimes \Pi^{n-1} + B_2 \otimes \Pi^{n}$,
\begin{gather*}
C_1  =  \sum_{i=0}^{0} \sum_{j=0}^{0-i} \begin{pmatrix}
 0 \\
 i \end{pmatrix}\begin{pmatrix}
 0-i \\
 j \end{pmatrix}
(A_3^{0-i-j}A_1^{j}A_2^{i}) \\
C_2 = \sum_{i=0}^{1} \sum_{j=0}^{1-i} \begin{pmatrix}
 1 \\
 i \end{pmatrix}\begin{pmatrix}
 1-i \\
 j \end{pmatrix}
(A_3^{1-i-j}A_1^{j}A_2^{i}) \\
  \vdots  \\
C_{n-1}  = \sum_{i=0}^{n-1} \sum_{j=0}^{n-1-i} \begin{pmatrix}
 n-1 \\
 i \end{pmatrix}\begin{pmatrix}
 n-1-i \\
 j
 \end{pmatrix}(A_3^{n-1-i-j}A_1^{j}A_2^{i}),
\end{gather*}
and
\[
\mathbf{D}=\begin{bmatrix}
\Pi^{n} &0 &0 &0 \\
0 &\Pi^{1-(i+2j)} &0 &0 \\
0 &0 &\ddots &0 \\
0 &0 &0 &\Pi^{(n-1)-(i+2j)}
\end{bmatrix}.
\]

\begin{prop} %4.4
Let
\begin{equation*}
 \mathbf{D}=\begin{bmatrix}
\Pi^{n} &0 &0 &0 \\
0 &\Pi^{1-(i+2j)} &0 &0 \\
0 &0 &\ddots &0 \\
0 &0 &0 &\Pi^{(n-1)-(i+2j)}
\end{bmatrix}\,.
\end{equation*}
Then $| \det(\mathbf{D})|= 1$.
\end{prop}

The proof of the above proposition can be found in \cite{waa:tesis}.
We are now ready to give the main result of this section, which is
quite technical, but applicable to several situations discussed
later.

\begin{thm} %4.5
The system (\ref{ctte}) is controllable if and only if
$\mathbf{Rg}(C \otimes B)=n.$
\end{thm}

\begin{proof} By \cite[Theorem 3]{sontag:control},
the system (\ref{ctte}) is controllable if and only if
\begin{equation*}
 \mathbf{Rg}[\mathbb{R}(A,B)] = \mathbf{Rg}
[B,  AB, \dots, A^{n-1}B]\,.
\end{equation*}
By the above proposition this is true if and only if
$\mathbf{Rg}(C \otimes B)=n$.
\end{proof}

\subsection*{Example}
Let $m=3$ and $n=3$; let matrices $\widetilde{A}$ and
$\widetilde{B}$ be as in (\ref{arantxa1:eq:marrano}); let the output
$y=G_2(v)$, with
$G_2:\mathbb{R}^{3 \times 3}\to \mathbb{R}^{3 \times 27}$
given as
\[
G_2(v)  = \big(G(v_{11}),G(v_{12}),G(v_{13}),G(v_{21}),G(v_{22}),G(v_{23}),
  G(v_{31}),G(v_{32}),G(v_{33})\big)^{T}\,.
\]
We impose the Von-Neumann boundary conditions and get
\newcommand{\matr}[9]{\begin{pmatrix}
 v_{#1}&v_{#2}&v_{#3}\\
 v_{#4}&v_{#5}&v_{#6}\\
 v_{#7}&v_{#8}&v_{#9}
 \end{pmatrix}}
\begin{gather*}
  G(v_{11}) = \matr{11}{11}{12}{11}{11}{12}{21}{21}{22},\quad
 G(v_{12}) = \matr{11}{12}{13}{11}{12}{13}{21}{22}{23}, \\
  G(v_{13}) = \matr{12}{13}{11}{12}{13}{11}{22}{23}{21},\quad
 G(v_{21}) = \matr{11}{11}{12}{21}{21}{22}{31}{31}{32}, \\
 G(v_{22}) = \matr{11}{12}{13}{21}{22}{23}{31}{32}{33},\quad
 G(v_{23}) = \matr{12}{13}{11}{22}{23}{21}{32}{33}{31}, \\
 G(v_{31}) = \matr{21}{21}{22}{31}{31}{32}{11}{11}{12},\quad
 G(v_{32}) = \matr{21}{22}{23}{31}{32}{33}{11}{12}{13}, \\
 G(v_{33}) = \matr{22}{23}{21}{32}{33}{31}{12}{13}{11}\,.
\end{gather*}
Now $AG_2(v)$ has the form
{\scriptsize
\newcommand{\sa}[2]{a_{#1}+a_{#2}}
\[
  \begin{pmatrix}
    (\sa{11}{12}+  &            &            &            &        &        &            &        &       \\
    \sa{21}{22})   & \sa{13}{23} &     0      & \sa{31}{32} & a_{33} & 0      & 0          & 0      & 0     \\
    \sa{11}{21}    & \sa{12}{22} & \sa{13}{23} & a_{31}     & a_{32} & a_{33} & 0          & 0      & 0     \\
    \sa{13}{23}    & \sa{11}{21} & \sa{12}{22} & a_{33}     & a_{31} & a_{32} & 0          & 0      & 0     \\
    \sa{11}{12}    & a_{13}     & 0          & \sa{21}{22} & a_{23} & 0      & \sa{31}{32} & a_{33} & 0     \\
    a_{11}        & a_{12}     & a_{13}     & a_{21}     & a_{22} & a_{23} & a_{31}     & a_{32} & a_{33}\\
    a_{13}        & a_{11}     & a_{12}     & a_{23}     & a_{21} & a_{22} & a_{33}     & a_{31} & a_{32}\\
    \sa{31}{32}    & a_{33}     & 0          & \sa{11}{12} & a_{13} & 0      & \sa{21}{22} & a_{23} & 0     \\
    a_{31}        & a_{32}     & a_{33}     & a_{11}     & a_{12} & a_{13} & a_{21}     & a_{22} & a_{23}\\
    a_{33}        & a_{31}     & a_{32}     & a_{13}     & a_{11} & a_{12} & a_{23}     & a_{21} & a_{22}\\
  \end{pmatrix}
  \begin{pmatrix}
    v_{11} \\
    v_{12} \\
    v_{13} \\
    v_{21} \\
    v_{22} \\
    v_{23} \\
    v_{31} \\
    v_{32} \\
    v_{33} \\
  \end{pmatrix}.
\]
}%\input{tacha}

We write $AG_2(v)$ as
\begin{equation*}
 AG_2(v)=(\widehat{A}+\overset{\circ}{A})G_2(v)=\widehat{A}G_2(v)
+ \overset{\circ}{A}G_2(v).
\end{equation*}
Then we do the same for matrix $Bu$.
Now (\ref{ctte1}) becomes
\begin{equation}\label{arantxa1:eq:general}
 \dot{v}= -v + \widehat{A}G_2(v) + \widehat{B}u + f(u,v)
\end{equation}
Note that $A$ and $B$ are tridiagonal matrices and $f(u,v)=I+
\overset{\circ}{A}G_2(v) + \overset\circ{B}u$ is a perturbation of
(\ref{ctte}); if the system with $f(u,v)=0$ is
controllable, then by \cite[Theorem 11]{lee:markus},
(\ref{arantxa1:eq:general}) is also controllable. Here
$\overset\circ{A}G_2(v)$ and $\overset\circ{B}u$ have the form,
respectively,
\[
\overset{\circ}{A}G_2(v)  =
  \begin{pmatrix}
    0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
    0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
    a_{13}+a_{23} & 0 & 0 & a_{33} & 0 & 0 & 0 & 0 & 0\\
    0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
    0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
    a_{13} & 0 & 0 & a_{23} & 0 & 0 & a_{33} & 0 & 0\\
    0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
    0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
    a_{33} & 0 & 0 & a_{13} & 0 & 0 & a_{23} & 0 & 0\\
  \end{pmatrix}
  \begin{pmatrix}
    v_{11} \\
    v_{12} \\
    v_{13} \\
    v_{21} \\
    v_{22} \\
    v_{23} \\
    v_{31} \\
    v_{32} \\
    v_{33} \\
  \end{pmatrix}\,,
\]
%\input{tacha1} \input{tacha2}
\[
 \overset{\circ}{B}u  =
  \begin{pmatrix}
    0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
    0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
    b_{13}+b_{23} & 0 & 0 & b_{33} & 0 & 0 & 0 & 0 & 0\\
    0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
    0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
    b_{13} & 0 & 0 & b_{23} & 0 & 0 & b_{33} & 0 & 0\\
    0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
    0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
    b_{33} & 0 & 0 & b_{13} & 0 & 0 & b_{23} & 0 & 0\\
  \end{pmatrix}
  \begin{pmatrix}
    u_{11} \\
    u_{12} \\
    u_{13} \\
    u_{21} \\
    u_{22} \\
    u_{23} \\
    u_{31} \\
    u_{32} \\
    u_{33} \\
  \end{pmatrix}.
\]


\begin{rem} %4.6 \rm
So far we have studied the case where $G(v)= \alpha v$, $\alpha > 0$; that is,
the linear case. The non-linear case
\begin{equation}\label{hugo}
\dot{v} = -v + AG(v) + Bu
\end{equation}
can be attacked just writing down
\begin{equation}\label{hugo2}
\dot{v}  =  (A-I)v + Bu + (AG(v) - Av)
  =  (A-I)v + Bu + A(G(v) - v)
\end{equation}
and imposing the condition of $A(G(v)-v)$ being globally Lipschitz.
In this case we guarantee controllability of (\ref{hugo}) if
 (\ref{hugo2}) is controllable.
\end{rem}


\begin{figure}[htb]
\begin{center}
 \includegraphics[width=0.6 \textwidth]{fig2} %diamant}
\end{center}
 \caption{Input and some iterations by a $30\times 30$ matrix.}
 \label{diamante}
\end{figure}

\section{Numerical Simulations}

In this section we use our model of CNN in image detection; most
of our examples are Chinese characters. The idea is to input an image
and iterate equation (\ref{intro:eq3}) by using the fourth-order
Runge-Kutta method. We shall use the corner-detecting CNN as in
\cite{crounchu:detection}, but
taking $b_{22}=5$; in other words
\[
\widetilde{A}=\begin{pmatrix}
 0 &0 &0 \\
 0 &2 &0 \\
 0 &0 &0
 \end{pmatrix}, \quad \widetilde{B}=\begin{pmatrix}
 -7/20 & -1/4 &-7/20 \\
 -1/4 & 5 & -1/4 \\
 -7/20 & -1/4 & -7/20
 \end{pmatrix}, \quad I=3 \times 10^{-4} {\rm Amp.}
\]
First, we consider Figure~\ref{diamante}, a diamond as input and some
iterations; we detect the main character of the stroke in the
first three steps of this process, in a $30 \times 30$ array, and after
a few iterations we reach the maximum detection.

In Figure~\ref{diamiter}, we find the same behavior as in
Figure~\ref{diamante}; by taking now $k$ (number of iterations) a
little bigger.

\begin{figure}[htb]
 \includegraphics[width=0.7\textwidth]{fig3} % diamiter
 \caption{More iterations in case of the diamond.}
 \label{diamiter}
\end{figure}


Figure \ref{chi2entra} is a Chinese character with a $35 \times 35$ array.
After some iterations for $k=3$ and $k=10$ maximum detection is
achieved.

\begin{figure}[htb]
 \includegraphics[width=0.7\textwidth]{fig4} %chi2entra}
 \caption{Input and some iterations for a  $35\times 35$ matrix
 with an ideogram.}
 \label{chi2entra}
\end{figure}

Figure \ref{chi2ite} is made by an iteration of the input in
figure \ref{chi2entra}
with $k$ bigger, the output is the same as in the previous figure.

\begin{figure}[htb]
 \includegraphics[width=0.7 \textwidth]{fig5} %chi2ite
 \caption{$35 \times 35$-array; some more iterations.}
 \label{chi2ite}
\end{figure}

As a concluding remark, we want to mention that the two input
figures chosen here
are the same as two of those chosen in \cite{chuya:cnnap,tlp:cacn},
but now we are imposing Von-Neumann boundary
conditions. In our case maximum detection is attained in fewer
steps than in the mentioned papers.
\newpage
\begin{thebibliography}{10}

\bibitem{waa:tesis}
Wadie Aziz, \emph{Redes neuronales celulares}, Master's thesis,
Universidad de Los Andes, N\'{u}cleo Universitario Rafael Rangel,
Trujillo - Venezuela, January 2003.

\bibitem{chuya:cnnap}
Leon~O. Chua and L.~Yang, \emph{Cellular neural networks:
Applications}, IEEE. Transc. Circuits Syst. \textbf{35} (1988),
1273--1290.

\bibitem{chuya:cnnth}
Leon~O. Chua and L.~Yang, \emph{Cellular neural networks: Theory},
IEEE. Transc. Circuits Syst. \textbf{35} (1988), 1257--1271.

\bibitem{tmchhs:cnndet}
K.~R. Crounse and L.~O. Chua, \emph{Methods for image processing
and pattern formation in cnn: A tutorial}. \textbf{42}, no.~10,
(1995), 583--601

\bibitem{cruchu1:cacn2}
J.~Cruz and L.~O. Chua, \emph{Application of cellular neural
networks to model population dynamics}, IEEE. Transc. Circuits
Syst. \textbf{42}, no.~10, (1995), 715--720.

\bibitem{aren:circhu}
S.~Baglio, L.~Fortuna, P.~Arenas and G.~Manganaro, \emph{Chua's
circuit can be generated by cnn cells}. IEEE Transc. on Circuit
Sys.I: Fundamental Theory and Applic. \textbf{42}, no.~2, (1995),
123--126.

\bibitem{tlp:cacn}
T.~Lara, \emph{Controllability and applications of cnn}, Ph.D. thesis, Georgia
 Institute of Technology, USA, December 1997.

\bibitem{lee:markus}
E.~B. Lee and L.~Markus, \emph{Foundations of optimal control
theory}. John Wiley and Sons, New York, 1967.

\bibitem{automata:roskchua}
T.~Roska and L.~Chua, \emph{Cellular neural network with
non-linear and delay-type templates elements}. IEEE Transc. on
Circuit Sys.I: Fundamental Theory and Applic. \textbf{37}, (1990),
12--25.

\bibitem{sontag:control}
E.~D. Sontag, \emph{Mathematical control theory}.
Springer--Verlag, New York 1990.

\bibitem{crounchu:detection}
T.~Boros, A.~Radv\'{a}nyi, T.~Roska, Leon~Chua and P.~Thiran,
\emph{Detecting moving and standing objects using cellular neural
networks}. Int. Journal on Circ. Theory and Applictions,
\textbf{20}, (1992), 613--628.

\bibitem{zoussek:hopf}
Fan Zou and Josef Nossek, \emph{Bifurcation and chaos in cellular neural
 networks}, IEEE Transc. on Circuit Sys.I: Fundamental Theory and Applic.
 \textbf{40} (1993), no.~3, 157--164.
\end{thebibliography}

\end{document}

