\documentclass[reqno]{amsart}
\usepackage{epic}
\usepackage{hyperref}

\AtBeginDocument{{\noindent\small
2003 Colloquium on Differential Equations and Applications, Maracaibo, Venezuela.\newline
{\em Electronic Journal of Differential Equations},
Conference 13, 2005, pp. 13--19.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or
http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu  (login: ftp)}
\thanks{\copyright 2005 Texas State University - San Marcos.}
\vspace{9mm}}
\setcounter{page}{13}

\begin{document}

\title[\hfilneg EJDE/Conf/13 \hfil Method of relaxation]
{Method of relaxation applied to optimization of discrete systems}

\author[J. L. Calvet, J. Cardillo, J. C. Hennet, F. Szigeti
\hfil EJDE/Conf/13 \hfilneg]
{Jean Louis Calvet, Juan Cardillo, \\
Jean Claude Hennet, Ferenc Szigeti}  % in alphabetical order

\address{Jean Louis Calvet \hfill\break
Laboratoire d'Analyse et d'Architecture de Syst\`emes (LAAS)
du CNRS, Toulouse, France, LAAS-CNRS
\email{calvet@laas.fr}

\address{Juan Cardillo \hfill\break
Departamento de Sistemas de Control,
Escuela de Ingenier\'{\i}a de Sistemas,
Universidad de Los Andes, M\'erida, Venezuela}
\email{ijuan@ula.ve}

\address{Jean Claude Hennet \hfill\break
Laboratoire d'Analyse et d'Architecture de Syst\`emes (LAAS)
du CNRS, Toulouse, France, LAAS-CNRS
\email{hennet@laas.fr}

\address{Ferenc Szigeti \hfill\break
Departamento de Sistemas de Control,
Escuela de Ingenier\'{\i}a de Sistemas,
Universidad de Los Andes, M\'erida, Venezuela}
\email{fszigeti@icnet.com.ve}

\date{}
\thanks{Published May 30, 2005.}
\subjclass[2000]{93C65, 49M20, 74P20}
\keywords{Discrete Systems; relaxation; optimization}

\begin{abstract}
 In this work, the relaxation method is presented and then applied
 to the optimization of a family of discrete systems.
 Thus, a condition for the relationship between the minimum of
 the original problem and that of the relaxed problem is translated
 into an equivalent condition in the form of a minimum principle.
 Several examples illustrate this technique and the condition
 obtained.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}

\section{Introduction}
The relaxation technique can be described in a simple way for the
minimization of a function on a finite domain, $f:\{ {1,2,
\dots ,n} \} \to \Re $ as follows: To each function on the domain
$Z_n=\{1,2,\dots,n \}$,  associate another
function on the set of  probability measures as,
\begin{equation} %1.1
 \hat P = \Big\{ P = ( {p( i )} ) : 0
\le p( i ),\ \sum_{i = 1}^n {p( i )}  = 1 \Big\},
\end{equation}
in the form:
\begin{equation} %1.2
i \mapsto f( i ) \Longleftrightarrow P \mapsto E_P ( f
) = \sum_{i = 1}^n {p( i )f( i)}.
\end{equation}
Also, $\hat P$, the domain of the mathematical expectation, is a
natural extension of the domain $Z_n$. Indeed, to each $i\in Z_n$
we can associate the probability measure
\begin{equation} %1.3
P_i  = ( {0, \dots ,0,\mathop 1_i ,0 \dots ,0}),
\end{equation}
that is,
\begin{equation} %1.4
p_i ( j ) = \begin{cases}
   1, &j = i,  \\
   0, &j\ne i\,.
\end{cases}
\end{equation}
It is easy to see that the global minimum of the nonlinear problem
$\min_{i\in Z_n} f(i)$ and the minimum of the linear function $E_P(f)$
coincide under the correspondence $i_0 \mapsto P_{i_0 } $.

The extension of a nonlinear function to a linear function
using the presented technique is called the relaxation
method. For classic control systems, J. Warga and others have used the
relaxation technique since the 1960's. Surprisingly, in
recent years the use of this technique has resurfaced, but in
another context. Thus, instead of using combinatorial
optimization, we propose using relaxed problems, with the aim
of diminishing the computational complexity. In \cite{JW62,JW162}
the equivalence between the original problem and the relaxed
problem has been analyzed.

In this work, we outline the relaxation problem in the context of
the optimization from a class of discrete events processes
(discrete events systems) equipped with concave and convex cost
functions. The first one, generically, always guarantees the
equivalence between the original problem and the relaxed problem;
moreover, the optimality criterion is given in the classical form of
the minimum principle. For the second problem, the equivalence is a
consequence of the separation theorem for convex sets. The
equivalence between the original problem and the relaxed one is
also a classical minimum principle.

\section{Discrete Event Process Optimization Problem (Discrete Event System)}

Consider the optimization problem
\begin{equation} \label{ec21}
x(k+1)=A(u(k))x(k)+b(u(k)), \quad x(0)=0,
\end{equation}
where $A:\{-1,1\}^m\to  \mathbb{R}^{n\times n}$,
$b:\{-1,1\}^m \to \mathbb{R}^{n}$, are functions
with finite domain, defined on the set  $\{-1,1\}^m$ of
the vertices of the hypercube of dimension $m$. The sequence $u(0),
u(1),\dots,u(K-1)$ is called the control (decision). The
trajectory associated to the control is the sequence $x(0),
x(1),\dots,x(K)$, calculated from (\ref{ec21}). The cost function
$\Phi:\mathbb{R}^n\to \mathbb{R}$ is defined by $J(x,u)=\Phi(x(K))$.

Now, the objective is to find a sequence $u^*(0),
u^*(1),\dots,u^*(K-1)$  , such that, for the corresponding
trajectory $x^*(0), x^*(1),\dots,x^*(K)$,
\begin{equation}
J(x^*,u^*)=\Phi(x^*(K))\leq \Phi(x(K))=J(x,u),
\end{equation}
is satisfied for every $u(0), u(1),\dots,u(K-1)$.
The minimum principle is a necessary condition for the optimal
control $u^*(0), u^*(1),\dots,u^*(K-1)$.


To state our results, we need the concept of varied trajectory
\[
x_{i,u}(0), x_{i,u}(1),\dots, x_{i,u}(K)
\]
and the dual trajectory
$\varphi_{i,u}(i+1),\varphi_{i,u}(i+2),\dots, \varphi_{i,u}(K)$:
\begin{gather*}
 x_{i,u} ( k ) = x^* ( k), \quad k \le i, \\
x_{i,u} ( i+1 ) =A(u) x^* ( i)+ b (u), \\
x_{i,u} ( k+1 ) =A(u^*(k)) x_{i,u}( k)+ b (u^*(k)), \quad k>i, \\
\varphi _{i,u} ( k ) = A( {u^* ( k )})^T \varphi _{i,u} ( {k + 1} ),
\quad  k > i,\\
\varphi_{i,u} ( K ) = R\Phi ( {x^* ( K);x_{i,u} ( K )} ),
\end{gather*}
where
\begin{equation} %2.3
R\Phi(x^*(K);x_{i,u}(K))=\int_{0}^{1} \mathop{\rm grad}
\Phi\big(x^*(K)+t(x_{i,u}(K)-x^*(K))\big)\,dt.
\end{equation}
Then the optimality criterion is the inequality:
\begin{equation} %2.4
\langle\varphi_{i,u}(i+1),A(u^*(i))x^*(i)+b(u^*(i))\rangle\leq
\langle\varphi_{i,u}(i+1),A(u(i))x^*(i)+b(u(i))\rangle
\end{equation}
for every $i$ and $u$ (see the minimum principle on finite domains
\cite{JC03}).
Here the relaxation is defined using the dynamics for $i$, and the
probability measure
$P=(p(u))$, $u\in\{-1,1\}^{m}$, as  follows.
The trajectory $x_{i,P} ( 0 ),x_{i,P} ( 1), \dots ,x_{i,P} ( K )$,
is defined by
\begin{gather*}
x_{i,P} ( k ) = x^* ( k ), \quad k \le i,\\
x_{i,P} ( {i + 1} ) = \sum_u {p( u)} ( {A( u )x^* ( i ) + b( u)} ),\\
x_{i,P} ( {k + 1} ) = A( {u^* ( k )})x_{i,P} ( k ) + b( {u^* ( k )}),
\quad k > i.
\end{gather*}
The relaxation problem consists in giving conditions such that
\[
\Phi ( {x^* ( K )} ) = \min_{u( k ) \in \{ { - 1,1} \}^m }
 \Phi( {x( K )} )_{\forall k = 0,1, \dots K}
 = \min_{P \in \hat P} \Phi ( {x_{i,P} (K )} )_{\forall i}\,.
\]

\section{Main Result}
Let us observe that if $P = P_v $, this is
\[
p_v ( u ) = \begin{cases}
   1,  & u = v, \\
   0,  & u \ne v,
\end{cases}
\]
then $x_{i,P} ( k ) = x_{i,u} ( k )$.
The corresponding dual trajectory, in this case, is defined as:
\[
\varphi _{i,P} ( k ) = A( {u^* ( k )}
)^T \varphi _{i,P} ( {k + 1} ), \quad \varphi
_{i,P} ( K ) = R\Phi ( {x^* ( K);x_{i,P} ( K )} )\,.
\]

\begin{lemma} \label{lem1}
If $0\leq\sum_i{c_i x(i)}$, with $\sum_i{x(i)}=1$, for all $x(i)\geq0$,
 then $0\leq c_i$ for all $i$.
\end{lemma}
\begin{proof} Let
$
x_0 ( i ) = \begin{cases}   1, &  i = i_0,  \\
   0,  & i \ne i_0, \end{cases}$.
Then  $0 \le \sum {c_i x_0 ( i )}  = c_{i_0 }$.
\end{proof}

\begin{lemma} \label{lem2}
If $x_{i,P} ( k )$ is the trajectory of the
$i$-th relaxed problem for $P = ( {p( u )} )$
 and $x_{i,u}(k)$ is the solution of the problem
perturbed in the step $i$ by $u$, then
\begin{equation}\label{ec31}
x_{i,P} ( k ) = \sum {p( u )} x_{i,u} ( k ),\quad  k = i + 1,\dots ,K.
\end{equation}
\end{lemma}

\begin{proof}
By definition $x_{i,u} ( i+1 ) =A(u) x^* ( i)+ b(u)$ and
\[
x_{i,P} ( {i + 1} ) = \sum {p( u ) }
( {A( u )x^* ( i ) + b( u )}
) = \sum {p( u ) } x_{i,u} ( {i + 1})\,.
\]
Then (\ref{ec31}) is true for $i+1$. Using mathematical
induction assume that (\ref{ec31}) is valid for $k$. Then
for $k+1$, we have
\begin{align*}
 x_{i,P} ( {k + 1} ) & = A( {u^* ( k )} )x_{i,P} ( k ) + b( {u^* ( k )} )  \\
   & = A( {u^* ( k )} )\Big( {\sum_u {p( u )x_{i,u} ( k )} } \Big)
   + b( {u^* ( k )} )  \\
  & = \sum_u {p( u )} ( {A( {u^* ( k )} )x_{i,u} ( k ) + b( {u^* ( k )} )} )
  = \sum_u {p( u )} x_{i,u} ( {k + 1} ),
\end{align*}
which completes the proof.
\end{proof}

\begin{theorem} \label{thm1}
Assume that $\Phi $ is concave over a convex domain
$D \subset \mathbb{R} ^n $; i.e.,  for every $\lambda,\mu\geq0$, with
$\lambda+\mu=1$, and $x,y \in D$,
\[
\lambda\Phi(x)+\mu\Phi(y)\leq\Phi(\lambda x+\mu y)\,.
\]
Then the relaxed problem and the original problem have the
same minimum.
That is, if $u^*(0), u^*(1),\dots,u^*(K-1)$ is the optimal control,
then, for each fixed $i$, the $i$-th relaxed problem
$\mathop {\min }_{P \in \hat P} \Phi ( {x_{i,P} (K )} )$
has the minimum in $P_{u^* ( i )} $, defined by
\[
p_{u^* ( i )} ( u ) = \begin{cases}
   1  &u = u^* ( i ),  \\
   0  &u \ne u^* ( i ).
\end{cases}
\]
\end{theorem}

\begin{proof}
First we will prove the equalities
\begin{align*}
&\Phi ( {x_{i,P} ( K )} ) - \Phi ( {x^*( K )} )\\
& = \sum_u^{} {p( u )}\langle {\varphi _{i,P} ( {i + 1} ),(
{A( u )x^* ( i ) + b( u )}) - ( {A( {u^* ( i )}
)x^* ( i ) + b( {u^* ( i )})} )}\rangle
\end{align*}
\begin{equation}\label{ec32}
\begin{aligned}
&\Phi ( {x_{i,P} ( K )} ) - \Phi ( {x^*( K )} )\\
& = R\Phi ( {x^* ( K);x_{i,P} ( K )} )( {x_{i,P} ( K) - x^* ( K )} )\\
& = \langle {\varphi _{i,P} ( K ),x_{i,P} ( K ) - x^*( K )} \rangle\\
&= \langle \varphi _{i,P} ( K ),A( {u^* ( {K - 1} )} )x_{i,P} ( {K - 1} )
+ b( {u^* ( {K - 1} )} ) \\
&\quad - A( {u^* ( {K - 1} )} )x^* ( {K - 1} ) - b( {u^* ( {K - 1} )} )
\rangle  \\
&= \langle {A( {u^* ( {K - 1} )} )^T \varphi _{i,P} ( K ),x_{i,P} ( {K - 1} )
- x^* ( {K - 1} )} \rangle  \\
&= \langle {\varphi _{i,P} ( {K - 1} ),x_{i,P} ( {K - 1} ) - x^* ( {K - 1} )}
\rangle  =  \dots   \\
& =  \langle {\varphi _{i,P} ( {i + 1} ),x_{i,P} ( {i + 1} ) - x^* ( {i + 1} )}
\rangle   \\
& =  \langle {\varphi _{i,P} ( {i + 1} ),\sum_u^{} {p( u )} ( {A( u )x^* ( i )
+ b( u )} ) - ( {A( {u^* ( i )} )x^* ( i ) + b( {u^* ( i )} )} )} \rangle  \\
&= \sum_u^{} {p( u )} \langle {\varphi _{i,P} ( {i + 1} ),( {A( u )x^* ( i )
+ b( u )} ) - ( {A( {u^* ( i )} )x^* ( i ) + b( {u^* ( i )} )} )} \rangle
\end{aligned}
\end{equation}
Now suppose that the $i$-th relaxed problem has optimum  $P_{u^*( i )} $.
Then
\[
0 \le \Phi ( {x_{i,P} ( K )} ) - \Phi ({x^* ( K )} )\,.
\]
By the formula demonstrated previously,
\[
0 \le \sum_u^{} {p( u )} \langle {\varphi
_{i,P} ( {i + 1} ),( {A( u )x^* (
i ) + b( u )} ) - ( {A( {u^*
( i )} )x^* ( i ) + b( {u^*
( i )} )} )} \rangle
\]
for every $P$, as well as  for $P = P_v $
  and $\varphi _{i,P_v} ( {i + 1} ) = \varphi _{i,v} (
{i + 1} ) $.
By Lemma \ref{lem1},
\begin{equation}\label{ec33}
 \langle {\varphi
_{i,v} ( {i + 1} ),A( u^*(i) )x^* ( i
) + b( u^*(i) )} \rangle \leq \langle
{\varphi _{i,v} ( {i + 1} ),A( u(i) )x^*
( i ) + b( {u ( i )}  )}
\rangle
\end{equation}
This is the minimum principle, which holds without assuming
 concavity or convexity of  $\Phi$.

Now, let us suppose that the minimum principle is fulfilled.
For a probability measure $P = ( {p( v )})$, multiply the
inequality (\ref{ec33}) by $p(v)$ and sum over all
$v \in \{ { - 1,1} \}^m $. Then
\begin{align*}
0 &\le \sum_v^{} {p( v )} \langle {\varphi
_{i,v} ( {i + 1} ),( {A( u )x^* (
i ) + b( u )} ) - ( {A( {u^*
( i )} )x^* ( i ) + b( {u^*
( i )} )} )} \rangle  \\
&= \sum_v^{} {p( v )} \langle {\varphi _{i,v} ( {i + 1} ),x_{i,v} ( {i + 1} )
- x^* ( {i + 1} )} \rangle  =  \dots  \\
&= \sum_v^{} {p( v )} \langle {\varphi _{i,v} ( K ),x_{i,v} ( K )
- x^* ( K )} \rangle  = \sum_v^{} {p( v )} ( {\Phi ( {x_{i,v} ( K )} )
 - \Phi ( {x^* ( K )} )} ) \\
& \le \Phi ( {\sum_v^{} {p( v )} x_{i,v} ( K )} ) - \sum_v^{} {p( v )}
\Phi ( {x^* ( K )} ) = \Phi ( {x_{i,P} ( K )} ) - \Phi ( {x^* ( K )} ),
\end{align*}
Therefore, the minimum of the relaxed problem is attained at
$P_{u^*( i )} $. Hence the equivalence between the original
problem and the relaxed problem is proven.
\end{proof}

\section{Examples}
\subsection*{Example 4.1}
Consider the following system, linear in $x( k )$,
\begin{equation} %4.1
x(k+1)=A(u(k))x(k)+b(u(k)), \quad x(0)=\xi
\end{equation}
with the linear cost function
\[
\Phi ( {x( K )} ) = \langle {\varphi ,x( K )} \rangle\,.
\]
The linear application $\langle {\varphi ,x( K )}
\rangle $ is concave (it is also convex); therefore, the
original problem and the relaxed problem have the same minimum by
the previous theorem. The dual equations are also the same,
since the initial condition $\varphi ( K ) = \varphi $
is independent of the perturbation or of the relaxation. Hence in
this case the minimum principle can be expressed in the classic
 form
\[
\langle {\varphi ( {i + 1} ),A( {u^* ( i
)} )x^* ( i ) + b( {u^* ( i
)} )} \rangle  = \mathop {\min }_u
\langle {\varphi ( {i + 1} ),A( u )x^*
( i ) + b( u )} \rangle
\]
or using the  Hamiltonian form
\begin{equation}
H(x^*(i),u^*(i),\varphi(i+1))\leq
H(x^*(i),u,\varphi(i+1)).
\end{equation}
Note that the Hamiltonian is defined as
$H( {x,u,\varphi }) = \langle {\varphi ,A( u )x + b( u)} \rangle $.

\subsection*{Example 4.2}
Consider the same linear system with
\[
x( {k + 1} ) = A( {u( k )}
)x( k ) + b( {u( k )}),x (0) = \xi
\]
with positive semi-definite quadratic cost function
\[
\Phi ( {x( K )} ) = \langle {Qx( K
),x( K )} \rangle
\]
and the same considerations for the perturbed system and for the
relaxed system.
Then
\begin{align*}
&\Phi ( {x_{i,P} ( K )} ) - \Phi ( {x^*( K )} ) \\
&= \sum_u {p( u )}\langle {\varphi _{i,P} ( {i + 1} ),A( u
)x^* ( i ) + b( u ) -  A( {u^*( i )} )x^* ( i ) - b( {u^*
( i )} )} \rangle .
\end{align*}
Suppose that $v$ is such that
\[
p_{i,v} ( u ) =  \begin{cases}
   1, & u = v,  \\
   0, & u \ne v\,.
\end{cases}
\]
If $ 0 \le \Phi ( {x_{i,v} ( K )} ) - \Phi( {x^* ( K )} )$, (minimum),
$\Phi $ is smooth in $ x^* ( K )$ and it is convex, then the tangent
hyperplane that separates the convex body
$\{ {x:\Phi ( x ) \le \Phi ( {x^* ( K )} )} \}$  at $x^*( K )$ containing
the points $x_{i,v} ( K )$, also separates the convex hull of the
points $\{ {x^*( K ),x_{i,v} ( K ),v \in \{ { - 1,1}\}^m } \}$;
that is, the points $x_{i,P} ( K) = \sum_v {p( v )}  x_{i,v} (K )$.

Hence, $ \Phi ( {x_{i,P} ( K )} ) \ge\Phi ( {x^* ( K )} )$. Thus
\[
0 \le \Phi ( {x_{i,P} ( K )} ) - \Phi (
{x^* ( K )} ) = \Phi ( {\sum_v^{}
{p( v )}  x_{i,v} ( K )} ) -
\Phi ( {x^* ( K )} )
\]
and the two problems are equivalent.
The previous idea is illustrated in Figure 1.

\begin{figure}[ht]
\begin{center}
\setlength{\unitlength}{.8mm}
\begin{picture}(102,80)(8,20)
\qbezier(30,54)(19,48)(8,36)
\qbezier(8,36)(25,38)(32,20)
\qbezier(32,20)(67,50)(104,30)
\qbezier(104,30)(106,38)(110,40)
\qbezier(110,40)(101,53)(84,60)
\drawline(23,60)(50,38)(90,64)(68,78)(23,60)
\put(45,48){$x^*(K)$}
\drawline(60,60)(97,80) \drawline(95,80.5)(97,80)(96.2,78.2)
\put(97,76){$x_P(K)$}
\drawline(60,60)(30,82)  \drawline(30.7,80.2)(30,82)(32,82)
\put(20,76){$x_{P_v}(K)$}
\drawline(60,60)(60,100) \drawline(59,98)(60,100)(61,98)
\end{picture}
\end{center}
\caption{A geometric interpretation of
$\langle Qx,x\rangle\leq \langle Qx^*(K),x^*(K)\rangle$}
\end{figure}

\subsection*{Conclusions}
Most discrete optimization problems do not escape from the
use of combinatorial optimization; hence these problems are
sensitive to computational complexity. Thus, in this work we
recapture the idea of using the relaxation method over finite
domains. This leads to the transformation of the discrete problems
into a classic formulation, which provides a way to solve the
optimization problem. Here, a particular family of discrete
processes is considered, linear in the state, equipped with
concave or convex cost functions. We obtain equivalence between
the relaxed optimum and the optimum of the original problem, in
the form of minimum principle. For linear cost function, the
obtained condition is a classical minimum principle.


\begin{thebibliography}{99}

\bibitem{AB00}
Amir Beck and Marc Teboulle; Global Optimality Conditions for
Quadratic Optimization Problems with Binary Constraints, SIAM
Journal on Optimization, vol. 11, no. 1, pp. 179-188, 2000.

\bibitem {JC03}
Juan Cardillo; Optimización de Sistemas a Eventos Discretos, Tesis
de Doctorado, 2003.

\bibitem {JL01}
J. B. Lasserre; An explicit exact SDP relaxation for nonlinear 0-1
programs, Rapport LAAS No00475 8th International Conference on
Integer Programming and Combinatorial Optimization (IPCO'VIII),
Utrecht (Pays-Bas), 13-15 Juin 2001, Lecture Notes in Computer
Sciences 2081, Eds. K.Aardal, B.Gerards, 2001, Springer, ISBN
3-540-42225-0, pp. 293-303

\bibitem {JL03}
J. B. Lasserre , T. Prieto; Rumeau SDP Vs. LP Relaxations for
the moment approach in some performance evaluation problems,
Rapport LAAS No. 03125, Mars 2003, 16p.

\bibitem {JW62}
J. Warga; Relaxed Variational Problem, Journal of Mathematical
Analysis and Applications, 4, 111-128, 1962.

\bibitem {JW162}
J. Warga; Necessary Condition for Minimum in Relaxed
Variational Problem, Journal of Mathematical Analysis and
Applications 4, 129-145, 1962.

\bibitem {JW72}
J. Warga; Optimal Control of Differential and Functional
Equations, Academic Press 1972, Card Number 72-87229.

\end{thebibliography}
\end{document}
