
\documentclass[twoside]{article}
\usepackage{amssymb, amsmath, graphicx }  
\pagestyle{myheadings}

\markboth{\hfil Cauchy problem for derivors \hfil EJDE--2001/32}
{EJDE--2001/32\hfil J-F Couchouron,  C. Dellacherie, \& M. Grandcolas \hfil}
\begin{document}
\title{\vspace{-1in}\parbox{\linewidth}{\footnotesize\noindent
{\sc  Electronic Journal of Differential Equations},
Vol. {\bf 2001}(2001), No. 32, pp. 1--19. \newline
ISSN: 1072-6691. URL: http://ejde.math.swt.edu or http://ejde.math.unt.edu
\newline ftp  ejde.math.swt.edu \quad ftp ejde.math.unt.edu (login: ftp)}
 \vspace{\bigskipamount} \\
 %
 Cauchy problem for derivors in finite dimension 
 %
\thanks{ {\em Mathematics Subject Classifications:} 34A12, 34A40, 34A45, 34D05.
\hfil\break\indent
{\em Key words:} derivor, quasimonotone operator, accretive operator,
 Cauchy problem, \hfil\break\indent
 uniqueness condition.
\hfil\break\indent
\copyright 2001 Southwest Texas State University. \hfil\break\indent
Submitted December 4, 2000. Published May 8, 2001.} }
\date{}
%
\author{
Jean-Fran\c{c}ois Couchouron,  Claude Dellacherie, \\
\& Michel Grandcolas}
\maketitle

\begin{abstract} 
In this paper we study the uniqueness of solutions to 
ordinary differential equations  which  fail to satisfy 
both the accretivity condition and the uniqueness conditions of
Nagumo, Osgood and Kamke.
The evolution systems  considered here are governed by a 
continuous operator $A$ defined on $\mathbb{R}^N$ such that 
$A$ is a derivor; i.e., $-A$ is quasi-monotone with respect 
to $(\mathbb{R}^{+})^N$.
\end{abstract}

\section{Introduction} 
For $T>0$, we study the Cauchy Problem (CP)
$$ \begin{gathered}
\dot u(t) +Au(t)=f(t),\quad t\in [0,T]\cr
u(0)=u_{0}, \end{gathered} \eqno(1.1) 
$$
where $A$ is a continuous operator
on $ \mathbb{R}^N$ and $f$  belongs to
$L^{1}([0,T]:\mathbb{R}^N)$.
We require in addition that $A$ be a derivor on $ \mathbb{R}^N$ 
(or equivalently that $-A$ be quasi-monotone  
with respect to the cone $(\mathbb{R}^{+})^N$)
and have an additional order property (see Assumption \textrm{H1T} 
in Section 2).
The existence of local solutions of (1.1) 
is proved by standard arguments (see \cite{w1} and Lemma 4.2).
For instance, in the continuous case, this local existence comes from
Peano's Theorem.
So the problem is essentially to prove
 the uniqueness of a local solution and the existence of global solutions.
An important remark is that the identity operator minus the limit of
infinitesimal generators of increasing
semigroups is a  derivor on the domains of the operators (see remark 2.1.d).
The aim of this paper consists of giving a special converse of this previous 
property. General studies of  evolution problems
governed by derivors  can be found in \cite{b2,d1,d2,w1} (for
existence of extremal solutions of differential inclusions in $ \mathbb{R}^N$)
and in \cite{h2} for the behavior  of the flow (stability, etc.) in the 
regular case: $A$ is $C^{1}$.
This work establishes uniqueness for the Cauchy Problem and complements
previous studies.

Let us point out that derivors often occur in the theory of production 
processes in Economics (for cooperative systems, see \cite{d3,h3}), in 
Chemistry \cite{h1}, and in Biology \cite{h1}.
Our uniqueness result given in the sequel applies to these situations.
 Notice also that the additional order property, namely the
existence of uniform ascents (see Definition 2.2),
has obvious interpretations in applications and may be considered
as a special extension of the submarkovian property (see Remark 2.2.3
and \cite{b1}).
Nevertheless the notion of uniform ascents 
is a new concept built from the concept of progressions in \cite{d3}.
This ascent notion which extends the usual submarkovian property
seems to lead naturally to the maximum 
principle worked in \cite{d3}. Finally, we emphasize that 
the ascent notion is the key to obtain a suitable 
increasing resolvent (see Proposition 2.4 and Theorem 3.2).

In this paper, the operator $A$ satisfies neither uniqueness
conditions such as those given by Nagumo, Osgood and Kamke \cite{c2,k1,k2}
nor accretivity conditions, even in a generalized sense as in
\cite{c2,c4,k2}.
We will exhibit in Section 5 a simple example of operator on 
$\mathbb{R}^{2}$ which satisfies all our conditions and none of the
 uniqueness conditions quoted above. Consequently our framework is 
not included in the submarkovian case, since a continuous
submarkovian derivor is accretive in 
($\mathbb{R}^N,\|\cdot \|_{\infty}$). Moreover, based on our analysis, it
appears that a simple natural-order property can
replace a classical Lipschitz condition for uniqueness in the Cauchy
Problem.

Uniqueness  and  order-preserving  dependence
with respect to the initial value $u_{0}$ are stated in Theorem 3.1.  
In the case $f=0$,  Theorem 3.2 guarantees the existence of a global solution 
and a special form of the Crandall-Ligget exponential formula  
\cite[p. 319]{c5} involving suitable selections of  the multi-valued operators
$(I+\lambda A)^{-1}$ (while in \cite{d1}
$(I+\lambda A)^{-1}$ is single valued and Lipschitz).

This paper is organized as follows.
Section 2 is devoted to general definitions and preliminaries.
The main results are stated in Section 3, while the proofs
are given in the next section.
 Section 5 gives an example in $ \mathbb{R}^{2}$
which demonstrates the need for Theorems 3.1 and 3.2. 
Some remarks about the asymptotic behavior follow in Section 6.

\section{Generalities} 

We supply $\mathbb{R}^N$ with the usual partial order relation
 $u\leq v$ if $u^i\leq v^i$ for all $i=1,\dots ,N$,
where $u^i$ is the $i$-th component. The vector in $\mathbb{R}^N$ whose
components are $C,\dots,C$ is denoted by $C$. 
The symbol $\|\cdot \|$ stands for any norm in $\mathbb{R}^N$. 
The symbol $\mathbb{N}^{*}$ denotes the set of integers greater than zero.

\paragraph {Definition 2.1}
We say that the map $A$ is a {\bf derivor} on $\mathbb{R}^N$ 
 if it satisfies the condition
\begin{enumerate}
\item[(i)] For each  $(u,v)\in (\mathbb{R}^N)^{2}$ 
and each $i\in \{1,\dots ,N\}$
$$(u\leq v \hbox{ and } u^i=v^i) \quad \hbox{implies that}\quad 
A^iu\geq A^iv\eqno (2.1)$$
\end{enumerate}
We say that the map $A$ is a {\bf moderate derivor} 
(resp. a {\bf strong derivor})
if, in addition to (i), it satisfies
\begin{enumerate}
\item[(ii)] For each $u \in \mathbb{R}^N$, there exist  
$u_{1},u_{2} \in \mathbb{R}^N$
(resp. two sequences $(u_{k})_{k}\to +\infty$, $(v_{k})_{k}\to -\infty$)
such that $u_{1}\leq u \leq u_{2}$ and $Au_{1}\leq 0 \leq Au_{2}$ 
(see \cite{c2}).
(resp. $ \lim_{k \to +\infty}  Au_{k}=+\infty$
and $ \lim_{k \to +\infty}  Av_{k}=-\infty$).
\end{enumerate}
The previous notation  $ \lim_{k \to +\infty}  w_{k}=+\infty$ in $\mathbb{R}^N$
may be interpreted to mean that $ \lim_{k \to +\infty}  w_{k}^{j}=+\infty$ in 
$\mathbb{R}$ for each $j \in \{1,\dots ,N\}$.

 The derivor notion coincides with the notion
of  quasimonotone operator on $\mathbb{R}^N$, except the sign
(see \cite{b2}, \cite[p. 91]{d1}, \cite{k1}). 
In these references, $-A$ is  
quasimonotone with respect to $(\mathbb{R}^{+})^N$ if
$$(u\leq v \hbox{ and } x^{*}(u)=x^{*}(v))  
\quad\hbox{implies that}\quad x^{*}(-Au)\leq x^{*}(-Av)
\eqno (2.2)$$
for  any linear positive form $x^{*}$ on $\mathbb{R}^N$.
Hence, $A$ is a derivor, because
if $x^{*}$ is a  linear positive form on $ \mathbb{R}^N$, $x^{*}$
is a linear combination with positive coefficients
of coordinate forms on $\mathbb{R}^N$.

\paragraph{Remark 2.1}
a) Condition (i) in the definition of derivor is automatically fulfilled
 for any operator $A$ from $\mathbb{R}$ to $\mathbb{R}$, but it is not in
the case of Condition (ii).
A special case where (ii) holds for
an operator $A$ from 
$\mathbb{R}$ to $\mathbb{R}$ is the case  where
$A$ is a non-decreasing  operator
such that there is $v\in \mathbb{R}$ satisfying $Av=0$.
\\
b) When $A$ is a linear derivor, the reader
can check that Condition (ii)  is equivalent to:
 there is $u\geq 1$ satisfying $Au\geq 0$. 
\\
c) An equivalent form of definition 2.1.(i) is:
  $ A^i $ is decreasing with respect to $ x^{j}$ for each
$i\not= j$ with $i,j \in \{1,\dots ,N\}$ (see \cite{h3}).
\\
d) If $P$ is an increasing operator 
on $\mathbb{R}^N$, then $A=I-P$ is a derivor. 
Therefore, when $(P_{t})$ is an increasing semi-group on 
$\mathbb{R}^N$, then $A_{t}=\frac{I-P_{t}}{t}$ 
with $t>0$ is a derivor and so is $A_{0}$,
defined by $A_{0}u=\lim_{t\downarrow 0}A_{t}u$ 
(on the domain where this limit exists).


\subsection*{Ascents}
We denote by $V_{K}(u_{0})$ the set of compact neighborhoods of $u_{0}$.

\paragraph{Definition 2.2} We say that a derivor $A$ 
has a (strict) {\bf uniform ascent}
at $u_{0}$ if there are   $V \in V_{K}(u_{0})$ and  a
sequence $(v_{k})$ in $\mathbb{R}^N$ convergent to 0 such that
$(v_{k}^i)_{k \in \mathbb{N}^{*}}$ is strictly
decreasing for all $i=1,\dots ,N$ and
$$\min_{i\in \{1,\dots ,N\}}(A^i(u+v_{k})-A^iu)>0\eqno (2.3)$$ 
for each $k\in \mathbb{N}^{*}$ and each $u \in  V$.

\paragraph{Remark 2.2} 
1) In terms of production operator
($A^iu$  is the production of the  $i$-th input of
the product $u$),
the uniform ascent property at  $u_{0}$ means
that in a neighborhood of $u_{0}$ it is possible  to
increase the level of production by means of small uniform augmentations
around  $u_{0}$.
\\
2) The notion of uniform ascent plays
a crucial part  in this work.
In our opinion, this concept is new, but it was inspired
from the progression notion carried out in \cite{d3}.
\\
3) The uniform ascent property   may be connected to the submarkovian property namely,
$$A(u+C)-Au \geq 0 \eqno (2.4)$$
for all $u\in \mathbb{R}^N$ and all $C\in \mathbb{R}^{+}$.
Notice that a submarkovian derivor in $\mathbb{R}^N$ is accretive
in ($\mathbb{R}^N,\|\cdot \|_{\infty}$);
the verification of this claim is left to the reader.
\\
4) The following  dual notion of
uniform ascent at $u_{0}$ provides again  the  results of Section 3:
There are   $V \in V_{K}(u_{0})$
and a
sequence $(v_{k})$ in $\mathbb{R}^N$ convergent to 0 such that
$(v_{k}^i)_{k \in \mathbb{N}^{*}}$ is strictly
increasing for all $i=1,\dots ,N$ and
$$\sup_{i\in \{1,\dots ,N\}}(A^i(u+v_{k})-A^iu)<0$$ 
for each
$k\in \mathbb{N}^{*}$ and each $u \in  V$.
\\
5) In the case $A=I-P$ with an increasing operator  $P$,
(see Remark 2.1.d)), Definition 2.2 means that the required sequence
$(v_{k})$  satisfies
$$P^i(u+v_{k})-P^i(u)<v_{k}^i$$
 for all $i=1,\dots ,N$, for each
$k\in \mathbb{N}^{*}$ and all $u\in V$.

\subsection*{Assumptions}
In the sequel, by hypothesis \textbf{H1T} stands for the following three 
conditions:
\begin{itemize}
\item $A$ is a continuous derivor on $\mathbb{R}^N$

\item $A$  is a moderate derivor with uniform ascent at each $u_{0}$

\item $f\in L^{1}([0,T];\mathbb{R}^N)$.
\end{itemize}
When necessary, we will make precise the arguments involved for 
the Cauchy Problem (1.1) as follows:
$CP(A,f,u_{0})$ or $CP(A,f,u_{0},T)$ for the domain $[0,T]$, and 
$CP(A,f,u_{0},+\infty)$ for $[0,+\infty[$.

The hypothesis \textbf{H2T} stands for the following condition (cf.\ Section 6).
\begin{itemize}
\item For each $u_{0}\in \mathbb{R}^N$, each local solution of
$CP(A,f,u_{0})$ can be extended to  a solution   on $[0,T]$.
\end{itemize}
It is well-known that sublinearity at infinity
($\| Au \| \leq a(\| u \| +1)$)
guarantees H2T, \cite{h2}. Moreover we will see in Theorem 3.2 that
in the autonomous case
 $f=0$, H1T implies H2T.

\subsection*{Resolvents for a moderate continuous derivor}
In this section, we assume that $A$ is a continuous moderate 
derivor. When $B$ is a continuous derivor, we have
the following Theorem \cite{d3}.

\paragraph{Theorem 2.4}
{\sl Let $u$ and $w$ be fixed in $\mathbb{R}^N$. If the  system 
$$\begin{gathered}v\geq u\cr
 Bv \geq w\end{gathered}\eqno (2.5)$$
with $v$ as unknown quantity has a solution then it has a smallest 
solution. Analogously, the  system 
$$\begin{gathered} v\leq u\cr
 Bv \leq w\end{gathered}\eqno (2.6)$$
has a largest solution $v$ whenever it has a solution.
In addition, in these two systems
 the constraints are optimal,
i.e. $\forall i \in \{1,\dots ,N\}$, $v^i= u^i$ or ${Bv}^i = w^i$.}
\medskip

In the case where $A$ is accretive, the resolvent operators 
$(I+\lambda A)^{-1}$, $\lambda >0$
are single-valued contractions. But
in our case $(I+\lambda A)^{-1}$ is a priori multi valued. Nevertheless
it is possible to define
 suitable  selectors $J_{\lambda}$ of $(I+\lambda A)^{-1}$ 
as claimed in the following lemma.

\paragraph{Lemma 2.4}
{\sl Let $A$ be a moderate continuous derivor on $\mathbb{R}^N$.
Let  $u$ in $\mathbb{R}^N$, $\lambda \in \mathbb{R}^{+}$
and $v$ a solution of
$$ \begin{gathered} v\leq u\cr
 Av \leq 0.\end{gathered}\eqno (2.7)$$
Then  the system  
$$\begin{gathered}w\geq v\cr
(I+\lambda A)w \geq u\end{gathered}\eqno (2.8)$$
has a  smallest  solution  denoted by
 $J_{\lambda,v}u$.
Moreover we have 
$$J_{\lambda,v}u\in (I+\lambda A)^{-1}(u).\eqno (2.9)$$
}
\paragraph{Proof.}
According to  (ii) in definition 2.1, Systems (2.7) and (2.8)
have solutions. Let $v$ be a solution of (2.7).
Since $B=I+\lambda A$ is a continuous derivor,
the existence of the smallest solution
$J_{\lambda,v}u$ of (2.8) is guaranteed
by Theorem 2.4.

It remains to prove 
 $$(I+\lambda A)J_{\lambda,v}u=u.\eqno (2.10)$$
Since the constraints are optimal in (2.8),
we have for each $i\in \{1,\dots ,N\}$,
$((I+\lambda A)J_{\lambda,v}u)^i=u^i$
or $(J_{\lambda,v}u)^i=v^i$.
Thus we have 
to prove $((I+\lambda A)J_{\lambda,v}u)^i=u^i$
 when $(J_{\lambda,v}u)^i=v^i$.
So assume $(J_{\lambda,v}u)^i=v^i$
for some $i\in \{1,\dots ,N\}$.
Then Relation (2.1) and $J_{\lambda,v}u\geq v$ yield
 $$(AJ_{\lambda,v}u)^i\leq (Av)^i.\eqno (2.11)$$
Now (2.11) and (2.7) provide
$$
(J_{\lambda,v}u)^i+\lambda (AJ_{\lambda,v}u)^i
=v^i+\lambda (AJ_{\lambda,v}u)^i
\leq u^i+\lambda (AJ_{\lambda,v}u)^i
\leq u^i.
$$
 Therefore, $((I+\lambda A)J_{\lambda,v}u)^i\leq u^i$. But 
 from (2.8) we have $((I+\lambda A)J_{\lambda,v}u)^i\geq u^i$.
Finally  (2.10) is proved. \hfill$\square$\medskip

In the same way, let $v$ be a solution of 
$$\begin{gathered} v\geq u\cr
 Av \geq 0.\end{gathered}\eqno (2.12)$$
Then the system
$$\begin{gathered} w\leq v\cr
(I+\lambda A)w \leq u\end{gathered}\eqno (2.13)$$
has a  largest  solution $w={\tilde J}_{\lambda,v}u$.
Moreover ${\tilde J}_{\lambda,v}u$  satisfies again (2.9).

Set $J_{\lambda}u=J_{\lambda,v}u$ (resp. $J_{\lambda}u={\tilde
J}_{\lambda,v}u$) for an arbitrary $v$
satisfying (2.7) (resp. (2.12)). 
Let us notice that $J_{\lambda}$ is defined on 
$D_{v}=\{u \in \mathbb{R}^N, u\geq v\}$ 
(resp. $D_{v}=\{u \in \mathbb{R}^N, u\leq v\}$).
The family of selectors
$(J_{\lambda})_{\lambda\geq 0}$ of $(I+\lambda A)^{-1}$ 
is said to be the {\bf resolvent} of $A$.

\paragraph{Definition 2.4}
For $u$ given, the notation  ${\overline u}$ (resp. $ {\hat u}$ ) stands 
for the largest solution of (2.7) (resp. the smallest
solution of (2.12)). \smallskip

Thanks to  Theorem 2.4, such extremal elements ${\overline u}$ and 
$ {\hat u}$ exist.
Furthermore we have clearly
$$u\leq v  \Longrightarrow ({\overline u}\leq {\overline v} 
\quad\mbox{and}\quad {\hat u}\leq {\hat v})\eqno (2.14) $$
and
$${\overline {\overline u}}={\overline u}\quad\mbox{and}\quad
 {\hat {\hat u}}={\hat u}.\eqno (2.15)$$
The resolvent operators satisfy the following properties.

\paragraph{Proposition 2.4}
{\sl For a given $u\in \mathbb{R}^N$,
let $v,v'\in \mathbb{R}^N$ satisfying (2.7) and $w,w'
\in \mathbb{R}^N$ satisfying (2.12). Then \begin{enumerate}
\item[(a)] The map $J_{\lambda}$ is  single-valued and increasing  
on $D_{v}$.

\item[(b)] We have
$$v\leq J_{\lambda}u \leq w\eqno (2.16)$$
In particular
$${\overline u}\leq J_{\lambda}u \leq {\hat u}\eqno (2.17)$$

\item[(c)] If $Au\geq 0$ (resp. $Au\leq 0$), then 
$AJ_{\lambda}u\geq 0$ (resp. $AJ_{\lambda}u\leq 0$)
for each $\lambda \geq 0$.

\item[(d)] If $Au\geq 0$, then $\lambda \to J_{\lambda}u$ is decreasing 
on $\mathbb{R}^{+}$ (and increasing if $Au\leq 0$).

\item[(e)] We have $J_{\lambda,v}u\leq {\tilde J}_{\lambda,w}u$.
In particular 
$J_{\lambda,{\overline u}}u\leq {\tilde J}_{\lambda,{\hat u}}u$.

\item[(f)] $J_{\lambda,{\overline u}}{\overline u}={\overline u}$
and ${\tilde J}_{\lambda,{\hat u}}{\hat u}={\hat u}$.

\item[(g)] If $v\leq v'$ and $w\leq w'$, then
$J_{\lambda,v}u\leq J_{\lambda,v'}u$ and
${\tilde J}_{\lambda,w}u\leq {\tilde J}_{\lambda,w'}u$.
\end{enumerate}
} %end proposition

\paragraph{Proof.}
We prove only results (a),(b),(c),(d) in the case 
$J_{\lambda}=J_{\lambda,v}$.\\
(a) Let $u\geq w$. Then $J_{\lambda,v}u$ satisfies
$$\displaylines{J_{\lambda,v}u\geq v\cr
(I+\lambda A)J_{\lambda,v}u \geq u \geq w.}$$
Hence we get (a) from minimality of $J_{\lambda,v}w$ for the previous 
system. \\
(b) Inequality $J_{\lambda,v}u\geq v$ 
is required in the definition of $J_{\lambda,v}$.
Since $w$ satisfies $$\displaylines{w\geq v\cr
(I+\lambda A)w \geq w\geq u,}$$
 we get (b) from minimality of $J_{\lambda,v}u$ in the previous system.
\\
(c) Let $Au\geq 0$ and $\lambda \geq 0$. We have
$$\displaylines{(I+\lambda A)u\geq u\cr
 u \geq {v}}$$ so $J_{\lambda,v}u \leq u$.
Hence $u=J_{\lambda,v}u +\lambda AJ_{\lambda,v}u \leq u+\lambda
AJ_{\lambda,v}u$ and so $AJ_{\lambda}u \geq 0$.
\\
(d) Let  $0\leq \lambda \leq \mu$. Then
$u=(I+\lambda A)J_{\lambda,v}u \leq (I+\mu A)J_{\lambda,v}u$.
Since we have $J_{\lambda,v}u \geq v$,
from minimality of $J_{\mu,v}u$ for these two constraints,
it follows that $J_{\mu,v}u \leq J_{\lambda,v}u$.
The proof is similar when $Au\leq 0$.
\\
(e) Since $Av\leq 0$ and $Aw\geq 0$, from (c) it follows
$AJ_{\lambda,v}u\geq 0$ and $A{\tilde
J}_{\lambda,w}u\leq 0$. Hence (e) results from
$J_{\lambda,v}u+\lambda AJ_{\lambda,v}u=u={\tilde
J}_{\lambda,w}u+\lambda A{\tilde J}_{\lambda,w}u$.
\\
Properties (f) and (g) result immediately from the definitions.
\hfill$\square$

\subsection*{Solution of (1.1)}
We recall that a (local) \textbf{strong solution} of (1.1) 
is a continuous  function $u$ defined on 
$[0,\theta) \subset [0,T],\theta >0$ such that
 $u(t)=u_{0}+\int_{0}^{t}(-Au(\tau)+f(\tau))d\tau$
for $t\in [0,\theta)$.
In the sequel we only look for (local) strong  solutions of (1.1).

A {\bf maximal} (resp. {\bf minimal}) {\bf solution } of (1.1)  
is the strong solution  $u=S_{A,f}^{\text{max}}(t)u_{0}$ 
(resp. $u=S_{A,f}^{\text{min}}(t)u_{0}$) of (1.1) defined as follows: 
\\
(i) The interval of definition $[0,\theta)$
of $S_{A,f}^{\text{max}}(.)u_{0}$ (resp. $S_{A,f}^{\text{min}}(.)u_{0}$) is maximal 
on $[0,T]$, i.e. there is no solution $v \not= u$,
such that $v=u$ on $[0,\theta]$.
\\
(ii) For each solution $v$ of (1.1) on $[0,T_{1})\subset [0,T]$, we have
 $v(t)\leq S_{A,f}^{\text{max}}(t)u_{0}$ 
(resp. $v(t)\geq S_{A,f}^{\text{min}}(t)u_{0}$) on $[0,\inf (\theta, T_{1}))$.

\section{Main results}

For the following results, we assume the hypothesis \textrm{H1T} defined 
in Section 2.

\paragraph{Theorem 3.1}
{\sl The problem $CP(A,f,u_{0})$ has a unique local solution  denoted by 
$S_{A,f}(t)u_{0}$ (or $S_{A}(t)u_{0}$ if $f=0$) and
defined on a maximal interval  $[0,T_{\max}) \subset [0,T]$.
Moreover 
 if $u_{0}\leq u_{1}$ in $\mathbb{R}^N$ and if $f\leq g$ 
in  $L^{1}([0,T],\mathbb{R}^N)$ then
$S_{A,f}(t)u_{0}\leq S_{A,g}(t)u_{1}$
on the common domain of existence of these  two solutions.} \medskip

The next result concerns the autonomous case, for which 
we have global solutions.

\paragraph{Theorem 3.2}
{\sl  Assume that $f\equiv 0$. Then
$S_{A}(.)u_{0}$ is defined on  the whole interval $[0,T]$ 
and
 $$S_{A}(t)u_{0}=\lim_{n\to +\infty}J_{t/n}^{n}(u_{0}),\eqno (3.1)$$
for $t\in [0,T]$, where  $J_{\lambda}=J_{\lambda,{\overline u}_{0}}$ 
is as defined in Section 2.}

This is an exponential Crandall-Liggett's type formula, but 
 here  $(I+{\lambda}A)^{-1}$ is a priori multi-valued.
In the  non-autonomous case $f \not\equiv 0$, it is possible to exhibit 
a formula as (3.1) which gives  the solution of (1.1) as a limit of a
 discrete scheme. But such a formula is more complicated than (3.1)
and thus, is not of a particular  interest.
When $f \in  L^{\infty}([0,T], \mathbb{R}^N)$, from Theorem 3.1 and 
Theorem 3.2, we can deduce that $CP(A,f)$
has a solution on $[0,T]$ if $A$ is a strong continuous derivor
(see def. 2.1). Unfortunately, we do not know what happens in the 
general case  $f \in L^{1}([0,T], \mathbb{R}^N)$
without extra assumptions.

\section{Proofs}
The proof of Theorem 3.1 follows immediately from the three lemmas
below. 

\paragraph{Lemma 4.1}
{\sl Let $A$ be a continuous derivor. Let  $V$ be an element of 
$V_{K}(u_{0})$. Then the operator $B$ defined by
 $$B(v):=\inf_{w\in V}[A(w+v)-A(w)]\eqno (4.1)$$
is a  continuous derivor.}

\paragraph{Proof.}
1.) Let us show that $B$ is a derivor on $\mathbb{R}^N$.
If $u\leq v$ and $u^i=v^i$
for some $i\in \{1,\dots ,N\}$,
 we have $u+w\leq v+w$ and $(u+w)^i=(v+w)^i$ 
for each $w\in V$. Since $A$ is a derivor, it follows
$A^i(u+w)-A^iw\geq A^i(v+w)-A^iw$.
Thus 
$$\inf_{w\in V}(A^i(u+w)-A^iw)\geq \inf_{w\in V}(A^i(v+w)-A^iw).$$
So $B^iu\geq B^iv$ for $u\leq v$ and $u^i=v^i$.
\\
2.) At this stage we will show that $B$ is continuous on $\mathbb{R}^N$.
According to (4.1), $B$ is clearly upper semi-continuous 
(see \cite[pp. 132-137]{c1}).
So it is enough to prove that for each $i\in \{1,\dots ,N\}$, $B^i$
is lower semi-continuous on $\mathbb{R}^N$. Fix $i\in \{1,\dots ,N\}$.
For each $u\in \mathbb{R}^N$, thanks to the 
compactness of $V$, there exists $\chi(u)$ (which depends on $i$) in $V$
 satisfying
$$B^i u=A^i( u+{\chi(u)})-A^i({\chi(u)})\eqno (4.2)$$
We have to prove now that $B^i$ is lower semi-continuous,
that is $(B^i)^{-1}(]-\infty,\alpha])$ is closed for all 
$\alpha \in \mathbb{R}$. To this end, consider $\alpha \in \mathbb{R}$
 and a sequence  $(u_{k})_{k \in \mathbb{N}^{*}}$
of elements of $\mathbb{R}^N$ such that  $\lim u_{k}=u_{\infty}$
and $B^i(u_{k})\leq \alpha$. It suffices to prove  
$B^i(u_{\infty})\leq \alpha$.

By contradiction, let us suppose  $B^i(u_{\infty})> \alpha$.
Without loss of generality, thanks to the compactness of $V$,
we can suppose 
$$\lim_{k\to +\infty}{\chi(u_{k})}=v_{\infty}\in V.$$
Equation (4.1) yields  
$$\alpha <B^i(u_{\infty})=A^i({\chi(u_{\infty})}+u_{\infty})
-A^i({\chi(u_{\infty})})\leq A^i(v_{\infty}+u_{\infty})
-A^i(v_{\infty}).\eqno (4.3)$$
From the continuity of $A^i$, it results
$$A^i(v_{\infty}+u_{\infty})-A^i( v_{\infty})=
\lim_{k\to +\infty}\big(A^i(\chi(u_{k})+u_{k})-A^i(\chi(u_{k}))\big)
=\lim_{k\to +\infty}B^i(u_{k})\leq \alpha .\eqno(4.4)$$
Equations (4.3) and (4.4) lead to a contradiction.\hfill$\square$

\paragraph{Lemma 4.2}
{\sl Let $A$ be a continuous derivor. \\
(a) Problem (1.1) has a local unique maximal solution  
$S_{A,f}^{\text{max}}(t)u_{0}$ defined on its maximal interval of existence 
$[0,T^{1})$
(resp. a unique minimal solution $S_{A,f}^{\text{min}}(t)u_{0}$ on $[0,T^{2})$).
\\
(b) If $v_{0}\leq u_{0}$ and if $v(t)$ satisfies $v(0)=v_{0}$ and
$v'(t)\leq -Av(t)+f(t)$  a.e. on $[0,\tilde T)$ with
$\tilde T < T^{1}$,
then for $t\in [0,\tilde T)$ we have
$v(t)\leq S_{A,f}^{\text{max}}(t)u_{0}$.
\\ 
(c) if $v_{0}\geq u_{0}$ and if $v(t)$  satisfies $v(0)=v_{0}$ and
$v'(t)\geq -Av(t)+f(t)$ a.e. on $[0,\tilde T)$ with $\tilde T<T^{2}$,
then for $t\in [0,\tilde T)$ we have
$v(t)\geq S_{A,f}^{\text{min}}(t)u_{0}$.} \medskip

The previous lemma will be proved by standard arguments
in an analogous way as Kamke's Lemma \cite{k1}
and the arguments given in \cite{h2,w1}.

\paragraph{Proof.} We shall prove only parts (a) and (b). 
The  proof of part (c) can be obtained in an analogous way.
Let $v$ be a solution on $[0,\tilde T)\subset [0,T]$ of
$$\displaylines{\dot v(t) \leq -Av(t)+f(t),
\quad t\in [0,\tilde T)\cr
v(0)=v_{0}.}$$
For each $n\in \mathbb{N}^{*}$, Problem $CP(A,f+\frac 1n ,u_{0})$ 
has at least a local solution $u_{n}$ (see \cite{w1})
defined on a maximal interval of $[0,T_{n}]$.
\\
1) Let us show  $v\leq u_{n}$ on $[0,\tilde T \wedge T_{n})$,
where 
$\tilde T \wedge T_{n}$ means $\min (\tilde T ,T_{n})$. One has
$$\begin{gathered} u_{n}(t)-v(t)\geq
u_{n}(t_{0})-v(t_{0})+\int_{t_{0}}^{t}(\epsilon_{n}(\tau)+\frac 1n)d\tau
\cr
\epsilon_{n}(\tau)=-Au_{n}(\tau)+Av(\tau)
\end{gathered}.\eqno (4.5)$$
for all $t_{0},t \in [0,\tilde T \wedge T_{n})$,
$t_{0}\leq t$.
Let 
$$E=\{t\in [0,\tilde T \wedge T_{n}),v(\tau)\leq u_{n}(\tau) 
\quad\mbox{for all} \quad \tau \in [0,t]\}$$
First, remark that  $E$ is (not empty and) closed on  
$[0,\tilde T \wedge T_{n})$.
Second, if $t_{0}\in E$, $t_{0}<\tilde T \wedge T_{n}$ and
$(v(t_{0}))^i=(u_{n}(t_{0}))^i$, for some $i\in \{1,\dots ,N\}$
then the derivor property of Definition 2.1 (i)
yields $$\epsilon_{n}^i(t_{0})\geq 0\eqno (4.6)$$
Consequently, relations (4.5), (4.6)  and the definition
of $t_{0}$ provide some $\eta >0$
such that $v^i(\tau)\leq u_{n}^i(\tau)$
for  $\tau \in [t_{0},t_{0}+\eta]\subset [0,\tilde T \wedge T_{n})$.
Finally $E$ is open in $[0,\tilde T \wedge T_{n})$ and thus
$E=[0,\tilde T \wedge T_{n})$.
\\
2) We have $u_{n+1}\leq u_{n}$ on $[0,T_{n+1} \wedge T_{n})$.
Indeed the proof is the same as 1) if we replace $v$ by $u_{n+1}$
and $-Au_{n}(\tau)+Av(\tau)$ by $-Au_{n}(\tau)+Au_{n+1}(\tau)$.
\\
3)  We have $\tilde T \wedge T_{n} \geq \tilde T \wedge T_{1}$.
Indeed, from parts 1) and 2), for each $n\in \mathbb{N^{*}}$
we have
$$v\leq u_{n+1} \leq u_{n} \leq u_{1}\eqno (4.7)$$
on the common interval of existence of these  solutions.
Then the extension principle of solutions
 implies $\tilde T \wedge T_{n+1}\geq \tilde T \wedge T_{n}$
since a bounded solution is extendable.
\\
4) The sequence $(u_{n})$ converges uniformly to $u_{\infty}$
 on each compact sub-interval of $[0,\tilde T \wedge T_{1})$ thanks to 
(4.7) and the Lebesgue's Dominated Convergence Theorem. Furthermore
$u_{\infty}$ is solution of $CP(A,f,u_{0},\tilde T \wedge T_{1})$ 
on $[0,\tilde T \wedge T_{1})$.
Moreover, clearly $u_{\infty}$  is the maximal solution 
of $CP(A,f,u_{0},\tilde T \wedge T_{1})$ (see Section 2).

Let $F$ be the set of  $S\in [0,T]$ such that
$u_{\infty}$ is extendable into a continuous function on $[0,S)$
which is the maximal solution of   $CP(A,f,u_{0},S)$.
One has $\tilde T \wedge T_{1}\in F$.
By considering $S_{\infty}=\sup F$,
we obtain a maximal extension of $u_{\infty}$
as a local solution of $CP(A,f,u_{0},T)$
which is by construction the maximal solution of
$CP(A,f,u_{0},T)$. \hfill$\square$\smallskip

The next lemma makes use of the ascent assumption.

\paragraph{Lemma 4.3}
{\sl With the notation in Lemma 4.2, if \textrm{H1T} holds, we have
$$S_{A,f}^{\text{min}}(t)u_{0}=S_{A,f}^{\text{max}}(t)u_{0}$$ on $[0,T^{1}\wedge T^{2})
=[0,T^{1})$.}

\paragraph{Proof.}
Thanks to Lemma 4.2(a), (1.1) has a maximal solution 
$S_{A,f}^{\text{max}}(t)u_{0}$ defined on a sub-interval $[0,T^{1})$ of $[0,T]$
and a minimal solution $u(t)=S_{A,f}^{\text{min}}(t)u_{0}$ 
defined on a sub-interval $[0,T^{2})$ of $[0,T]$.
Set $T_{3}=T^{1}\wedge T^{2}$ and
$$w(t):=S_{A,f}^{\text{max}}(t)u_{0}-S_{A,f}^{\text{min}}(t)u_{0}\eqno (4.8)$$
for $t\in [0,T_{3})$.
 We have to prove  $w=0$  on $[0,T_{3})$, that is $E=[0,T_{3})$ 
where $E=\{ t\in [0,T_{3}), w(\tau)=0,\forall \tau \in [0,t]\}$.
Since $E=w^{-1}(0)$ is closed in $[0,T_{3})$ ($w$ being continuous), it
just remains to show that $E$ is open to the right.
Let $t_{0} \in E, t_{0}< T_{3}$. We have to prove that there exists $h>0$
such that $w=0$ on $[t_{0},t_{0}+h]$. Eventually, by changing
$w$ into $w(t_{0}+.)$ and $f$ into $f(t_{0}+.)$, we will suppose 
$t_{0}=0$.

Let $V \in V_{K}(u_{0})$ and $B$ be as in (4.1).
In view of the continuity of $u$ at 0, there exists $T_{4}\in ]0,T_{3}[$
such that, for each $t\in [0,T_{4}]$, $u(t)\in V$,
 hence $w$ satisfies a.e.:
$$\begin{gathered} w'(t)=-(A(u(t)+w(t))-Au(t))\leq -Bw(t) \cr
w(0)=0,\end{gathered}\eqno (4.9)$$
a.e. $t\in [0,T_{4}]$.
By using Lemma 4.2 (b) with $B$ instead of $A$,
we have $$w(t)\leq S_{B}^{\text{max}}(t)(0)\eqno (4.10)$$ for each 
$t\in [0,T_{4}\wedge T_{5}]$,
where $[0,T_{5}]$ is the maximal interval of existence
of $S_{B}^{\text{max}}(t)(0)$.
 The function $x(t)=S_{B}^{\text{max}}(t)(0)$ satisfies
$$\begin{gathered} x'(t)= -Bx(t)\cr
x(0)=0.\end{gathered}\eqno (4.11)$$
 Let $(v_{k})_{k\in \mathbb{N^{*}}}$ be a sequence
which defines a uniform ascent at the point $u_{0}$
for the operator $A$ on the set $V$ (see Section 2). We have
 $$B^i(v_{k})=A^i(v_{k}+{\hat v_{k}}(i))-A^i({\hat v_{k}}(i))>0 
\eqno (4.12)$$
for $k\in \mathbb{N^{*}}$ and $i \in \{1,\dots ,N\}$
where ${\hat v_{k}}(i)$ is a vector minimizing
$ v \to A^i(v_{k}+ v)-A^i(v)$ on  $V$.

Let $k\in \mathbb{N}$ be fixed, then due to Lemma 4.2(b) 
there exists $s_{k}>0$ such that $s_{k}\leq T_{4}\wedge T_{5}$
and 
$$S_{B}^{\text{max}}(t)(0)\leq S_{B}^{\text{max}}(t)(v_{k})\eqno (4.13)$$
for each $t \in [0,s_{k}]$.

Equation (4.12) and the continuity of $B$
give the existence of $t_{k}>0$ and $t_{k}\leq s_{k}$
such that:
$$B(S_{B}^{\text{max}}(t)(v_{k}))\geq 0$$ for $t\in [0,t_{k}]$.
Thus $t \to S_{B}^{\text{max}}(t)(v_{k})$ is decreasing on $[0,t_{k}]$.
Consequently, from (4.10) and (4.13), it results
 $$w(t)\leq S_{B}^{\text{max}}(t)(v_{k})\leq   S_{B}^{\text{max}}(0)(v_{k})=v_{k}\eqno (4.14)$$
for each $t\in [0,t_{k}]$.

In particular, we have  $w(t_{k})\leq v_{k} $.
If we put $y(t)=w(t_{k}+t)$, we get 
$$\begin{gathered} y'(t)\leq -By(t) \cr
y(0)=w(t_{k})\leq v_{k}\end{gathered}\eqno (4.15)$$
 for  a.e. $t\in [0,t_{k}]$.
Hence, according to (4.14) and (4.15), one has
$$w(t_{k}+t)\leq S_{B}^{\text{max}}(t)(v_{k})\leq v_{k}$$
for $t\in [0,t_{k}]$.
So $w(t)\leq v_{k}$ for $t\in [0,2t_{k}\wedge T_{4}]$.
Whence by induction, we get
$$0\leq w(t)\leq v_{k}\eqno (4.16)$$ for  $t\in [0,T_{4}]$.
Since (4.16)
is valid for each $k\in \mathbb{N^{*}}$ and 
$\lim v_{k}=0$, it follows $w(t)=0$
for each $t\in [0,T_{4}]$. Hence 
for  $h=T_{4}>0$, we have $[0,h] \subset E$ which completes the proof.
\hfill$\square$

\subsection*{Proof of  Theorem 3.2}
In this subsection, we assume that $A$ satisfies \textrm{H1T}, and  
$f \equiv 0$ on $[0,T]$. First, let us recall some basic facts 
about the  discretization (1.1) in the Theory of Nonlinear Semigroups. 
It is known \cite{c5} that a strong solution of (1.1) is a mild
solution, i.e. a continuous function which is a uniform limit of Euler's
implicit discrete schemes. Such discrete schemes are defined as follows.

Let  $\epsilon >0$ be fixed. Then an $\epsilon$-discretization on $[0,T]$
of $\dot u +Au=0$ on $[0,T]$ consists of a partition $0=t_{0}\leq t_{1}\leq \dots \leq
t_{n}$ of the interval $[0,t_{n}]$ and a finite sequence
$(f_{1},f_{2},\dots ,f_{n})$ in $\mathbb{R}^N$ such that
\\
(a) $t_{i}-t_{i-1}<\epsilon$ for $i=1,\dots ,n$
and $T-\epsilon<t_{n}\leq T$.
\\
(b) $\Sigma_{i=1}^{n} (t_{i}-t_{i-1})\| f_{i} \| \leq \epsilon$.

We will indicate these data by writing $D_{A}(0=t_{0},t_{1},\dots ,
t_{n}:f_{1},\dots ,f_{n})$.

A solution of a discretization
$D_{A}(0=t_{0},t_{1},\dots ,t_{n}:f_{1},\dots ,f_{n})$ is a 
piecewise constant function 
$v:[0,t_{n}]\to \mathbb{R}^N$ whose values $v_{i}$
on $(t_{i-1},t_{i}]$ satisfy
$$\begin{gathered} \frac{v_{i}-v_{i-1}}{t_{i}-t_{i-1}}+Av_{i}=f_{i}\cr
v_{0}=u_{0}\end{gathered}\eqno (4.17)$$ 
for $i\in \{1,\dots ,n\}$.
An $\epsilon$-approximate solution of $CP(A,0,u_{0})$
is a solution $v$ of an  $\epsilon$-discretization 
$D_{A}(0=t_{0},t_{1},\dots ,t_{n}:f_{1},\dots ,f_{n})$.

A {\bf mild solution} of  $CP(A,0,u_{0})$ on $[0,T]$ is a 
 continuous function $u$ on $[0,T]$ 
with the property that for each $\epsilon>0$
there is an $\epsilon$-approximate solution $v$
of  $CP(A,0,u_{0})$ on $[0,T]$ such that $\| v(t)-u(t) \| \leq \epsilon$
for $t$ in the domain of $v$.

Now, for $n\in \mathbb{N^{*}}$, let 
$J=J_{T/n, {\overline u_{0}}}$, and
define the function $u_{n}$ by
$u_{n}(0)=u_{0}$ and $u_{n}(t)=J^i(u_{0})$
for $(i-1)T/n <t\leq iT/n$
where $J^i$ is the $i$-th power of $J$.
Then, thanks to (2.9), (4.17) holds with $v_{i}=u_{n}(iT/n)$, 
$t_{i}=iT /n$ and $f_{i}=0$ (Lemma 4.4 below guarantees
$ {\overline u}_{0}\leq v_{i-1}$ for all $i\geq 1$).
In other words $u_{n}$ is a $T/n$-approximate solution of
$CP(A,0,u_{0})$.

Then Theorem 3.2 results immediately from the following two lemmas.

\paragraph{Lemma 4.4}
{\sl With the previous notations, for each $t\in [0,T]$, we have
$${\overline u}_{0}\leq u_{n}(t) \leq {\hat u}_{0}. \eqno (4.18)
$$}

\paragraph{Proof.}
We set $v_{i}$ for $u_{n}(iT/n)$. By Proposition 2.4 (b),
we have 
$${\overline u}_{0}\leq v_{1}=J(u_{0})\leq 
{\hat u}_{0}\eqno (4.19)$$
Then (4.18) results by induction from (4.19)
and Proposition 2.4 parts (a) and (f). \hfill$\square$\medskip

The following lemma studies the continuous and discrete approach
and gives an exponential formula such as the Crandall-Liggett's formula 
(for the accretive autonomous case in \cite{c5}).

\paragraph{Lemma 4.5}
{\sl The sequence of approximate solutions $(u_{n})$ 
defined in Lemma 4.4 converges uniformly  on  $[0,T]$ to
$S_{A}(.)u_{0}$. Moreover, for all $t\in [0,T]$,
$${\overline u}_{0}\leq S_{A}(t)u_{0} \leq {\hat u_{0}}\eqno (4.20)$$ 
and
$$S_{A}(t)u_{0} =\lim_{n}J_{t/n}^{n}(u_{0})$$
where $J_{t/n}=J_{{t/n}, {\overline u_{0}}}$.
}

\paragraph{Proof.}
 The approximate solutions  $u_{n}$ satisfy
an Ascoli--Arzel\`a type condition $\mathcal{A}$ on $[0,T]$ 
\cite[p. 260-268]{d4}, namely: for each $\epsilon >0$ there exists 
$N_{\epsilon}\in \mathbb{N}$ and $\eta_{\epsilon}>0$
such that ($n\geq N_{\epsilon}$ and 
$\vert t-s \vert \leq \eta_{\epsilon}$)
implies $\| u_{n}(t)-u_{n}(s) \|_{\infty}\leq \epsilon$.
Indeed, relations (4.17) lead to
$$
u_{n}(t_{j}^{n})-u_{n}(t_{i}^{n})=
-\int_{t_{i}^{n}}^{t_{j}^{n}}Au_{n}(t)dt.\eqno(4.21)
$$
Using (4.18), Relation (4.21) yields
$$\| u_{n}(t)-u_{n}(s) \| \leq M(\vert t-s \vert +2\frac{T}{n}),
\eqno (4.22)$$
where $M=\sup_{{\overline u}_{0}\leq w \leq {\hat u_{0}}} \| Aw \|$.
Consequently (see \cite[p. 260]{d4}) the sequence
$(u_{n})$ is  relatively compact  in the Banach space 
${\cal B}([0,T],\mathbb{R}^N, \| \hskip 0.2cm \|_{\infty})$ of bounded 
functions on $[0,T]$ with values in $\mathbb{R}^N$. So 
 there  exists a subsequence $(u_{n_{k}})$ converging  to a continuous 
function $u_{\infty}$ which is a mild solution of $CP(A,0,u_{0})$.
Then, passing to the limit in (4.21)
(or from \cite[p. 314]{c5}), we see that $u_{\infty}$ is a strong 
(even a classical) solution of  $CP(A,0,u_{0})$ on $[0,T]$.

From  Theorem 3.1, it results 
$$u_{\infty}=S_{A}(.)u_{0}\eqno (4.23)$$
on $[0,T]$. Thus (4.20) follows from (4.23) and (4.18) on $[0,T]$.
Then, taking $T=t$,  (4.23) yields
$$S_{A}(t)u_{0}=\lim_{n\to +\infty}J_{t/n}^{n}
(u_{0}),
$$
where $J_{t/n}=J_{{t/n},{\overline u_{0}}}$.
The proof is complete. \hfill$\square$

\section{An example in $\mathbb{R}^{2}$}

Let $A_{0}$ be the  operator   defined on $\mathbb{R}^{2}$ by
$$A_{0}\begin{pmatrix} x\cr y\end{pmatrix}
:=\begin{pmatrix} x+(x-2y)^{1/3}\cr
y+(2y-x)^{1/5}\end{pmatrix} \eqno (5.1)$$

\paragraph{Lemma 5.1}
{\sl The operator $A_{0}$ satisfies \textrm{H1T} and
\textrm{H2T} for all $T>0$.} \medskip

The proof is left to the reader. In particular, the relation
$$A_{0}\begin{pmatrix} x+2t\cr y+t\end{pmatrix}
=A_{0}\begin{pmatrix} x\cr y\end{pmatrix} 
+\begin{pmatrix} 2t\cr t\end{pmatrix}\eqno (5.2)$$
for $t\in \mathbb{R}^{+}$, provides uniform ascents at each point.
The sublinearity at  infinity implies \textrm{H2T}.
Therefore we can apply the results of Section 3 to the operator $A_{0}$
for any $T>0$. Hence $CP(A_{0},f,u_{0},+\infty)$ has a unique global 
solution, on $[0,+\infty[$. Now, our task is to prove that
no condition of Nagumo-Osgood-Kamke and no accretivity condition
(even in a generalized sense) can be applied
to obtain the uniqueness of solutions of $CP(A_{0},f,u_{0})$.

\subsection*{Generalized accretivity conditions}
Let $\|\cdot \|_{p}$, $p\in [1,+\infty]$, be
the classical $l_{p}$-norm in $\mathbb{R}^{2}$.
As usual (see \cite{c5,d1}), we set
$$[u,v]=\lim_{\lambda \downarrow 0}
\frac{\| u+\lambda v \|-\| u \|}{\lambda}\eqno (5.3)
$$
for $u,v\in \mathbb{R}^{2}$.
For $p\in [1,+\infty]$, the notation $[u,v]_{p}$ means 
$[u,v]$, with  $\| \cdot \|_{p}$ instead of $\| \cdot \|$ in (5.3).

In the sequel,  $\phi$ stands for a continuous function
$\phi:\mathbb{R}\to \mathbb{R}^{+}$ satisfying
the following condition ${\cal U}$: 
For each $T_{0}$, the function
$x\equiv 0$ is the unique positive solution on $[0,T_{0}]$ of
$$\displaylines{\dot x(t)=\phi(x(t))\cr 
x(0)=0.
}$$

\paragraph{Definition 5.2}  
We will say that an operator $B$
defined on $\mathbb{R}^{2}$ is {\bf $\phi$-accretive }
in $(\mathbb{R}^{2},\| \cdot \|)$ if
$$-[u-v,Bu-Bv]\leq \phi(\| u-v \|)\eqno (5.4)$$
for all $u,v\in \mathbb{R}^{2}$. 

We will say that $B$ satisfies a
{\bf $\phi$-Osgood condition} if
$$\| Bu-Bv \| \leq \phi(\| u-v \|)$$ for all $u,v\in \mathbb{R}^{2}$.

\paragraph{Remark}
a) The condition  $B+\omega I$ is accretive  $(\omega \geq 0)$
means  $B$ is  $\phi$-accretive with  $\phi(x)=\omega x$.
General studies of  $\phi$-accretive conditions can be found in
\cite{c4,k2}.
\\
b) A $\phi$-{\bf Osgood condition} is a particular case of
 $\phi$-accretivity.

\paragraph{Lemma 5.3}
{\sl Let $p\in [1,+\infty]$. Then, there is no $\phi$,
such that $A_{0}$ is  $\phi$-accretive in 
($\mathbb{R}^{2},\|\cdot \|_{p}$).
Moreover, there are no $\phi$ and no norm $\|\cdot \|$
such that $A_{0}$ satisfies a $\phi$-Osgood condition in 
($\mathbb{R}^{2},\|\cdot\|$).}

\paragraph{Proof.}
a) Suppose first $p=+\infty$. By contradiction, suppose that $A_{0}$ is
$\phi$-accretive  in ($\mathbb{R}^{2},\| \cdot \|_{\infty}$)
for some $\phi$. Let $x\in [0,1[$. A direct computation
yields $A_{0}\begin{pmatrix}0\cr 0\end{pmatrix}
=\begin{pmatrix}0\cr 0\end{pmatrix}$
and $$[\begin{pmatrix}x\cr x-\frac 12 x^{2}\end{pmatrix},
A_{0}\begin{pmatrix} x\cr  x-\frac 12 x^{2}\end{pmatrix}]_{\infty}
=x^{1/3}(x^{2/3}+(-1+x)^{1/3}).\eqno (5.5)
$$
So, thanks to the $\phi$-accretivity, (5.5) implies 
$$\frac 12 x^{1/3}\leq \phi(x),\eqno (5.6)$$
for $x\geq 0$ sufficiently small.
Set 
$$z(t)=H^{-1}(t)\,,\quad H(\sigma)
=\int_{0}^{\sigma}\frac{d\xi}{\phi(\xi)}.\eqno (5.7)$$
From (5.6), $H$ is defined for $\sigma\geq 0$
sufficiently small and $$z(t)>0\eqno (5.8)$$ on some interval 
$]0,T_{0}]$ with $T_{0}>0$.
By using (5.7), a straightforward computation gives
 $z'(t)=\phi (z(t))$ and $z(0)=0$.
Then  ${\cal U}$  provides
$$z\equiv 0 \quad\mbox{on } [0,T_{0}].\eqno (5.9)
$$
Hence there is a contradiction between (5.8) and (5.9).

\noindent b) Suppose now $p\in [1,+\infty[$.
 By contradiction again, suppose that $A_{0}$ is $\phi$-accretive in
($\mathbb{R}^{2},\| \cdot \|_{p}$).
In this case, for $x\in [0,1]$, by setting
 $$u=\begin{pmatrix} x\cr \frac 12 x-\frac 12 x^{2} \end{pmatrix}
$$
 a direct computation gives
$$[u, A_{0}u]_{p}
=\left(1+\frac{x^{p-\frac 13}-(\frac{x-x^2}{2})^{p-1}x^{2/5}}
{\| u\|_{p}^{p}}\right)\| u \|_{p}.\eqno (5.10)$$
According to (5.10), the reader can check that the
 $\phi$-accretivity property implies
$\phi(\| u \|_{p})\geq -[u,A_{0}u]_{p}\geq \frac{1}{2^{p}+1}x^{2/5}$
for $x\in [0,1]$ sufficiently small. Then we can deduce
that for some $x_{0}\in ]0,1]$, there is $C>0$ (for instance
$C=\frac{e^{-1/5}}{2(2^{p}+1)}$), such that
$$C\| u \|_{p}^{2/5}\leq \phi(\| u \|_{p})
$$ for all $x\in  [0,x_{0}]$. 
Finally, there exists $\xi_{0}> 0$ such that
$\phi(\xi)\geq C\xi^{2/5}$ for  $\xi \in [0,\xi_{0}]$.
Now, as in step a), using the function $H$ defined in (5.7),
we can easily derive a contradiction.

\noindent c) Let $\| \cdot \|$ be a norm in $\mathbb{R}^{2}$
and suppose that $A_{0}$ satisfies a $\phi$-Osgood condition in
 $(\mathbb{R}^{2}, \| \cdot \|)$.
Then, by taking $u=\begin{pmatrix} 0\cr x\end{pmatrix}$, in the 
$\phi$-Osgood property
we obtain $\phi(\xi)\geq c{\xi^{1/5}}$ for a constant $c>0$, 
$\xi_{1}>0$ and all $\xi \in [0,\xi_{1}]$.
So we can conclude as before and the lemma is proved.
\hfill$\square$

\begin{figure}
\begin{center}
\includegraphics[width=0.7\textwidth]{fig1.eps}
\end{center}
\caption{Flow relative to $A_{0}$}
\end{figure}

\section{Asymptotic behavior}
Figure 5.1 motivates the following remarks about the asymptotic behavior 
of solutions of (1.1). Hypothesis \textbf{H3} stands for the following three 
conditions:\begin{itemize}
\item $f\equiv 0$
\item The assumption \textrm{H2T} holds for all $T>0$ 
\item $A$ is a continuous derivor on $ \mathbb{R}^N$. 
\end{itemize} 
We do not assume the uniqueness of solutions
of $CP(A,0,u_{0},+\infty)$.
We set $A^{+}=\{u;Au\geq 0\}$ and $A^{-}=\{u;Au\leq 0\}$.

\paragraph{Definition 6.1}
A derivor $A$ is \textit{absorbent} if $ u_{0}\in A^{+}$
(resp. $ u_{0}\in A^{-}$) implies $u(t)\in A^{+}$
(resp. $ u(t)\in A^{-}$) for all $t\geq 0$ and
each solution $u(.)$ of the autonomous problem
$CP(A,0,u_{0},+\infty)$. We say that $A$ is $u_{\infty}$-absorbent
if $B$ defined by $Bu=Au-Au_{\infty}$ is absorbent.

\paragraph{Proposition 6.2} {\sl
Assume \textrm{H3}. Let $u_{0},v_{0},w_{0}$ be in  $\mathbb{R}^N$
such that $Av_{0}\leq 0$, $Aw_{0}\geq 0$, $v_{0}\leq 
w_{0}$ and $v_{0} \leq u_{0} \leq w_{0}$.
Suppose $A$ is a  continuous $u_{\infty}$-absorbent derivor on 
$\mathbb{R}^N$ such that the equation $Av=0$
has a unique solution $u_{\infty}$ in $[v_{0},w_{0}]$. 
Then every  solution $u$ of $CP(A,0,u_{0},+\infty)$ satisfies
$$\lim_{t\to +\infty}u(t)=u_{\infty}
$$} %\end proposition

\paragraph{Proof.}
It is sufficient to prove the result for $S_{A}^{\text{max}}(t)w_{0}$
and $S_{A}^{\text{min}}(t)v_{0}$ since from Lemma 4.2 
such extremal solutions exist and satisfy
$S_{A}^{\text{min}}(t)v_{0}\leq u(t) \leq S_{A}^{\text{max}}(t)w_{0}$,
 $t \in [0,+\infty [$. If $w(t)=S_{A}^{\text{max}}(t)w_{0}$
we have $$w(t)-w_{0}=-\int_{0}^{t}Aw(x)dx. \eqno (5.11)$$
Consequently $t\to w(t)$ is decreasing because 
from the absorbent property
$w'(t)=-Aw(t)\leq 0$.
In an analogous way $v(t)=S_{A}^{\text{min}}(t)v_{0}$ is increasing
because $v'(t)=-Av(t)\geq 0$ for each $t\in [0,+\infty [$.
 Hence we get
 $$v_{0}\leq v(t)
\leq w(t)\leq w_{0}.$$
Then $l_{1}=\lim_{t\to +\infty}w(t)$
and $l_{2}=\lim_{t\to +\infty}v(t)$ exist in $\mathbb{R}^N$.
Hence, according to (5.11), $\int_{0}^{+\infty}Av(\tau)d\tau$ and 
$\int_{0}^{+\infty}Aw(\tau)d\tau$ converge.
Since $\lim_{t\to +\infty}Aw(t)=Al_{1}$ and
$\lim_{t\to +\infty}Av(t)=Al_{2}$, we have necessarily
$Al_{1}=Al_{2}=0$. So by  hypothesis
$\lim_{t\to +\infty}w(t)=\lim_{t\to +\infty}v(t)=u_{\infty}$.

\paragraph{Corollary 6.3}
{\sl For the operator $A_{0}$ introduced in (5.1), we have
$$\lim_{t\to \infty}S_{A_{0}}(t)(u_{0})=
\begin{pmatrix} 0\cr 0\end{pmatrix}.$$}

\paragraph{Proof.} 
We can show that $A_{0}u=\begin{pmatrix} 0\cr 0\end{pmatrix}$ holds if 
and only if $u=\begin{pmatrix} 0\cr 0\end{pmatrix}$. Moreover $A_{0}$
is absorbent. 
Indeed, with the notation of Lemma 4.4, let $u_{0}\in A_{0}^{+}$
(resp. $A_{0}^{-}$) and
 $u_{n}(t)=J_{T/n}^i(u_{0})$
for $(i-1)T/ n <t\leq iT/n$.
Then, owing to Proposition 2.4.(c),
 $u_{n}(t)\in A_{0}^{+}$  (resp. $u_{n}(t)\in A_{0}^{-}$).
Consequently,  Lemma 4.5 yields
$S_{ A_{0}}(t)u_{0}\in A_{0}^{+}$  (resp. $S_{ A_{0}}(t)u_{0} \in A_{0}^{-}$)
for all $t\geq 0$.
So Corollary 6.3 is a direct consequence of Proposition 6.2.

\begin{thebibliography}{00} {\frenchspacing

\bibitem{b1} J. Bliedtner, W. Hansen: Potential Theory, Springer-Verlag.

\bibitem{b2} D. Bothe: Minimal solutions of multivalued differential equations,
{\sl Differential and Integral Equations}, Vol.4,No. 2, p. 445-447, 1991.

\bibitem{c1} G. Choquet: Cours d'analyse, tome 2, Topologie,  {\sl Masson}, 1973.

\bibitem{c2} J-F. Couchouron: Equations d'\'evolution, le probl\`eme de Cauchy, {\sl Th\`ese de
Rouen}.

\bibitem{c3} J-F. Couchouron: Equations d'\'evolution, application \`a la th\'eorie discr\`ete du 
potentiel (C.R. Acad. Sci. Paris, t. 315, S\'erie 1, p. 275-278, 1992).

\bibitem{c4} J-F. Couchouron:  Equations d'\'evolution, le probl\`eme de Cauchy 
(C.R. Acad. Sci. Paris, t.319, S\'erie 1, p. 133-136, 1994).

\bibitem{c5} M. G. Crandall: Nonlinear Semigroups and Evolution
Governed by Accretive Operators, {\sl Proceedings of Symposia
in pure Mathematics}, vol. 45, 1986, part 1.

\bibitem{d1}  K. Deimling: Multivalued Differential Equations,
{\sl de Gruyter}.

\bibitem{d2} K. Deimling, V. Lakshmikantham: On existence of extremal solutions of 
differential equations in Banach spaces.
(Nonlinear Analysis, Theory, Methods and Applications, Vol.3, n.5, 
p. 563-568, 1979).

\bibitem{d3}  C. Dellacherie: Th\'eorie des processus de production,
S\'em. Proba. 24,
Lectures Notes in Math, {\sl Springer Verlag}, Vol. 1426, p. 52-104, 1990.

\bibitem{d4} N. Dunford, J.T. Schwartz: Linear Operators Part 1, 
John Wiley and Sons, 1957.

\bibitem{h1} H. L. Smith: An introduction to the theory of competitive
and cooperative systems,
{\sl Mathematical Surveys and Monographs}, vol. 41, Amer. Math. Soc., 
Providence, RI, 1995.


\bibitem{h2}  P. Hartman: Ordinary differential equations,
{\sl John Wiley and Sons}, 1964.

\bibitem{h3}  M. W. Hirsch: Systems of Differential equations that are competitive or cooperative, 
Convergence almost everywhere,
{\sl Siam J. Math. Anal.}, vol.16, n. 3, 1985.

\bibitem{k1} E. Kamke: Zur  Theorie der Systeme gew\"ohnlicher
Differentialgleichungen, II, Acta Math. {\bf 58} (1932)
57-85 (II 4,III 4).

\bibitem{k2}  Y. Kobayashi and K. Tanaka: Nonlinear semigroups
and evolution governed by 'generalized' dissipative operators,
{\sl Advances in mathematical Sciences and Applications, Gakkotosho, Tokyo}, Vol.3,
93/94, p. 401-426.

\bibitem{w1} W. Walter: Differential and Integral Equations, Springer Verlag, Berlin,
Heidelberg, New York, 1970.

}\end{thebibliography}


\noindent\textsc{Jean-Fran\c{c}ois Couchouron} 
(e-mail: couchour@loria.fr)\\
\textsc{Michel Grandcolas} (e-mail: grandcol@poncelet.univ-metz.fr)\\[3pt]
UFR MIM Departement de Math\'ematiques, Universit\'e de Metz \\
Ile du Saulcy \\
57045 Metz Cedex 01 France \medskip



\noindent\textsc{Dellacherie Claude} \\
Departement de Math\'ematiques\\
UFR Sciences, Site Colbert, Universit\'e de Rouen \\
76821 Mont Saint Aignan, France\\
e-mail: dellache@univ-rouen.fr

\end{document}
