\documentclass[reqno]{amsart}
\usepackage{hyperref}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2012 (2012), No. 74, pp. 1--15.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu}
\thanks{\copyright 2012 Texas State University - San Marcos.}
\vspace{9mm}}

\begin{document}
\title[\hfilneg EJDE-2012/74\hfil Initial-value problems]
{Initial-value problems for first-order differential systems with
 general nonlocal conditions}

\author[O. Nica\hfil EJDE-2012/74\hfilneg]
{Octavia Nica} 

\address{Octavia Nica \newline
Department of Mathematics, 
Babe\c s-Bolyai University\\
400084 Cluj, Romania}
\email{octavia.nica@math.ubbcluj.ro}

\thanks{Submitted February 17, 2012. Published May 14, 2012.}
\subjclass[2000]{34A34, 34A12, 45G10}
\keywords{Nonlinear differential system; 
 nonlocal initial condition; \hfill\break\indent
 fixed point; vector norm; matrix convergent to zero}

\begin{abstract}
 This article concerns the existence of solutions to
 initial-value problems for nonlinear first-order differential
 systems with nonlocal conditions of functional type.
 The fixed point principles by Perov, Schauder and Leray-Schauder
 are applied to a nonlinear integral operator split into two operators,
 one  of Fredholm type and the other of Volterra type.
 The novelty in this article is combining this approach with the
 technique that uses convergent to zero matrices and vector norms.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{remark}[theorem]{Remark}
\newtheorem{example}[theorem]{Example}
\allowdisplaybreaks

\section{Introduction}

 In this article, we study the nonlocal initial-value
problem for the first-order differential system
\begin{equation}
\begin{gathered}
x'(t)=f_1(t,x(t),y(t))\\
y'(t)=f_2(t,x(t),y(t))\quad\text{a.e. on }[0,1]\\
x(0)=\alpha [x],\quad 
y(0)=\beta [y].
\end{gathered}  \label{1}
\end{equation}
Here,  $f_1,f_2:[0,1]\times \mathbb{R}^2\to \mathbb{R}$ are
Carath\'{e}odory functions, $\alpha ,\beta :C[0,1]\to \mathbb{R}$
are linear and continuous functionals.

Nonlocal problems have been extensively discussed in the literature by different
methods; see Boucherif \cite{ref4}, Boucherif-Precup \cite{ref6},
 Byszewski \cite{ref7}, Byszewski-Lakshmikantham \cite{ref8},
 Nica-Precup \cite{prec-nica}, Ntouyas-Tsamatos \cite{nt}, Precup \cite{ref11},
 Webb-Lan  \cite{webb-lan}, Webb \cite{webb}, 
Webb-Infante \cite{webb-infante, webb-infante1,webb-infante2} and references therein.

In the recent paper \cite{nica-func-w}, problem \eqref{1}
was studied using as main tools the fixed point principles
by  Perov, Schauder and Leray-Schauder, together with the technique
that uses convergent to zero matrices and vector norms.
Note that the  $m$-point boundary condition
$x(0)+\sum_{k=1}^m  a_kx(t_k)=0$ is a particular case of condition
 $x(0)=\alpha [x]$  when
\begin{equation}
\alpha [x]=-\sum_{k=1}^m a_kx(t_k).
\label{intro_1}
\end{equation}
In  \cite{ref6}, the authors studied the nonlocal initial-value
problem for first-order differential equations
\begin{gather*}
x'(t)=f(t,x(t))\quad  (\text{a.e. on }[0,1]) \\
x(0)+\sum_{k=1}^m a_kx(t_k)=0,
\end{gather*}
assuming that  $f:[0,1]\times \mathbb{R}\to \mathbb{R}$ is a
Carath\'{e}odory function, $t_k$ are given points with
 $0\leq t_1\leq t_2\leq  \dots \leq t_{m}<1$ and
 $a_k$, $\widetilde{a}_k$
 are real numbers with $1+\sum_{k=1}^m a_k\neq 0$
 and $1+\sum_{k=1}^m \widetilde{a}_k\neq 0$.
The main idea there was to rewrite the problem as a fixed point problem,
involving a sum of two operators, one of Fredholm type whose values depend
only on the restrictions of functions to $[0,t_{m}]$, and the other one,
a Volterra type operator depending on the restrictions to $[t_{m},1]$.
The same strategy was adapted in \cite{prec-nica} for the first-order
differential system
\begin{gather*}
x'(t)=f(t,x(t),y(t)) \\
y'(t)=g(t,x(t),y(t))\quad (\text{a.e. on }[0,1])\\
x(0)+\sum_{k=1}^m a_kx(t_k)=0, \quad 
y(0)+\sum_{k=1}^m \widetilde{a}_ky(t_k)=0.
\end{gather*}

In this article, the nonlocal conditions are expressed by means of linear
continuous functionals on $C[0,1]$, as in the works by Webb-Lan
\cite{webb-lan}, Webb \cite{webb}, Webb-Infante \cite{webb-infante},
\cite{webb-infante1}, \cite{webb-infante2}.
 Our main assumption on functionals $\alpha ,\beta $ extends to the general
case the specific property of the particular functional \eqref{intro_1} of depending only on the points from a
proper subinterval $[0,t_0]$ of $[0,1]$, namely $[0,t_{m}]$
(taking $t_0:=t_{m}$). More exactly, we require the following property:
\begin{equation}
x|_{[0,t_0]}=y|_{[0,t_0]}\text{ implies }\alpha [x-y]=0,
\text{ whenever }x,y\in C[0,1].  \label{intro_point}
\end{equation}
Therefore, \eqref{intro_point} reads that the value of functional $\alpha $
on any function $x$ only depends on the restriction of $x$ to the fixed
subinterval $[0,t_0]$.
The key property of functional $\alpha $ satisfying \eqref{intro_point} is
that
\begin{equation}
| \alpha [u]| \leq \| \alpha \| \cdot | u| _{C[0,t_0]}\,,  \label{intro_key}
\end{equation}
for every $u\in C[0,1]$. Normally, for a given functional
\[
\alpha :C[0,1]\to \mathbb{R},
\]
we have
\[
| \alpha [g]| \leq \| \alpha \| \cdot | g| _{C[0,1]}.
\]
However, if $\alpha $ satisfies condition \eqref{intro_point}, then
\[
| \alpha [g]| \leq \| \alpha \|\cdot | g| _{C[0,t_0]}.
\]
Indeed, for each $g\in C[0,1]$, if we let $\widetilde{g}\in C[0,1]$ be
defined by
\[
\widetilde{g}(t)=\begin{cases}
g(t), &\text{if } t\in [0,t_0] \\
g(t_0),& \text{if }t\in [t_0,1],
\end{cases}
\]
then
\[
| \alpha [g]|=| \alpha [ \widetilde{g}]|
 \leq  \| \alpha \| \cdot | \widetilde{g} | _{C[0,1]}
=\| \alpha \| \cdot | g| _{C[0,t_0]}.
\]
The goal of this work is to revisit system \eqref{1} under the assumption
that both functionals $\alpha $ and $\beta $ satisfy \eqref{intro_point},
using the strategy from \cite{prec-nica}.

Problem \eqref{1} is equivalent to the following integral system in
$C[0,1]^2$:
\begin{gather*}
x(t)=\frac{1}{1-\alpha [1]}\alpha [g_1]+\int_0^{t}f_1(s,x(s),y(s))ds \\
y(t)=\frac{1}{1-\beta [1]}\beta [g_2]+\int_0^{t}f_2(s,x(s),y(s))ds,
\end{gather*}
where
\[
g_1(t):=\int_0^{t}f_1(s,x(s),y(s))ds,\quad
g_2(t):=\int_0^{t}f_2(s,x(s),y(s))ds.
\]
This can be viewed as a fixed point problem in $C[0,1]^2$ for
the completely continuous operator
 $T:C[0,1]^2\to C[0,1]^2$, $T=(T_1,T_2)$, where $T_1$ and $T_2$\ are
given by
\begin{gather*}
T_1(x,y)(t)=\frac{1}{1-\alpha [1]}\alpha
[g_1]+\int_0^{t}f_1(s,x(s),y(s))ds, \\
T_2(x,y)(t)=\frac{1}{1-\beta [1]}\beta
 [g_2]+\int_0^{t}f_2(s,x(s),y(s))ds.
\end{gather*}
In fact, under assumption \eqref{intro_point} on $\alpha $ and $\beta $,
operators $T_1$ and $T_2$ appear as sums of two integral operators, one
of Fredholm type, whose values depend only on the restrictions of functions
to $[0,t_0]$, and the other one, a Volterra type operator depending on the
restrictions to $[t_0,1]$, as this was pointed out in
 \cite{ref6}. Thus, $T_1$ can be rewritten as $T_1=T_{F_1}+T_{V_1}$, where
\begin{gather*}
T_{F_1}(x,y)(t)=\begin{cases}
\frac{1}{1-\alpha [1]}\alpha [g_1]+\int_0^{t}f_1(
s,x(s),y(s))ds,& \text{if }t<t_0 \\
\frac{1}{1-\alpha [1]}\alpha [
g_1]+\int_0^{t_0}f_1(s,x(s),y(s))ds,
&\text{if }t\geq t_0;
\end{cases}
\\
T_{V_1}(x,y)(t)=\begin{cases}
0,& \text{if }t<t_0 \\
\int_{t_0}^{t}f_1(s,x(s),y(s))ds, & \text{if }t\geq t_0.
\end{cases}
\end{gather*}
Similarly, $T_2=T_{F_2}+T_{V_2}$, where
\begin{gather*}
T_{F_2}(x,y)(t)=\begin{cases}
\frac{1}{1-\beta [1]}\beta [g_2]+\int_0^{t}f_2(
s,x(s),y(s))ds,& \text{if }t<t_0 \\
\frac{1}{1-\beta [1]}\beta [g_2]+\int_0^{t_0}f_2(
s,x(s),y(s))ds,& \text{if }t\geq t_0;
\end{cases}
\\
T_{V_2}(x,y)(t)=\begin{cases}
0, & \text{if }t<t_0 \\
\int_{t_0}^{t}f_2(s,x(s),y(s))ds,& \text{if }t\geq t_0.
\end{cases}
\end{gather*}
This allows us to split the growth condition on the nonlinear terms
 $f_1(t,x,y)$ and $f_2(t,x,y)$ into two parts, one for
$t\in [0,t_0]$ and another one for $t\in [t_0,1]$, in
such way that one reobtains the classical growth when $t_0=0$, that is for
the local initial condition $x(0)=0$.

We conclude this introductory part by some notation, notions and basic
results that are used in the next sections.
The symbol $| x| _{C[a,b]}$ stands for the
max-norm on $C[a,b]$,
\[
| x| _{C[a,b]}=\max\nolimits_{t\in [a,b]}| x(t)| ,
\]
while $\| x\| _{C[a,b]}$ denotes the Bielecki norm
\[
\| x\| _{C[a,b]}=| x(t)
e^{-\theta (t-a)}| _{C[a,b]}
\]
for some suitable $\theta >0$.

In the next sections, three fixed point principles will be used
to prove the existence of solutions for the semilinear problem, namely
the fixed point theorems by Perov, Schauder and Leray-Schauder
(see \cite{ref11}). In all three cases a key role will be played by the so-called
convergent to zero matrices. A square matrix $M$ with nonnegative elements
is said to be \emph{convergent to zero} if
\[
M^{k}\to 0\quad \text{as }k\to \infty .
\]
It is known that the property of being convergent to zero is equivalent to
each of the following three conditions
(for details see \cite{ref11,p1}):
\begin{itemize}
\item[(a)] $I-M$ is nonsingular and $(I-M)^{-1}=I+M+M^2+\dots$, where $I$ stands
for the unit matrix of the same order as $M$;

\item[(b)] the eigenvalues of $M$ are located in the interior of the
 unit disc of the complex plane;

\item[(c)] $I-M$ is nonsingular and $(I-M)^{-1}$ has nonnegative elements.
\end{itemize}

The following lemma whose proof is immediate from characterization (b) of
convergent to zero matrices will be used in the sequel:

\begin{lemma} \label{lem1.1}
If $A$ is a square matrix that converges to zero and the elements of
another square matrix $B$ are small enough, then $A+B$ also converges to zero.
\end{lemma}

We finish this introductory section by recalling
(see \cite{ref12,ref11}) three fundamental results which will be
used in the next sections.
Let $X$ be a nonempty set. By a \emph{vector-valued metric} on $X$ we mean a
mapping $d:X\times X\to \mathbb{R}_{+}^{n}$ such that
\begin{itemize}
\item[(i)] $d(u,v)\geq 0$ for all $u,v\in X$ and if $d(u,v)=0$ then $u=v$;

\item[(ii)] $d(u,v)=d(v,u)$ for all $u,v\in X$;

\item[(iii)] $d(u,v)\leq d(u,w)+d(w,v)$ for all $u,v,w\in X$.

\end{itemize}
 Here, for  $x=(x_1,x_2,\dots ,x_{n})$,
$y=(y_1,y_2,\dots ,y_{n})$, by $x\leq y$ we mean $x_i\leq y_i$ for
 $i=1,2,\dots ,n$. We call the pair $(X,d)$ a \emph{generalized metric space}.
For such a space convergence and completeness are similar to those in usual
metric spaces.

An operator $T:X\to X$ is said to be \emph{contractive} (with
respect to the vector-valued metric $d$ on $X$) if there exists a convergent
to zero matrix $M$ such that
\[
d(T(u),T(v))\leq Md(u,v)\quad \text{for all }u,v\in X.
\]

\begin{theorem}[Perov] \label{thm1.2}
Let $(X,d)$ be a complete generalized metric space and $T:X\to X$ a
contractive operator with Lipschitz matrix $M$. Then $T$ has a unique fixed
point $u^{\ast }$ and for each $u_0\in X$ we have
\[
d(T^{k}(u_0),u^{\ast })\leq M^{k}(I-M)^{-1}d(u_0,T(u_0))\quad
\text{for all }k\in \mathbb{N}.
\]
\end{theorem}

\begin{theorem}[Schauder] \label{thm1.3}
Let $X$ be a Banach space, $D\subset X$ a nonempty closed bounded convex set
and $T:D\to D$ a completely continuous operator (i.e., $T$ is
continuous and $T(D)$ is relatively compact). Then $T$ has at least one
fixed point.
\end{theorem}

\begin{theorem}[Leray-Schauder] \label{thm1.4}
Let $(X,\|\cdot\|_{X})$ be a Banach space, $R>0$ and
$T:\overline{B}_{R}(0;X)\to X$ a completely continuous operator.
If $\|u\|_{X}<R$ for every solution $u$ of the equation $u=\lambda T(u)$
and any $\lambda \in (0,1)$, then $T$ has at least one fixed point.
\end{theorem}

Throughout the paper we shall assume that the following conditions are
satisfied:
\begin{itemize}
\item[(H1)] $1-\alpha [1]\neq 0$ and $1-\beta [1]\neq 0$.

\item[(H2)] $f_1,f_2:[0,1]\times \mathbb{R}^2\to \mathbb{R}$ are such
that $f_1(\cdot,x,y),f_2(\cdot,x,y)$ are measurable for each
$(x,y)\in \mathbb{R}^2$ and $f_1(t,\cdot,\cdot),f_2(t,\cdot,\cdot)$ are
continuous for almost all $t\in [0,1]$.
\end{itemize}

\section{Nonlinearities with the Lipschitz property.
Application of Perov's fixed point theorem}

 Here we show that the existence of solutions to
problem \eqref{1} follows from Perov's fixed point theorem when
$f_1,f_2$ satisfy Lipschitz conditions in $x$ and $y$:
\begin{gather}
| f_1(t,x,y)-f_1(t,\overline{x},\overline{y})| \leq
\begin{cases}
a_1| x-\overline{x}| +b_1| y-\overline{y}| , &\text{if }t\in [0,t_0]\\
a_2| x-\overline{x}| +b_2| y-\overline{y}| , &\text{ if }t\in [t_0,1],
\end{cases}  \label{200}
\\
| f_2(t,x,y)-f_2(t,\overline{x},\overline{y})| \leq
\begin{cases}
A_1| x-\overline{x}| +B_1| y-\overline{y}|, &\text{if }t\in [0,t_0]\\
A_2| x-\overline{x}| +B_2| y-\overline{y}|, &\text{if }t\in [t_0,1],
\end{cases}  \label{201}
\end{gather}
for all $x,y,\overline{x},\overline{y}\in \mathbb{R}$.

In what follows we denote by
 $A_{\alpha }:=\frac{\| \alpha \| }{| 1-\alpha [1]| }+1$,
$B_{\beta }:=\frac{\| \beta \| }{| 1-\beta [1]| }+1$.

\begin{theorem} \label{thm2.1}
If $f_1,f_2$ satisfy the Lipschitz conditions \eqref{200},
 \eqref{201} and the matrix
\begin{equation}
M_0:=\begin{bmatrix}
a_1t_0A_{\alpha } & b_1t_0A_{\alpha } \\
A_1t_0B_{\beta } & B_1t_0B_{\beta }
\end{bmatrix} \label{m}
\end{equation}
converges to zero, then problem \eqref{1} has a unique solution.
\end{theorem}

\begin{proof}
We shall apply Perov's fixed point theorem in $C[0,1]^2$
endowed with the vector norm $\|\cdot\| $ defined by
\[
\| u\| =(\| x\| ,\| y\| )
\]
for $u=(x,y)$, where for $z\in C[0,1]$, we let
\[
\| z\| =\max \{ | z|_{C[0,t_0]},\| z\| _{C[t_0,1]}\} .
\]
We have to prove that $T$ is contractive, more exactly that
\[
\| T(u)-T(\overline{u})\| \leq M_{\theta }\| u-\overline{u}\|
\]
for all $u=(x,y),\overline{u}=(\overline{x},\overline{y})\in C[0,1]^2$
and some matrix $M_{\theta }$ converging to zero. To this end,
let $u=(x,y),\overline{u}=(\overline{x},\overline{y})$ be any elements of
 $C[0,1]^2$. For $t\in [0,t_0]$, we have
\begin{align*}
&| T_1(x,y)(t)-T_1(\overline{x},\overline{y})(t)| \\
&=\Big| \frac{1}{1-\alpha [1]}\alpha [
g_1]+\int_0^{t}f_1(s,x(s),y(s))ds
 -\frac{1}{1-\alpha [1]}\alpha [\overline{g}
_1]-\int_0^{t}f_1(s,\overline{x}(s),\overline{y}
(s))ds\Big|   \\
&\leq | \frac{1}{1-\alpha [1]}| | \alpha [g_1-\overline{g}_1]|
+\int_0^{t}| f_1(s,x(s),y(s))
-f_1(s,\overline{x}(s),\overline{y}(s))| ds.
\end{align*}
Thus, using \eqref{intro_point},
\[
| \alpha [g_1-\overline{g}_1]| \leq \| \alpha \|\cdot | g_1-\overline{g}_1| _{C[0,t_0]},
\]
and therefore, by \eqref{intro_key}, we obtain the following evaluation:
\begin{equation} \label{fw1_1}
\begin{split}
& | T_1(x,y)(t)-T_1(\overline{x},\overline{y})(t)| \\
&\leq \frac{\| \alpha \| }{| 1-\alpha [1]| }| g_1-\overline{g}_1| _{C[0,t_0]}
 +\int_0^{t}(a_1| x(s)-\overline{x}(s)|+b_1| y(s)-\overline{y}(s)| )ds.
\end{split}
\end{equation}
Now, taking the supremum, we have
\begin{align*}
&| T_1(x,y)-T_1(\overline{x},\overline{y})|_{C[0,t_0]} \\
&\leq \frac{\| \alpha \| }{| 1-\alpha [
1]| }| g_1-\overline{g}_1|
_{C[0,t_0]}+a_1t_0| x-\overline{x}|
_{C[0,t_0]}+b_1t_0| y-\overline{y}| _{C[0,t_0]}.
\end{align*}
Also
\begin{align*}
| g_1(t)-\overline{g}_1(t)| &\leq \int_0^{t}| f_1(s,x(s),y(s))
-f_1(s,\overline{x}(s),\overline{y}(s))| ds \\
&\leq \int_0^{t}(a_1| x(s)-\overline{x}(s)|
+b_1| y(s)-\overline{y}(s)| )ds \\
&\leq a_1t_0| x-\overline{x}|_{C[0,t_0]}+b_1t_0| y-\overline{y}| _{C[0,t_0]},
\end{align*}
which gives
\begin{equation}
| g_1-\overline{g}_1| _{C[0,t_0]}
\leq a_1t_0| x-\overline{x}|_{C[0,t_0]}+b_1t_0| y-\overline{y}| _{C[0,t_0]}.
\label{g_fw}
\end{equation}
From \eqref{fw1_1} and \eqref{g_fw}, we obtain
\begin{equation} \label{100}
\begin{split}
&| T_1(x,y)-T_1(\overline{x},\overline{y})|_{C[0,t_0]}   \\
&\leq \big(\frac{\| \alpha \| }{| 1-\alpha [1]| }+1\big)
(a_1t_0| x-\overline{x}| _{C[0,t_0]}+b_1t_0| y-\overline{y}|_{C[0,t_0]}) \\
&= A_{\alpha }a_1t_0| x-\overline{x}|_{C[0,t_0]}
 +A_{\alpha }b_1t_0| y-\overline{y}|_{C[0,t_0]}.
\end{split}
\end{equation}
For $t\in [t_0,1]$ and any $\theta >0$, we have
\begin{align*}
&| T_1(x,y)(t)-T_1(\overline{x},\overline{y})(t)| \\
&\leq | \frac{1}{1-\alpha [1]}| | \alpha
[g_1-\overline{g}_1]| +\int_0^{t}|
f_1(s,x(s),y(s))-f_1(s,\overline{x}
(s),\overline{y}(s))| ds \\
&\quad +\int_{t_0}^{t}| f_1(s,x(s),y(s))
-f_1(s,\overline{x}(s),\overline{y}(s))| ds.
\end{align*}
Hence, \eqref{intro_key} gives
\begin{align*}
&| T_1(x,y)(t)-T_1(\overline{x},\overline{y})(t)| \\
&\leq \big(\frac{\| \alpha \| }{| 1-\alpha
[1]| }+1\big) \big(a_1t_0| x-\overline{x}
| _{C[0,t_0]}+b_1t_0| y-\overline{y}|_{C[0,t_0]}\big)\\
&\quad +\int_{t_0}^{t}| f_1(s,x(s),y(s))
-f_1(s,\overline{x}(s),\overline{y}(s))| ds.
\end{align*}
The last integral can be further estimated as follows:
\begin{align*}
&\int_{t_0}^{t}| f_1(s,x(s),y(s))
-f_1(s,\overline{x}(s),\overline{y}(s))
| ds \\
&\leq \int_{t_0}^{t}(a_2| x(s)-\overline{x}
(s)| +b_2| y(s)-\overline{y}(s)| )ds \\
&= a_2\int_{t_0}^{t}| x(s)-\overline{x}(s)| \cdot
e^{-\theta (s-t_0)}\cdot e^{\theta (s-t_0)}ds \\
&\quad +b_2\int_{t_0}^{t}| y(s)-\overline{y}(s)| \cdot
e^{-\theta (s-t_0)}\cdot e^{\theta (s-t_0)}ds \\
&\leq \frac{a_2}{\theta }e^{\theta (t-t_0)}\| x-\overline{x}
\| _{C[t_0,1]}+\frac{b_2}{\theta }e^{\theta
(t-t_0)}\| y-\overline{y}\| _{C[t_0,1]}.
\end{align*}
Thus
\begin{align*}
| T_1(x,y)(t)-T_1(\overline{x},\overline{y})(t)|
&\leq A_{\alpha }a_1t_0| x-\overline{x}|
_{C[0,t_0]}+A_{\alpha }b_1t_0| y-\overline{y}|
_{C[0,t_0]} \\
&\quad +\frac{a_2}{\theta }e^{\theta (t-t_0)}\| x-\overline{x}
\| _{C[t_0,1]}+\frac{b_2}{\theta }e^{\theta
(t-t_0)}\| y-\overline{y}\| _{C[t_0,1]}.
\end{align*}
Dividing by $e^{\theta (t-t_0)}$ and taking the supremum when
 $t\in [t_0,1]$, we obtain
\begin{equation}  \label{101_1}
\begin{split}
\| T_1(x,y)-T_1(\overline{x},\overline{y})\|_{C[t_0,1]} 
&\leq A_{\alpha }a_1t_0| x-\overline{x}|
_{C[0,t_0]}+A_{\alpha }b_1t_0| y-\overline{y}|
_{C[0,t_0]}   \\
&\quad+\frac{a_2}{\theta }\| x-\overline{x}\| _{C[t_0,1]}+
\frac{b_2}{\theta }\| y-\overline{y}\| _{C[t_0,1]}.
\end{split}
\end{equation}
Now \eqref{100} and \eqref{101_1} imply
\begin{equation}
\| T_1(x,y)-T_1(\overline{x},\overline{y})\| \leq
(A_{\alpha }a_1t_0+\frac{a_2}{\theta })\| x-
\overline{x}\| +(A_{\alpha }b_1t_0+\frac{b_2}{\theta }
)\| y-\overline{y}\| .  \label{103}
\end{equation}
Similarly,
\begin{equation}
\| T_2(x,y)-T_2(\overline{x},\overline{y})\| \leq
(B_{\beta }A_1t_0+\frac{A_2}{\theta })\| x-
\overline{x}\| +(B_{\beta }B_1t_0+\frac{B_2}{\theta }
)\| y-\overline{y}\| .  \label{104}
\end{equation}
Using the vector norm we can put both inequalities \eqref{103}, \eqref{104}
under the vector inequality
\[
\| T(u)-T(\overline{u})\| \leq M_{\theta }\| u-\overline{u}\| ,
\]
where
\begin{equation}
M_{\theta }=\begin{bmatrix}
A_{\alpha }a_1t_0+\frac{a_2}{\theta } & A_{\alpha }b_1t_0+\frac{
b_2}{\theta } \\
B_{\beta }A_1t_0+\frac{A_2}{\theta } & B_{\beta }B_1t_0+\frac{B_2
}{\theta }
\end{bmatrix}.  \label{m0}
\end{equation}
Clearly the matrix $M_{\theta }$ can be represented as
 $M_{\theta}=M_0+M_1$, where
\[
M_1=\begin{bmatrix}
\frac{a_2}{\theta } & \frac{b_2}{\theta } \\
\frac{A_2}{\theta } & \frac{B_2}{\theta }
\end{bmatrix}.
\]
Since $M_0$ is assumed to be convergent to zero, from Lemma \ref{lem1.1}
we have that $M_{\theta }$ also converges to zero for large enough
 $\theta >0$. The result now follows from Perov's fixed point theorem.
\end{proof}

\section{Nonlinearities with growth at most linear.
Application of Schauder's fixed point theorem}

 Here we show that the existence of solutions to
problem \eqref{1} follows from Schauder's fixed point theorem when 
$ f_1,f_2$, instead of the Lipschitz condition, satisfy the more relaxed
condition of growth at most linear:
\begin{gather}
| f_1(t,x,y)| \leq \begin{cases}
a_1| x| +b_1| y| +c_1, & \text{if }t\in [0,t_0]\\
a_2| x| +b_2| y| +c_2, & \text{if }t\in [t_0,1],
\end{cases}  \label{300}
\\
| f_2(t,x,y)| \leq \begin{cases}
A_1| x| +B_1| y| +C_1, & \text{if }t\in [0,t_0]\\
A_2| x| +B_2| y| +C_2, & \text{if }t\in [t_0,1].
\end{cases}  \label{301}
\end{gather}

\begin{theorem} \label{thm3.1}
If $f_1,f_2$ satisfy \eqref{300}, \eqref{301} and matrix
\eqref{m} converges to zero, then \eqref{1} has at least one
solution.
\end{theorem}

\begin{proof}
To apply Schauder's fixed point theorem, we look for a nonempty,
bounded, closed and convex subset $B$ of $C[0,1]^2$ so that
 $T(B)\subset B$. Let $x,y$ be any elements of $C[0,1]$.
For $t\in [0,t_0]$, using \eqref{intro_point} and \eqref{intro_key}, we have
\begin{equation}
\begin{split}
| T_1(x,y)(t)|
&=| \frac{1}{1-\alpha [1]}\alpha [g_1]+\int_0^{t}f_1(s,x(s),y(s))ds|
 \\
&\leq | \frac{1}{1-\alpha [1]}| | \alpha [g_1]| +\int_0^{t}(a_1|
x(s)| +b_1| y(s)| +c_1)ds   \\
&\leq \frac{\| \alpha \| }{| 1-\alpha [1]| }| g_1| _{C[0,t_0]}
+a_1t_0| x| _{C[0,t_0]}+b_1t_0| y| _{C[0,t_0]}+c_1t_0.
\end{split} \label{fw2}
\end{equation}
Also
\begin{align*}
| g_1(t)|
&\leq \int_0^{t}| f_1(s,x(s),y(s))| ds \\
&\leq \int_0^{t}(a_1| x(s)| +b_1|y(s)| +c_1)ds \\
&\leq a_1t_0| x| _{C[0,t_0] }+b_1t_0| y| _{C[0,t_0]}+c_1t_0,
\end{align*}
which gives
\begin{equation}
| g_1| _{C[0,t_0]}\leq a_1t_0| x| _{C[0,t_0]}
+b_1t_0| y| _{C[0,t_0]}+c_1t_0.
\label{g2_fw}
\end{equation}
From \eqref{fw2} and \eqref{g2_fw}, we obtain
\begin{equation}
\begin{split}
| T_1(x,y)| _{C[0,t_0]}
&\leq (\frac{\| \alpha \| }{| 1-\alpha [1]| }+1)
 (a_1t_0| x| _{C[0,t_0]}+b_1t_0| y| _{C[0,t_0]})+\widetilde{c}_1   \\
&= a_1t_0A_{\alpha }| x| _{C[0,t_0]}+b_1t_0A_{\alpha }| y| _{C[0,t_0]}+
\widetilde{c}_1,
\end{split}  \label{302_1}
\end{equation}
where $\widetilde{c}_1:=c_1t_0A_{\alpha }$.
For $t\in [t_0,1]$ and any $\theta >0$, we have
\begin{align*}
| T_1(x,y)(t)|
&\leq a_1t_0A_{\alpha }| x| _{C[0,t_0]}+b_1t_0A_{\alpha }|
y| _{C[0,t_0]}+\widetilde{c}_1 \\
&\quad +\int_{t_0}^{t}(a_2| x(s)| +b_2|y(s)| +c_2)ds \\
&\leq a_1t_0A_{\alpha }| x| _{C[0,t_0]}+b_1t_0A_{\alpha }| y| _{C[0,t_0]}
+\widetilde{c}_1+(1-t_0)c_2 \\
&\quad +a_2\int_{t_0}^{t}| x(s)| \cdot e^{-\theta
 (s-t_0)}\cdot e^{\theta (s-t_0)}ds \\
&\quad +b_2\int_{t_0}^{t}| y(s)| \cdot e^{-\theta
 (s-t_0)}\cdot e^{\theta (s-t_0)}ds \\
&\leq a_1t_0A_{\alpha }| x| _{C[0,t_0]}+b_1t_0A_{\alpha }| y| _{C[0,t_0]}+c_0 \\
&\quad +\frac{a_2}{\theta }e^{\theta (t-t_0)}\| x\|
_{C[t_0,1]}+\frac{b_2}{\theta }e^{\theta (t-t_0)}\|y\| _{C[t_0,1]},
\end{align*}
where $c_0:=\widetilde{c}_1+(1-t_0)c_2$. Dividing by
$e^{\theta (t-t_0)}$ and taking the supremum, it follows that
\begin{equation}  \label{303}
\begin{split}
\| T_1(x,y)\| _{C[t_0,1]}
&\leq a_1t_0A_{\alpha}| x| _{C[0,t_0]}+b_1t_0A_{\alpha}| y| _{C[0,t_0]}  \\
&\quad +\frac{a_2}{\theta }\| x\| _{C[t_0,1]}+
\frac{b_2}{\theta }\| y\| _{C[t_0,1]}+c_0.
\end{split}
\end{equation}
Clearly, \eqref{302_1} and \eqref{303} give
\begin{equation}
\| T_1(x,y)\| \leq (a_1t_0A_{\alpha }+\frac{
a_2}{\theta })\| x\| +(b_1t_0A_{\alpha }+
\frac{b_2}{\theta })\| y\| +\widetilde{c}_0,
\label{304}
\end{equation}
where $\widetilde{c}_0=\max \left\{ \widetilde{c}_1,c_0\right\} $.
Similarly,
\begin{equation}
\| T_2(x,y)\| \leq (A_1t_0B_{\beta }
+\frac{A_2}{\theta })\| x\| +(B_1t_0B_{\beta }
+\frac{B_2}{\theta })\| y\| +\widetilde{C}_0,
\label{305}
\end{equation}
with $\widetilde{C}_0=\max \{ \widetilde{C}_1,C_0\} $,
where $\widetilde{C}_1:=C_1t_0B_{\beta }$ and $C_0:=\widetilde{C}_1+(1-t_0)C_2$.
Now \eqref{304} and \eqref{305} can be put together as
\[
\begin{bmatrix}
\| T_1(x,y)\| \\
\| T_2(x,y)\|
\end{bmatrix}
\leq M_{\theta }
\begin{bmatrix}
\| x\| \\
\| y\|
\end{bmatrix}
+\begin{bmatrix}
\widetilde{c}_0 \\
\widetilde{C}_0
\end{bmatrix},
\]
where the matrix $M_{\theta }$ is given by \eqref{m0} and converges to zero
for a large enough $\theta >0$. Next we look for two positive numbers
$R_1,R_2$ such that if $\| x\| \leq R_1,\|y\| \leq R_2$, then
 $\| T_1(x,y)\| \leq R_1$, $\| T_2(x,y)\| \leq R_2$.
To this end it is sufficient that
\begin{equation}
\begin{gathered}
(a_1t_0A_{\alpha }+\frac{a_2}{\theta })R_1+(
b_1t_0A_{\alpha }+\frac{b_2}{\theta })R_2+\widetilde{c}
_0\leq R_1 \\
(A_1t_0B_{\beta }+\frac{A_2}{\theta })R_1+(
B_1t_0B_{\beta }+\frac{B_2}{\theta })R_2+\widetilde{C}
_0\leq R_2,
\end{gathered}  \label{r1r2}
\end{equation}
or equivalently
\[
M_{\theta }\begin{bmatrix}
R_1 \\
R_2
\end{bmatrix}
+\begin{bmatrix}
\widetilde{c}_0 \\
\widetilde{C}_0
\end{bmatrix}
\leq \begin{bmatrix}
R_1 \\
R_2
\end{bmatrix},
\]
whence
\[
\begin{bmatrix}
R_1 \\
R_2
\end{bmatrix}
\geq (I-M_{\theta })^{-1}
\begin{bmatrix}
\widetilde{c}_0 \\
\widetilde{C}_0
\end{bmatrix}.
\]
Note that $I-M_{\theta }$ is invertible and its inverse
$(I-M_{\theta })^{-1}$ has nonnegative elements since $M_{\theta }$
converges to zero. Thus, if
 $B=\{ (x,y)\in C[0,1]^2:\| x\| \leq R_1,\| y\| \leq R_2\} $, then
 $T(B)\subset B$ and Schauder's fixed point theorem can
be applied.
\end{proof}

\section{More general nonlinearities. Application of the Leray-Schauder
principle}

 We now consider that nonlinearities $f_1,f_2$
satisfy more general growth conditions, namely:
\begin{gather}
| f_1(t,u)| \leq \begin{cases}
\omega _1(t,| u| _{e}), & \text{if }t\in [0,t_0] \\
\gamma (t)\beta _1(| u| _{e}), & \text{if }t\in [t_0,1],
\end{cases}  \label{400}
\\
| f_2(t,u)| \leq \begin{cases}
\omega _2(t,| u| _{e}), & \text{if }t\in [0,t_0] \\
\gamma (t)\beta _2(| u| _{e}), & \text{if }t\in [t_0,1],
\end{cases}  \label{401}
\end{gather}
for all $u=(x,y)\in \mathbb{R}^2$, where by $| u| _{e}$
we mean the Euclidean norm in $\mathbb{R}^2$. Here $\omega _1,\omega
_2 $ are Carath\'{e}odory functions on $[0,t_0]\times \mathbb{R}_{+}$,
nondecreasing in their second argument, $\gamma \in L^{1}[t_0,1
]$, while $\beta _1,\beta _2:\mathbb{R}_{+}\to \mathbb{R}
_{+}$ $\ $are nondecreasing and $1/\beta _1,1/\beta _2\in L_{loc}^{1}(
\mathbb{R}_{+})$.

\begin{theorem} \label{thm4.1}
Assume that \eqref{400}, \eqref{401} hold. In addition
assume that there exists a positive number $R_0$ such that for 
$\rho=(\rho _1,\rho _2)\in (0,\infty )^2$,
\begin{equation}
\frac{1}{\rho _1}\int_0^{t_0}\omega _1(t,| \rho |
_{e})dt\geq \frac{1}{A_{\alpha }} \text{ and }
\frac{1}{\rho _2}\int_0^{t_0}\omega _2(t,| \rho |
_{e})dt\geq \frac{1}{B_{\beta }}
\quad \text{imply}\quad  | \rho | _{e}\leq R_0
  \label{410}
\end{equation}
and
\begin{equation}
\int_{R^{\ast }}^{\infty }\frac{d\tau }{\beta _1(\tau )+\beta _2(\tau )}
>\int_{t_0}^{1}\gamma (s)ds,  \label{450}
\end{equation}
where $R^{\ast }=\big[(A_{\alpha }\int_0^{t_0}\omega
_1(t,R_0)dt)^2+(B_{\beta }\int_0^{t_0}\omega
_2(t,R_0)dt)^2\big]^{1/2}$. Then \eqref{1}
has at least one solution.
\end{theorem}

\begin{proof}
The result will follow from the Leray-Schauder fixed point theorem once we
have proved the boundedness of the set of all solutions to equation 
$u=\lambda T(u)$, for $\lambda \in [0,1]$. Let $u=(x,y)$ be such a
solution. Then, for $t\in [0,t_0]$, also using condition 
\eqref{intro_point} and \eqref{intro_key}, we have
\begin{equation} \label{402}
\begin{split}
| x(t)| &= | \lambda T_1(x,y)(t)| \\
&= \lambda | \frac{1}{1-\alpha [1]}\alpha [
g_1]+\int_0^{t}f_1(s,x(s),y(s))ds| \\
&\leq \frac{\| \alpha \| }{| 1-\alpha [1]| }| g_1|
_{C[0,t_0]}+\int_0^{t}| f_1(s,x(s),y(s))| ds   \\
&\leq (\frac{\| \alpha \| }{| 1-\alpha
[1]| }+1)\int_0^{t_0}\omega _1(s,|
u(s)| _{e})ds   \\
&= A_{\alpha }\int_0^{t_0}\omega _1(s,| u(s)|_{e})ds.
\end{split}
\end{equation}
Similarly,
\begin{equation}
| y(t)| \leq B_{\beta }\int_0^{t_0}\omega _2(s,| u(s)| _{e})ds.  \label{403}
\end{equation}
Let $\rho _1=| x| _{C[0,t_0]}$,
$\rho _2=|y| _{C[0,t_0]}$. Then from \eqref{402}, \eqref{403}, we deduce
\begin{gather*}
\rho _1\leq A_{\alpha }\int_0^{t_0}\omega _1(s,|u(s)| _{e})ds \\
\rho _2\leq B_{\beta }\int_0^{t_0}\omega _2(s,|u(s)| _{e})ds.
\end{gather*}
By \eqref{410}, this guarantees
\begin{equation}
| \rho | _{e}\leq R_0.  \label{453}
\end{equation}
Next we let $t\in [t_0,1] $. Then
\begin{align*}
| x(t)|
&= | \lambda T_1(x,y)(t)| \\
&\leq A_{\alpha }\int_0^{t_0}\omega
_1(s,R_0)ds+\int_{t_0}^{t}| f_1(s,x(s),y(s))| ds \\
&\leq A_{\alpha }\int_0^{t_0}\omega
_1(s,R_0)ds+\int_{t_0}^{t}\gamma (s)\beta _1(|
u(s)| _{e})ds \\
&=: \phi _1(t)
\end{align*}
and similarly
\[
| y(t)| \leq B_{\beta }\int_0^{t_0}\omega
_2(s,R_0)ds+\int_{t_0}^{t}\gamma (s)\beta _2(|u(s)| _{e})ds
=: \phi _2(t).
\]
Denote $\psi (t):=(\phi _1^2(t)+\phi _2^2(t))^{1/2}$.
Then
\begin{equation}
\begin{gathered}
\phi _1' (t)=\gamma (t)\beta _1(| u(t)|
_{e})\leq \gamma (t)\beta _1(\psi (t)) \\
\phi _2' (t)=\gamma (t)\beta _2(| u(t)|
_{e})\leq \gamma (t)\beta _2(\psi (t)).
\end{gathered}  \label{ls6}
\end{equation}
Consequently,
\begin{align*}
\psi ' (t)
&= \frac{\phi _1(t)\phi _1' (t)+\phi _2(t)\phi _2' (t)}{\psi (t)} \\
&\leq \gamma (t)\cdot \frac{\phi _1(t)}{\psi (t)}\cdot \beta
_1(\psi (t))+\gamma (t)\cdot \frac{\phi _2(t)}{\psi (
t)}\cdot \beta _2(\psi (t)) \\
&\leq \gamma (t)[\beta _1(\psi (t))+\beta _2(\psi
(t))].
\end{align*}
It follows that
\[
\int_{t_0}^{t}\frac{\psi '(s)}{\beta _1(\psi
(s))+\beta _2(\psi (s))}ds\leq
\int_{t_0}^{t}\gamma (s)ds.
\]
Furthermore,  using \eqref{450} we obtain
\begin{equation}
\int_{\psi (t_0)}^{\psi (t)}\frac{d\tau }{\beta
_1(\tau )+\beta _2(\tau )}\leq \int_{t_0}^{t}\gamma (s)ds\leq
\int_{t_0}^{1}\gamma (s)ds<\int_{R^{\ast }}^{\infty }\frac{d\tau }{\beta
_1(\tau )+\beta _2(\tau )}.  \label{451}
\end{equation}
Note that $\psi (t_0)=R^{\ast }$. Then from \eqref{451} it
follows that there exists $R_1$ such that
\[
\psi (t)\leq R_1,
\]
for all $t\in [t_0,1]$. Then $| x(t)| \leq R_1$
and $| y(t)| \leq R_1$, for all $t\in [t_0,1]$,
whence
\begin{equation}
| x| _{C[t_0,1]}\leq R_1,\quad |y| _{C[t_0,1]}\leq R_1.  \label{454}
\end{equation}
Let $R=\max \{R_0,R_1\}$. From \eqref{453}, \eqref{454} we have
$| x| _{C[0,1]}\leq R$ and $| y|_{C[0,1]}\leq R$ as desired.
\end{proof}

\begin{remark} \rm
If $\omega _1(t,\tau )=\gamma _0(t)\beta _0(\tau )$,
 then the first inequality in \eqref{410} implies that 
$\beta _0(\tau )\leq c\tau +c' $ for all 
$\tau \in \mathbb{R}_{+}$ and some constants $c$
and $c'$; i.e., the growth of $\beta _0$ is at most linear. 
However, $\beta _1$ may have a superlinear growth.
Thus we may say that under the assumptions of 
Theorem \ref{thm4.1}, the growth of 
$f_1(t,u)$ in $u$ is at most linear for 
$t\in [0,t_0]$ and can be superlinear for 
$t\in [t_0,1]$.  The same can be said about $f_2(t,u)$.
\end{remark}

In particular, when $\alpha =\beta =0$, problem \eqref{1}
becomes the classical local initial value problem
\begin{equation}
\begin{gathered}
x'=f_1(t,x,y)\\
y'=f_2(t,x,y)\quad (\text{a.e. }t\in [0,1])\\
x(0)=y(0)=0,
\end{gathered}  \label{1*}
\end{equation}
and our assumptions reduce to the classical conditions 
(see \cite{ref9,op}) and Theorem \ref{thm4.1} gives the following result.

\begin{corollary}
Assume that
\[
| f_1(t,u)| \leq \gamma (t)\beta _1(|u| _{e}), \quad
| f_2(t,u)| \leq \gamma (t)\beta _2(| u| _{e})
\]
for $t\in [0,1]$ and $u\in \mathbb{R}^2$, where 
$\gamma \in L^{1}[0,1]$, while 
$\beta _1,\beta _2:\mathbb{R} _{+}\to \mathbb{R}_{+}$ 
are nondecreasing and $1/\beta _1,1/\beta _2\in L_{loc}^{1}(\mathbb{R}_{+})$.
 In addition assume that
\[
\int_0^{\infty }\frac{d\tau }{\beta _1(\tau )+\beta _2(\tau )}
>\int_0^{1}\gamma (s)ds.
\]
Then problem \eqref{1*} has at least one solution.
\end{corollary}

A result similar to the above corollary was given in \cite{nica-func-w}.

\begin{remark}
Since the trivial solution satisfies the boundary conditions, the
solution given by Theorem \ref{thm4.1} might be zero.
\end{remark}

\section{Numerical examples}

In this section, we give some numerical examples to illustrate the existence
results from Sections 2 and 3.

\begin{example} \label{examp5.1} \rm
Consider the  initial value problem
\begin{equation}
\begin{gathered}
x'(t)=0.1+\frac{1}{4}\frac{y^2(t)}{1+y^2(t)}\sin (2x(t))=:f(x,y) \\
y'(t)=0.1+\frac{2}{3}\frac{y^2(t)}{1+y^2(t)}\cos (2x(t))=:g(x,y) \\
x(0)=\int_0^{1/2}x(s)ds, \quad
y(0)=\int_0^{1/2}y(s)ds,
\end{gathered}\label{ex5.1}
\end{equation}
for $t\in [0,40]$.
\end{example}
We have that
\[
\alpha [u]=\int_0^{1/2}u(s)ds\Longrightarrow \alpha [1
]=\frac{1}{2}\Longrightarrow \| \alpha \| =\frac{1}{2
}.
\]
Consequently, $t_0=1/2$, $A_{\alpha }=2=B_{\beta }$ and
\[
M_0=\begin{pmatrix}
a_1 & b_1 \\
A_1 & B_1
\end{pmatrix}.
\]
However,
\begin{gather*}
\sup_{\xi ,\eta \in \mathbb{R}}
| \frac{\partial f(\xi ,\eta )}{\partial x}|  \leq \frac{1}{2}=a_1, \quad
\sup_{\xi ,\eta \in\mathbb{R}}| \frac{\partial f(\xi ,\eta )}{\partial y}
|  \leq \frac{3\sqrt{3}}{32}=b_1,
\\
\sup_{\xi ,\eta \in\mathbb{R}}| \frac{\partial g(\xi ,\eta )}{\partial x}
|  \leq \frac{4}{3}=A_1, \quad
\sup_{\xi ,\eta \in\mathbb{R}}| \frac{\partial g(\xi ,\eta )}{\partial y}
|  \leq \frac{\sqrt{3}}{4}=B_1
\end{gather*}
and then
\[
M_0=\begin{pmatrix}
\frac{1}{2} & \frac{3\sqrt{3}}{32} \\
\frac{4}{3} & \frac{\sqrt{3}}{4}
\end{pmatrix}
\]
has the eigenvalues
$\lambda _1=0$, $\lambda _2=0.9330\cdots$.
From  Theorem \ref{thm2.1},  problem \eqref{ex5.1} has a unique
solution, see Figure \ref{fig1}.

\begin{figure}[ht]
\begin{center}
\setlength{\unitlength}{1mm}
\begin{picture}(103,60)(-6,-3)
\put(0,0){\line(1,0){97}}
\multiput(12.5,-.5)(12.5,0){7}{\line(0,1){1}}
\put(23,-4){10}
\put(48,-4){20}
\put(73,-4){30}
\put(98,-4){40}
\put(96.6,-.89){$\rightarrow$}
\put(100,2){$t$}
\put(0,0){\line(0,1){50}}
\multiput(-.5,10)(0,10){5}{\line(1,0){1}}
\put(-6,-1){0.0}
\put(-6,9){0.5}
\put(-6,19){1.0}
\put(-6,29){1.5}
\put(-6,39){2.0}
\put(-6,49){2.5}
\qbezier(0,2)(9,9)(17,25)
\qbezier(17,25)(35,55)(62,46)
\qbezier(62,46)(81,40)(100,42)
\put(75,46){$x(t)$}
\qbezier(0,2)(4,5)(10,16)
\qbezier(10,16)(14,22)(18,16)
\qbezier(18,16)(25,5)(60,17)
\qbezier(60,17)(70,22)(80,19)
\qbezier(80,19)(90,16)(100,17)
\put(75,22){$y(t)$}
\end{picture}
\end{center}
\caption{The Chebpack $x,y$ solutions of the problem \eqref{ex5.1}.
 The numerical errors for the nonlocal boundary conditions are 
$bc1$ and $bc2$}
\label{fig1}
\end{figure}

The exact solution is approximated by the Matlab package \emph{Chebpack}
\cite{cp} and verified by the \texttt{ode45} solver of Matlab 
(i.e. \texttt{ode45} is applied to \eqref{ex5.1} with the initial conditions
 $x(0)$,  $y(0) $ given by Chebpack).

\begin{example} \label{examp5.2} \rm
Consider the  initial value problem
\begin{equation} \label{ex5.2}
\begin{gathered}
x'=-0.9x-1.8\frac{xy}{2+x^2}+90=:f(x,y) \\
y'=-0.2y-1.8\frac{xy}{2+x^2}+750=:g(x,y) \\
x(0)=\int_0^{1/2}x(s)ds, \quad
y(0)=\int_0^{1/2}y(s)ds,
\end{gathered}
\end{equation}
for $t\in [0,1]$.
\end{example}
We consider
\[
M_0=\begin{pmatrix}
a_1 & b_1 \\
A_1 & B_1
\end{pmatrix}.
\]
We have
\[
| \frac{x}{2+x^2}| \leq \frac{\sqrt{2}}{4},
\]
so that the matrix
\[
M_0=\begin{pmatrix}
0.9 & 0.6364 \\
0 & 0.8364
\end{pmatrix}
\]
has the eigenvalues
$\lambda _1=0.9$, $\lambda _2=0.8364$.
From  Theorem \ref{thm3.1},  problem \eqref{ex5.2} has at least one
solution.
Let us denote
\[
bc1(x_0,y_0)=x_0-\int_0^{1/2}x(s)ds,\quad
bc2(x_0,y_0)=y_0-\int_0^{1/2}y(s)ds
\]
where $x(s)$ and $y(s)$ are obtained by integrating the differential system
\eqref{ex5.2} with initial conditions $x(0)=x_0$, $y(0)=y_0$.
Figure \ref{fig2} shows, in the $(x_0,y_0)$-plane, the numerical
contour lines of $bc1(x_0,y_0)=0$ (solid line) and of $bc2(x_0,y_0)=0$
 (dashed line).
Their intersections give the initial conditions for which the solutions
 $x(s)$, $y(s)$ approximate the nonlocal conditions from \eqref{ex5.2}.
 In that region there are three intersection points $1,2,3$ corresponding to
three different solutions, which are improved by \texttt{fsolve} from Matlab to
\begin{gather*}
\texttt{init1}=[11.7467173136538\quad 167.2358959061741];\\
\texttt{init2}=[3.6799740768135\quad 156.9860214200612];\\
\texttt{init3}=[0.1962071293693\quad 152.5406128950519].
\end{gather*}
Taking these values as initial conditions for a Matlab solver for
differential systems, we obtain the corresponding three numerical solutions
of \eqref{ex5.2} represented in Figure \ref{fig2} with an accuracy
about $10^{-7}$ in nonlocal conditions.

\begin{figure}[ht] 
\begin{center} \scriptsize
\setlength{\unitlength}{1mm}
\begin{picture}(55,25)(-6,-3)
\put(0,20){\line(1,0){50}}
\put(0,0){\line(1,0){50}}
\multiput(5,-.5)(15,0){4}{\line(0,1){1}}
\put(4.5,-3){0}
\put(19.5,-3){5}
\put(33.5,-3){10}
\put(48.5,-3){15}
\put(0,0){\line(0,1){20}}
\put(-.5,10){\line(1,0){1}}
\put(-5.5,-.7){150}
\put(-5.5,9.3){160}
\put(-5.5,19.3){170}
\put(50,0){\line(0,1){20}}
\qbezier(0,4)(14,0)(17,5)
\qbezier(17,5)(20,20)(50,18)
\put(28,14){$bc2$}
\qbezier(5,20)(5.5,10)(7,0)
\qbezier(17,0)(18,10)(21,20)
\qbezier(38,20)(41,10)(42,0)
\put(40,15.5){1}
\put(19.5,7.8){2}
\put(7,3.2){3}
\put(42.5,7){$bc1$}
\put(6.5,16){$bc1$}
\end{picture}
%
\qquad
%
\begin{picture}(55,25)(-6,-3)
\put(0,20){\line(1,0){50}}
\put(0,0){\line(1,0){50}}
\put(25,-.5){\line(0,1){1}}
\put(-.5,-3){0}
\put(23,-3){0.5}
\put(48,-3){1.0}
\put(0,0){\line(0,1){20}}
\multiput(-.5,5)(0,10){2}{\line(1,0){1}}
\put(-4,4.3){20}
\put(-4,14.3){40}
\put(50,0){\line(0,1){20}}
\qbezier(0,1)(25,15)(50,19)
\put(23,9.6){$x_1(t)$}
\end{picture}
%
\\ 
%%
\begin{picture}(55,30)(-6,-3)
\put(0,20){\line(1,0){50}}
\put(0,0){\line(1,0){50}}
\put(25,-.5){\line(0,1){1}}
\put(23,-3){0.5}
\put(48,-3){1.0}
\put(0,0){\line(0,1){20}}
\put(-.5,10){\line(1,0){1}}
\put(-3,-.7){0}
\put(-3,9.7){5}
\put(-4,19.7){10}
\put(50,0){\line(0,1){20}}
\qbezier(0,8)(30,30)(30,1)
\put(30,1){\line(1,0){20}}
\put(22,9.6){$x_2(t)$}
\end{picture}
%
\qquad
%
\begin{picture}(55,30)(-6,-3)
\put(0,20){\line(1,0){50}}
\put(0,0){\line(1,0){50}}
\put(25,-.5){\line(0,1){1}}
\put(23,-3){0.5}
\put(48,-3){1.0}
\put(0,0){\line(0,1){20}}
\put(-.5,14.5){\line(1,0){1}}
\put(-3,-.7){0}
\put(-4.5,13.7){0.5}
\put(50,0){\line(0,1){20}}
\qbezier(0,2)(3,25)(5,16)
\qbezier(5,16)(10,2)(50,3)
\put(23,7){$x_3(t)$}
\end{picture}
\end{center}
\caption{Contour lines of $bc1(x_0,y_0)=0$ and
 of $bc2(x_0,y_0)=0$.
The solutions of problem \eqref{ex5.2} in example \ref{examp5.2}} \label{fig2}
\end{figure}


\subsection*{Acknowledgements}
 The author would like to warmly thank the anonymous referee for
his or her  careful reading and constructive suggestions,
Professor Damian Trif for his advice in using Chebpack software
for the numerical examples,  and  Professor Radu Precup for his constant
 support during the research activity.

This research was supported by the Sectoral
Operational Programme for Human Resources Development 2007-2013,
by the European Social Fund, under the project number
 POSDRU/107/1.5/S/76841 with the title
 ``Modern Doctoral Studies: Internationalization and
Interdisciplinarity'', 
and by  the Romanian National Authority for
Scientific Research, CNCS - UEFISCDI, project number
PN-II-ID-PCE-2011-3-0094.

\begin{thebibliography}{99}

\bibitem{ref12} R. P. Agarwal, M. Meehan, D. O'Regan;
 \emph{Fixed Point Theory and Applications}, Cambridge University Press, 
Cambridge, 2001.

\bibitem{ref4} A. Boucherif;
 \emph{Differential equations with nonlocal
boundary conditions}, Nonlinear Anal. \textbf{47} (2001), 2419-2430.

\bibitem{ref6} A. Boucherif, R. Precup;
 \emph{On the nonlocal initial
value problem for first order differential equations,} Fixed Point Theory
\textbf{4} (2003), 205-212.

\bibitem{bp2} A. Boucherif, R. Precup;
\emph{Semilinear evolution
equations with nonlocal initial conditions}, Dynamic Systems Appl. \textbf{16
} (2007), 507-516.

\bibitem{ref7} L. Byszewski;
\emph{Theorems about the existence and
uniqueness of solutions of a semilinear evolution nonlocal Cauchy problem},
J. Math. Anal. Appl. \textbf{162} (1991), 494-505.

\bibitem{ref8} L. Byszewski, V. Lakshmikantham;
 \emph{Theorem about the existence and uniqueness of a solution of a nonlocal 
abstract Cauchy problem in a Banach space}, Appl. Anal. \textbf{40} (1990), 11-19.

\bibitem{ref9} M. Frigon;
\emph{Application de la th\'{e}orie de la
transversalit\'e topologique \`a des probl\`emes non lin\'eaires pour des
\'equations diff\'erentielles ordinaires}, Dissertationes Math. 296, PWN,
Warsawa, 1990.

\bibitem{frig-lee} M. Frigon, J. W. Lee;
 \emph{Existence principle for
Carath\'{e}odory differential equations in Banach spaces}, Topol. Methods
Nonlinear Anal. \textbf{1 }(1993), 95-111.

\bibitem{prec-nica} O. Nica, R. Precup;
 \emph{On the nonlocal initial
value problem for first order differential systems}, Stud. Univ. Babe\c{s}
-Bolyai Math. \textbf{56 }(2011), No. 3, 125--137.

\bibitem{nica-func-w} O. Nica;
\emph{Nonlocal initial value problems for
first order differential systems,}
Fixed Point Theory (submitted).

\bibitem{nt} S. K. Ntouyas, P. Ch. Tsamatos;
 \emph{Global existence for
semilinear evolution equations with nonlocal conditions,} J. Math. Anal.
Appl. \textbf{210 }(1997), 679-687.

\bibitem{op} D. O'Regan, R. Precup;
 \emph{Theorems of Leray-Schauder
Type and Applications}, Gordon and Breach, Amsterdam, 2001.

\bibitem{ref11} R. Precup;
 \emph{Methods in Nonlinear Integral Equations},
Kluwer, Dordrecht, 2002.

\bibitem{p1} R. Precup;
 \emph{The role of matrices that are convergent to
zero in the study of semilinear operator systems}, Math. Comp. Modelling
\textbf{49} (2009), 703-708.

\bibitem{cp} D. Trif;
\emph{Chebpack}, 2011, \url{http://www.mathworks.com/matlabcentral/fileexchange/32227}.

\bibitem{webb-lan} J.R. L. Webb, K. Q. Lan;
 \emph{Eigenvalue criteria for
existence of multiple positive solutions of nonlinear boundary value
problems of local and nonlocal type}, Topol. Methods Nonlinear Anal. \textbf{
27 }(2006), 91-115.

\bibitem{webb} J. R. L. Webb;
\emph{A unified approach to nonlocal boundary
value problems}, Dynamic Systems and Applications. Vol. 5. Proceedings of
the 5th International Conference, Morehouse College, Atlanta, GA, USA, May
30-June 2, 2007, 510-515.

\bibitem{webb-infante} J. R. L. Webb,  G. Infante;
\emph{Positive solutions of nonlocal initial boundary value problems 
involving integral conditions}, 
Nonlinear Diff. Eqn. Appl. \textbf{15} (2008), 45-67.

\bibitem{webb-infante1} J. R. L. Webb, G. Infante;
\emph{Non-local boundary value problems of arbitrary order, }
J. London Math. Soc. (2) \textbf{79} (2009), 238--258.

\bibitem{webb-infante2} J. R. L. Webb, G. Infante;
\emph{Semi-positone nonlocal boundary value problems of arbitrary order},
 Commun. Pure Appl. Anal. \textbf{9} (2010), no. 2, 563-581.

\end{thebibliography}

\end{document}

