\documentclass[reqno]{amsart}
\usepackage{hyperref}

\AtBeginDocument{{\noindent\small
{\em Electronic Journal of Differential Equations},
Vol. 2006(2006), No. 100, pp. 1--24.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu  (login: ftp)}
\thanks{\copyright 2006 Texas State University - San Marcos.}
\vspace{9mm}}

\begin{document}
\title[\hfilneg EJDE-2006/100\hfil Different types of solvability conditions]
{Different types of solvability conditions
 for differential operators}

\author[S. G.  Kryzhevich, V. A. Volpert\hfil EJDE-2006/100\hfilneg]
{Sergey G. Kryzhevich, Vitaly A. Volpert }  % in alphabetical order

\address{Sergey G. Kryzhevich \newline
Mathematics Department, Saint Petersburg University, 28,
Universitetskiy pr., 198504 Petrodvoretz, Russia}
\email{kryzhevitz@rambler.ru}

\address{Vitaly A. Volpert \newline
Institute of Mathematics, UMR 5208 CNRS, University Lyon 1,
69622 Villeurbanne, France}
\email{volpert@math.univ-lyon1.fr}

\date{}
\thanks{Submitted January 9, 2006. Published August 31, 2006.}
\thanks{Supported by grants 03-01-06493 from RFFI,
 PD05-1.1-94 from the Government of \hfill\break\indent
Saint-Petersburg,  2271.2003.1  by the program
``State support of leading scientific schools". \hfill\break\indent
 Also supported by the scientific program of the Ministry of science
 and education of Russia \hfill\break\indent
``Russian Universities"}
\subjclass[2000]{34A30, 35J25, 47A53}
\keywords{Linear differential equations; solvability conditions;
\hfill\break\indent non-Fredholm operators}

\begin{abstract}
  Solvability conditions for linear differential equations
  are usually formulated in terms of orthogonality of the right-hand side
  to solutions of the homogeneous adjoint equation.
  However, if the corresponding operator does not satisfy the
  Fredholm property, such solvability conditions may not be applicable.
  For this case, we obtain another type of solvability conditions,
  for ordinary differential equations on the real axis,
  and for elliptic problems in unbounded cylinders.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{definition}[theorem]{Definition}
\newtheorem{condition}[theorem]{Condition}
\newtheorem{remark}[theorem]{Remark}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{example}[theorem]{Example}

\section{Introduction}

  Many methods of linear and nonlinear analysis are based
  on Fredholm type solvability conditions.
  We recall that an operator $L$ satisfies the Fredholm property
  if, by definition, the dimension of its kernel is finite,
  the image is closed, the codimension of the image is also
  finite.
  If it is the case then the nonhomogeneous equation $L u = f$
  is solvable if and only if $\phi(f)=0$ for a finite number
  of linearly independent functionals $\phi$ from the dual space.
  These functionals are solutions of the homogeneous adjoint equation
  $L^* \phi = 0$.

  General elliptic boundary-value problems in bounded domains
  satisfy the Fredholm property if they satisfy the conditions
  of ellipticity, proper ellipticity and the Lopatinskii condition
  (see \cite{adn}, \cite{Ag}, \cite{Volev} and the references
therein).
  In the case of unbounded domains these conditions are not
  sufficient.
  Some additional conditions formulated in terms of limiting
  operators should be imposed (see \cite{VV2} and the references
  therein).
  To illustrate these conditions consider the one-dimensional
  second order operator
  $$
 L u = a(x) u'' + b(x) u' + c(x) u , \quad    x \in \mathbb{R}
$$
  where $a, b$, and $c$ are bounded sufficiently smooth matrices.
  We can consider it as acting in Sobolev or in H\"older spaces.
  Let $h_k$ be a sequence of numbers, $h_k \to +\infty$ or $h_k\to-\infty$.
  Consider the shifted coefficients
  $ \tilde a_k(x) = a(x+h_k)$,
  $\tilde b_k(x) = b(x+h_k)$, $\tilde c_k(x) = c(x+h_k) $
   and choose locally convergent subsequences of these sequences.
  Then the operator with the limiting coefficients
  $$
 \hat L u = \hat a(x) u'' + \hat b(x) u' + \hat c(x) u ,
  \quad  x \in \mathbb{R}
$$
  is called a limiting operator.
  There can exist many limiting operators for the same operator
  $L$.
  The operator $L$ is Fredholm if in addition to the conditions
  mentioned above all limiting operators are invertible.
  This condition is necessary and sufficient.

  It is known that if an elliptic operator in an unbounded
  domain satisfies the Fredholm property, then the bounded solutions of
  the homogeneous equation $L u = 0$ decay exponentially at infinity.
  Suppose that, for the operator considered above,
  there exists a bounded solution $u_0(x)$ of this equation
  that does not converge to zero at infinity.
  Then there exists a sequence $h_k$ and a subsequence
  of the shifted solutions $u_0(x+h_k)$ locally converging to some
  limiting function $\hat u(x)$ such that it is a bounded nonzero
solution of one of the limiting problems $\hat L \hat u = 0$.
  Therefore the limiting operator is not invertible and
  the operator $L$ does not satisfy the Fredholm property.

  Thus, if the homogeneous equation has a bounded solution that
  does not decay at infinity, then the usual solvability conditions
  may not be applicable.
  In some cases it is possible to reduce an operator that does
  not satisfy the Fredholm property to an operator that satisfies
  it. It can be done by introduction of some special weighted
  spaces or replacing, for example, a differential operator
  by an integro-differential operator (see e.g. \cite{DMV}).
  In this work we develop another approach to study non Fredholm
  operators.
  In the case where the Fredholm type solvability conditions
  are not applicable we
  obtain another type of solvability conditions.
  They are also formulated in terms of solutions of the homogeneous
  adjoint equation but they cannot be written in terms of linear
  functionals from the dual space.

  First we obtain these solvability conditions for ordinary
  differential operators on the real axis.
  Then we apply these results to study elliptic problems in
  unbounded cylinders.
  Some spectral projections allow us to reduce them to a sequence
  of ordinary differential operators.

  Consider the operators $\mathbf{L} : U \to X$,
\begin{equation}
\mathbf{L} u=u_{xx}+\Delta_y u+A_0(x,y)u_x+\sum_{k=1}^m
  A_k(x,y)u_{y_k}+B(x,y)u \label{e1.1}
\end{equation}
  in an unbounded cylinder $\Omega={\mathbb{R}}\times\Omega'$
  with the homogeneous Dirichlet boundary condition.
  Here $\Omega'$ is a bounded domain in ${\mathbb{R}}^m$ with $C^{2+\delta}$
  boundary, $0<\delta<1$, the coefficients of the operator
  belong to $C^{\delta}(\bar\Omega)$,
  $x$ is a variable along the axis of the cylinder $\Omega$, and
  $y=(y_1,\dots,y_m)$ is a vector variable in the section
  $\Omega'$.
  The function spaces are
  $$
U=\{u\in C^{2+\delta}(\bar \Omega):
  u|_{\partial \Omega}=0\} \quad \text{and}\quad X=C^\delta(\bar \Omega) .
$$
 Here $C^{\delta}(\bar\Omega)$ is a H\"older space with the norm
 $$
\| u \| = \sup_\Omega |u(x)| + \sup_{x,y \in \Omega}
 \frac{|u(x) - u(y)|}{|x-y|^\delta} \, ,
$$
 $C^{2+\delta}(\bar\Omega)$ is the space of functions whose second
 derivatives belong to $C^{\delta}(\bar\Omega)$.

  The Fredholm property of such operators is studied in
  \cite{cvo}--\cite{vk3}.
  The particular form of the operator $\mathbf{L}$,
\begin{equation}
\mathbf{L} u=u_{xx}+\Delta_y u+A(x)u_x+ B(x)u , \label{e1.2}
\end{equation}
  where its coefficients are independent of the variable $y$
  is more convenient to study it by the Fourier decomposition
  (see below).
  In some cases more general operator \eqref{e1.1} can be reduced
  to the form \eqref{e1.2} by a continuous deformation in the class
  of Fredholm operators (see \cite{cvo}) or be approximated by
  an operator \eqref{e1.2}.

We shall study the linear boundary problems
\begin{equation}
\mathbf{L} u=0 \label{e1.3}
\end{equation}
and the nonhomogeneous one
\begin{equation}
\mathbf{L} u=f,\quad f\in X. \label{e1.4}
\end{equation}
Denote by $\omega_k$ eigenvalues of the Laplace operator $\Delta_y$
  on the space
  $$
U'=\{v\in C^{2+\delta}(\bar\Omega')\, :\, v|_{\partial
\Omega'}=0\}
$$
  and by $p_k$ their multiplicities. Note that all $\omega_k$ are
  negative, tend to $-\infty$ as $k\to \infty$ and their
  multiplicities $p_k$ are finite \cite{her}. The corresponding
   eigenfunctions $\varphi_k^i$ ($k\in \mathbb{N}$, $i=1,\dots,p_k$)
   form an orthogonal basis in the space
   ${\mathbb L}^2(\Omega')$,
   so the functions $u$ and $f$ can be presented as Fourier series
\begin{equation}  \label{e1.5}
\begin{gathered}
u(x,y)=\sum_{k=1}^\infty \sum_{i=1}^{p_k} u_k^i(x)
  \varphi_k^i (y); \\
f(x,y)=\sum_{k=1}^\infty
\sum_{i=1}^{p_k} f_k^i(x) \varphi_k^i (y).
\end{gathered}
\end{equation}
Having denoted
$\lambda_k=\sqrt{-\omega_k}$, $v_k^i={u_k^i}'/\lambda_k$,
$w_k^i=(u_k^i, v_k^i)^T$, we can reduce boundary problems \eqref{e1.3}
and \eqref{e1.4} to infinite sequences of $2n$-dimensional ordinary
differential systems
\begin{equation}
{w_k^i}'=P_k(x)w_k^i \label{e1.6}
\end{equation}
and
\begin{equation}
{w_k^i}'=P_k(x)w_k^i+F_k^i(x) \label{e1.7}
\end{equation}
respectively. Here
$$
P_k(x)=
\begin{pmatrix}
0 & \lambda_k E_n\\
-\big(B(x)/\lambda_k\big) +\lambda_k E_n &
-A(x)
\end{pmatrix}; \quad F_k^i(x)=
\begin{pmatrix}
0 \\
f_k^i(x)/ \lambda_k
\end{pmatrix},
$$
where $E_n$ is the $n\times n$ unit matrix.



\begin{definition}[\cite{pl1}] \label{def1.1} \rm
 Let $I$ be a closed convex subset
of ${\mathbb{R}}$. Consider an $n\times n$ matrix $P(x)$, continuous
and bounded on $I$. The system
$$  u'=P(x)u
$$
is \emph{dichotomic} on $I$ if there exist positive constants $c$
and $\lambda$, and subspaces $U^s(x)$ and $U^u(x)$ of ${\mathbb{R}}^n$,
defined for all $x\in I$ and such that
\begin{enumerate}
\item $\Phi(x,\xi)U^{s,u}(\xi)=U^{s,u}(x)$ for all $x,\xi\in I$;
\item $U^s(x) \oplus U^u(x)={\mathbb{R}}^n$ for every $x\in I$;
\item $|\Phi(x,\xi)u_0|\le c\exp(-\lambda (x-\xi))|u_0|$ for all
$x,\xi\in I$: $x \ge \xi$, $u_0\in U^s(\xi)$;
\item $|\Phi(x,\xi)u_0|\le c\exp(\lambda (x-\xi))|u_0|$,
if $x,\xi\in I$: $x \le \xi$, $u_0\in U^u(\xi)$.
\end{enumerate}
\end{definition}

This property is also called hyperbolicity and the corresponding
system is called hyperbolic. Nevertheless, we shall always call it
dichotomic in order not to confuse this notion with hyperbolicity
of partial differential equations. Note that Definition \ref{def1.1}
coincides with the definition of exponential dichotomy given by
Coppel \cite[p.\,10]{cop} with the additional assumption of the
boundedness of the matrix $P$.

Here and below we denote by $|\cdot|$ the Euclidean vector norm
and the corresponding matrix norm, while by $\|\cdot\|$ the norms
in function spaces. We shall use the following hypotheses:

\begin{condition} \label{cond1.2} \rm
All systems \eqref{e1.6} are dichotomic on ${\mathbb{R}}$.
\end{condition}

\begin{condition} \label{cond1.3} \rm
All systems \eqref{e1.6} are dichotomic both on
${\mathbb{R}}^+=[0,+\infty)$ and on ${\mathbb{R}}^-=(-\infty,0]$.
\end{condition}

It is shown in \cite{vk1} that there exists a number
$N\in \mathbb{N}$ (which depends on the operator $\mathbf{L}$) such
that every system \eqref{e1.6} for $k>N$ is dichotomic on ${\mathbb{R}}$.
  Therefore it is sufficient to check Conditions \ref{cond1.2} and
\ref{cond1.3}  for a finite set of systems \eqref{e1.6}.

The following results are established in \cite{vk2}.


\begin{theorem} \label{thm1.4}
The operator $\mathbf{L}$ of the form
\eqref{e1.2} is invertible if and only if it satisfies Condition \ref{cond1.2}.
\end{theorem}


\begin{theorem} \label{thm1.5}
The operator $\mathbf{L}$ of the form
\eqref{e1.2} is Fredholm if and only if it satisfies Condition \ref{cond1.3}.
  Its index, that is, the difference between the dimension
  of the kernel and the codimension of the image,
  is given by the expression
$$
{\mathop{\rm ind}\nolimits} \mathbf{L}=\sum_{k=1}^{+\infty} p_k(d^+_k-d^-_k),
$$
where $d^+_k$ and $d_k^-$ are dimensions of spaces
$M^{s,+}_{k}(x)$ and $M^{s,-}_{k}(x)$, stable for systems
\eqref{e1.6} for $t\ge 0$ and $t\le 0$ respectively, and $p_k$
is a multiplicity of the eigenvalue $\omega_k$.
\end{theorem}

  These theorems show that the dichotomy condition for elliptic
  operators introduced by
  Palmer \cite{pal} (see also \cite{boy1}, \cite{boy2})
  can be reduced to a sequence of dichotomy conditions for
  systems \eqref{e1.6}.

  If one of the systems \eqref{e1.6} has a bounded solution that does
  not converge to zero at infinity, then Conditions
\ref{cond1.2} and \ref{cond1.3}
  are not satisfied, and the elliptic operator does not satisfy
  the Fredholm property.
  To study such operators we introduce almost dichotomic systems
  (Section 3, 4) and weakly hyperbolic systems (Section 5)
  and obtain for them solvability conditions.
  These results are applied in Section 6 to study elliptic
  operators.
  In the next section we present a simple example illustrating
  non Fredholm solvability conditions.

\section{Example of non Fredholm solvability conditions}

  We present here a simple example that illustrates the classical
  Fredholm type solvability conditions and other type solvability
conditions
  when the Fredholm property is not satisfied.
  Consider the scalar equation
\begin{equation}
\frac{du}{dt} = a(t) u + f(t), \quad t \in \mathbb{R} .
\label{e2.1}
\end{equation}
  One of solutions of \eqref{e2.1} is given by the equality
\begin{equation}
  u(t) = u_0(t) \int_0^t v_0(\tau) f(\tau) d\tau ,
  \label{e2.2}
\end{equation}
  where
  $$ u_0(t) = e^{\int_0^t a(\tau) d\tau} , \quad
     v_0(t) = e^{-\int_0^t a(\tau) d\tau}=\frac1{u_0(t)},
$$
where  $u_0(t)$ is a solution of the homogeneous equation,
  and $v_0(t)$ is a solution of the homogeneous adjoint equation
  $$
  \frac{du_0}{dt} = a(t) u_0 , \quad
  \frac{dv_0}{dt} = -a(t) v_0 .
$$
  Let us introduce the functions
\begin{gather*}
 \Phi^+(t) = |u_0(t)| \int_0^t |v_0(\tau)| d\tau , \quad
     \Psi^+(t) = |u_0(t)| \int_t^\infty |v_0(\tau)| d\tau ,
     \quad t > 0,  \\
 \Phi^-(t) = |u_0(t)| \int_t^0 |v_0(\tau)| d\tau , \quad
     \Psi^-(t) = |u_0(t)| \int_{-\infty}^t |v_0(\tau)| d\tau ,
     \quad t < 0 .
\end{gather*}


\begin{condition} \label{cond2.1} \rm
  There exists a positive constant $M$ such that:\\
 - either
  $\Phi^+(t) \leq M$ for all $t \geq 0$
  or the integral in the expressions for $\Psi^+(t)$ is defined and
  $\Psi^+(t) \leq M$ for all $t \geq 0$,
\\
  - either
  $\Phi^-(t) \leq M$ for all $t \leq 0$
  or the integral in the expressions for $\Psi^-(t)$ is defined and
  $\Psi^-(t) \leq M$ for all $t \leq 0$.
\end{condition}

 \begin{proposition} \label{prop2.2}
Let Condition \ref{cond2.1} be satisfied. If at least one of the functions
$\Phi^+(t)$ and $\Phi^-(t)$ is bounded then equation \eqref{e2.1} has a
bounded solution for any bounded function $f$. If both of them are
not bounded, then a bounded solution exists if and only if
\begin{equation}
\int_{-\infty}^\infty v_0(t) f(t) dt = 0 .
  \label{e2.3}
\end{equation}
\end{proposition}

\begin{proof}
 Suppose that both functions $\Phi^+(t)$ and $\Phi^-(t)$ are
  bounded. Then the solution of equation \eqref{e2.1} is given by expression
  \eqref{e2.2}, and it is obviously bounded.

  Suppose next that $\Phi^+(t)$ is bounded and $\Phi^-(t)$ is not
  bounded. Then $\Psi^-(t)$ is defined. Put
\begin{equation}
u^-(t) = u_0(t) \int_{-\infty}^t v_0(\tau) f(\tau) d\tau .
  \label{e2.4}
\end{equation}
  It is easy to verify that $u^-(t)$ is bounded on the whole axis for any
bounded $f$.
  Moreover, since $u_0(t)$ is not bounded as $t\to-\infty$, this
  function $u^-(t)$ is the only solution of \eqref{e2.1}, bounded as
  $t\to-\infty$.

The case when $\Phi^-(t)$ is bounded and $\Phi^+(t)$ is not, is
similar. The bounded solution is given by formula
\begin{equation}
u^+(t) = - u_0(t) \int_t^\infty v_0(\tau) f(\tau) d\tau. \label{e2.5}
\end{equation}
This is the only solution, bounded as $t\to+\infty$.

  If both functions $\Phi^+(t)$ and $\Phi^-(t)$ are not
  bounded but $\Psi^+(t)$ and $\Psi^-(t)$ are bounded, then
  $u_0(t)$ is not bounded as $t \to \pm \infty$. Therefore the
  functions $u^-$ and $u^+$, defined by \eqref{e2.4} and \eqref{e2.5}, are the
  only solutions, bounded as $t\to-\infty$ and $t\to+\infty$
  respectively. The solution, bounded on the whole axis exists if
  and only if $u^+(0) = u^-(0)$. This gives us the necessity
  and sufficiency of condition \eqref{e2.3}.
The proposition is proved.
\end{proof}



\begin{example} \label{exa2.3} \rm
  Suppose that $a(t) = a^+$ for $t$ sufficiently large, and
  $a(t) = a^-$ for $-t$ sufficiently large.
  If $a^\pm \neq 0$, then $u_0(t)$ and $v_0(t)$ behave
  exponentially at infinity.
  Then Condition \ref{cond2.1} is satisfied.
\end{example}

Note that  Proposition \ref{prop2.2} shows that Condition \ref{cond2.1}
 is sufficient for the
  Fredholm property.
  Condition \eqref{e2.3} is a typical Fredholm type solvability
  condition.
  It may not be satisfied.
  Suppose for example that $v_0(t)$ is integrable.
  We can choose $t_0$ such that, for the function
  $$
f(t) =  \begin{cases}
  1 , & t \geq t_0 \\
  -1, & t < t_0
  \end{cases}
  $$
  Condition \eqref{e2.3} is satisfied.
  From the integrability of $v_0(t)$ it follows that $u_0(t)$ is not
  bounded as $t \to \pm \infty$.
  Therefore, the functions $\Phi^+(t)$ and $\Phi^-(t)$ are not
  bounded either.
  If Condition \ref{cond2.1} is not satisfied, then at least one of the functions
  $\Psi^+(t)$ and $\Psi^-(t)$ is not bounded.
  Hence there is no bounded solution of equation \eqref{e2.1}
  with such $f$.
  Thus, Condition \eqref{e2.3} may not be sufficient for solvability of
  equation \eqref{e2.1}.

  To illustrate another type of solvability conditions suppose that
  the function
  $$ b(t) = \int_0^t a(s) ds $$
  is bounded uniformly.
  Then $v_0(t)$ is bounded and $|u_0(t)| \geq \varepsilon > 0$
  for some $\varepsilon$.
  Therefore the solution given by \eqref{e2.2} is bounded if
  and only if
 \begin{equation}
 \sup_t \big|\int_0^t v_0(s) f(s) ds\big| < \infty .
  \label{e2.6}
\end{equation}
  As above, the solvability condition is given in terms of
  bounded solutions of the homogeneous adjoint equation.
  However, the principal difference is that condition \eqref{e2.6},
  contrary to Fredholm type solvability conditions,
  cannot be formulated in the form $\phi(f)=0$, where
  $\phi$ is a functional from the dual space.

  We will see below that solvability conditions of this type
  are also applicable for systems of equations.

\section{Ordinary differential systems on the real line}

  In this section we study invertibility and Fredholm property for
  linear operators, corresponding to o.d.e. systems.
  Let
  $u\in{\mathbb{R}}^n$.
  Denote by $|\cdot|$ the Euclidean vector norm in
  ${\mathbb{R}}^n$ and the corresponding matrix norm and by $\langle
  \cdot,\cdot \rangle$ the scalar product in ${\mathbb{R}}^n$.
  Consider the linear system
  \begin{equation}
u'=P(x)u\label{e3.1}
\end{equation}
  where the matrix $P(x)$ is defined, bounded and continuous on the interval
  $(a,b)\subset {\mathbb{R}}$.
  Here $a$ is a real number or $-\infty$ and $b$
  is a real number or $+\infty$.
  Let $\Phi(x,t)$ be the Cauchy matrix of system \eqref{e3.1}.


\begin{definition} \label{def3.1}
 The system \eqref{e3.1} is \emph{almost dichotomic}
on $(a,b)$ with positive constants $c$ and $\lambda$ if for every
$x\in (a,b)$ there exist three spaces $M_S(x)$ (stable space),
$M_U(x)$ (unstable space) and $M_B(x)$ (zero space), satisfying
following conditions:
\begin{enumerate}
   \item $M_S(x)\oplus M_U(x)\oplus M_B(x)={\mathbb{R}}^n$ for all $x\in
   (a,b)$;
   \item $\Phi(x,t)M_\sigma(t)=M_\sigma(x)$ for all $\sigma\in
   \{S,U,B\}$, $x,t\in (a,b)$;
   \item $|\Phi(x,t)u_0|\le c\exp(-\lambda (x-t))|u_0|$ for
   all $x\ge t$, $x,t\in (a,b)$, $u_0\in M_S(t)$;
   \item $|\Phi(x,t)u_0|\le c\exp(\lambda (x-t))|u_0|$ for
   all $x\le t$, $x,t\in (a,b)$, $u_0\in M_U(t)$;
   \item $|\Phi(x,t)u_0|\le c|u_0|$ for
   all $x,t\in (a,b)$, $u_0\in M_B(t)$;
\end{enumerate}
\end{definition}

The following statement is evident.

\begin{lemma} \label{lem3.2}
Let matrix $P(x)$ be constant, i.e.
$P(x)\equiv P$. The system \eqref{e3.1} is almost dichotomic if
and only if for every purely imaginary eigenvalue $\lambda$ of the
matrix $P$ the number of linearly independent eigenvectors
corresponding to $\lambda$ is equal to the multiplicity of
$\lambda$.
\end{lemma}

\begin{remark} \label{rmk3.3} \rm
In other words, the condition is the following:
for every $\lambda\in i{\mathbb{R}}$ every block in the Jordan form
of the matrix $P$ corresponding to $\lambda$ is simple.
\end{remark}


\begin{remark} \label{rmk3.4} \rm
The statement of the lemma holds true if the
matrix $P$ does not have purely imaginary eigenvalues at all.
  In this case the space $M_B$ is trivial and system \eqref{e3.1} is
dichotomic.
\end{remark}


\begin{definition}[\cite{adr}] \label{def3.5} \rm
Consider the change of variables
\begin{equation}
u=L(x)v, \quad x\in {\mathbb{R}}.\label{e3.2}
\end{equation}
  It is called a \emph{Lyapunov} transform if the matrix $L(x)$
is $C^1$-smooth, invertible, and all matrices $L(x)$, $L^{-1}(x)$
and $L'(x)$ are bounded.
\end{definition}

\begin{lemma} \label{lem3.6}
  Let system \eqref{e3.1} be almost
  dichotomic and let the dimensions of the corresponding spaces
$M_S(x)$, $M_U(x)$ and $M_B(x)$ be $n_S$, $n_U$ and $n_B$,
  respectively. Then, for every $x$ there exist continuous
projectors $\Pi_S(x)$,
$\Pi_U(x)$ and $\Pi_B(x)$ on the spaces $M_S(x)$, $M_U(x)$ and
$M_B(x)$ respectively, such that $\Pi_S(x)+\Pi_U(x)+\Pi_B(x)\equiv
{\mathop{\rm id\,}}$.  These projectors are uniformly
bounded.

Also, there exists a Lyapunov transform \eqref{e3.2},
  which reduces system \eqref{e3.1} to the form
\begin{equation}
 v'=\widetilde{P}(x)v, \label{e3.3}
\end{equation}
  where $v=(v_S,v_U,v_B)$,
  $\widetilde{P}(x)=\mathop{\rm diag}(P_S(x),P_U(x),P_B(x))$, and system
  \eqref{e3.3} splits into three subsystems:
 \begin{gather}
 {v_S}'=P_S(x)v_S , \label{e3.4}\\
 {v_U}'=P_U(x)v_U , \label{e3.5}\\
 {v_B}'=P_B(x)v_B. \label{e3.6}
\end{gather}
  Systems \eqref{e3.4}--\eqref{e3.6} satisfy
  the following properties:
\begin{enumerate}
   \item The system \eqref{e3.4} is steadily dichotomic, i.e. it is
   dichotomic and the corresponding stable space coincides with the
space ${\mathbb{R}}^{n_S}$ for all $x$.
   \item The system \eqref{e3.5} is unsteadily dichotomic, i.e. it
is dichotomic and the corresponding unstable space coincides with
the space ${\mathbb{R}}^{n_U}$ for all $x$.
   \item Every solution of the system \eqref{e3.6} is bounded.
\end{enumerate}
\end{lemma}

\begin{remark} \label{rmk3.7} \rm
 The matrix $\widetilde{P}(x)$ can be found by   the formula
\begin{equation}
\widetilde{P}(x)=L^{-1}(x)P(x)L(x)-L^{-1}(x) L'(x).\label{e3.7}
\end{equation}
 Since the matrix $P(x)$ is bounded, the matrix $\widetilde{P}(x)$
is also bounded.
  If for a certain $\delta\ge 0$, $P(x) \in C^\delta$ and
  $L(x) \in C^{1+\delta}$, then $\widetilde{P}(x) \in C^\delta$.
\end{remark}

  The proof of Lemma \ref{lem3.6} is the same as the proof for
  dichotomic (hyperbolic) ordinary differential systems
\cite[Lemma 3, p.41]{cop}, \cite[Theorem 0.1,  p.14]{pliss}.

  \begin{lemma} \label{lem3.8}
 If the system \eqref{e3.1} is steadily dichotomic, the dual system
\begin{equation}
 u'=-P^T(x) u,\label{e3.8}
\end{equation}
  is unsteadily dichotomic. If \eqref{e3.1} is an unsteadily
  dichotomic system, then the system \eqref{e3.8} is steadily
  dichotomic. If the system \eqref{e3.1} is almost dichotomic
with all solutions bounded,   the dual system also is.
\end{lemma}

  The lemma above follows from the fact that for every fundamental
  matrix $\Phi(x)$ of system \eqref{e3.1}, the matrix
  $(\Phi^{-1})^T(x)$ is fundamental for system \eqref{e3.8}.

The following statement is evident.

\begin{lemma} \label{lem3.9}
Any system \eqref{e3.1}, which splits into
almost dichotomic blocks, is almost dichotomic. The stable,
unstable and bounded spaces are direct products of the
corresponding spaces for blocks.
\end{lemma}

Having fixed a number $\delta\ge 0$, define spaces
$X=C^\delta({\mathbb{R}}\to {\mathbb{R}}^n)$,
$Y=C^{1+\delta}({\mathbb{R}}\to {\mathbb{R}}^n)$ and consider a
function $f\in X$.


\begin{theorem} \label{thm3.10}
 Let system \eqref{e3.1} be almost dichotomic on ${\mathbb{R}}$,
  and the matrix $P(x)$ be bounded in
$C^{\delta}({\mathbb{R}}\to{\mathbb{R}}^{n^2})$.
  Then for any $f\in X$ the system
\begin{equation}
u'=P(x)u+f(x)\label{e3.9}
\end{equation}
  has a solution $\upsilon(x)\in Y$ if and only if
\begin{equation}
\sup_{x\in{\mathbb{R}}}\big|\int_0^x\langle \varphi(s),f(s)
   \rangle\,ds\big|<+\infty   \label{e3.10}
\end{equation}
for every bounded solution $\varphi(s)$ of
  system \eqref{e3.8}.
\end{theorem}

\begin{proof} Transformation \eqref{e3.2}, which exists due to
Lemma \ref{lem3.6}, reduces system \eqref{e3.9} to the form
 \begin{equation}
v'=\widetilde{P}(x)v+g(x)\label{e3.11}
\end{equation}
  where $\widetilde{P}(x)$ satisfies \eqref{e3.7}, and $g(x)=L^{-1}(x)f(x)$. If
  $f(x)\in X$, then $g(x)\in X$ and vice versa.
  System \eqref{e3.11} splits into three subsystems
\begin{gather}
{v_S}'=P_S(x)v_S+g_S(x), \label{e3.12} \\
{v_U}'=P_U(x)v_U+g_U(x) , \label{e3.13} \\
{v_B}'=P_B(x)v_B+g_B(x). \label{e3.14}
\end{gather}
  Here $g(x)=(g_S(x),g_U(x),g_B(x))$.
  Systems \eqref{e3.9} and \eqref{e3.11} have bounded solutions if and only if
  each system \eqref{e3.12}, \eqref{e3.13}, and \eqref{e3.14} has a
bounded solution.

  Let $\Psi(x,t)$ be the Cauchy matrix of system \eqref{e3.3}.
  It can be written in the form
$$
\Psi(x,t)={\mathop{\rm diag\,}\nolimits}(\Psi_S(x,t),\Psi_U(x,t),\Psi_B(x,t))
$$
where $\Psi_S(x,t)$, $\Psi_U(x,t)$ and $\Psi_B(x,t)$ are the
Cauchy matrices for systems \eqref{e3.4}, \eqref{e3.5} and \eqref{e3.6},
respectively.
Since systems \eqref{e3.4} and \eqref{e3.5} are dichotomic, the nonhomogeneous
systems \eqref{e3.12} and \eqref{e3.13} have for every $g$ bounded solutions of
the form
$$
{v_S}(x)=\int_{-\infty}^x \Psi_S(x,t)g_S(t)\,dt;\quad
{v_U}(x)=-\int_x^{\infty} \Psi_U(x,t)g_U(t)\,dt.
$$
  All solutions of the system \eqref{e3.14} have the form
$$
\Psi_B(x)C+\int_0^x \Psi_B(x,t) g_B(t)\, dt.
$$
  Here $\Psi_B(x)=\Psi_B(x,0)$.
  Every solution of system \eqref{e3.6} is bounded.
  Therefore the matrix $\Psi_B(x)$ is also bounded.
  Hence, it is sufficient to verify that the solution
  $$
v_B(x)=\int_0^x \Psi_B(x,t) g_B(t)\, dt=
  \Psi_B(x)\int_0^x \Psi_B^{-1}(t) g_B(t)\, dt
$$
  is bounded.
  Let $c$ be the constant from Definition \ref{def3.1} for system \eqref{e3.1}, and
  $K>0$ be such that
  $\max(\|L(x)\|_{C^{1+\delta}},\|L^{-1}(x)\|_{C^{1+\delta}})<K$.
  Then every column of the matrices $\Psi_B(x)$ and $\Psi_B^{-1}(x)$
is bounded by $cK$. Hence
  $\max(\|\Psi_B(x)\|_{C^{1+\delta}},\|\Psi_B^{-1}(x)\|_{C^{1+\delta}})
  \le \sqrt{n}cK$.

  Thus, $v_B(x)$ is bounded if and only if the integral
$$
I(x)=\int_0^x \Psi_B^{-1}(t) g_B(t)\, dt
$$
  is bounded.
  Consider the matrix $\Xi(x)$ which is obtained from
$\Psi_B^{-1}$ by adding $n_U+n_S$ zero rows.
  It follows from Lemmas \ref{lem3.8} and \ref{lem3.9} that every bounded
  solution of the system
\begin{equation}
v'=-\widetilde{P}^T(x)v \label{e3.15}
\end{equation}
is a linear combination of columns of $\Xi(x)$.
  Hence $I(x)$ is bounded if and only if the condition
\begin{equation}
\sup_{x\in  {\mathbb{R}}}\big| \int_0^x\langle \eta(t),g(t)\rangle\,
  dt\big|<+\infty \label{e3.16}
\end{equation}
is satisfied for every bounded solution $\eta(x)$ of \eqref{e3.15}.

  On the other hand, $\Phi(x)=L(x)\Psi(x)$ is a fundamental matrix of
  system \eqref{e3.1}. Then $\Psi^{-1}(x)=\Phi^{-1}(x)L(x)$.
  Hence every bounded solution
  $\eta(x)$ of system \eqref{e3.15} can be written in the form
  $\eta(x)=L^T(x)\varphi(x)$, where $\varphi(x)$ is a bounded
  solution of \eqref{e3.8}.
   It is easy to see that this correspondence is
  one to one.
  Consequently, we can rewrite the integral in \eqref{e3.16} in the form
\begin{equation}
\int_0^x\langle
  L^T(t)\varphi(t),L^{-1}(t)f(t)\rangle\, dt= \int_0^x\langle
  \varphi(t),f(t)\rangle\, dt. \label{e3.17}
\end{equation}
 Thus, there exists a bounded solution of system \eqref{e3.9} if and only if
  expression \eqref{e3.17} is uniformly bounded.
  The theorem is proved.
\end{proof}

\begin{remark} \label{rmk3.11} \rm
Condition \eqref{e3.10} is not a Fredholm type
solvability condition.
\end{remark}

  Bounded solutions of system \eqref{e3.8} form a linear space $H$
  of the dimension
$$
n_B=\dim M_B(x).
$$
  Therefore, it is sufficient to verify \eqref{e3.10} for some basis in
  $H$, that is for solutions of \eqref{e3.8} with initial data in a
  basis of $M_B(0)$.

For every function $f\in X$, satisfying \eqref{e3.10}, a bounded solution
may be found by the formula
\begin{equation}
\begin{aligned}
\mathcal{L}f(x)
&= \int_{-\infty}^x \Phi(x,s)\Pi_S(s)f(s)\,ds+
   \int_0^x \Phi(x,s)\Pi_B(s)f(s)\,ds\\
&\quad -   \int_x^{+\infty} \Phi(x,s)\Pi_U(s)f(s)\,ds.
\end{aligned} \label{e3.18}
\end{equation}
If the integral
$$
\int_0^x \Phi(x,s)\Pi_B(s)f(s)\,ds
$$
is not bounded, it increases
polynomially. On the other hand, any function $\Phi(x,0)C$ for any
$C\in {\mathbb{R}}^n$ is bounded or increases exponentially (this
follows from Definition \ref{def3.1}). Hence, if the expression \eqref{e3.18} is
not bounded, then system \eqref{e3.9} has no bounded solutions at all.
  If \eqref{e3.18} is bounded, then all solutions of the form
\begin{equation}
u(x)=\mathcal{L}f(x)+\Phi(x,0)C , \label{e3.19}
\end{equation}
where $C\in M_B(0)$, are also bounded.

Define the operator $\mathbf{T}_P :Y\to X$ by the formula
$\mathbf{T}_P u=u'-P(x)u$. If the space $M_B(x)$ is not trivial,
then the operator $\mathbf{T}_P$ is not Fredholm but it can
satisfy the Fredholm
  property in other function spaces.

 Assume that
  system \eqref{e3.1} is almost dichotomic on all the line.
   Denote by
  $\mathcal{B}$ the set of all bounded solutions of this system and
  by $\mathcal{B}^*$ the set of bounded solutions of the adjoint
  system \eqref{e3.8}.
  Define the space
$$
X_{P,\delta}=\big\{f\in C^{\delta}({\mathbb{R}}\to{\mathbb{R}}^n):
 \big\|\int_0^x \langle f(s), \varphi(s)\rangle \, ds
\big\|_{C^0}<+\infty\mbox{ for all }\varphi(x)\in
\mathcal{B}^*\big\}.
$$
  It follows from \cite[Theorem 3.10]{vk1} that the codimension of the
space $X_{P,\delta}$ in $X$   is infinite if the space $M_B(x)$ is
 not trivial (otherwise   $X_{P,\delta}=X$).

  Let   $\varphi_1(x), \dots, \varphi_{n_B}(x)$
  be a basis in $\mathcal{B}^*$.
  The space $X_{P,\delta}$ with the norm
  $$
\|f\|_{P,\delta}=\|f\|_{C^\delta}+\sum_{k=1}^{n_B}
  \big\|\int_0^x \langle f(s),
  \varphi_k(s)\rangle\,ds\big\|_{C^0}
$$
  is a Banach space.
  We have $\mathbf{T}_P Y=X_{P,\delta}$ since every bounded solution of the
  system \eqref{e3.9} is of the form \eqref{e3.19}.
  Taking into consideration the
  space $Y'=\mathcal{L}X_{P,\delta}\subset Y$, we obtain
$Y=\mathcal{B}\oplus Y'$.
  Thus, $\mathbf{T}_P$ considered as an operator from $Y$ to $X_{P, \delta}$
is
  Fredholm, and ${\mathop{\rm ind\,}} \mathbf{T}_P=n_B$.

\section{Systems on half-lines}

  Similarly to the previous section we can consider systems \eqref{e3.1}
almost
  dichotomic on the half-axes ${\mathbb{R}}^-$ and ${\mathbb{R}}^+$.
  Let system \eqref{e3.1} be almost dichotomic on ${\mathbb{R}}^+$. Denote the
  corresponding spaces by $M_S^+(x)$, $M_U^+(x)$ and $M_B^+(x)$
  and their dimensions by $n_S^+$, $n_U^+$ and $n_B^+$,
  respectively.

  System \eqref{e3.1} has a bounded solution on the half-axis ${\mathbb{R}}^+$
  if and only if
\begin{equation}
 \sup_{x\ge 0}\big|\int_0^x\langle \varphi^+(s),f(s)
   \rangle\,ds\big|<+\infty\label{e4.1}
\end{equation}
  for any solution $\varphi^+(x)$ of the adjoint system \eqref{e3.8} such
  that $\varphi^+(x)$ is bounded on ${\mathbb{R}}^+$.
  Note that if $\varphi^+(x)$ is
  exponentially decaying, then condition \eqref{e4.1} is satisfied
  for any bounded $f$.
  If \eqref{e4.1} is satisfied, then there exists a solution of \eqref{e3.9},
  bounded on ${\mathbb{R}}^+$, given by the formula
  $$
\mathcal{L}^+f(x)=
   \int_0^x \Phi(x,s)(\Pi_S^+(s)+\Pi_B^+(s))f(s)\,ds-
   \int_x^{+\infty} \Phi(x,s)\Pi_U^+(s)f(s)\,ds.
$$
  Here $\Pi_S^+$, $\Pi_U^+$ and $\Pi_B^+$ are projectors on the
  corresponding spaces.
  All other solutions bounded for positive
  $x$ have the form $u^+(x)=\mathcal{L}^+f(x)+\Phi(x,0)C^+$,
  where $C^+$ is
  an arbitrary vector of the space $M^+=M_S^+(0)\oplus M_B^+(0)$.
Similarly, if the system \eqref{e3.1} is almost dichotomic on
${\mathbb{R}}^-$, denote the corresponding spaces by $M_S^-(x)$, $M_U^-(x)$
and $M_B^-(x)$ and their dimensions by $n_S^-$, $n_U^-$ and
$n_B^-$, respectively. Consider $\Pi_S^-$, $\Pi_U^-$ and $\Pi_B^-$
as projectors on $M_S^-$, $M_U^-$ and $M^-_B$. The solvability
conditions are
\begin{equation}
\sup_{x\le 0}\left|\int_0^x\langle \varphi^-(s),f(s)
   \rangle\,ds\right|<+\infty \label{e4.2}
\end{equation}
for any solution $\varphi^-(x)$ of \eqref{e3.8}, bounded for $x\le 0$. If
this condition is satisfied, there is a bounded solution of the
form
$$\mathcal{L}^-f(x)=
   \int_{-\infty}^x \Phi(x,s)\Pi_S^-(s)f(s)\,ds+
   \int_0^x \Phi(x,s)(\Pi_U^-(s)+\Pi_B^-(s))f(s)\,ds.$$
   All other bounded solutions are given by the expression
  $$u^-(x)=\mathcal{L}^-f(x)+\Phi(x,0)C^-,$$
where $C^-$ is an arbitrary vector of the space
$M^-=M_U^-(0)\oplus M_B^-(0)$.

  Assume that system \eqref{e3.1} is almost dichotomic both for $x\ge
  0$ and for $x\le 0$.
  If the function $f$ satisfies conditions
\eqref{e4.1} and \eqref{e4.2}, then the existence of a solution $u(x)\in Y$ of
  system \eqref{e3.9} is provided by the following condition
\begin{equation}
u^+(0)=u^-(0) \label{e4.3}
\end{equation}
 for certain values $C^+\in M^+$ and $C^-\in M^-$.
  We can rewrite \eqref{e4.3} in the form
$$
\mathcal{L}^+f(0)-\mathcal{L}^-f(0)\in M^++M^-.
$$
This Fredholm condition provides the existence of an affine space
of bounded solutions of the dimension $m_0=\dim (M^+\cap M^-)$.

Now we change the space $X$ in order to make $\mathbf{T}_P$
Fredholm. Denote by $\varphi^+(x)$ an arbitrary solution of the
system \eqref{e3.8} bounded for $x\ge 0$. By $\varphi^-(x)$ we denote an
arbitrary solution of the system \eqref{e3.8} bounded for $x\le 0$.
Consider the minimal linear space $\mathcal{A}$ containing all
functions of the form
$$
\varphi(x)=\begin{cases}
0\ &\mbox{for }  x<0,\\
\varphi^+(x) &\mbox{for } x\ge 0
\end{cases}
$$
and
$$
\psi(x)=\begin{cases}
\varphi^-(x) &\mbox{for } x\le 0,\\
0 &\mbox{for }  x>0.
\end{cases}
$$
Denote by $m^+$ and $m^-$ dimensions of spaces $M^+$ and $M^-$
respectively. Then the dimension of $\mathcal{A}$ equals $m^++m^-$.
  Define the space
  $$
X_{P,\delta}=\big\{f\in C^{\delta}({\mathbb{R}}\to{\mathbb{R}}^n):
\big\|\int_0^x \langle f(s), \varphi(s)\rangle \, ds
\big\|_{C^0}<+\infty\mbox{ for all }\varphi(x)\in {\mathcal{A}}\big\}.
$$
  with the norm
$$
\|f\|_{P,\delta}=\|f\|_{C^\delta}+\sum_{k=1}^{m^++m^-}
\big\|\int_0^x \langle f(s),\varphi_k(s)\rangle\,ds\big\|_{C^0}.
$$
Here $\varphi_1(x),\dots, \varphi_{m^++m^-}(x)$ is a basis in $\mathcal{A}$.

Since the system \eqref{e3.9} is solvable in $Y$ only if $f\in
X_{P,\delta}$, one may consider $\mathbf{T}_P$ as an operator
from $Y$ to $X_{P,\delta}$. This operator is Fredholm. The
dimension of the space $M^++M^-$ is $m^++m^--m_0$, so
\begin{equation}
{\mathop{\rm ind}} \mathbf{T}_P=m_0-(n-m^+-m^-+m_0)=m^++m^--n.\label{e4.4}
\end{equation}
Taking into
consideration the facts that $m^+=n^+_S+n^+_B$, that
$m^-=n^-_U+n^-_B$ and that $n_S^\pm+n_U^\pm+n_B^\pm=n$, we obtain
from \eqref{e4.4} other formulae for index:
\begin{equation}
\mathop{\rm ind} \mathbf{T}_P=n_S^++n_B^+-n_S^-=n_U^-+n_B^--n_U^+. \label{e4.5}
\end{equation}

\section{Weakly hyperbolic systems}

Suppose, that the linear system \eqref{e3.1} is defined on the half-line
${\mathbb{R}}^+$.

\begin{definition}[\cite{kr1,kr2}] \label{def5.1} \rm
Let $\lambda>0$, and
$\varepsilon \ge 0$. We call the system \eqref{e3.1} \emph{weakly
hyperbolic} with constants $\lambda$ and $\varepsilon$, if there
exists such $K>0$, that for every continuous vector function
$g:[0,\infty) \to {\mathbb{R}}^n$, satisfying for $x \ge 0$ the
estimate
\begin{equation}
|g(x)| \le \exp(-\lambda(1+\varepsilon)x),\label{e5.1}
\end{equation}
there is  a solution $\varphi(x)$ of the nonhomogeneous system
\begin{equation}
u'=P(x)u+g(x), \label{e5.2}
\end{equation}
such that
\begin{equation}
 |\varphi(x)| \le K \exp(-\lambda x) \quad \mbox{for } x\ge
0.\label{e5.3}
\end{equation}
\end{definition}

Assume that the matrix $P(x)$ in \eqref{e3.1} is bounded. Denote the
class introduced by this definition by
$\mathop{\rm WH}^+(\lambda,\varepsilon)$ (we shall write $P\in
{\mathop{\rm WH}\nolimits}^+(\lambda,\varepsilon)$). Here the
superscript $+$ underlines the fact that the solution $\varphi(x)$
decays exponentially on the right half-line.


\begin{remark} \label{rmk5.2} \rm
 If $\lambda_{1,2}>0$ and $\varepsilon_{1,2}\ge 0$ are such
that $\lambda_1(1+\varepsilon_1) \le
\lambda_2(1+\varepsilon_2)$ and $\lambda_1 \ge \lambda_2$, then
${\mathop{\rm WH}\nolimits}^+(\lambda_1 ,\varepsilon_1)\subseteq
{\mathop{\rm WH}\nolimits}^+ (\lambda_2 ,\varepsilon_2)$.
\end{remark}

\begin{lemma} \label{lem5.3}
Let $\lambda>0 ,\varepsilon \ge 0$ and let
$\Phi (x)$ be a fundamental matrix of the system \eqref{e3.1}.
Suppose that there exist continuous matrices $\Pi^s(x)$ and
$\Pi^u(x)$ such that
 $$
\Pi^s(x)+\Pi^u(x)\equiv E,
$$
where $E$ is the $n \times n$ unit
matrix, and for a certain $K>0$ the following inequality is
satisfied
\begin{equation}
\begin{aligned}
&\int_{0}^x | \Phi (x) \Phi^{-1} (t) \Pi^s(t)|
\exp(-\lambda (1+\varepsilon)t)\,d t\\
&+\int_x^{\infty} | \Phi (x) \Phi^{-1} (t) \Pi^u(t)|
\exp(-\lambda (1+\varepsilon)t)\,d t \le K \exp (-\lambda x).
\end{aligned} \label{e5.4}
\end{equation}
Then $P\in {\mathop{\rm WH}\nolimits}^+(\lambda,\varepsilon)$.
\end{lemma}

\begin{proof}
 Denote $\Phi(x,t)=\Phi (x) \Phi^{-1} (t)$,
$$
\Phi^s(x,t)=\Phi (x) \Phi^{-1} (t) \Pi^s(t),\quad
\Phi^u(x,t)= \Phi (x) \Phi^{-1} (t) \Pi^u(t).
$$
Fix a vector function $g(x)$, satisfying \eqref{e5.1}, and define
\begin{equation}
\varphi(x)=\int_{0}^x
  \Phi^s(x,t) g(t) \,dt- \int_x^{\infty}
\Phi^u(x,t) g(t) \,dt. \label{e5.5}
\end{equation}
It follows from \eqref{e5.4} that
integrals in the right-hand side of \eqref{e5.5} converge and the
solution $\varphi(x)$ satisfies \eqref{e5.3}.
 The lemma is proved.
\end{proof}

\begin{theorem} \label{thm5.4}
If the system \eqref{e3.1} is
dichotomic on the real line, then there exists such a value
$\lambda_0>0$ that $P\in {\mathop{\rm WH}\nolimits}^+(\lambda,0)$ for all
$0<\lambda<\lambda_0$.
\end{theorem}

\begin{proof} Consider constants $c$ and $\lambda$ from
 Definition \ref{def1.1} for the system \eqref{e3.1}, and take as $\Pi^s(x)$ and
$\Pi^u(x)$ projectors on the stable and the unstable space of the
system considered. It is well-known \cite[Chapter 1]{pliss} that
$\max(|\Pi^s(x)|,|\Pi^u(x)|)\le M$ for a certain $M>0$ and all
$x\ge 0$. Fix a value $0<\mu<\lambda$. Thus, we obtain
\begin{align*}
&\int_{0}^x | \Phi^s(x,t)| \exp(-\mu t)\,d t
+\int_x^{\infty} | \Phi^u(x,t)|
\exp(-\mu t)\,d t \\
&\le \int_{0}^x Mc \exp(-\lambda(x-t)) \exp(-\mu t)
\,dt+\int_x^{\infty} Mc \exp(\lambda(x-t)) \exp(-\mu
t)\,dt\\
&= Mc\Big(\exp(-\lambda x) \int_{0}^x \exp((\lambda-\mu)t)\,
d t +  \exp(\lambda x) \int_x^{\infty}
\exp(-(\lambda+\mu)t)\, d t \Big)\\
&\le K \exp (-\mu x).
\end{align*}
The theorem is proved.
\end{proof}

Let $f(x)$ be a function (vector function, matrix function)
defined on the interval $[0,+\infty)$.


\begin{definition}[\cite{adr,lyap}] \label{def5.5} \rm
The number (or the symbol $\pm\infty$), defined as
$$
\chi^+[f]=\limsup_{x\to+\infty} \frac1x \ln |f(x)|
$$
is called the \emph{Lyapunov exponent} of the function $f(x)$.

For a function $f(x)$, defined on ${\mathbb{R}}^-$ one can define
the Lyapunov exponent in negative direction
$$
\chi^-[f]=\limsup_{x\to-\infty} \frac1x \ln |f(x)|.
$$
\end{definition}

Let $\Phi(x)=(\varphi_1(x),\dots,\varphi_n(x))$ be a fundamental
matrix of system \eqref{e3.1} and let $\chi^+[\varphi_j]=\lambda_j$
($j=1,\dots,n$). Further, let
$\Psi(x)=[\Phi^{-1}(x)]^*=(\psi_1(x),\dots, \psi_n(x))$ and
$\chi^+[\psi_j]=\mu_j$ ($j=1,\dots,n$). Denote by
$\gamma(\Phi)=\max(\lambda_i+\mu_i)$ the so-called \emph{defect of
reciprocal bases} $\{\varphi_j\}$ and $\{\psi_j\}$.

\begin{definition}[{\cite[p.\,67]{adr}}] \label{def5.6} \rm
 The system \eqref{e3.1} is
\emph{regular} if there is such a fundamental matrix $\Phi(x)$ of
this system that $\gamma(\Phi)=0$.
\end{definition}

It was shown by Grobmann \cite{grob}, that this definition was
equivalent to one, given by Lyapunov \cite{lyap}. The class of
regular systems is very wide. At least, it includes all systems
with constant and periodic matrices of coefficients \cite{adr}.
Note that regularity in positive direction does not imply the
regularity in negative direction and vice versa.


\begin{theorem} \label{thm5.7}
If system \eqref{e3.1} is regular, then
for all $\lambda, \varepsilon>0$ this system belongs to the class
$\mathop{\rm WH}^+(\lambda,\varepsilon)$.
\end{theorem}

\begin{proof}
 Fix positive numbers $\lambda$ and
$\varepsilon$. Choose $\Phi(x)$, a fundamental matrix of system
\eqref{e3.1}, which exists due to the Definition \ref{def5.6}, and consider an $n
\times n$ matrix $\Psi^s(x)$ which consists of those rows of the
matrix $\Phi^{-1}(x)$, whose Lyapunov exponents are not less than
$\lambda(1+\varepsilon)$, and zero rows. Without loss of
generality, one may assume that the first $k$ rows of the matrix
$\Psi^s(x)$ coincide with the first $k$ ones of the matrix
$\Phi^{-1}(x)$, for a certain $0\le k \le n$, and all other rows of
the matrix $\Psi^s(x)$ are zero. Denote
$$
\Pi^s(x)=\Phi(x) \Psi^s(x), \quad
\Pi^u(x)=E-\Pi^s(x)=\Phi(x)\Psi^u(x),
$$
where the matrix
$\Psi^u(x)$ consists of $k$ zero rows and $n-k$ last rows of the
matrix $\Phi^{-1}(x)$.

Now we check inequality \eqref{e5.4}. Denote the elements of the matrix
$\Phi^s(x,t)$ by $u^s_{ij}(x,t)$, and the elements of matrices
$\Phi (x)$ and $\Phi^{-1} (x)$ by $u_{ij}(x)$ and $\eta_{ij}(x)$,
respectively. Since $\Phi^{-1}(x)\Pi^s(x)=\Psi^s(x)$, we have
\begin{equation}
\begin{aligned}
&\int_{0}^x |u^s_{ij}(x,t)|
\exp(-\lambda(1+\varepsilon)t)\, dt\\
&=\int_{0}^x \big|\sum_{r=1}^k u_{ir}(x)
\eta_{rj}(t) \big|\exp(-\lambda(1+\varepsilon)t)\, dt \\
&\le  \sum_{r=1}^k |u_{ir}(x)| \int_{0}^x |\eta_{rj}(t)|
\exp(-\lambda(1+\varepsilon)t)\, dt.
\end{aligned} \label{e5.6}
\end{equation}
Let $\eta_r(x)$ be the $r$-th row of the matrix $\Phi^{-1}(x)$.
Due to the choice of $k$ it is clear that $\chi^+(|\eta_r(x)
|\exp(-\lambda(1+\varepsilon)x)) \ge 0$ for such $r$ that
$1 \le r \le k$. Thus,
 $$
\chi^+\Big(\int_{0}^x |\eta_{rj}(\tau)
|\exp(-\lambda(1+\varepsilon)\tau) \,d \tau\Big) \le
\chi^+(\eta_r(x))-\lambda(1+\varepsilon).
$$
 Since system \eqref{e3.1}
 is regular, for all $i,r=1,\dots,n$ we have
$$
\chi^+(u_{ir}(x))+\chi^+(\eta_r(x))-\lambda\varepsilon <0.
$$
 Therefore, the Lyapunov exponent of the right-hand side of \eqref{e5.6}
is less than $-\lambda$ and this function could be estimated by
$c_{ij}\exp(-\lambda t)$. Thus,
\begin{equation}
\begin{aligned}
\int_{0}^x | \Phi^s(x,t)| \exp(-\lambda(1+\varepsilon)t)\,d t
&=\int_{0}^x \max_i \sum_{j=1}^n
|u^s_{ij}(x,t)| \exp(-\lambda(1+\varepsilon)t) \, d t \\
&\le \frac{ K\exp(-\lambda x)}{ 2}
\end{aligned}\label{e5.7}
\end{equation}
 for a certain $K>0$. A similar estimate can be obtained
 for the second integral in \eqref{e5.4}.
 Together with \eqref{e5.7} it gives \eqref{e5.4}. This proves the theorem.
\end{proof}

The following results allow us to obtain new weakly hyperbolic
 systems.

\begin{theorem} \label{thm5.8}
Let the matrix $P(x)$ be of the form
$$P(x)=\begin{pmatrix}
P_1(x) & 0 \\ 0 & P_2(x)
\end{pmatrix},
$$
and let the systems $u_1'=P_1(x)u_1$ and $u_2'=P_2(x)u_2$ of $k$
and $n-k$ equations, respectively, belong to the class
$\mathop{\rm WH}^+(\lambda,\varepsilon)$. Then system
\eqref{e3.1} also belongs to the same class.
\end{theorem}

The proof of the above theorem is evident; so we omit it.

Let us denote by $ \exp(-\mu x){\mathbb L}^\infty $ for any
$\mu>0$ the space of vector functions obtained as a product of
$\exp(-\mu x)$ and a vector function, bounded for $x\ge 0$. The
norm in this space is defined by the formula
$\|h\|_\mu=\sup_{x \ge 0}(\exp(\mu x)|h(x)|)$.

\begin{theorem} \label{thm5.9}
Let system \eqref{e3.1} belong to the
class ${\mathop{\rm WH}}^+(\lambda,\varepsilon)$. Then
there exists such a continuous linear mapping
$$
\mathcal{L}^+:\exp(-\lambda(1+\varepsilon) x){\mathbb L}^\infty \to
\exp(- \lambda x){\mathbb L}^\infty
$$
that for any vector function
$g \in \exp(-\lambda(1+\varepsilon) x){\mathbb L}^\infty $ the
function $\mathcal{L}^+g(x)$ is a solution of system \eqref{e5.2} for the
given $g.$
\end{theorem}

\begin{proof}
Let $k$ be the dimension of the space of all
solutions of equation \eqref{e3.1}, which belongs to the space
$\exp(-\lambda x){\mathbb L}^\infty$. Denote by $\Phi(x)$ the
fundamental matrix of system \eqref{e3.1}, whose first $k$ columns belong
to the space $\exp(-\lambda x){\mathbb L}^\infty$ and no
nontrivial combination of other columns does. We consider an
arbitrary function $g \in \exp(- \lambda(1+\varepsilon) x){\mathbb
L}^\infty $. Provided $\|g\|_{\lambda(1+\varepsilon)}=K$, it
follows from the conditions of the theorem that there exists the
solution $\varphi(x)$ of system \eqref{e5.2} satisfying the inequality
\begin{equation}
|\varphi(x)|\le cK\exp(-\lambda x) \quad \mbox{for } x\ge 0.
\label{e5.8}
\end{equation}
Obviously, there exists such a constant vector $C_\varphi$ that
$$
\varphi(x)=\Phi(x)\Big(C_\varphi+\int_{0}^x
\Phi^{-1}(\tau)g(\tau)\,d\tau \Big).
$$
 One may split the vector
$C_\varphi$ into a sum $C_\varphi= C_\varphi^{(1)}+
C_\varphi^{(2)}$ where the first $k$ components of the vector $
C_\varphi^{(1)}$ and the last $n-k$ ones of the vector $
C_\varphi^{(2)}$ equal zero. We show that the vector
$C_\varphi^{(1)}$ does not depend on $\varphi$ for a fixed $g$.
Then we can write $C_g^{(1)}$ instead of $C_\varphi^{(1)}$. Assume
that for the same $g$ there exist two solutions $\varphi_1(x)$ and
$\varphi_2(x)$ of system \eqref{e5.2} satisfying \eqref{e5.8}. So the solution
$\varphi_1(x) - \varphi_2(x)$ of system \eqref{e3.1} belongs to the space
$\exp(-\lambda x){\mathbb L}^\infty$. On the other hand,
\begin{equation}
\varphi_1(x)-\varphi_2(x)=\Phi(x)(C_{\varphi_1}- C_{\varphi_2})=
\Phi(x)(C_{\varphi_1}^{(1)}-C_{\varphi_2}^{(1)})+
\Phi(x)(C_{\varphi_1}^{(2)}-C_{\varphi_2}^{(2)}). \label{e5.9}
\end{equation}
 The second term in the right-hand side of equality \eqref{e5.9} belongs
 to the space $\exp(-\lambda x){\mathbb L}^\infty$.
 Therefore the whole sum does.
 So the equality $C_{\varphi_1}^{(1)} = C_{\varphi_2}^{(1)}$ follows from
 the choice of the matrix $\Phi(x)$.
 Let us define
 $$
\mathcal{L}^+g(x)=\Phi(x)\Big(C^{(1)}_g+\int_{0}^x
\Phi^{-1}(\tau)g(\tau)\,d\tau\Big).
 $$
 We check now the properties of the mapping $\mathcal{L}^+$.

\noindent {\sl Linearity.} Let $a,b \in {\mathbb{R}}$,
$g _{1,2}\in \exp(-\lambda(1+\varepsilon) x){\mathbb L}^\infty $.
 By virtue of the definition of the operator $\mathcal{L}^+$
\begin{equation}
\mathcal{L}^+(ag_1+bg_2)(x)=\Phi(x)C_{ag_1+bg_2}^{(1)}+
\int_{0}^x \Phi(x,\tau)(ag_1(\tau)+bg_2(\tau)) \,
d\tau.\label{e5.10}
\end{equation}
The right-hand side of \eqref{e5.10} belongs to the space
$\exp(-\lambda x){\mathbb L}^\infty $.
 It is a solution of system
\eqref{e5.2} with $g(x)=ag_1(x)+bg_2(x)$.
 Hence $C^{(1)}_{ ag_1+bg_2}= aC^{(1)}_ {g_1}+bC^{(1)}_{g_2}$
 because of the uniqueness of $C^{(1)}_g$.
 This proves the linearity of the mapping $\mathcal{L}^+$.

\noindent{\sl Continuity.} We will prove that there exists a constant $H>0$
such that for every vector-function $g$,
\begin{equation}
\|g\|_{\lambda(1+\varepsilon)}= 1 \label{e5.11}
\end{equation}
the inequality
\begin{equation}
\|\mathcal{L}^+g\|_\lambda\le H \label{e5.12}
\end{equation}
is true.  We choose
an arbitrary solution $\varphi(x)$ of system \eqref{e5.2} such that
\begin{equation}
|\varphi(x)|\le c\exp(-\lambda x) \label{e5.13}
\end{equation}
for every $x\ge 0$. According to the definition of the mapping
$\mathcal{L}^+$,
 $$
\varphi(x)-\mathcal{L}^+g(x)=\Phi(x)C_{\varphi}^{(2)}=
\sum_{i=1}^k c_i X_i(x) ,
$$
 where $X_i(x)$ are columns of the matrix $\Phi(x)$ and
$C_\varphi^{(2)}=(c_1,\dots,c_k,0,\dots,0)^T$.
 Assuming that the numbers $M$ and $l$ are such that
 $\max(|X_1(x)|,\dots,
|X_k(x)|) < M \exp(-\lambda x) $ for any $x \ge 0$ and
$|c_1|+\dots+|c_k|<l|C_\varphi|$, we obtain
\begin{equation}
|\varphi(x)-\mathcal{L}^+g(x)|\le \sum_{i=1}^k |c_i|
\max_{i\le k} |X_i(x)| \le lM|C_{\varphi}|\exp(-\lambda x).
\label{e5.14}
\end{equation}
On the other hand, $\varphi(0)=\Phi(0)C_\varphi$
and
$$
|C_{\varphi}| \le |\Phi^{-1}(0)| |\varphi(0)| \le
c|\Phi^{-1}(0)|.
$$
 Substituting this estimate into \eqref{e5.14}, we obtain
\begin{equation}
\|\varphi-\mathcal{L}^+g\|_\lambda\leq lMc|\Phi^{-1}(0)|. \label{e5.15}
\end{equation}
 Suppose $H=c(1+lM|\Phi^{-1}(0)|)$. The inequality \eqref{e5.12}
follows from \eqref{e5.13} and \eqref{e5.15}. The theorem is proved.
\end{proof}


\begin{theorem} \label{thm5.10}
Let system \eqref{e3.1} belong to the
class ${\mathop{\rm WH}}^+(\lambda,\varepsilon)$ and
let the invertible matrix $L(x)$ be such that
\begin{equation}
\begin{gathered}
L(x) \in C^1([0,\infty)), \\
\chi^+(| L(x)|+|L^{-1}(x)|)=0.
\end{gathered}\label{e5.16}
\end{equation}
Then for any $\lambda_1$ and $\varepsilon_1$ such that
$\lambda_1<\lambda$,
$\lambda_1(1+\varepsilon_1)>\lambda(1+\varepsilon)$ the system
\begin{equation}
v'=\widetilde{P}(x)v, \label{e5.17}
\end{equation}
with the matrix $\widetilde{P}(x)=L^{-1}(x)P(x)L(x)-L^{-1}(x)\dot
L(x)$ obtained from \eqref{e3.1} by the transformation
\begin{equation}
u=L(x)v,\label{e5.18}
\end{equation}
belongs to the class ${\mathop{\rm WH}}^+(\lambda_1,\varepsilon_1)$.
\end{theorem}

\begin{proof}
Let us choose  a constant $c_1>0$ such that
 $$
|L(x)|\le c_1\exp((\lambda_1(1+\varepsilon_1)-\lambda(1+\varepsilon))x) ,
 \quad |L^{-1}(x)|\le c_1 \exp ((\lambda-\lambda_1)x)
$$
for all $x\ge 0$. Consider a vector function
$$
g(x)\in \exp(-\lambda_1(1+\varepsilon_1)x){\mathbb L}^\infty
$$
and the system
\begin{equation}
v'=\widetilde{P}(x)v+g(x). \label{e5.19}
\end{equation}
The transformation inverse to \eqref{e5.18} reduces this system to the
form
\begin{equation}
u'=P(x)u+L(x)g(x). \label{e5.20}
\end{equation}
Since $-\lambda_1(1+\varepsilon_1)<-\lambda(1+\varepsilon)$, the
vector function $L(x)g(x)$ belongs to the space
$\exp(-\lambda(1+\varepsilon)x){\mathbb L}^\infty$.
 Hence system
\eqref{e5.19} has a solution $\varphi(x) \in \exp(-\lambda x){\mathbb
L}^\infty$, and system \eqref{e5.20} has a solution
$$
\psi(x)=L^{-1}(x)\varphi(x) \in \exp(-\lambda_1 x)\mathbb{L}^\infty.
$$
Let $c=\|\mathcal{L}^+\|$, where $\mathcal{L}^+$ is the operator which
corresponds to the weakly hyperbolic system \eqref{e3.1}. Clearly,
$$
\|\psi\|_{\lambda_1} \le c c_1^2 \|g\|_{\lambda_1(1+\varepsilon_1)}.
$$
 Therefore, system
 \eqref{e5.17} is weakly hyperbolic with constants $\lambda_1$ and
 $\varepsilon_1$. The theorem is proved.
\end{proof}

\begin{remark} \label{rmk5.11} \rm
Linear transformations \eqref{e5.18} satisfying \eqref{e5.16}
 are called \emph{generalized Lyapunov} transformations. It is
 proved in \cite{bas}, see also [1], that system \eqref{e3.1} is regular if and only if
 it can be reduced to a system with a constant matrix
 by a generalized Lyapunov transformation.
\end{remark}

 One can also consider weakly hyperbolic systems in the negative
 direction, that is on a half-axis ${\mathbb{R}}^-$. All results similar
 to theorems of this section may be proved. Denote the
 corresponding classes by
 $\mathop{\rm WH}^-(\lambda,\varepsilon)$ and
 corresponding operators by $\mathcal{L}^-$.

 Consider the class $\mathop{\rm WH}^0(\lambda,\varepsilon)$
 which consists of
 systems \eqref{e3.1}, defined on $\mathbb{R}$ which are weakly hyperbolic
 both on the left and the right half-axis with constants
 $\lambda$ and $\varepsilon$.
 Let $\Phi(t)$ be such a fundamental matrix of \eqref{e3.1} that
 $\Phi(0)=E$. Consider the following two spaces
\begin{gather*}
M^+=\{u_0\in {\mathbb{R}}^n:|\Phi(t)u_0|\le c\exp(-\lambda t)\quad
\mbox{for all }t\ge 0\} ,\\
M^-=\{u_0\in {\mathbb{R}}^n:|\Phi(t)u_0|\le c\exp(\lambda t)\quad
\mbox{for all }t\le 0\}.
\end{gather*}
 Let $\dim M^+=m^+$, $\dim M^-=m^-$, $M^0=M^+\cap M^-$,
$\widetilde{M}=M^++M^-$.

Fix nonnegative parameters $\delta$ and $\mu$ and take into
consideration two sets of functional spaces
\begin{gather*}
U_{\delta,\mu}=\{u(x):{\mathbb{R}}\to{\mathbb{R}}^n:
\exp(\mu\sqrt{1+x^2})u(x)\in
C^{1+\delta}({\mathbb{R}}\to{\mathbb{R}}^n)\};\\
X_{\delta,\mu}=\{f(x):{\mathbb{R}}\to{\mathbb{R}}^n:
\exp(\mu\sqrt{1+x^2})f(x)\in C^{\delta}({\mathbb{R}}\to{\mathbb{R}}^n)\}.
\end{gather*}
One can define norms in the space $X_{\delta,\mu}$ by the formula
$$
\|f\|_{\delta,\mu}=\|\exp(\mu \sqrt{1+x^2})f(x)\|_{C^\delta}.
$$
The norm in $U_{\delta,\mu}$ can be defined similarly.

\begin{theorem} \label{thm5.12}
If system \eqref{e3.1} belongs to the class
$\mathop{\rm WH}^0(\lambda,0)$, then the operator
$$\mathbf{T}_P:U_{\delta,\lambda}\to X_{\delta,\lambda},$$
defined by the formula $\mathbf{T}_Pu=u'-P(x)u$ is Fredholm and
$\mathop{\rm ind} \mathbf{T}_P=m^++m^--2n$. If
$M^0=\{0\}$ and $\widetilde{M}={\mathbb{R}}^n$, the operator
$\mathbf{T}_P$ is invertible.
\end{theorem}

 The proof of this statement is similar to the reasoning presented
 in Section 4.
 The following statement is a corollary of the theory of Fredholm
 operators \cite[3, \S 19.1]{her}.

\begin{theorem} \label{thm5.13}
 If system \eqref{e3.1} belongs to the
class $\mathop{\rm WH}^0(\lambda,0)$ and
$\widetilde{M}={\mathbb{R}}^n$, then there is an operator
 $$
{\mathcal{L}_P}\in C(X_{\delta,\lambda}\to
U_{\delta,\lambda}),
$$
which transforms the function $f\in
X_{\delta,\lambda}$ to a solution ${\mathcal{L}_P}f$ of system
\eqref{e3.9}, that is $\mathbf{T}_P{\mathcal{L}_P}f=f$ for any
$f\in X_{\delta,\lambda}$.
\end{theorem}

 These results can be used in the following theorem.
 To simplify its formulation we will assume that
 there exist bounded solutions of systems \eqref{e5.16b} and will not
 present the existence conditions.

\begin{theorem} \label{thm5.14}
Let  $\lambda_0>0$ be a number such that the system \eqref{e3.1}
belongs to all classes $\mathop{\rm WH}^0(\lambda,\varepsilon)$ for any
$\lambda\in (0,\lambda_0)$ and $\varepsilon>0$.
 Consider a function $f\in C^\delta({\mathbb{R}}\to{\mathbb{R}}^n)$,
where $\delta\in (0,1)$.
 Suppose that $P(x) \in C^\delta$ and that
 there are two sequences $\lambda_k$ and
$\varepsilon_k$ of positive numbers and a sequence of functions
$f_k$ satisfying the following conditions:
\begin{enumerate}
\item $\lambda_k\to 0$ as $k\to\infty$,

\item $\lambda_k\varepsilon_k\to 0$ as $k\to\infty$,

\item the norms $\|f_k\|_{C^\delta}$ are uniformly bounded and
     for every compact set $K\subset {\mathbb{R}}$ the sequence
   $f_k$ converges to $f$ in $C(K\to{\mathbb{R}}^n)$,

\item There is a sequence $\varphi_k$ of solutions of systems
   \begin{equation}
   u'=P(x)u+f_k(x) \label{e5.16b}
   \end{equation}
   such that
   $\sup_k\|\varphi_k\|_{C^0}<+\infty$.
\end{enumerate}
Then system \eqref{e3.9} is solvable in
$C^{1+\delta}({\mathbb{R}}\to{\mathbb{R}}^n)$.
\end{theorem}

\begin{proof}
Since the functions $\varphi_k$ are uniformly
bounded in $C({\mathbb{R}}\to{\mathbb{R}}^n)$, then by virtue of
 the conditions on the matrix $P(x)$ and on the functions $f_k$
 they are also uniformly bounded in
$C^{1+\delta}({\mathbb{R}}\to{\mathbb{R}}^n)$.
Therefore we can choose a subsequence
$\varphi_{k_l}$ that converges to $\varphi_0\in
C^{1+\delta}({\mathbb{R}}\to{\mathbb{R}}^n)$ uniformly on every
compact set $K\subset {\mathbb{R}}$. The function $\varphi_0$
satisfies equation \eqref{e3.9}. The theorem is proved.
\end{proof}

\section{Applications to elliptic partial differential operators}

The results of the previous sections will be applied to obtain
solvability conditions for elliptic operators in unbounded
cylinders considered in Section 1. Let $\mathbf{L}$ be the
operator defined by \eqref{e1.2}. The following lemma is essential for
what follows.

\begin{lemma} \label{lem6.1}
There exists a number $N \in \mathbb{N}$ such
  that for $k>N$ every system \eqref{e1.6} is
  dichotomic on ${\mathbb{R}}$ with constants $c=2$ and $\lambda=1/2$.
 Furthermore, the norms of the projectors $\Pi^{s,u}$ do not exceed 2.
\end{lemma}

\begin{proof}
  Consider the change of the independent
  variable $t=\lambda_k x$.
  It reduces system \eqref{e1.6} to
\begin{equation}
\dot {w}_k^i=Q_k(t)w_k^i. \label{e6.1}
\end{equation}
Here
  $$Q_k(t)=\begin{pmatrix}
0 & E_m\\
\big(\frac{ B(t/\lambda_k)}{\omega_k}+
E_m\big) &-\frac{A(t/\lambda_k)}{\lambda_k}
\end{pmatrix}.
$$
Evidently, the system
\begin{equation}
\dot w=\begin{pmatrix}
0 & E_m\\
E_m & 0
\end{pmatrix} w \label{e6.2}
\end{equation}
is dichotomic with constants $c=1$, $\lambda=1$.
  Moreover, the stable and the unstable subspace are
  always orthogonal.
  Therefore, the norms of the projectors on these subspaces equal $1$.
  Due to the Perron theorem \cite[Proposition 1, p.34]{cop} there is
such a value   $\varepsilon>0$ that if
\begin{equation}
\|B(x)\|/|\omega_k|<\varepsilon \quad
  \mbox{and} \quad \|A(x)\|/\lambda_k<\varepsilon, \label{e6.3}
\end{equation}
  then system \eqref{e6.1} is dichotomic with constants $c=2$ and
  $\lambda=1/2$.
  We can take this $\varepsilon$ so small that the
  angle between stable spaces of systems \eqref{e6.1} and \eqref{e6.2} for every
  $x$ is less than $\pi/100$.
  Then the norms of the corresponding projectors are less than $2$.
  Hence system \eqref{e1.6} is dichotomic with constants $c=2$ and
  $\lambda=\lambda_k/2$.
  The norms of the projectors remain the same
  because they do not depend on the scaling of the independent
  variable.

 Thus, we can choose the number $N$ big enough in order to obtain
 the estimate $|\lambda_N| >\max(1,M/\varepsilon)$.
  The lemma is proved.
\end{proof}

\begin{remark} \label{rmk6.2}\rm
 The dichotomicity constants for systems \eqref{e1.6}
  can be chosen independently of $k$.
\end{remark}

 Assume that the operator $\mathbf{L}$ and the function $f$ satisfy the
 condition.

\begin{condition} \label{cond6.3} \rm
Every system \eqref{e1.7} is solvable in
$C^0({\mathbb{R}}\to{\mathbb{R}}^n)$.
\end{condition}

This condition implies that system \eqref{e1.7} is solvable in the space
  $C^{1+\delta}({\mathbb{R}}\to{\mathbb{R}}^n)$ because the coefficients of this
  system belong to the space $C^\delta({\mathbb{R}}\to{\mathbb{R}}^n)$.

Note that if system \eqref{e3.1} is dichotomic then for every bounded $g$
the corresponding system \eqref{e5.2} has a bounded solution, which can
be found by the following formula \cite[p.22]{cop}:
$$
\varphi(x)=\int_{-\infty}^x
\Phi(x,t) \Pi_S(t) g(t) \,dt- \int_x^{\infty} \Phi(x,t)
\Pi_U(t) g(t) \,dt.
$$
  This solution depends linearly on the right-hand side $g$ and
satisfies the inequality
  $$
|\varphi(x)|\le 2cH/\lambda.
$$
  Here $c$ and $\lambda$ are the constants of dichotomicity for
  system \eqref{e3.1} and $H$ is a constant, which bounds norms of
  projectors on the stable and unstable subspaces. Thus, due to
  Lemma \ref{lem6.1} it is sufficient to verify Condition \ref{cond6.3} for systems \eqref{e1.7}
  with $k = 1,\dots,N$. To check the solvability of these systems
  one can either use
  the results on almost dichotomic systems (Sections 3 and 4) or
  use the theorems on weakly hyperbolic systems (Section 5).
  The last approach is applicable if the right-hand sides $F_k^i$ decay
  exponentially or satisfy conditions of Theorem \ref{thm5.14}.


\begin{theorem} \label{thm6.4}
Let the operator $\mathbf{L}$ defined
by \eqref{e1.1} and the function $f$ satisfy Condition \ref{cond6.3}.
  Then problem \eqref{e1.4} is solvable in $U$.
\end{theorem}

\begin{proof}
 We will prove convergence of the series \eqref{e1.5}. We take a number
 $N$, which exists due to Lemma \ref{lem6.1} and consider the spectral decomposition
 of the operator $\mathbf{L}$ developed in \cite{cvo}.
 Consider first the projector $P_N'$ acting in the space
 $C^\delta(\bar\Omega')$ and corresponding to the first $N$
 eigenvalues of the Laplace operator $\Delta'$ in the section
 of the cylinder,
 $$
P_N' v = \frac{1}{2 i \pi} \int_\Gamma
 (\Delta' - \lambda)^{-1} v d \lambda .
$$
 Here $\Gamma$ is the contour in the complex plane containing
 the first $N$ eigenvalues.
 Consider the operator $Q_N'$ acting in the same space and
 defined by the equality
 $$
Q_N' u = u - P_N' u .
$$
 Denote
 $$
E_N' = P_N'(C^\delta(\bar\Omega')), \quad
    \tilde E_N' = Q_N'(C^\delta(\bar\Omega')) .
$$
 Then
 $$
C^\delta(\bar\Omega') = E_N' \oplus \tilde E_N' .
$$
 Let us set
\begin{gather*}
 E_N = \{ u \in C^\delta(\bar\Omega) : \forall
             x \in {\mathbb{R}}, u(x, \cdot) \in E_N' \}, \\
 \tilde E_N = \{ u \in C^\delta(\bar\Omega) : \forall
             x \in {\mathbb{R}}, u(x, \cdot) \in \tilde E_N' \}.
\end{gather*}
 We define now the operators
 $$
(P_N u) (x, \cdot ) = P_N'(u(x, \cdot)) , \quad
     (Q_N u) (x, \cdot ) = Q_N'(u(x, \cdot)) .
$$
 It is shown in \cite{cvo} that $P_N$ and $Q_N$ are bounded
 projectors in $C^\delta(\bar \Omega)$ that commute with the
 operator $\mathbf{L}$. The subspace $E_N$ is invariant with
 respect to $P_N$, and $\tilde E_N$ is invariant with respect to $Q_N$.

 The operator $\mathbf{L}$ can be considered as an unbounded operator
 acting in $C^\delta(\bar\Omega')$ with the domain
 $$
D(\mathbf{L}) = \{ u \in C^{2+\delta}(\bar\Omega), \;
              u |_{\partial \Omega} = 0 \} .
$$
 Denote by $\mathbf{L}_{\rm I}$ and
 $\mathbf{L}_{\rm II}$ the
 restrictions of the operator $\mathbf{L}$
 to the subspaces $E_N$ and $\tilde E_N$, respectively.
 The domains of these operators are the intersections of the
 corresponding subspaces with the domain of the operator $\mathbf{L}$.

 It is proved in \cite{cvo} that for $N$ sufficiently large
 $\mathbf{L}_{\rm I}$ is a Fredholm operator
with the zero index. Note that
 this result remains valid without the assumption that the
 coefficients have limits at infinity.

 Since its kernel is trivial, it is invertible.
 We can represent a function $f \in C^{\delta}(\bar\Omega)$
 as a sum, $f = f_{\rm I} +
 f_{\rm II}$, where
$f_{\rm I} \in E_N$ and
 $f_{\rm II} \in \tilde E_N$.
 Then the equation $\mathbf{L}_{\rm I} u
 = f_{\rm I}$ is solvable in $E_N$.
 Denote its solution by $u_{\rm I}$.
 Then $\mathbf{L} u_{\rm I} = \mathbf{L}_{\rm I}
 u_{\rm I} = f_{\rm I}$.
 On the other hand, if we look for the solution of the equation
 $\mathbf{L} u = f_{\rm I}$ in the form of the
 Fourier series with respect to the eigenfunctions of the Laplace
 operator in the section of the cylinder,
 $$
u_{\rm I}(x,y) = \sum_{k=N+1}^\infty
 \sum_{j=1}^{p_k} u_k^j(x) \varphi_k^j(y) , \quad
    f_{\rm I}(x,y) = \sum_{k=N+1}^\infty \sum_{j=1}^{p_k}
    f_k^j(x) \varphi_k^j(y),
$$
 then by virtue of the condition of the theorem we find unique
 solutions $u_k^j$ of the corresponding ordinary differential
 systems of equations.
 Hence for $k>N$
 $$
u_k^j(x) = \int_{\Omega'} u_{\rm I}(x,y)\varphi_k^j(y)
dy ,
$$
 and the Fourier series converges to $u_{\rm I}(x)$.
 It remains to note that \eqref{e1.5} differs from the Fourier
 representation for $u_{\rm I}$ by a finite number
of terms and,  consequently, converges.
 The theorem is proved.
\end{proof}

In the remaining part of this section we will consider almost
dichotomic systems on half-lines.

\begin{condition} \label{cond6.5} \rm
Every system \eqref{e1.6} is almost dichotomic both
 on the left- and on the right-half axis.
\end{condition}

Let $M_{S,k}^\pm(x)$, $M_{U,k}^\pm(x)$ and $M_{B,k}^\pm(x)$ be
respectively stable, unstable and bounded subspaces of systems
\eqref{e1.6} in positive and negative direction. Let $n_{S,k}^\pm$,
$n_{U,k}^\pm$ and $n_{B,k}^\pm$ be corresponding dimensions.
Denote by $\overline{M}_{B,k}$ the space of solutions of system
\eqref{e1.6} bounded on \emph{all} the axis. Let
$d_{B,k}=\dim \overline{M}_{B,k}$,
$$
N_B=\sum_{k=1}^\infty p_k d_{B,k}.
$$
Since there is only a finite number of nonzero values $d_{B,k}$ (see
Lemma \ref{lem6.1}), the number $N_B$ is finite. Let us select a basis
$\eta_k^1(x), \dots, \eta_k^{d_{B,k}}(x)$ in every space
$\overline{M}_{B,k}$ such that every $\eta_k^j(x)$ is a solution of the
corresponding system \eqref{e1.6}. Consider the problem
\begin{equation}
\mathbf{L}^*u=0, \label{e6.4}
\end{equation}
 adjoint to \eqref{e1.3}. It is described by
operator $\mathbf{L}^*u=u_{xx}+\Delta_y u-A(x)u_x+ B(x)u$.

Then the space $\mathbf{B}$ of bounded solutions of the problem
\eqref{e6.4} has a finite basis
 $$
\{\eta_k^l(x)\varphi_k^j(y):\quad k\in \mathbb{N}, \quad j=1,\dots,
 p_k, \quad l=1,\dots, d_{B,k} \}
$$
 and $\dim \mathbf{B}=N_B$. For every
$F\in X\times X$ and $\eta\in \mathbf{B}$ we consider a
continuous function $R[F,\eta]:{\mathbb{R}}\to {\mathbb{R}}^{2n}$
defined by the formula
$$
R[F,\eta](x)=\int_0^x \,dt \int_{\Omega'} \langle F(t,y),
\eta(t,y)\rangle dy.
$$
This function depends linearly both on $F$
and on $\eta$.

 Consider the following condition
\begin{equation}
\sup_{x\in {\mathbb{R}}}|R[F,\eta](x)|<+\infty \quad
 \forall \eta \in \mathbf{B}.
 \label{e6.5}
\end{equation}
 It can be also written in the form
 $$
\sup_{x\in {\mathbb{R}}}|R[F,\eta_k^l \varphi_k^j](x)|<+\infty\quad
 \forall k\in \mathbb{N}, \quad j=1,\dots, p_k,\quad
 l=1,\dots,d_{B,k}.
$$
For every $f\in X$ we take the corresponding $F(f)=(0,f)\in
X\times X$. We shall say that $f$ satisfies \eqref{e6.5} if it is true
for $F(f)$.

 Denote by $\widetilde{X}\subset X$ the subspace of
 functions $f$ satisfying \eqref{e6.5}.
 It becomes a Banach space with the norm
 $$
\|f\|_{\widetilde{X}}=
 \|f\|_X+\max_{j,k,l}\|R[f,\eta_k^l\varphi_k^j](x)\|_{C^0}.
$$
 If every system \eqref{e1.6} is dichotomic both on the left- and the
 right-half axis, we may consider the corresponding operators
$\mathcal{L}^\pm_k$ and spaces $M_k^\pm$ introduced in Section 4.

\begin{theorem} \label{thm6.6}
 If conditions \eqref{e6.5} and
 $$
\mathcal{L}_k^+f_k^i(0)-\mathcal{L}_k^-f_k^i(0)\in M_k^++M_k^-
$$
 are satisfied for all $k\in \mathbb{N}$, then problem \eqref{e1.4}
 is solvable.
 Moreover, the operator $\mathbf{L}:U\to \widetilde{X}$ is Fredholm
 with the index
\begin{equation}
\mathop{\rm ind} \mathbf{L}=\sum_{k=1}^\infty
 (n_{S,k}^++n_{B,k}^+-n_{S,k}^-) p_k =
 \sum_{k=1}^\infty (n_{U,k}^-+n_{B,k}^--n_{U,k}^+) p_k.\label{e6.6}
\end{equation}
 Both sums in \eqref{e6.6} are finite.
\end{theorem}

\begin{proof}
Split the space $\widetilde{X}$ into the direct sum
$\widetilde{X}=\widetilde{X}_1\oplus \widetilde{X}_2$, where
\begin{gather*}
\widetilde{X}_1=\{f\in \widetilde{X}: f(x,y)=
\sum_{k=1}^N\sum_{i=1}^{p_k} f_k^i(x)\varphi_k^i(y)\} , \\
 \widetilde{X}_2=\{f\in \widetilde{X}:
  f_k^i(x)=0, \: k=1,\dots,N,\: i=1,\dots,p_k\},
\end{gather*}
 where the number $N\in\mathbb{N}$ is the same as in Lemma \ref{lem6.1}.
 As mentioned above,
 one may consider the splitting $U=U_1\oplus U_2$ and the restrictions
 $\mathbf{L}_i : U_i \to \widetilde{X}_i$
 of the corresponding operators to these subspaces. It is shown
 in the proof of Theorem \ref{thm6.4} that the operator $\mathbf{L}_2$ is
 invertible.

 The space $\widetilde{X}_1$ splits into the direct sum
 $$
\widetilde{X}_1=p_1X^{(1)}\oplus p_2 X^{(2)}\oplus\dots \oplus p_N X^{(N)}.
$$
 Every term in this sum corresponds to an eigenfunction
 $\varphi_k^i$. Similarly, we may present the space $U$.
 The operator $\mathbf{L}_1$ is the sum of operators
$\mathbf{L}^{(k)}:U^{(k)}\to X^{(k)}$,
 $$
\mathbf{L}^{(k)}=\frac{ d^2}{ dx^2}+
 A(x)\frac{ d}{ dx}+B(x)+\omega_kE_n,
$$
 and the operators
 $$
\mathbf{T}_k=\frac{ d}{ dx}-P_k(x):
 Y^{(k)}\to X^{(k)}\times X^{(k)},
$$
 where the matrices $P_k$ are defined in Section 1 and $Y^{(k)}$ is
the subspace in
 $C^{1+\delta}({\mathbb{R}}\to{\mathbb{R}}^{2n})$ consisting of
functions $F=(f_1,f_2)$ which satisfy the
 condition
 $$
\sup_{x\ge 0}\big|\int_0^x \langle F(t),
 \eta(t)\rangle \,dt\big| <+\infty
$$
 for every $\eta(x) \in M_{B,k}^+(x)$ and
 $$
\sup_{x\le 0}\big|\int_0^x \langle F(t),
 \eta(t)\rangle \,dt\big| <+\infty
$$
 for every $\eta(x) \in M_{B,k}^-(x)$.
 It has been shown that all operators $\mathbf{T}_k$ are Fredholm and their
 indices satisfy \eqref{e4.5}. On the other hand, it is
proved in \cite{cvo} that
 $$
\mathop{\rm ind} \mathbf{L} =\sum_{k=1}^N p_k
 \mathop{\rm ind} \mathbf{L}^{(k)}.
$$
 Therefore it remains to prove the following lemma.
\end{proof}

\begin{lemma} \label{lem6.7}
 If the operator $\mathbf{T}_k$ is
Fredholm, then the operator $\mathbf{L}^{(k)}$ is also Fredholm
and their indices are equal to each other.
\end{lemma}

\begin{proof}
 The substitution $v(x)=u'(x)/\lambda_k$ defines an
 isomorphism of the spaces $\ker \mathbf{L}^{(k)}$ and $\ker \mathbf{T}_k$. We shall
 show that numbers of solvability conditions are also equal.
 Consider the system
\begin{equation}
\begin{gathered}
u'=\lambda_k v+f_1(x),\\
v'=\big(-\frac{
B(x)}{\lambda_k}+ \lambda_k E_m\big)u-A(x)v+f_2(x),
\end{gathered}
\label{e6.7}
\end{equation}
 where
 $f_1(x)\in C^{1+\delta}({\mathbb{R}}\to{\mathbb{R}}^m)$ and
 $f_2(x)\in C^{\delta}({\mathbb{R}}\to{\mathbb{R}}^m)$.

The transformation $q=v+f_1(x)/\lambda_k$ reduces system \eqref{e6.7} to
\begin{equation}
\begin{gathered}
u'=\lambda_k q,\\
q'=\left(-\frac{B(x)}{\lambda_k}+ \lambda_k
E_m\right)u-A(x)q+g(x),\end{gathered} \label{e6.8}
\end{equation}
 where
$$
g(x)=\frac{ f'_1(x)+A(x)f_1(x)}{
\lambda_k}+ f_2(x)=:\pi(f_1,f_2)(x)\in C^\delta ({\mathbb{R}}\to{\mathbb{R}}^m).
$$
 System \eqref{e6.8} has a bounded solution if and only if
 system \eqref{e6.7} has one.
 If $\alpha_k=\dim\ker \mathbf{T}_k$, then there exist
 $\beta_k=\alpha_k+2n-m^+_k-m^-_k$ linearly independent functions
 $f^{(j)}=(f^{(j)}_1,f^{(j)}_2)^T$, $j=1,\dots,\beta_k$, such that for
 every
 $$
0\neq f=(f_1,f_2)^T\in \mathop{\rm Lin}\{f^{(1)},
 \dots,f^{(\beta_k)}\}
$$
 system \eqref{e6.7} has no bounded solutions.
 Without loss of generality we can assume that all components
 $f_1^{(j)}$, ($j=1,\dots, \beta_k$) belong to
 $C^{1+\delta}(\mathbb{R})$.

Denote $g^{(j)}=\pi(f^{(j)}_1,f^{(j)}_2)$. Then for every
nontrivial linear combination
$$
g(x)= c_1 g^{(1)}+\dots+c_{\beta_k} g^{(\beta_k)}
$$
the corresponding system \eqref{e6.8} has no bounded solutions.
In particular, this means
that all functions $g^{(j)}$ are linearly independent. This system
of linearly independent functions is complete, otherwise the
number of solvability conditions for the operator $\mathbf{T}_k$
 would exceed $\beta_k$.

 On the other hand, system \eqref{e6.8} has a solution bounded in
 $C^{2+\delta}({\mathbb{R}}\to{\mathbb{R}}^m)$
 if and only if it is true for the system
$$
u''+A(x)u'+(B(x)+\omega_k E_m)u=g(x).
$$
 Therefore, the numbers of solvability conditions for the
 corresponding operators are equal to each other.
 Due to Lemma \ref{lem6.1} the operator $\mathbf{L}_2$ is
 invertible, so that $\mathop{\rm ind} \mathbf{L}_2=0$.
 Hence,
 $$
\mathop{\rm ind} \mathbf{L}=\mathop{\rm ind}
 \mathbf{L}_1=\sum_{k=1}^N
 (n_{S,k}^++n_{B,k}^+-n_{S,k}^-)p_k=
 \sum_{k=1}^N(n_{U,k}^-+n_{B,k}^--n_{U,k}^+)p_k.
$$
This completes the proof of Lemma \ref{lem6.7} and of Theorem \ref{thm6.6}.
\end{proof}


\begin{corollary} \label{coro6.8}
If $M^+_k+M^-_k=\mathbb{R}^n$ and
$M^+_k\cap M^-_k=0$ for $k=1,\dots, N$, then the
operator $\mathbf{L}:U\to\widetilde{X}$ is continuously
invertible.
\end{corollary}

\begin{thebibliography}{99}

\bibitem{adr} L. Ya. Adrianova,  \emph{Introduction to Linear
Systems of Differential Equations}, Translations of Mathematical
Monographs V.\,146. American Mathematical Society. Providence,
Rhode Island. 1995.

\bibitem{adn} S. Agmon, A. Douglis, L. Nirenberg;
\emph{Estimates near the boundary for solutions of elliptic partial
differential equations satisfying general boundary conditions II.}
Comm. Pure Appl. Math. 17, no. 1, 35--92, 1964.

\bibitem{Ag} M. S. Agranovich. \emph{Elliptic boundary problems.} In
Encyclopaedia Math. Sci., vol. 79, Partial Differential Equations,
IX, Springer: Berlin, 1997, vol. 79; 1-144.

\bibitem{bas} V. P. Basov, \emph{On the structure
of a solution of a regular system,} Vestnik Leningr. Univ. (1952),
no.\,12, 3--8. (Russian)

\bibitem{boy1} A. A. Boichuk, \emph{Solutions of weakly nonlinear
differential equations bounded on the whole line}, Nonlinear
oscillations, \textbf{2}, (1999), no. 1, 3--10.

\bibitem{boy2} A. A. Boichuk, \emph{Dichotomy, Trichotomy and Solutions of Nonlinear
Systems Bounded on R}, Applications of Mathematics in
Engineering and Economics'26, eds. B. I. Cheshankov and
M. D. Todorov. Heron Press, Sofia, 2001, 9--15.

\bibitem{cop} W. A. Coppel, \emph{Dichotomies in Stability Theory},
Lecture Notes in Mathematics No 629, Springer-Verlag, Berlin,
1978.

\bibitem{DMV} A. Ducrot, M. Marion, V. Volpert;
\emph{Syst\`emes de r\'eaction-diffusion sans propri\'et\'e de Fredholm}.
CRAS, 340 (2005), 659--664.

\bibitem{cvo} J. F. Collet, V. A. Volpert;
\emph{Computation of the Index of Linear Elliptic Operators in
Unbounded Cylinders,} J. Func. Anal. \textbf{164} (1999), 34--59.

\bibitem{grob} D. M. Grobmann, \emph{Characteristic
exponents of systems, close to linear ones,} Mat. Sb. \textbf{30}
(1952), 121--166. (Russian)

\bibitem{her} L. H\"{o}rmander, \emph{The analysis
of linear partial differential operators. III. Pseudo-differential
operators.} Corrected reprint of the 1985 original. Grundlehren
der Mathematischen Wissenschaften [Fundamental Principles of
Mathematical Sciences], 274. Springer-Verlag, Berlin, 1994.
viii+ 525 pp.

\bibitem{kr1} S. G. Kryzhevich, \emph{On asymptotical
properties of solutions of the systems with a small nonlinearity
in a neighborhood of an equilibrium point,} Mathematical Notes,
\textbf{75} (2004), no.\, 5. 683--692.

\bibitem{kr2} S. G. Kryzhevich, \emph{The conditional stability of
ordinary differential equations and semihyperbolic systems,} Diff.
equations. \textbf{36} (2000), no.\,1. 39--46.

\bibitem{vk1} S. G. Kryzhevich, V. A. Volpert; \emph{On the
Fredholm Conditions and Solvability of the Elliptic Problems in
Unbounded Cylinders,} Vestnik S.-Peterb. Univ. Ser.\,1 (2004),
no.\,1, 22--32.

\bibitem{vk2} S. G. Kryzhevich, V. A. Volpert; \emph{On
Solvability of Linear Elliptic Problems in Unbounded Cylinders,}
Vestnik S.-Peterb. Univ. Ser.\,1 (2004) no.\,3, 31--37.

\bibitem{vk3} S. G. Kryzhevich, V. A. Volpert; \emph{On
Solvability of Nonlinear Elliptic Problems in Unbounded Cylinders,}
Vestnik S.-Peterb. Univ. Ser.\,1 (2004) no.\,4, 31--39.

\bibitem{pal} K. J. Palmer; \emph{Exponential Dichotomies
and Transversal Homoclinic Points,} J. Diff. Eqns. \textbf{55}
(1984), 225--256.

\bibitem{lyap} A. M. Lyapunov; \emph{The
general problem of motion stability.} Moscow, Nauka, 1956. Volume
2. 474 pp.

\bibitem{pl1} V. A. Pliss; \emph{Uniformly bounded solutions of
linear differential systems,} Diff. equations. \textbf{13} (1977)
no.\,5, 883--891.

\bibitem{pliss} V. A. Pliss, \emph{Integral Sets of Periodic
Differential Systems.} Moscow. Nauka, 1977, 304 pp. (Russian)

\bibitem{Volev} L. R.  Volevich; \emph{Solvability of boundary problems for
general elliptic systems.} Mat. Sbor. 1965, 68, 373-416. English
translation: Amer. Math. Soc. Transl. 1968, Ser. 2, 67, 182--225.

\bibitem{VV2} A.  Volpert, V.  Volpert.
\emph{Fredholm property of elliptic operators in unbounded
domains.} Trans. Moscow Math. Society, to appear.

\end{thebibliography}

\end{document}
