\input amstex
\documentstyle{amsppt}
\loadmsbm
\magnification=\magstephalf \hcorrection{1cm} \vcorrection{-6mm}
\nologo \TagsOnRight \NoBlackBoxes
\headline={\ifnum\pageno=1 \hfill\else%
{\tenrm\ifodd\pageno\rightheadline \else
\leftheadline\fi}\fi}
\def\rightheadline{EJDE--1999/31\hfil asymptotic expansions
\hfil\folio}
\def\leftheadline{\folio\hfil O. Hawamdeh \& A. Perjan
 \hfil EJDE--1999/31}

\def\pretitle{\vbox{\eightrm\noindent\baselineskip 9pt %
 Electronic Journal of Differential Equations,
Vol. {\eightbf 1999}(1999), No.~31, pp.~1--12.\hfil\break
ISSN: 1072-6691. URL: http://ejde.math.swt.edu or http://ejde.math.unt.edu
\hfill\break
ftp ejde.math.swt.edu (login: ftp)\bigskip} }

\topmatter
\title
Asymptotic expansions for linear symmetric hyperbolic systems with small parameter
\endtitle

\thanks
{\it 1991 Mathematics Subject Classifications:} 35B25, 35L45.\hfil\break\indent
{\it Key words and phrases:} hyperbolic systems, asymptotic expansions,
boundary layer functions, \hfil\break\indent singular perturbations.
\hfil\break\indent
\copyright 1999 Southwest Texas State University  and
University of North Texas.\hfil\break\indent
Submitted March 22, 1999. Published September 9, 1999.
\endthanks
\author O. Hawamdeh \&  A. Perjan   \endauthor
\address O. Hawamdeh \& A. Perjan \hfill \break
          Department of Mathematics \hfill \break
          Moldova State University \hfill \break
          60, A. Mateevici str. \hfill \break
          Chi\c sin\u au, MD-2009  MOLDOVA
\endaddress
\email perjan\@usm.md
\endemail

\abstract
 The boundary layer function method of Lyusternik-Vishik is used
 to obtain asymptotic expansions of the solutions to the Cauchy problem
 for linear symmetric hyperbolic systems with constant coefficients
 as the small parameter $\varepsilon$ tends to zero.
\endabstract

\endtopmatter
\document

\head 1. Introduction \endhead

We consider the following Cauchy problem, which will be called
$(P_\varepsilon)$,
$$\gather
(P_0 +\varepsilon P_1)U = F(x,t), \quad x\in \Bbb R^d,\; t>0,  \tag
1.1 \cr
U(\varepsilon,x,0)=U_0(x),\quad x\in \Bbb R^d,  \tag 1.2\endgather
$$
where $P_i=A_i\partial_t +B_i (\partial_x) +G_i$, $B_i(\partial_x)=
\sum_{j=1}^d\;B_{ij}\;\partial_{x_j}$, $i=0,1$; the $B_{ij}$, $G_i$ are real
constant $n\times n$ matrices, $d\ge 1$, $\varepsilon >0$ is a small
parameter,
$U,F:\Bbb R^d\times [0,\infty) \to \Bbb R^n$,
$$
A_0 = \left(\matrix
 I_m & 0\\
 0 & 0
 \endmatrix \right),\quad
A_1 = \left(\matrix
 0 & 0\\
 0 & I_{n-m} \endmatrix \right),\quad 0\le m \le n,
  $$
and $I_k$ is the $k\times k$ identity matrix.
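
As a simple model example, which we shall use for illustration below,
one may keep in mind the case $n=2$, $m=1$, $d=1$, $B_1=G_1=0$,
$$
B_0 = \left(\matrix
 0 & 1\\
 1 & 0
 \endmatrix \right),\qquad
G_0 = \left(\matrix
 0 & 0\\
 0 & 1
 \endmatrix \right).
$$
Writing $U=\operatorname{col}(u,w)$, the problem (1.1) then becomes the system
$$
\partial_t u+\partial_x w=f,\qquad
\varepsilon\,\partial_t w+\partial_x u+w=g,
$$
a first-order form of the telegraph equation (cf. \cite{12}).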

We shall investigate the behavior of the solution
$U(\varepsilon,x,t)$ to
the perturbed system $(P_\varepsilon)$ as $\varepsilon \to 0$. The
main
question of perturbation theory is whether the solution
$U(\varepsilon,x,t)$ to the perturbed system
tends to the solution $U(0,x,t)$ of the unperturbed system as
$\varepsilon \to 0$.
The answer depends on the structure of the operator
$P=P_0+\varepsilon P_1$ and also on the norm in which the
convergence is measured.
If the smooth solution $U(\varepsilon,x,t)\to U(0,x,t)$ uniformly on
its domain of definition ${\Cal D}$, then
$(P_0)$ is called a {\it regularly perturbed system}. Otherwise,
the system $(P_0)$
is called {\it singularly perturbed}. In this case,
there arises a subset of
${\Cal D}$ in which the solution $U(\varepsilon,x,t)$ behaves
singularly relative to $\varepsilon$. This subset is called {\it the
boundary layer}. The function which describes the singular behavior of
$U(\varepsilon,x,t)$ relative to $\varepsilon$ within the boundary
layer is
called {\it the boundary layer function}. At present
the theory of singularly perturbed problems is well developed.
We refer the reader to \cite{1}--\cite{8},
which contain extensive bibliographies and surveys of results in
perturbation theory for partial differential equations.
Here we extend the results of \cite{9} to the
$d$-dimensional case. We obtain asymptotic expansions of
the solutions
$U(\varepsilon,x,t)$ in positive powers of the small parameter
$\varepsilon$
when the matrices $B_i$ are symmetric, i.e., when the operator
$P_{\varepsilon}$
is hyperbolic.

Below we use the following notation. For $s\in \Bbb R$ we denote
by $H^s$ the
usual Sobolev space with the scalar product $(u,v)_s =
\int_{\Bbb R^d} (1+|\xi| ^2)^s
\hat u(\xi) \bar{\hat v}(\xi)\,d\xi$, where $\hat u =F[u]$ and $F^{-1}[u]$
are the direct and the inverse Fourier transforms of $u$ in $S'$.
$H^s_n =(H^s)^n$ is the Hilbert space equipped with the scalar
product
$(f_1,f_2)_{s,n}=\sum _{j=1}^n (f_{1j},f_{2j})_s$, $f_i =(f_{i1},\dots,f_{in})$,
$i=1,2$, and with the norm $\Vert \cdot \Vert _{s,n}$ generated by
this scalar
product.
Let ${\Cal D}'((a,b),X)$ be the space of vector-valued distributions on
$(a,b)$ with values in a Banach space $X$. Then for $k\in \Bbb N^*$
and
$1\le p \le \infty$
we set $W^{k,p}(a,b;X)=\{u\in{\Cal D}'((a,b);X);\;u^{(j)}\in
L^p(a,b;X),\; j=0,1,\dots, k\}$, where $u^{(j)}$ is the distributional
derivative of order $j$. If $k=0$ we set $W^{0,p}(a,b;X)=L^p(a,b;X)$.
Let us denote $A=A_0+\varepsilon A_1$, $B=B_0+\varepsilon B_1$,
$G=G_0+\varepsilon G_1$, $L_j=B_j(\partial _x) + G_j$, $j=0,1$,
where $\partial_x =(\partial
 /\partial_{x_1},\dots,\partial/\partial_{x_d})$. The special
form of the matrices $A_0$ and $A_1$ induces
the natural block representations of the matrices $B_j,G_j$:
$$
B_j = \left(\matrix
 B_{j1} & B_{j2}\\
 B^*_{j2} & B_{j3} \endmatrix \right),\quad
G_j = \left(\matrix
 G_{j1} & G_{j2}\\
 G^*_{j2} & G_{j3} \endmatrix \right),\quad
j=0,1,
  $$
 where $B_{j1}(\xi)$, $G_{j1} \in M^m(\Bbb R)$, $ B_{j2}(\xi)$,
 $G_{j2} \in M^{m\times (n-m)}(\Bbb R)$, $B_{j3}(\xi)$,
 $G_{j3} \in M^{n-m}(\Bbb R)$, and ``$*$'' denotes
        transposition. Denote
 $L_{ij}(\partial _x)=
 B_{ij} (\partial_x) + G_{ij}$, $i=0,1$, $j=1,2,3$,    and
  $F=\operatorname{col} (f,g)$,
 $U_0=\operatorname{col}(u_0,u_1)$, where $f,u_0 \in M^{m\times 1}(\Bbb R)$,
$g,u_1 \in M^{(n-m)\times 1}(\Bbb R)$.

Let us formulate the main assumptions to be used in the sequel.

 {\bf(H1)}  $B_i(\xi)$, $G_i$, $i=0,1,$ are real symmetric
matrices
  for $\xi\in \Bbb R^d$;

{\bf(H2)} $(G\xi ,\xi )_{\Bbb R^n}
 \ge (G_{03}\eta,\eta)_{\Bbb R^{n-m}}\ge
 q_0\vert\eta \vert ^2$, with $q_0>0$,    \newline\indent
\qquad for all $\xi =(\xi',\eta)\in \Bbb R^n$ with
$ \eta \in \Bbb R^{n-m}$. \smallskip

\noindent Under hypothesis (H1), the system $(P_\varepsilon)$ is
symmetric hyperbolic. According to \cite{7}, the analysis of the systems
$(P_0)$ and $(P_\varepsilon)$ shows that:

a) If $m=n$, then the system $(P_0)$ is of hyperbolic type and
regularly
perturbed, because in this case the boundary layer function is zero;

b) If $m=0$, then the system $(P_0)$ is of elliptic type and
singularly
perturbed;

c) If $0<m<n$, then the system $(P_0)$ is well-posed in the sense
of
Petrovskii and singularly perturbed. In particular, if $\det B_{03} \ne
0$
and $B_{02}=0$, then the system $(P_0)$ is of elliptic-parabolic type.
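
In the model example all the matrices are symmetric, so (H1) holds,
and, since $G_{03}=1$, (H2) holds with $q_0=1$. Here $B_{02}=1$ and
$B_{03}=0$; setting $\varepsilon=0$ and eliminating $w$ from the second
equation reduces $(P_0)$ to the heat equation
$$
\partial_t v-\partial^2_x v=f-\partial_x g,\qquad w=g-\partial_x v,
$$
so the perturbation is singular, as in the case c).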

In the following section we give the formal asymptotic
expansions of
the solutions to the problem $(P_\varepsilon)$ in positive powers of
the small parameter $\varepsilon$. The last two sections establish the
validity of these formal expansions, leading to the main result,
Theorem 3.5.

 \head 2. Formal asymptotic expansions \endhead

According to the method of Lyusternik-Vishik \cite {2}, for the
solution
$U(\varepsilon,x,t)$ to the problem $(P_{\varepsilon})$ we postulate
the
following asymptotic expansion
$$
U(\varepsilon,x,t) = \sum_{k=0}^N
\,\varepsilon^k(V_k(x,t)+Z_k(x,\tau))+
R_{N}(\varepsilon,x,t),\quad \tau=\frac {t}{\varepsilon}, \tag
2.1
$$
where $Z(x,\tau)=Z_0(x,\tau)+\cdots + \varepsilon^NZ_N(x,\tau)$ is
the
boundary layer function. It describes the singular behavior of the
solution
$U(\varepsilon,x,t)$ relative to $\varepsilon$ within a neighborhood of
the set $\{(x,0),\,x\in \Bbb R^d\}$,
which is the boundary layer. The function $V(x,t)=V_0(x,t)+ \cdots
+
\varepsilon^N V_N(x,t)$ is the regular part of the expansion $(2.1)$.
Usually
the function $Z(x,\tau)$ is considered small in some sense for large
$\tau$, i.e.
$Z \to 0$ as $ \tau \to \infty$. On the other hand, because
$U(\varepsilon,x,t)
\not \to U(0,x,t)$ as $\varepsilon \to 0$ within the boundary layer,
the
function $Z(x,\tau)$ has to compensate for the discrepancy between
$U(\varepsilon,x,0)$ and $U(0,x,0)$.

Now, we formally substitute expansion (2.1) into (1.1) and identify
the
coefficients of like powers of $\varepsilon$ which depend on the
same
variables. Then we get the following equations:
$$
P_0V_k= F_k(x,t),\quad x\in \Bbb R^d,\,t>0,      \tag 2.2
$$
where
$F_0=F$, $F_k=-P_1V_{k-1}$, $k=1,\dots ,N$,
 $$ \gathered
A_0 \partial_{\tau} Z_k={\Cal F}_k(x,\tau),\quad
k=0,1,\dots,N, \cr
A_1(L_0Z_N+L_1Z_{N-1}+\partial _{\tau} Z_N)=0, \quad x\in \Bbb
R^d,
\tau >0,\endgathered
\tag 2.3
$$
where
$
{\Cal F}_0=0$, ${\Cal F}_1 =-L_0Z_0-A_1\partial _{\tau}Z_0$,
${\Cal F}_k=-L_0Z_{k-1}-
L_1Z_{k-2}-A_1\partial _{\tau}Z_{k-1}$, $k=2,\dots ,N$,
and
$$
(P_0+\varepsilon P_1)R_N={\Cal F}(x,t,\varepsilon),
\quad x\in \Bbb R^d,\,t>0,  \tag 2.4
$$
where
${\Cal F} =-\varepsilon^{N+1}(P_1V_N+L_1Z_N)-\varepsilon
^NA_0(L_0Z_N+L_1Z_{N-1})$.
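
These formulas result from the two-scale differentiation rule: since
$\tau=t/\varepsilon$, for the boundary layer terms we have
$$
(A_0+\varepsilon A_1)\partial_t\big[\varepsilon^k Z_k(x,t/\varepsilon)\big]=
\varepsilon^{k-1}A_0\partial_{\tau}Z_k(x,\tau)+
\varepsilon^{k}A_1\partial_{\tau}Z_k(x,\tau),
$$
so each $Z_k$ enters the hierarchy one power of $\varepsilon$ earlier
through $A_0\partial_{\tau}$, which explains the shift of indices in
${\Cal F}_k$ and in (2.4).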

Similarly, substituting (2.1) into the initial condition (1.2), we obtain
$$\gather
R_N(\varepsilon,x,0)=0,\quad x\in \Bbb R^d, \tag 2.5 \cr
V_0(x,0)+Z_0(x,0)=U_0(x),\quad x\in \Bbb R^d, \tag 2.6 \cr
V_k(x,0)+Z_k(x,0)=0,\quad x\in \Bbb R^d,\,k=1,\dots,N. \tag 2.7 \endgather
$$
Let
$$
Z_k=\left(\matrix X_k \\
Y_k
\endmatrix \right),\quad
V_k= \left(\matrix
v_k\\
 w_k \endmatrix \right),\quad
F_k=\left(\matrix f_k\\
g_k \endmatrix \right),\quad
{\Cal F}_k= \left(\matrix
{\Cal F}_{k1}\\
{\Cal F}_{k2} \endmatrix \right),
$$
where $X_k,v_k,f_k,{\Cal F}_{k1} \in M^{m\times 1}(\Bbb
R)$, $Y_k,w_k,g_k,
{\Cal F}_{k2}\in M^{(n-m)\times 1}(\Bbb R)$. Then from
(2.3), (2.6), and (2.7) for $X_k$ and $Y_k$,  we get
$$
\partial_{\tau} X_k={\Cal F}_{k1},\quad X_k\to 0 \text{ as } \tau \to
+\infty,
\tag 2.8
$$
and
 $$ \gathered
     \partial_{\tau} Y_k+L_{03}Y_k={\Cal F}_{k2}(x,\tau), \quad
   x\in \Bbb R^d,\,\tau>0  \\
  Y_k(x,0)=\cases
   u_1(x)-w_0(x,0), & \text{for $k=0$},\\
   -w_k(x,0) & \text{for $k=1,\dots,N$, $x\in \Bbb R^d$},
   \endcases
   \endgathered
   \tag 2.9
   $$
where
$$\gather
{\Cal F}_{01}=0,\quad{\Cal F}_{11}=-L_{01}X_0-L_{02}Y_0,\quad
{\Cal F}_{k1}=-L_{01}X_{k-1}-L_{02}Y_{k-1} \\
 -L_{11}X_{k-2}-
L_{12}Y_{k-2},\quad k=2,\dots,N, \\
 {\Cal F}_{02}=-L^*_{02}X_0,\quad {\Cal F}_{k2}=-L^*_{02}X_k-
   L_{13}Y_{k-1}-L^*_{12}X_{k-1},\\
  L^*_{ij}(\xi)=B^*_{ij}(\xi)+G^*_{ij},\quad
   k=1,\dots ,N. \endgather
$$
Similarly, from (2.2) and (2.6), (2.7) we obtain the problems for
$v_k$ and $w_k$,
 $$\gathered
   \partial_t v_k+L_{01}v_k+L_{02}w_k=f_k(x,t), \\
   L^*_{02}v_k+L_{03}w_k=g_k(x,t),\quad x\in \Bbb R^d,\, t>0,  \\
  v_k(x,0)=\cases
   u_0(x)-X_0(x,0),\quad \text{for $k=0$},\\
   -X_k(x,0),\quad \text{for $k=1,\dots,N$},\quad x\in \Bbb R^d,
   \endcases
   \endgathered
   \tag 2.10
   $$
 Thus, we have obtained the problems for the functions
$X_k,Y_k,v_k,w_k$
 and $R_N$. In the following sections we establish the
 validity of the expansion (2.1).

 \head {3. Justification of expansion (2.1)}  \endhead

 To study the problem (2.10) we examine the problem
 $$ \gathered
  \partial_t v+L_{01}v+L_{02}w=f(x,t), \\
   L^*_{02}v + L_{03}w=g(x,t),\quad x\in \Bbb R^d,\,t>0,  \\
  v(x,0)=h(x),\quad x\in \Bbb R^d,
  \endgathered
   \tag PV $$
 which is of the same type. To obtain the solvability of this problem
 and the regularity of its solutions we pass to the following
problem
 for $\hat v$ and $\hat w$:
 $$\gathered
  \partial_t \hat v+(G_{01}+i|\xi| b_{01}(\xi))\hat v+
   (G_{02}+i|\xi| b_{02}(\xi))\hat w=\hat f(\xi,t), \\
    (G^*_{02}+i|\xi| b^*_{02}(\xi))\hat v +
   (G_{03}+i|\xi| b_{03}(\xi))\hat w=\hat g(\xi,t), \\
 \hat v(\xi ,0)=\hat h(\xi),\quad
   \endgathered
   \tag {$P\hat V$}
   $$
where $b_{ij}(\xi)=B_{ij}(\xi/|\xi|)$.

The following two lemmas will be proved in Section 4.

\proclaim{Lemma 3.1}
Under the hypotheses (H1), (H2) the matrix $G_{03}+i|\xi|
b_{03}(\xi)$
is invertible for $\xi\in \Bbb R^d$ and the function $\xi \to (G_{03}
+i|\xi| b_{03}(\xi))^{-1}$ is bounded on $\Bbb R^d$.
\endproclaim

By Lemma 3.1, the problem ($P\hat{V}$) takes the form
 $$\gathered
   \frac{d}{dt} \hat v(\xi,t)+K(\xi)\hat v(\xi,t)=H(\xi,t), \\
   \hat v(\xi ,0)=\hat h(\xi),
    \endgathered
   \tag 3.1
   $$
$$
\hat w(\xi,t)=(G_{03}+i|\xi| b_{03}(\xi))^{-1}(\hat g(\xi,t)-(G^*_{02}+
i|\xi| b^*_{02}(\xi))\hat v(\xi,t)),
\tag 3.2
$$
 where
$$\aligned
    K(\xi)=&G_{01}+i|\xi| b_{01}(\xi)\\
    &-(G_{02}+i|\xi|b_{02}(\xi))(G_{03}+
    i|\xi| b_{03}(\xi))^{-1}
     (G^*_{02}+i|\xi| b^*_{02}(\xi)), \\
    H(\xi,t)=&\hat f(\xi,t)-(G_{02}+i|\xi| b_{02}(\xi))(G_{03}+
    i|\xi| b_{03}(\xi))^{-1}\hat g(\xi,t).
 \endaligned \tag 3.3
$$

\proclaim{Lemma 3.2}
Under the hypotheses (H1), (H2) the matrix $K(\xi)$
can be represented in the form
$$
  K(\xi)=K_0(\xi)+i|\xi| K_1(\xi)+|\xi|^2K_2(\xi),\quad \xi \in \Bbb
R^d,
\tag 3.4
$$
where the functions $\xi \to K_j(\xi)$, $j=0,1,2$ are bounded on $
\Bbb R^d$,
$K_1, K_2$ are real symmetric and $K_2 \ge 0$.\endproclaim
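
In the model example, $G_{03}=1$, $b_{03}=0$ and $b_{02}(\xi)=\xi/|\xi|$,
so $(G_{03}+i|\xi| b_{03}(\xi))^{-1}=1$ and (3.3) gives
$$
K(\xi)=-(i|\xi|\, b_{02}(\xi))^2=\xi^2,
$$
i.e. $K_0=K_1=0$ and $K_2=1\ge 0$ in (3.4); the problem (3.1) is then
the Fourier transform of the limit heat equation obtained in the
Introduction.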

These lemmas permit us to prove the following proposition.

\proclaim{Proposition 3.3}
Let the hypotheses (H1), (H2) be fulfilled and $l\in \Bbb N^*$.
If $h\in H^{s+2l+1}_m$, $F=\operatorname{col}(f,g) \in
W^{l,1}(0,T;H^{s+2}_n)$,
then there exists a unique strong solution
$V=\operatorname{col}(v,w)\in W^{l,\infty}(0,T;H^s_n)$
of the problem (PV) and
$$
\| V \| _{W^{l,\infty}(0,T;H^s_n)}\le C(T)(\|
h\|_{s+2l+1,m}+
\| F\|_{W^{l,1}(0,T;H^{s+2}_n)}).
\tag 3.5
$$  \endproclaim

\noindent{\bf Proof.} Consider the Cauchy problem
 $$
   \frac{d}{dt} \hat v(t)+K(\xi)\hat v(t)=0,\quad
   \hat v(0)=\hat h, \quad 0<t<T,
   \tag 3.6
   $$
in the Hilbert space $H=\{f=(f_1,\dots,f_m);\;(1+|\xi|^2)^{\frac{s}{2}}
f_k(\xi)\in L^2(\Bbb R^d),\;k=1,\dots,m\}$, equipped with the scalar
product
$(f,g)_H =\int_{\Bbb R^d}\;(1+|\xi|^2)^s(f,{\bar g})_{\Bbb R^m}\;d\xi$.
The
representation (3.4) shows that the operator $-K(\xi):H\to H$
satisfies the
conditions
$$
\text{{\rm Re}}(-Kf,f)_H\le \omega(f,f)_H,\quad
\text{{\rm Re}} (-{\bar K}^*f,f)_H\le\omega(f,f)_H,\quad f\in H,
$$
where $\omega =\sup_{\xi \in \Bbb R^d} \| K_0(\xi)\|
_{\Bbb R^m\to \Bbb R^m}+\delta$ with some $\delta>0$. This means
that the operator $-(K+\omega I)$ is
maximal dissipative on $H$. According to \cite{10} the Cauchy
problem (3.6)
generates a $C_0$ semigroup of operators $\{{\hat T}(t),\;t\ge 0\}$
on $H$.
Since
 $$
 \frac {d}{dt} \| {\hat v}(\cdot,t)\| ^2_H \le -( K_0{\hat v}(\cdot,t),
  {\hat v}(\cdot,t))_H-({\hat v}(\cdot,t), K_0{\hat v}(\cdot,t))_H\le
2\omega
 \| {\hat v}(\cdot,t)\| ^2_H,
 $$
we have $\| {\hat v}(\cdot,t)\|_H \le e^{\omega t}\| h\|
_H$ for
any $h\in H,$ i.e. $\|{\hat T}(t)\| \le e^{\omega t}$. Due to
Parseval's equality we get that the Cauchy problem
 $$
 \frac{d}{dt} v(t)+{\check K} v(t)=0,\quad
    v(0)=v_0, \quad 0<t<T,\quad
   (F[{\check K}v]=K(\xi){\hat v})
  $$
  generates a $C_0$ semigroup of operators  $\{T(t), t\ge 0\}$ on
$H^s_m$,
  such that $v(\cdot,t)=T(t)v_0$ and $\| T(t)\| \le e^{\omega
t}$.
  Then $T_0(t)=T(t)e^{-\omega t}$ is the semigroup generated by
$-({\check K}+\omega I)$, associated with the Cauchy problem
   $$
   \frac{d}{dt} z(t)+({\check K}+\omega I) z(t)=f(t)e^{\omega t},\quad
    z(0)=y_0, \quad 0<t<T.
  \tag 3.7
  $$
  According to \cite{11}, for every $y_0\in H^s_m$ and $f\in
L^1(0,T;H^s_m)$
  there exists a unique
  mild solution $z\in C([0,T];H^s_m)$ of this problem, such that
  $$
  z(t)=T_0(t)y_0+\int_0^t\;T_0(t-s)f(s)e^{\omega s}\;ds
  $$
  and hence
  $$
  \| z\|_{C([0,T];H^s_m)}\le \| y_0\|_{s,m}+
  \| f\| _{L^1(0,T;H^s_m)}e^{\omega T}.
  $$
  Moreover, if $y_0\in H^{s+2l}_m, f\in W^{l,1}(0,T;H^s_m)$ and $
  l\in \Bbb N^*,$
  then $z$ is a strong solution  of the problem (3.7),
  $z\in W^{l,\infty}(0,T;H^s_m)$ and
  $$
  \| z\|_{W^{l,\infty}(0,T;H^s_m)}\le C(T)(\|
y_0\|_{s+2l,m}+
  \| f\|_{W^{l,1}(0,T;H_m^s)}).
  $$
  Note that the solution $y$ to the Cauchy problem
  $$
  \frac{d}{dt}y(t)+{\check K}y(t)=f,\quad y(0)=y_0,\quad 0<t<T,
  $$
  and the solution $z$ to the problem (3.7) are connected by
means of the
  equality
  $y(t)=e^{-\omega t}z(t)$. Consequently, for the same $y_0$, $f$
and
  $l\in \Bbb N^*$
  we have
  $$
  \| y\|_{W^{l,\infty}(0,T;H^s_m)}\le C(T)(\|
y_0\|_{s+2l,m}+
  \| f\|_{W^{l,1}(0,T;H_m^s)}).
  $$
  In view of (3.1), using the last estimate and the boundedness of
the matrix
  $(G_{03}+i|\xi|b_{03}(\xi))^{-1}$ we obtain the estimate
  $$
  \| v\|_{W^{l,\infty}(0,T;H^s_m)}\le C(T)(\|
h\|_{s+2l,m}+
  \| f\|_{W^{l,1}(0,T;H_m^s)}+\| g\|_{W^{l,1}(0,T;H_{n-
m}^{s+1})}).
  \tag 3.8
  $$
  From (3.2) and (3.8) we get the estimate
  $$
  \| w\|_{W^{l,\infty}(0,T;H^s_{n-m})}\le C(T)(\| h\|_{s+2l+1,m}+
  \| f\|_{W^{l,1}(0,T;H_m^{s+1})}
  +\| g\|_{W^{l,1}(0,T;H_{n-m}^{s+2})}).
  \tag 3.9
  $$
 Now, the estimates (3.8) and (3.9) imply the estimate (3.5).
Proposition 3.3
 is proved.
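
The loss of derivatives in (3.5) can be read off from the proof: by
(3.1) and (3.4), each $t$-derivative of $\hat v$ brings one factor of
$K(\xi)$, and the boundedness of $K_0$, $K_1$, $K_2$ implies
$$
\| K(\xi)\| _{\Bbb R^m\to \Bbb R^m}\le C(1+|\xi|^2),\quad
\xi \in \Bbb R^d,
$$
so each degree of regularity in $t$ costs two degrees of regularity in
$x$, as in (3.8), while the extra unit of regularity in (3.9) and (3.5)
comes from recovering $\hat w$ via (3.2), which costs one more
derivative.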

Consider the Cauchy problem
$$ \gathered
   \partial_{\tau} Y+L_{03}Y={\Cal F}(x,\tau),\quad x\in  \Bbb
R^d,\,\tau>0,\\
 Y(x,0)= y_0(x),\quad x\in \Bbb R^d.
   \endgathered
   \tag PY
   $$

\proclaim{Proposition 3.4}
Let hypotheses (H1), (H2) be fulfilled and $l\in \Bbb N^*$.
If $y_0\in H^{s+l}_{n-m}$, ${\Cal F} \in W^{l,1}_{\text {{\rm loc}}}
(0,\infty;H^s_{n-m})$, then there exists a unique strong solution
$Y\in W^{l,\infty}_{\text {{\rm loc}}}(0,\infty;H^s_{n-m})$ of the
problem $(PY)$. For this solution
$$\aligned
\| \partial _{\tau}^l Y(\cdot,\tau) \| _{s,n-m}\le
& C e^{- q_0\tau}(\| y_0\|_{s+l,n-m}
+\sum _{\nu =0}^{l-1}\,\| \partial^{\nu}_{\tau} {\Cal
F}(\cdot,0)\|_{s+l-\nu -1,n-m} \\
&+ \int_0^{\tau}\,e^{q_0\theta}\|
\partial^l_{\tau} {\Cal F}(\cdot,\theta)\| _{s,n-m}\,d\theta)
\endaligned \tag 3.10
$$
\endproclaim

\noindent{\bf Proof.} Under the hypotheses (H1), (H2) the operator
$-L_{03}(\partial _x)$ is dissipative and generates a $C_0$
semigroup of contractions $S(\tau)$ on $H^s_{n-m}$. Then there
exists a unique mild solution $Y \in C([0,\infty);H^s_{n-m})$
of the Cauchy problem (PY). In the usual way it is not difficult
to obtain the estimate $\| S(\tau)\| \le e^{-q_0\tau}$, $\tau \ge 0$.
This estimate and the formula
$$
Y(\cdot,\tau)=S(\tau)y_0+ \int_0^{\tau}\,S(\theta){\Cal F}(\cdot,
\tau -\theta)\,d\theta
$$
yield the estimate (3.10) in the case $l=0$. For $l\ge 1$
the estimate (3.10) is obtained by differentiating the equation in
(PY) with respect to
$\tau$. Proposition 3.4 is proved.

Due to these propositions, we can determine the functions $V_k$
and $Z_k$.
Indeed, if $k=0$, then from (2.8) it follows that $X_0=0$. Then from
(2.10),
due to Proposition 3.3, we find the main regular term
$V_0=\operatorname{col}(v_0,w_0)$ of the expansion (2.1). At the same time, we have
$$w_0(x,0)=F^{-1}[(G_{03}+i|\xi| b_{03}(\xi))^{-1}(\hat g(\xi,0)-
(G^*_{02}+
i|\xi| b^*_{02}(\xi))\hat u_0(\xi))].$$
Moreover, Lemma 3.1 and the Parseval equality permit us to obtain
the estimate
$$
\| w_0(\cdot,0)\|_{s,n-m} \le C(\| g(\cdot,0)\|_{s,n-m}+
\| u_0\|_{s+1,m})\le
C(\| U_0\|_{s+1,n} +
\| F(\cdot,0)\|_{s,n}).  \tag 3.11
$$
Due to Proposition 3.4, this fact permits us to define the function
$Y_0$
as the solution of the Cauchy problem (2.9). Moreover, from (3.10) and
(3.11) we
have
$$
\| \partial _{\tau}^l Y_0(\cdot,\tau) \| _{s,n-m}\le C e^{-q_0\tau}
(\| U_0\|_{s+l+1,n}+\|  F(\cdot,0)\|_{s+l,n}).
\tag 3.12
$$
Thus, we have defined the main singular term $Z_0=\operatorname{col}(0,Y_0)$
of expansion (2.1).
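
In the model example, $L_{03}=G_{03}=1$ and
${\Cal F}_{02}=-L^*_{02}X_0=0$,
so the problem (2.9) with $k=0$ reads $\partial_{\tau}Y_0+Y_0=0$,
$Y_0(x,0)=u_1(x)-w_0(x,0)$, whence
$$
Z_0(x,\tau)=\left(\matrix 0 \\
(u_1(x)-w_0(x,0))\,e^{-\tau}
\endmatrix \right),
$$
in agreement with the decay rate $e^{-q_0\tau}$, $q_0=1$, in (3.12).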

Let us define the next terms of this expansion. Suppose that the
terms
$V_0,\dots$, $V_{k-1}$ and $Z_0,\dots,Z_{k-1}$ have already been found.
We shall find the terms $V_k$ and $Z_k$ and show that the
estimates
$$ \aligned
\| V_k \|_{W^{l,\infty}(0,T;H^s_n)} \le &
 C(T)(\|U_0\|_{s+2l+3k+1,n} \\
 &+\| F(\cdot,0)\|_{s+2l+3k-2,n}+\| F \|
_{W^{l,1}(0,T;H^{s+3k+2}_n)}), \endaligned
 \tag 3.13
$$
and
$$
\| \partial _{\tau} ^lZ_k(\cdot,\tau) \| _{s,n}\le C e^{-q_0\tau}
(1+\tau^k)(\| U_0\|_{s+l+k+1,n}+
\|  F(\cdot,0)\|_{s+l+k,n})
\tag 3.14
$$
hold, supposing that such estimates are true for the previous terms.
Note that
the estimates (3.13), (3.14) for $V_0$ and $Z_0$ follow from (3.5)
and (3.12).

At first, solving the problem (2.8), we get $X_k(\cdot,\tau)=-
\int_{\tau}^{\infty}
\,{\Cal F}_{k1}(\cdot,\theta)\,d\theta$, where the integral exists due
to the
estimate (3.14) for $Z_{k-1}$. From this formula, using (3.14) for
$Z_{k-1}$
and for $Z_{k-2}$, we obtain
$$ \aligned
\| \partial ^l_{\tau}X_k (\cdot,\tau)\| _{s,m}=
&\| \partial ^{l-1}_{\tau}{\Cal F}_{k1} (\cdot,\tau)\| _{s,m}\\
\le& C(\| \partial ^{l-1}_{\tau}Z_{k-1} (\cdot,\tau)\| _{s+1,n}+
\| \partial ^{l-1}_{\tau}Z_{k-2} (\cdot,\tau)\| _{s+1,n})\\
\le & C e^{-q_0\tau}(1+\tau^{k-1})(\| U_0\|_{s+l+k,n}+
\|  F(\cdot,0)\|_{s+l+k-1,n}) \endaligned
\tag 3.15
$$
for $l\ge 1$. Similarly we get the estimate (3.15) in the case $l=0$.

Because $v_k(\cdot,0)=-X_k(\cdot,0)$, due to Proposition 3.3 we
solve the
problem
(2.10) and find $V_k$. Moreover, using (3.5), (3.13) for $V_{k-1}$,
(3.15)
for $X_k$ and the estimate
$$
\| V_k \| _{W^{l,\infty}(0,T;H^s_n)}
\le C(T)(\| X_k(\cdot,0)\|_{s+2l+1,m}+
\| V_{k-1} \| _{W^{l,\infty}(0,T;H^{s+3}_n)}),
$$
we obtain the estimate (3.13) for $V_k$.

At the same time, we find
$$
w_k(x,0)=F^{-1}[(G_{03}+
i|\xi| b_{03}(\xi))^{-1}(\hat g_k(\xi,0)-(G^*_{02}
+i|\xi| b^*_{02}(\xi))\hat X_k(\xi,0))]
$$
and establish the estimate
$$  \aligned
\| w_k(\cdot,0)\| _{s,n-m} \le& C(\| g_k(\cdot,0)\| _{s,n-
m}+
\| X_k(\cdot,0)\| _{s+1,m})\\
\le&
C(\| X_{k-1}(\cdot,0)\| _{s+1,m}+
\| X_k(\cdot,0)\| _{s+1,m}\\
&+\| w_{k-1}(\cdot,0)\|_{s+1,n-m})\\
\le&C(\| U_0\| _{s+k+1,n}+\| F(\cdot,0)\| _{s+k,n}).\endaligned
\tag 3.16
$$
Also, using (3.14) for $Z_{k-1}$ and (3.15) for $X_k$ we have
$$ \aligned
\| \partial _{\tau} ^l{\Cal F}_{k2}(\cdot,\tau) \| _{s,n-m}
\le &C( \| \partial _{\tau} ^lX_k(\cdot,\tau) \| _{s+1,m}+
\| \partial _{\tau} ^lZ_{k-1}(\cdot,\tau) \| _{s+1,n})\\
\le&C e^{-q_0\tau}(1+\tau^{k-1})(\| U_0\|_{s+l+k+1,n}+
\|  F(\cdot,0)\|_{s+l+k,n}). \endaligned
\tag 3.17
$$
From (3.10), (3.16) and (3.17) we obtain the estimate
$$ \aligned
\| \partial _{\tau}^l Y_k(\cdot,\tau) \| _{s,n-m}
\le& Ce^{-q_0\tau}
(\| w_k(\cdot,0)\|_{s+l,n-m}
+\sum _{\nu =0}^{l-1}\,\| \partial ^{\nu}_{\tau}{\Cal
F}_{k2}(\cdot,0)\|_{s+l-\nu -1,n-m}\\
&+\int_0^{\tau}\,e^{q_0\theta}\|
\partial^l_{\tau} {\Cal F}_{k2}(\cdot,\theta)\| _{s,n-m}\,d\theta)\\
\le&
C e^{-q_0\tau}(1+\tau^k)(\| U_0\|_{s+l+k+1,n}+
\| F(\cdot,0)\|_{s+l+k,n}). \endaligned
\tag 3.18
$$
The estimates (3.15) and (3.18) imply the estimate (3.14) for $Z_k$.

Now we are ready to prove the main result.

\proclaim{Theorem 3.5}
Suppose that $B$ and $G$ satisfy conditions (H1), (H2)
and $0\le l<N+1$.
If $U_0 \in H^{s+2l+3(N+1)}_n$, $F\in
W^{l+1,1}(0,T;H^{s+2l+3(N+1)}_n)$, then
there exists a unique strong solution $U \in W^{l,\infty}(0,T;H^s_n)$
of
the problem
$(P_\varepsilon)$. For this solution the expansion (2.1) holds, where
$V_k$
and $Z_k$ are determined by the problems (2.10) and (2.8), (2.9),
respectively,
and they satisfy the estimates (3.13), (3.14). For the remainder
term $R_N=\operatorname{col}(R_{N1},R_{N2})$ the estimate
term $R_N=\operatorname{col}(R_{N1},R_{N2})$  the estimate
$$
\| R_{N1}\| ^2_{W^{l,\infty}(0,T;H^s_m)} +\varepsilon^{1/2}
\|  R_{N2}\| ^2_{W^{l,\infty}(0,T;H^s_{n-m})}\le
C(T)\varepsilon^{N+1-l}
\tag 3.19
$$
is true with  $C(T)$ depending on $T$, $\| U_0 \|
_{s+2l+3(N+1),n}$,
$\| F \| _{W^{l+1,1}(0,T;H^{s+2l+3(N+1)}_n)}$ and $q_0$.
In particular, if $N=0$, then
$$
\| U-V_0-Z_0 \| _{C([0,T];H^s_n)} \le C(T) \varepsilon^{1/4}.
$$
\endproclaim

\noindent{\bf Proof.} The solvability of the problem $(P_\varepsilon)$ can be
 obtained using the theory of
 $C_0$ semigroups of operators \cite{11}. Indeed, the operator
$-(B(\partial _x)+G)$ is closed and dissipative on $H^s_n$. This
operator generates
 a $C_0$ semigroup of contractions on $H^s_n$, which solves
the problem
 $(P_\varepsilon)$. Moreover, the conditions $U_0 \in H^{s+l}_n$,
$F \in W^{l,1}(0,T;H^s_n)$, $\partial^{\nu}_t F(\cdot,0) \in H^{s+l-\nu -
1}_n$, $\nu = 0,\dots,l-1$, $l\ge 1$, imply the regularity of the solution
 $U\in W ^{l,\infty}(0,T;H^s_n)$.
 It remains to prove the
 estimate (3.19). We shall prove this estimate using the method
from
 \cite{12}. Further, all constants depending on the norms indicated in
 Theorem 3.5 will be denoted by $C(T)$. Let us denote ${\Cal
R}_l=
 \partial ^l_t R_N$, ${\Cal R}_{li}=\partial ^l_t R_{Ni}$, $i=1,2 $. From
 condition (H1) it follows that $(B{\Cal R}_l,{\Cal R}_l)_{s,n}$
 is purely imaginary. Consequently,
 $$
 \frac{d}{dt}(A{\Cal R}_l(\cdot,t),{\Cal R}_l(\cdot,t))_{s,n}+
 2(G{\Cal R}_l(\cdot,t),{\Cal R}_l(\cdot,t))_{s,n}=
 2\, \text{{\rm Re}}\, (\partial _t^l{\Cal F}(\cdot,t),{\Cal R}_l(\cdot,t))_{s,n}.
 $$
 Then, using
 (H2), it is not difficult to get the inequality
 $$
 \frac{d}{dt} (A{\Cal R}_l(\cdot,t),{\Cal R}_l(\cdot,t))_{s,n} +2q_0
 ({\Cal R}_{l 2}(\cdot,t),{\Cal R}_{l2}(\cdot,t))_{s,n-m} \le
 2| (\partial ^l_t{\Cal F}(\cdot,t),{\Cal R}_l(\cdot,t))_{s,n}|.
 \tag 3.20
 $$
 The estimates (3.13) and (3.14) yield
$$\aligned
| (\partial ^l_t&{\Cal F}(\cdot,t),{\Cal R}_l(\cdot,t))_{s,n}| \\
\le&
\varepsilon^{N+1}| (P_1(\partial ^l_tV_N(\cdot,t))+
\varepsilon ^{-l}L_1(\partial ^l_{\tau}Z_N(\cdot,\tau)),
{\Cal R}_{l}(\cdot,t))_{s,n}|\\
& +\varepsilon^{N-l}| (L_0(\partial ^l_{\tau}Z_N(\cdot,\tau))+
L_1(\partial ^l_{\tau}Z_{N-1}(\cdot,\tau)),
A_0{\Cal R}_{l}(\cdot,t))_{s,n}| \\
\le&
C(T)(\varepsilon^{N-l} \kappa(t) \| {\Cal
R}_{l1}(\cdot,t)\|_{s,m}+
(\varepsilon^{N+1}+\kappa(t)\varepsilon^{N+1-l})
 \| {\Cal R}_l(\cdot,t)\| _{s,n}), \endaligned
  \tag 3.21
 $$
 where $0\le t\le T$, $\tau =t/\varepsilon$ and $\kappa(t)=
 e^{-q_0t/\varepsilon}(1+(t/\varepsilon)^N)$.
 Integrating (3.20) with respect to $t$ and using (3.21), we get
 $$ \aligned
 \| {\Cal R}_{l1}&(\cdot,t)\| _{s,m}^2+
 \varepsilon\| {\Cal R}_{l2}(\cdot,t)\| _{s,n-m}^2+
 2q_0 \int_0^t\,\| {\Cal R}_{l2}(\cdot,\theta)\| _{s,n-
m}^2\,d\theta \\
\le& \| {\Cal R}_{l1}(\cdot,0)\| _{s,m}^2+
 \varepsilon\| {\Cal R}_{l2}(\cdot,0)\| _{s,n-m}^2+
 C(T)(\varepsilon^{N-l}\int_0^t\,\kappa(\theta) \| {\Cal R}_{l1}
 (\cdot,\theta)\| _{s,m}\,d\theta\\
 &+
 \int_0^t\,(\varepsilon^{N+1}+\kappa(\theta) \varepsilon^{N-l+1})
 \| {\Cal R}_l(\cdot,\theta)\|_{s,n} \,d\theta), \quad 0\le t\le T.
 \endaligned \tag 3.22
 $$
 Note that
 $$
 {\Cal R}_l(\cdot,0)=\sum ^{l-1}_{\nu=0}\,(-A^{-1}(B(\partial_x)
 +G))^{l-\nu-1}A^{-1} \partial ^{\nu}_t {\Cal F}(\cdot,0),\qquad l\ge 1,
 $$
 and according to (2.5) ${\Cal R}_0(\cdot,0)=0$. Therefore,
  using (3.14), (3.15) and the equality  $A^{-1}A_0=A_0$, we have
 $$ \align
 \| A^{-1}\partial ^{\nu}_t {\Cal F}(\cdot,0)\| _{s,n}
 \le&
 \varepsilon ^{N+1}\| (A^{-1}P_1\partial ^{\nu}_tV_N)(\cdot,0)
 \| _{s,n} + \varepsilon ^{N+1-\nu}\| (A^{-1}L_1\partial
^{\nu}_{\tau}Z_N)(\cdot,0)
 \| _{s,n}\\
 &+ \varepsilon ^{N-\nu}
 \| A_0(L_0\partial ^{\nu}_{\tau}Z_N +
 L_1\partial ^{\nu}_{\tau}Z_{N-1})(\cdot,0)\| _{s,n}\\
 \le& C(T)(\varepsilon ^N + \varepsilon ^{N-\nu})\le C(T)\varepsilon ^{N-
\nu}, \quad 0<\varepsilon <1,\; 0\le \nu \le N,
 \endalign $$
 from which it follows that
 $$ \aligned
 \| {\Cal R}_l(\cdot,0)\| _{s,n} \le&
 \sum_{\nu=0}^{l-1}\;\| (A^{-1}(B(\partial_x)
 +G))^{l-\nu-1}A^{-1} \partial ^{\nu}_t {\Cal F}(\cdot,0)\| _{s,n}\\
 \le& C(T)\sum _{\nu=0}^{l-1}\;\varepsilon ^{-(l-\nu -1)}\cdot
 \varepsilon ^{N-\nu}\\
 \le& C(T) \varepsilon^{N-l+1}.\endaligned
 \tag 3.23
 $$
 Further, if $l<N+1$ and $\varepsilon$ is small, then for
 $0\le t\le T$ we have the estimates
 $$ \aligned
 \int_0^t\,\kappa(\theta) \| {\Cal R}_{l1}
 (\cdot,\theta)\| _{s,m}\,d\theta
 \le&  \int_0^t\,\kappa(\theta) \,d\theta+
 \int_0^t\,\kappa(\theta) \| {\Cal R}_{l1}
 (\cdot,\theta)\| ^2_{s,m}\,d\theta \\
 \le& C(T)\varepsilon + \int_0^t\,\kappa(\theta) \| {\Cal R}_{l1}
 (\cdot,\theta)\| ^2_{s,m}\,d\theta, \endaligned
 \tag 3.24
 $$
 and
 $$\aligned
 C(T)&\int_0^t\,(\varepsilon^{N+1}+\kappa(\theta) \varepsilon^{N-l+1})
 \| {\Cal R}_l(\cdot,\theta)\|_{s,n} \,d\theta \\
 \le& C(T)\varepsilon ^{N-l+1} +
 q_0\int_0^t\,\| {\Cal R}_{l2}(\cdot,\theta)\| ^2_{s,n-m}
\,d\theta \\
&+ C(T)\int_0^t\,(\varepsilon^{N+1}+\kappa(\theta) \varepsilon^{N-l+1})
 \| {\Cal R}_{l1}(\cdot,\theta)\| ^2_{s,m} \,d\theta.
\endaligned  \tag 3.25
 $$
 Then, due to the estimates (3.23), (3.24) and (3.25), the inequality
 (3.22) takes the form
 $$ \align
 \| {\Cal R}_{l1}&(\cdot,t)\| _{s,m}^2+
 \varepsilon \| {\Cal R}_{l2}(\cdot,t)\| _{s,n-m}^2+
 q_0 \int_0^t\,\| {\Cal R}_{l2}(\cdot,\theta)\| _{s,n-
m}^2\,d\theta \\
\le& C(T)(\varepsilon^{N-l+1}+\int_0^t\,(\varepsilon^{N+1}
 +\kappa(\theta) \varepsilon^{N-l})
 \| {\Cal R}_{l1}(\cdot,\theta)\| ^2_{s,m}\,d\theta ), \;0\le t\le T.
 \endalign$$
 Thanks to Gronwall's lemma, from the last inequality we get the
estimates
 $$
 \| {\Cal R}_{l1}(\cdot,t)\|^2_{s,m} \le C(T)\varepsilon^{N-l+1},
 \quad 0\le t\le T,
 \tag 3.26
 $$
and $$
 \varepsilon \| {\Cal R}_{l2}(\cdot,t)\|^2_{s,n-m}
 +q_0\int^t_0\,\| {\Cal R}_{l2}(\cdot,\theta)\|^2_{s,n-
m}\,d\theta
 \le C(T)\varepsilon^{N-l+1},\;0\le t\le T.
 \tag 3.27
 $$
 From (3.27) and (3.23) we obtain the estimate
 $$ \align
 \| {\Cal R}_{l2}(\cdot,t)\|^2_{s,n-m}
 \le& \| {\Cal R}_{l2}(\cdot,0)\|^2_{s,n-m}
 +2\int^t_0\,\| {\Cal R}_{l2}(\cdot,\theta)\| _{s,n-m}
 \| {\Cal R}_{(l+1)2}(\cdot,\theta)\| _{s,n-m}\;d\theta \\
 \le &   C(T)\varepsilon ^{2(N-l+1)}
  +2\left(\int^t_0\,\| {\Cal R}_{l2}(\cdot,\theta)\| ^2_{s,n-m}\;
 d\theta\right)^{1/2} \times \\
 &\left( \int ^t_0\;\| {\Cal R}_{(l+1)2}(\cdot,\theta)\| ^2_{s,n-m}\;
 d\theta\right)^{1/2}     \tag 3.28 \\
 \le& C(T)\varepsilon^{N-l+1/2}, \quad 0\le t\le T.
\endalign
 $$
 The estimates (3.26) and (3.28) imply the estimate (3.19).
 Therefore, Theorem 3.5 is proved.
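
In the model example, Theorem 3.5 with $N=0$, $l=0$ gives
$$
\Big\| U(\varepsilon,\cdot,t)-V_0(\cdot,t)-\left(\matrix 0 \\
(u_1-w_0(\cdot,0))\,e^{-t/\varepsilon}
\endmatrix \right)\Big\| _{s,2}\le C(T)\,\varepsilon^{1/4},
\quad 0\le t\le T,
$$
where $v_0$ solves the limit heat equation: away from $t=0$ the
solution of the hyperbolic problem is approximated by that of the
parabolic one, while near $t=0$ the second component develops an
initial layer of width $O(\varepsilon)$.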

\head 4. Proof of Lemmas \endhead

\noindent{\bf Proof of Lemma 3.1.} To prove this lemma we shall use the
method of
simultaneous reduction of two matrices to diagonal form \cite{13}.
As $G^*_{03}=G_{03}$ and $G_{03}>0$, there exists an orthogonal
matrix $T_1\in M^{n-m}(\Bbb R)$, $T^*_1T_1=I_{n-m}$, such that
$T_1^*G_{03}T_1=
\Lambda _0^2=\operatorname{diag} (\lambda _1,\dots,\lambda_{n-m})$, where
$\lambda_k>0$, $k=1,\dots,n-m$, are the eigenvalues of the matrix
$G_{03}$.
Let $C(\xi)=\Lambda _0^{-1}T^*_1b_{03}(\xi)T_1\Lambda _0^{-1}$.
As the matrix $C(\xi)$ is real symmetric, there exists an
orthogonal matrix
$T_2(\xi)\in M^{n-m}(\Bbb R)$ such that $T^*_2C(\xi)T_2=\Lambda
(\xi)=\operatorname{diag}(\mu_1(\xi),\dots,\mu_{n-m}(\xi))$, where
$\mu_1(\xi),\dots,\mu_{n-m}(\xi)$ are the real
eigenvalues of the matrix $C(\xi)$. Thus we have
$$
T^*(\xi)G_{03}T(\xi)=I_{n-m},\qquad T^*(\xi)b_{03}(\xi)T(\xi)=\Lambda
(\xi),
\tag 4.1
$$
where $T(\xi)=T_1\Lambda_0^{-1}T_2(\xi)$. From (4.1) it follows that
$$
G_{03}+i|\xi|b_{03}(\xi)=(T^*(\xi))^{-1}(I_{n-m}+i|\xi| \Lambda(\xi))T^{-
1}(\xi).
$$
This means that the matrix $G_{03}+i|\xi|b_{03}(\xi)$
is invertible and
$$
(G_{03}+i|\xi|b_{03}(\xi))^{-1}=T(\xi)\Lambda_1(\xi)(I_{n-m}-
i|\xi|\Lambda(\xi))
T^*(\xi),
\tag 4.2
$$
where $\Lambda _1(\xi) =\operatorname{diag} ((1+|\xi|^2\mu _1^2)^{-
1},\dots,(1+|\xi|^2\mu_{n-m}^2)^{-1})$.
The orthogonality of the matrix $T_2(\xi)$
implies the boundedness of the function $\xi\to T(\xi)$ on $\Bbb
R^d$.
Then the boundedness of the matrix $(G_{03}+i|\xi|b_{03}(\xi))^{-1}$
follows
from (4.2). Lemma 3.1 is proved. \medskip

\noindent{\bf Proof of Lemma 3.2.} Let us substitute (4.2) into (3.3). Then we
obtain the representation (3.4), where
$$\aligned
K_0(\xi)=& G_{01}-G_{02}T\Lambda _1T^*G^*_{02}-
|\xi|^2(G_{02}T\Lambda _1
\Lambda T^*b_{02}^*+b_{02}T\Lambda _1\Lambda T^*G^*_{02}),\\
K_1(\xi)=&b_{01}+G_{02}T\Lambda_1\Lambda T^*G_{02}^*-
G_{02}T\Lambda_1T^*b^*_{02} \\
&-b_{02}T\Lambda_1T^*G^*_{02}-|\xi|^2b_{02}T\Lambda_1\Lambda
T^*b^*_{02},\\
K_2(\xi)=&b_{02}T\Lambda_1T^*b^*_{02}. \endaligned
$$
It is easy to see that $K_j(\xi)$, $j=0,1,2$ are bounded on $\Bbb
R^d$, and
$K_1^*=K_1$, $K_2^*=K_2$. It remains to prove that $K_2\ge 0$.
According
to Ostrowski's theorem \cite{14, p.270}, denoting by $\lambda
_j(A)$, $j=1,\dots, m$ the eigenvalues of real symmetric matrix $A$,
$\lambda_1\le\lambda_2\le\dots\le\lambda_m$, we have
$\lambda_j(K_2(\xi))=\lambda_j(b_{02}T\Lambda_1T^*b^*
_{02})=\theta_j \lambda_j(\Lambda_1)\ge 0$, where
$0\le\lambda_1(b_{02}TT^*b^*_{02})
\le \theta_j \le \lambda_m(b_{02}TT^*b^*_{02})$. This means that
$K_2\ge 0$. Therefore, Lemma 3.2 is proved.

\Refs

 \ref\no 1\by A. N. Tikhonov \paper The dependence of the solutions of
 differential equations on a small parameter \jour Mat. Sb.
\vol 22 \yr 1948 \pages 193--204 \paperinfo (in Russian) \endref

 \ref\no 2\by M. I. Vishik, L. A. Lyusternik \paper  Regular degeneration
and  boundary layer for linear differential equations with a small
parameter   multiplying the highest derivatives
\jour Usp. Mat. Nauk \vol 12 \yr 1957
\pages 3--122 \paperinfo (in Russian) \endref

 \ref\no 3\by V. A. Trenogin \paper Development and applications of the
 Lyusternik-Vishik method \jour Russian Math. Surveys \vol 25 \yr 1970
 \pages 119--156 \endref

 \ref\no 4\by A. V. Vasilieva, V. F. Butuzov \book Asymptotic
methods in the theory of singular perturbations
\publ Vysshaya Shkola \publaddr Moscow \yr 1990 \bookinfo (in Russian)
 \endref

 \ref\no 5\by S. A. Lomov \book Introduction to the general theory of
 singular perturbations \publ Nauka \publaddr Moscow \yr 1981
 \bookinfo (in Russian) \endref

 \ref\no 6\by A. M. Ilyin \book Matching of asymptotic expansions of
solutions of boundary value problems \publ Nauka \publaddr Moscow
\yr 1989 \bookinfo (in Russian) \endref

 \ref\no 7\by J. L. Lions \book Perturbations singuli\`eres dans les
probl\`emes aux limites et en contr\^ole optimal
\bookinfo Lecture Notes in Math., Vol. 323
\publ Springer-Verlag \yr 1973 \endref

 \ref\no 8\by V. Dragan, A. Halanay \book The singular perturbations.
 The asymptotic expansions \publ Ed. Academ. Rom\^an\u{a}
\publaddr Bucure\c{s}ti \yr 1989 \bookinfo (in Romanian) \endref

\ref\no 9\by A. Perjan \paper The asymptotic expansions for the
symmetric hyperbolic systems with a small parameter
\jour Buletinul A\c{S}RM (ser. mathem.) \vol 3 No. 28 \yr 1998
\pages 119--129 \endref

\ref\no 10\by S. G. Krein \book Linear differential equations in
Banach space \publ Nauka \publaddr Moscow \yr 1967 \bookinfo (in Russian) \endref

\ref\no 11\by A. Pazy \book Semigroups of linear operators and
applications to partial differential equations \publ Springer-Verlag
\publaddr New York \yr 1983 \endref

 \ref\no 12\by Gh. Morosanu, A. Perjan \paper The singular limit of
telegraph equations \jour Comm. Appl. Nonlinear Analysis \vol 5 No. 1
\yr 1998 \pages 91--106 \endref

\ref\no 13\by M. Marcus, H. Minc \book Matrix theory and matrix
inequalities \publ Nauka \publaddr Moscow \yr 1972 \bookinfo (in Russian) \endref

\ref\no 14\by R. A. Horn, C. R. Johnson \book Matrix analysis
\publ Mir \publaddr Moscow \yr 1989 \bookinfo (in Russian) \endref

\endRefs
\enddocument
