\documentclass[reqno]{amsart} \usepackage{hyperref} \AtBeginDocument{{\noindent\small \emph{Electronic Journal of Differential Equations}, Vol. 2008(2008), No. 54, pp. 1--6.\newline ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu \newline ftp ejde.math.txstate.edu (login: ftp)} \thanks{\copyright 2008 Texas State University - San Marcos.} \vspace{9mm}} \begin{document} \title[\hfilneg EJDE-2008/54\hfil Liapunov exponents] {Liapunov exponents for higher-order linear differential equations whose characteristic equations have variable real roots} \author[M. I. Gil'\hfil EJDE-2008/54\hfilneg] {Michael I. Gil'} \address{Michael I. Gil' \newline Department of Mathematics \\ Ben Gurion University of the Negev \\ P.O. Box 653, Beer-Sheva 84105, Israel} \email{gilmi@cs.bgu.ac.il} \thanks{Submitted December 27, 2007. Published April 15, 2008.} \thanks{Supported by the Kamea Fund of Israel} \subjclass[2000]{34A30, 34D20} \keywords{Linear differential equations; Liapunov exponents; \hfill\break\indent exponential stability} \begin{abstract} We consider the linear differential equation $$ \sum_{k=0}^n a_k(t)x^{(n-k)}(t)=0,\quad t\geq 0, \; n\geq 2, $$ where $a_0(t)\equiv 1$ and $a_k(t)$ are continuous bounded functions. Assuming that all the roots of the polynomial $z^n+a_1(t)z^{n-1}+ \dots +a_n(t)$ are real and satisfy the inequality $r_k(t)<\gamma$ for $t\geq 0$ and $k=1, \dots, n$, we prove that the solutions of the above equation satisfy $|x(t)|\leq \mathop{\rm const} e^{\gamma t}$ for $t\geq 0$. 
\end{abstract} \maketitle \numberwithin{equation}{section} \newtheorem{theorem}{Theorem}[section] \newtheorem{lemma}[theorem]{Lemma} \section{Introduction and statement of the main result} Consider the scalar equation \begin{equation} \sum_{k=0}^n a_k(t)D^{n-k}x(t)=0,\quad t>0, \label{e1.1} \end{equation} where $D^kx(t):=\frac{ d^k x(t)}{dt^k}$, $a_0(t)\equiv 1$, and $a_k(t)$ are continuous functions defined and bounded on $[0,\infty)$ for $k=1, \dots, n$. As initial conditions, we have \begin{equation} x^{(k)}(0)=x_{0k} \quad (x_{0k}\in\mathbb{R};\; k= 0, \dots, n-1). \label{e1.2} \end{equation} A solution of problem \eqref{e1.1}--\eqref{e1.2} is a function $x(t)$ having continuous derivatives up to order $n$ and satisfying \eqref{e1.1} and \eqref{e1.2} for all $t>0$. Put $$ P(z, t)=\sum_{k=0}^n a_k(t)z^{n-k}\quad (z\in\mathbb{C}). $$ Levin \cite[Section 5]{lea} proved the following result, among other remarkable results: Suppose that the roots $r_1(t), \dots, r_n(t)$ of $P(z, t)$ for each $t\geq 0$ are real and satisfy \begin{equation} \nu_0\leq r_1(t)< \nu_1\leq r_2(t)< \nu_2\leq \dots < \nu_{n-1} \leq r_n(t)\leq \gamma\quad (t\geq 0), \label{e1.3} \end{equation} where $\nu_j$ ($j=0, \dots, n-1$) and $\gamma$ are constants. Then any solution $x(t)$ of \eqref{e1.1} satisfies the inequality \begin{equation} |x(t)|\leq {\rm const}\; e^{\gamma t}\quad (t\geq 0). \label{e1.4} \end{equation} This result is very useful for various applications, see for instance \cite{gi04,gi05,lib} and references therein. The aim of this paper is to prove the following theorem. \begin{theorem} \label{thm1.1} Assume that all the roots $r_k(t)$ of polynomial $P(z,t)$ for each $t\geq 0$ are real and \begin{equation} r_k(t)<\gamma\quad (t\geq 0;\; k=1, \dots, n) \label{e1.5} \end{equation} with a constant $\gamma<\infty$. Then any solution $x(t)$ of \eqref{e1.1} satisfies inequality \eqref{e1.4}. \end{theorem} This theorem is proved in the next section. 
Condition \eqref{e1.5} is weaker than \eqref{e1.3}, since \eqref{e1.3} does not allow the roots to intersect. Theorem \ref{thm1.1} supplements the very interesting recent investigations of asymptotic behavior of solutions of differential equations, cf. \cite{be,ca,car,ho,il,mo}. Clearly, Theorem \ref{thm1.1} gives us the exponential stability conditions. Note that the problem of stability analysis of various linear differential equations continues to attract the attention of many specialists despite its long history \cite{de,gi05a,hov,li,tu}. It is still one of the most pressing problems of the theory of differential equations. The basic method for the stability analysis of differential equations is the direct Liapunov method. By this method many very strong results are obtained, but finding Liapunov's functions is often connected with serious mathematical difficulties. At the same time, Theorem \ref{thm1.1} gives us the exact explicit stability conditions. \section{Proof of Theorem \ref{thm1.1}} Put $R_+:=[0,\infty)$ and denote by $C(R_+)$ the Banach space of functions continuous and bounded on $R_+$ with the sup norm $\|\cdot\|$. Let us consider the nonhomogeneous equation \begin{equation} \sum_{k=0}^n a_k(t)D^{n-k}v(t)=f(t), \quad t>0, \label{e2.1} \end{equation} where $f\in C(R_+)$ and with the zero initial conditions \begin{equation} v^{(k)}(0)=0 \quad (k=0, 1, \dots, n-1). \label{e2.2} \end{equation} Introduce the set $$ \mathop{\rm Dom}(L):=\{w\in C(R_+): w^{(k)}\in C(R_+),\; w^{(k)}(0)=0\;( k=0, 1, \dots, n-1)\}. $$ \begin{lemma} \label{lem2.1} Under the hypothesis of Theorem \ref{thm1.1}, with $\gamma<0$, problem \eqref{e2.1}--\eqref{e2.2} has a unique solution $v\in \mathop{\rm Dom}(L)$. Moreover, $$ \|v\|\leq \frac{\|f\|}{|\gamma|^{n}}. $$ \end{lemma} \begin{proof} For $w$ in $\mathop{\rm Dom}(L)$, define the operator $$ Lw(t):=P(t, D)w=\sum_{k=0}^n a_k(t)D^{n-k}w(t). $$ Thus \eqref{e2.1} can be written as $Lv(t)=f(t)$. 
Since the coefficients of equation \eqref{e2.1} are bounded, the roots of $P(z,t)$ are bounded on $R_+$. Thus, $$ r_k(t)\geq -\alpha\quad (t\geq 0;\;k=1, 2, \dots, n) $$ for a finite positive number $\alpha$. On $\mathop{\rm Dom}(L)$ also define the operator $L_0$ by $$ L_0f(t):= (D+\alpha )^nf(t)=(\frac{d}{dt}+\alpha)^n f(t). $$ Then the inverses to $L$ and $L_0$ satisfy the relations \begin{equation} L^{-1} = L_0^{-1} L_0 L^{-1}=L_0^{-1} (L L_0^{-1} )^{-1}. \label{e2.3} \end{equation} Below we check that $L_0$ and $L L_0^{-1}$ are indeed invertible. By the Laplace transform for any $y\in C(R_+)$ we have $$ L_0^{-1} y(t)=\frac 1{2\pi i} \int_{-i\infty}^{i\infty} \frac{ e^{\lambda t} \tilde y(\lambda)} {(\lambda+\alpha )^n}\;d\lambda $$ where $\tilde y$ is the Laplace transform of $y$. So $$ f_0(t):=(LL_0^{-1} y)(t)=\frac 1{2\pi i} \int_{-i\infty}^{i\infty} \frac{ e^{\lambda t} P(\lambda, t)\tilde y(\lambda)d\lambda}{(\lambda+\alpha )^n}. $$ Hence, $$ f_0(t)=\frac 1{2\pi i} \int_{-i\infty}^{i\infty} e^{\lambda t} \tilde y(\lambda) \prod_{k=1}^n \frac{\lambda-r_k(t)}{\lambda+\alpha }\;d\lambda. $$ Put $$ F(t, \nu)=\frac 1{2\pi i} \int_{-i\infty}^{i\infty} e^{\lambda t} \tilde y(\lambda) \prod_{k=1}^n \frac{\lambda-r_k(\nu)}{\lambda+\alpha }\;d\lambda\quad (t, \nu\geq 0). $$ Thus $F(t, t)=f_0(t)$. We can write out $$ F(t,\nu)=\frac 1{2\pi i} \int_{-i\infty}^{i\infty} e^{\lambda t} \tilde y_1(\lambda, \nu) \frac{\lambda-r_1(\nu)}{\lambda+\alpha }\;d\lambda $$ where $$ \tilde y_j(\lambda, \nu):= \prod_{k=j+1}^n \frac{\lambda-r_k(\nu)}{\lambda +\alpha }\tilde y(\lambda)= \tilde y_{j+1}(\lambda, \nu) \frac{\lambda-r_{j+1}(\nu)}{\lambda+\alpha } $$ where $j=1, \dots, n-1$ and $\tilde y_n(\lambda, \nu):=\tilde y(\lambda)$.
% NOTE(review): a passage of the source is garbled/missing at this point (the equation
% labels jump from (2.3) to (2.6), so equations (2.4)--(2.5) appear to be lost).
% The following sentence is a minimal reconstruction consistent with \eqref{e2.6};
% verify against the published paper.
Denote by $y_j(t, \nu)$ the inverse Laplace transform of $\tilde y_j(\lambda, \nu)$. Since $\gamma<0$, there is a constant $\beta>0$ such that $-r_k(\nu)> \beta$ ($\nu\geq 0$) and \begin{equation} |y_j(t, \nu)|\geq |y_{j+1}(t, \nu)|-(\alpha-\beta)\int_0^t e^{-\alpha(t-s)}|y_{j+1}(s, \nu)| ds. \label{e2.6} \end{equation} Thus, with the notation $$ \eta_j:=\sup_{t\geq 0}|y_{j}(t, t)|\;\;(j