\documentclass[reqno]{amsart}
\usepackage{hyperref}

\AtBeginDocument{{\noindent\small
\emph{Electronic Journal of Differential Equations},
Vol. 2008(2008), No. 95, pp. 1--12.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu  (login: ftp)}
\thanks{\copyright 2008 Texas State University - San Marcos.}
\vspace{9mm}}

\begin{document}
\title[\hfilneg EJDE-2008/95\hfil Linear matrix differential equations]
{Linear matrix differential equations of higher-order and
applications}

\author[R. Ben Taher, M. Rachidi \hfil EJDE-2008/95\hfilneg]
{Rajae Ben Taher, Mustapha Rachidi}  % in alphabetical order

\address{Rajae Ben Taher \newline
 D\'epartement de Math\'ematiques et Informatique \\
 Facult\'e des Sciences, \newline
 Universit\'e Moulay Ismail \\
 B.P. 4010, Beni M'hamed, M\'ekn\'es, Morocco}
\email{bentaher@fs-umi.ac.ma}

\address{Mustapha Rachidi \newline
Mathematics Section LEGT - F. Arago, Acad\'emie de Reims \\
1, Rue F. Arago, 51100 Reims, France}
\email{mu.rachidi@hotmail.fr}

\thanks{Submitted February 25, 2007. Published July 5, 2008.}
\subjclass[2000]{15A99, 40A05, 40A25, 15A18}
\keywords{Algebra of square matrices;
linear recursive relations; matrix powers; \hfill\break\indent
 matrix exponential; linear matrix differential equations}

\begin{abstract}
 In this article, we study linear differential equations
 of higher-order whose coefficients are square matrices.
 The combinatorial method for computing the matrix powers
 and exponential is adopted. New formulas representing
 auxiliary results are obtained. This allows us to prove
 properties of a large class of linear matrix differential
 equations of higher-order, in particular results of Apostol and
 Kolodner are recovered. Also illustrative examples and
 applications are presented.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{example}[theorem]{Example}
\newtheorem{remark}[theorem]{Remark}

\section{Introduction}\label{section1}

Linear matrix differential equations are important in many fields of
mathematics and their applications. Among the most simple and
fundamental are  the first order linear systems with constant
coefficients,
\begin{equation}\label{Equadiff-12}
X'(t)=AX(t), \quad \text{such that } X(0)=1_d,
\end{equation}
where $1_d$ is the identity matrix of $\mathcal{{M}}(d; \mathbb{C})$,
$A\in \mathcal{M}(d; \mathbb{C})$ and
$X\in {\mathcal{C}}^{\infty}(\mathbb{R}; \mathcal{{M}}(d; \mathbb{C}))$.
For the sake of simplicity we set in
the sequel ${ \mathcal{A}}_d=\mathcal{{M}}(d; \mathbb{C})$. The system
(\ref{Equadiff-12}) has been extensively studied;
its solutions depend closely on the computation of
$e^{tA}$ ($t\in \mathbb{R}$). To perform this computation, many
theoretical and numerical methods have been developed (see
\cite{br1, br2, br3, cy, ga, leo, mv, mr2, rp, vs} for example).
The combinatorial method is among those considered recently for
obtaining  some practical expressions of $A^n$ ($n\geq d$) and
$e^{tA}$ ($t\in \mathbb{R}$) (see \cite{br1, br2, mr2}). Techniques of
this method are based on properties of some linear recursive
sequences (see \cite{le, mr1} for example), known in the
literature as $r$-generalized Fibonacci sequences (to abbreviate
we write $r$-GFS).

Let $A_0,\dots, A_{s-1}$ be in ${ \mathcal{A}}_d$ and consider
the linear matrix differential equation of higher-order
\begin{equation}\label{Equadiff-1}
X^{(s)}(t)=A_0X^{(s-1)}(t)+\dots+A_{s-1}X(t),
\end{equation}
subject to the initial conditions $X(0)$, $X'(0)$,
$\dots$, $X^{(s-1)}(0)$. To study  \eqref{Equadiff-1} it is customary
to write it under its
equivalent form as the linear first order system
(\ref{Equadiff-12}). More precisely, \eqref{Equadiff-1}
takes the form $Y'(t)=BY(t)$, where $B\in {\mathcal{A}}_{ds}$ and
$Y \in {\mathcal{C}}^{\infty}(\mathbb{R};{\mathcal{A}}_{ds})$ (see Section
4 for more details). For $s=2$ properties of the linear
matrix differential equation \eqref{Equadiff-1},
\begin{equation}\label{Equadiff-3}
X''(t)=A_0X'(t)+A_1X(t),
\end{equation}
 have been studied (directly) by Apostol and Kolodner
(see \cite{ap, ko}) without
 appealing to its equivalent form as equation (\ref{Equadiff-12}).

The main purpose of this paper is to study solutions of a large
class of linear matrix differential equations \eqref{Equadiff-1},
whose coefficients are in the algebra $\mathcal{A}_d$. First
we consider \eqref{Equadiff-1} when
$A_0=\dots=A_{r-2}=\Theta_d$ (the zero matrix of
$\mathcal{A}_d$) and $A_{r-1}=A\neq  \Theta_d$. Solutions to
these matrix differential equations are expressed in terms
of the coefficients of the polynomial $P(z)=z^r-a_0z^{r-1}-\dots
-a_{r-1}$ satisfying $P(A)=\Theta_d$ and matrices $A^0=1_d$, $A$,
$\dots$, $A^{r-1}$. Moreover, these solutions are also described
with the aid of the zeros of the polynomial $P(z)$. Furthermore,
the case $r=2$ is improved and the fundamental results of
Apostol-Kolodner are recovered and their extension is established.
Secondly, in light of a survey of the general equation
\eqref{Equadiff-1}, we manage to supply their solutions under a
combinatorial form.


In Section 2 we  recall auxiliary
results on the powers and exponential of an element
$A\in\mathcal{A}_d$. In Section 3,  we study the class
of linear matrix differential equations of higher-order
\eqref{Equadiff-1} associated to $A_0=\dots=A_{s-2}=\Theta_d$ and
$A_{s-1}=A$ (we call this class the {\it higher-order linear
matrix differential equations of Apostol type}) and recover the results of
Apostol-Kolodner. In Section 4 we consider the combinatorial aspect
of the matrix powers and exponential in order to
explore the general setting of linear matrix differential
equations \eqref{Equadiff-1}. Finally,
concluding remarks are stated and a future problem is formulated
in Section 5.

\section{Auxiliary results on the powers and exponential of a matrix}
\label{section2}

We review here some basic results on the matrix powers and
exponential needed in the next sections. That is, we recall some
results of \cite{br1, br2, br3} and set forth a new result in
Proposition \ref{prop-24}. To begin, let $A_0,\dots, A_{r-1}$ and
$S_0,\dots, S_{r-1}$ be two finite sequences of $\mathcal{A}_d$
with $A_{r-1}\neq  \Theta_d$. Consider the recursive sequence
$\{Y_n\}_{n\geq 0}$ such that $Y_n=S_n$ for $0\leq n\leq r-1$ and
\begin{equation}\label{recursive-1}
Y_{n+1}=A_0Y_n+\dots+A_{r-1}Y_{n-r+1}, \quad \text{for every } n\geq r-1.
\end{equation}
 When $A_jA_k=A_kA_j$, for every $0\leq j,k\leq r-1$ and
following the same straightforward computation as in \cite{br1, mr2}
we obtain,
\begin{equation}\label{combinat-1}
Y_n=\rho(n,r)W_0+\dots+\rho(n-r+1,r)W_{r-1}, \quad\text{for every }
 n\geq r,
\end{equation}
 where $W_p=A_{r-1}S_p+\dots+A_pS_{r-1}$ ($p=0,1,\dots, r-1$) and
\begin{equation}\label{rho-1}
\rho(n,r)=\sum_{k_0+2k_1+\dots +rk_{r-1}=n-r} \frac{(k_0+\dots
+k_{r-1})!}{k_0!k_1!\dots k_{r-1}!}A_0^{k_0}A_1^{k_1}\dots
A_{r-1}^{k_{r-1}},
\end{equation}
 for every $n\geq r$, with $\rho(r,r)=1_d$ and $\rho(n,r)=\Theta_d$
for $n\leq r-1$ (see \cite{br1, br2, leo, mr1, mr2}). The
computation of the powers $A^n$ ($n\geq r$) follows by a direct
application of \eqref{combinat-1}-\eqref{rho-1}. Indeed, the
polynomial equation $P(A)=\Theta_d$ shows that $A^{n+1}=
a_0A^{n}+\dots+a_{r-1}A^{n-r+1}$, for every $n\geq r-1$, and by the
way the sequence $\{A^n\}_{n\geq 0}$ is an $r$-GFS in
$\mathcal{A}_d$, whose coefficients and initial values are
$A_0=a_0,\dots , A_{r-1}=a_{r-1}$, $S_0=A^0=1_d, \dots,
S_{r-1}=A^{r-1}$ (respectively). In light of \cite{br1, mr2} we have
the following characterization of the powers of $A$,
\begin{equation}\label{powers-1}
A^n=\sum_{p=0}^{r-1}\Big(\sum_{j=0}^pa_{r-p+j-1}\rho(n-j,r)\Big)A^p,\;
\text{for every } n\geq r,
\end{equation}
 where
\begin{equation}\label{rho-2}
\rho(n,r)=\sum_{k_0+2k_1+\dots+rk_{r-1}=n-r}\frac{(k_0+\dots+k_{r-1})!}{k_0!k_1!\dots
k_{r-1}!}a_0^{k_0}\dots a_{r-1}^{k_{r-1}}, \quad \text{for }
n\geq r,
\end{equation}
with $\rho(r,r)=1$ and $\rho(n,r)=0$ for $n\leq r-1$
(see \cite{br1, mr2}). The matrix exponential
$e^{tA}$ ($A\in\mathcal{A}_d$) is
defined as usual by the series
$e^{tA}=\sum_{n=0}^{+\infty}(t^n/n!)A^n$. It turns out following
 \cite{br1} that direct computation using
(\ref{powers-1})-\eqref{rho-2} allows us to derive that the unique
solution of the matrix differential equation (\ref{Equadiff-12}),
satisfying the initial condition $X(0)=1_d$, is
$X(t)=e^{tA}=\sum_{k=0}^{r-1}\Omega_k(t)A^k$ such that
\begin{equation}\label{omega-1}
\Omega_k(t)=\frac{t^k}{k!}+\sum_{j=0}^ka_{r-k+j-1}\omega_j(t),\quad
(0\leq k\leq r-1),
\end{equation}
with
\[
\omega_j(t)=\sum_{n=0}^{+\infty}\rho(n-j,r)\frac{t^n}{n!}
\quad (0\leq j\leq r-1)
\]
 and  $\rho(n,r)$ given by \eqref{rho-2}.


As shown here the preceding expressions of $A^n$ ($n\geq r$) and
$e^{tA}$ are formulated in terms of the coefficients of the
polynomial $P(z)=z^r-a_0z^{r-1}-\dots-a_{r-1}$ and the first $r$
powers $A^0=1_d$, $A$, $\dots$, $A^{r-1}$. The goal now is to
express $A^n$ ($n\geq r$) and $e^{tA}$ using the roots of the
polynomial $P(z)=z^r-a_0z^{r-1}-\dots -a_{r-1}$ satisfying
$P(A)=\Theta_d$. To this aim, let $\lambda_1, \dots , \lambda_s$
be the distinct roots of $P(z)=z^r-a_0z^{r-1}-\dots-a_{r-1}$ of
multiplicities $m_1,\dots,m_{s}$ (respectively). For every $j$
($1\leq j\leq s$) we consider the sequence $\{b_{i,j}\}_{0\leq
i\leq m_j-1}$ such that $b_{i,j}=0$ for
$i>m_1+\dots+m_{j-1}+m_{j+1}+\dots+m_s$ and otherwise
\begin{equation}\label{bij}
b_{i,j}=\sum_{h_1+\dots+h_{j-1}+h_{j+1}+\dots+h_s=i,\; h_d\leq m_d}
\prod_{d=1,d\neq j}^s\binom{m_d}{h_d}(\lambda_j-\lambda_d)^{m_d-h_d}.
\end{equation}
For every $j$ ($1\leq j\leq s$), let $\{\alpha_{i,j}\}_{0\leq i\leq
m_j-1}$ be the sequence  defined by $\alpha_{0,j}=1$ and
\begin{equation}\label{alpha-ij}
\alpha_{i,j}=\frac{-1}{b_{0,j}}
\left(b_{1,j}\alpha_{i-1,j}+b_{2,j}\alpha_{i-2,j}+\dots+b_{i-1,j}
\alpha_{1,j}+b_{i,j}\alpha_{0,j}\right),
\end{equation}
where the $b_{i,j}$ are given by $(\ref{bij})$. Recall that
$\{b_{i,j}\}_{0\leq i\leq m_j-1}$ and $\{\alpha_{i,j}\}_{0\leq i\leq
m_j-1}$ have been introduced in \cite{br2} for computing the powers
of a matrix $A\in \mathcal{A}_d$. Besides, the exploration of
Expression (4.15) in \cite{vs1} and Proposition 4.3 in \cite{br3}
allows us to obtain an explicit formula for the $\alpha_{i,j}$ as
follows,
\begin{equation}\label{alpha-3}
\alpha_{i,j}=(-1)^i\sum_{h_1+h_2
+ \dots +h_{j-1}+ h_{j+1}+\dots+h_s=i,\; h_d \leq
m_d}\prod_{d=1,\, d\neq j}^s\binom{m_d+h_d-1}{h_d}\left(\lambda_j-\lambda_d
\right)^{- h_d }.
\end{equation}
With the aid of the basis of the Lagrange-Sylvester interpolation
polynomials as in \cite{ga}, we obtain the following result.

\begin{proposition}\label{prop-23}
Let $A$ be in $\mathcal{A}_d$ such that
$p(A)=\prod_{j=1}^s(A-\lambda_j1_d)^{m_j}=\Theta_d$
($\lambda_i\neq \lambda_j$, for $i\neq  j$). Then
\begin{equation}\label{exp-1}
\begin{aligned}
e^{tA}& = \sum_{j=1}^se^{\lambda_jt}\sum_{k=0}^{m_j-1}f_{j,k}(t)
 (A-\lambda_j1_d)^k q_j(A), \\
& = \sum_{j=1}^se^{\lambda_jt}\Big[\sum_{k=0}^{m_j-1}\frac{t^k}{k!}
 (A-\lambda_j1_d)^k\Big]p_j(A),
\end{aligned}
\end{equation}
\begin{equation}\label{pow-1}
A^n =
\sum_{j=1}^s\sum_{k=0}^{\min(n,m_j-1)}
\sum_{i=0}^{m_j-k-1}\lambda_j^{n-k}\alpha_{i,j}
\binom{n}{k}
(A-\lambda_j1_d)^{k+i} q_j(A)
\end{equation}
such that
\[
p_j(z)=\prod_{d=1, d\neq  j}
((z-\lambda_d)^{m_d}/(\lambda_j-\lambda_d)^{m_d})
\sum_{i=0}^{m_j-1}\alpha_{i,j}(z-\lambda_j)^i,
\]
$q_j(z)=p(z)/(z-\lambda_j)^{m_j}$ and
\[
f_{j,k}(t)=\sum_{i=0}^k\alpha_{i,j}(t^{k-i}/(k-i)!)(1/\prod_{d=1,
d\neq j}(\lambda_j-\lambda_d)^{m_d}),
\]
 where the $\alpha_{i,j}$ are given by \eqref{alpha-3}.
\end{proposition}

When $p(A)=(A-\lambda_11_d)^{r}=\Theta_d$ we can take $\alpha_{01}=1$
and $\alpha_{i1}=0$ for every $i\neq 0$.

Let $\Omega_0(t)$ be the coefficient of $A^0=1_d$ in the well known
 polynomial decomposition
$e^{tA}=\sum_{k=0}^{r-1}\Omega_k(t)A^k$. A direct computation, using
(\ref{exp-1}), yields a new formula for  $\Omega_0(t)$ in terms of
$\{\lambda_j\}_{\{1\leq j\leq s\}}$ as follows.

\begin{proposition}\label{prop-24}
Under the hypothesis of Proposition \ref{prop-23}, we have
\begin{equation}\label{omega-0}
\Omega_0(t)=\sum_{k=1}^se^{\lambda_kt}Q_k(t)\prod_{j=1,
j\neq k}^s\frac{(-\lambda_j)^{m_j}}{(\lambda_k-\lambda_j)^{m_j}},
\end{equation}
where
\[
{Q_k(t)=\sum_{p=0}^{m_k-1}\Big(\sum_{i=0}^p\alpha_{i,k}t^{p-i}/(p-i)!\Big)
(-\lambda_k)^p},
\]
with the $\alpha_{i,k}$ given by \eqref{alpha-3}. Moreover, we have
${\frac{d\Omega_{k+1}}{dt}(t)=a_{r-k-2}\frac{d\omega_0}{dt}(t)+\Omega_k(t)}$,
with $\Omega_0(t)=1+a_{r-1}\omega_0(t)$.
\end{proposition}

From Proposition \ref{prop-24} we derive that
\[
\omega_0(t)=\frac{1-\Omega_0(t)}{(-\lambda_1)^{m_1}\dots
(-\lambda_s)^{m_s}},
\]
 where $\Omega_0(t)$ is given by (\ref{omega-0}).
It seems to us that (\ref{omega-0}), which gives
$\Omega_0(t)$ in terms of the eigenvalues
$\{\lambda_j\}_{\{1\leq j\leq s\}}$ of the matrix $A$, is not known
in the literature under this form.

The preceding results will play a central role in the next sections
devoted to some properties of the linear matrix differential
equations of higher-order \eqref{Equadiff-1}-\eqref{Equadiff-3}.

\section{Higher-order matrix differential equations of Apostol type}
\label{section3}

We are concerned here with the higher-order matrix differential
equations \eqref{Equadiff-1} when $A_0=\dots =A_{r-2}=\Theta_d$
and $A_{r-1}=A\neq  \Theta_d$, where $A\in \mathcal{A}_d$. These
linear matrix differential equations are called {\it of Apostol
type}. For reason of clarity we proceed as follows. We start by
reconsidering the case $r=2$ and afterwards we focus on the case
$r\geq 2$; in particular, results of Apostol and Kolodner are
recovered.

\subsection{Simple second-order differential equations}
\label{Subsection-3.1}
Let $A$ be in $\mathcal{A}_d$ satisfying the polynomial equation
$P(A)=\Theta_d$, where $P(z)=z^r-a_0z^{r-1}-\dots-a_{r-1}$. It is
well known that the second-order linear matrix differential equation
\begin{equation}\label{Equadiff-S3}
X''(t)=AX(t),
\end{equation}
has a unique solution ${X(t)=C(t)X(0)+S(t)X'(0)}$,
where $X(0)$ and $X'(0)$ are the prescribed initial values and
$C(t)$ and $S(t)$ are the following series
\[
{C(t)=\sum_{k=0}^{+\infty}\frac{t^{2k}}{(2k)!}A^k}, \quad
{S(t)=\sum_{k=0}^{+\infty}\frac{t^{2k+1}}{(2k+1)!}A^k}
\]
(see \cite{ap, ko}). If we substitute for $A^k$ its expression
given in (\ref{powers-1}) we manage to have the following
improvement of the Apostol-Kolodner result.

\begin{proposition}\label{prop-31}
Let $A$ be in $\mathcal{A}_d$ satisfying the polynomial equation
$P(A)=\Theta_d$, where $P(z)=z^r-a_0z^{r-1}-\dots-a_{r-1}$. Then,
the unique solution of the matrix differential equation
\eqref{Equadiff-S3}, with the prescribed initial values $X(0)$ and
$X'(0)$ is $X(t)=\sum_{k=0}^{r-1}(C_k(t)X(0)+ S_k(t)X'(0))A^k$, such
that
$$
C_k(t)=\frac{t^{2k}}{(2k)!}+\sum_{n=r}^{+\infty}\frac{t^{2n}}{(2n)!}
\rho_k(n),\quad
S_k(t)=\frac{t^{2k+1}}{(2k+1)!}+\sum_{n=r}^{+\infty}
\frac{t^{2n+1}}{(2n+1)!}\rho_k(n)
$$
for $0\leq k\leq r-1$, where
${\rho_k(n)=\sum_{j=0}^ka_{r-k+j-1}\rho(n-j,r)}$ with
the $\rho(n,r)$ given by \eqref{rho-2}.
\end{proposition}

Consider the first-order differential equation (\ref{Equadiff-12})
with $A \in \mathcal{A}_d$ satisfying $P(A)=\Theta_d$, where
$P(z)=z^r-a_0z^{r-1}-\dots-a_{r-1}$. Kolodner's method (see
\cite{ko}) shows that $P(D)X(t)=P(A)X(t)=0$, where $D=d/dt$.
Therefore, we have $X(t)=\sum_{j=0}^{r-1}\phi_j(t)A^j$, where the
functions $\phi_j(t)$ ($0\leq j\leq r-1$) verify the scalar
differential equation $P(D)x(t)=0$, whose initial conditions are
$D^k\phi_j(0)=\delta_{j,k}$ (= 1 for $j=k$ and 0 if not), (see
\cite{ko, leo}). For the linear matrix differential equation
\eqref{Equadiff-S3} we have $D^2X(t)=AX(t)$, and the preceding
method shows that $P(D^2)X(t)=0$. If we set $Q(z)=P(z^2)$, it is
easy to show that each function  $\phi_j(t)$ ($0\leq j\leq 2r-1$)
is also a solution of the scalar ordinary differential equation
(of order $2r$) $Q(D)x(t)=0$, satisfying the initial conditions
$D^k\phi_j(0)=\delta_{j,k}$ ($0\leq j\leq 2r-1$). Therefore,
Equation (\ref{omega-1}) implies that
\begin{equation}\label{solu-S32}
\phi_k(t)=\frac{t^k}{k!}+\sum_{j=0}^kb_{2r-k+j-1}\omega_j(t),
\end{equation}
where $b_{2i}=0$, $b_{2i+1}=a_i$ and
$\omega_j(t)=\sum_{n=2r}^{+\infty}\rho(n-j,2r)t^n/n!$ ($0\leq j\leq
2r-1$), with $\rho(n,2r)$ given by \eqref{rho-2} are such that the
$a_i$ and $r$ are replaced by the $b_i$ and $2r$ (respectively). In
conclusion we have the following proposition.

\begin{proposition}\label{prop-32}
Let $A \in \mathcal{A}_d$ such that $P(A)=\Theta_d$, where
$P(z)=z^r-a_0z^{r-1}-\dots-a_{r-1}$. Then, the unique solution of
the matrix differential equation \eqref{Equadiff-S3}, with the
prescribed initial values $X(0)$ and $X'(0)$, is given by
$X(t)=C(t)X(0)+S(t)X'(0)$ with
\begin{equation}\label{solu-S33}
C(t)=\sum_{k=0}^{r-1}\phi_{2k}(t)A^k, \quad
S(t)=\sum_{k=0}^{r-1}\phi_{2k+1}(t)A^k,
\end{equation}
where the $\phi_k(t)$ ($0\leq k\leq 2r-1$) are described by
\eqref{solu-S32}.
\end{proposition}

Proposition \ref{prop-32} represents a new formulation of the result
of Kolodner (see \cite{ko}). As an application of the last
propositions, let us consider the following example.

\begin{example}\label{example-1} \rm
Let $A\in \mathcal{A}_d$ satisfying
$P(A)=A^2 -1_d=\Theta_d$ (or equivalently $A^2=1_d$),
then $\rho_0(2n)=1$, $\rho_0(2n+1)=0$ and $\rho_1(2n)=0$, $\rho_1(2n+1)=1$
for $n\geq 2$.
And a straightforward verification, using Propositions \ref{prop-31}
or \ref{prop-32}, shows that we have
$X(t)=\left(C_0(t)X(0)+S_0(t)X'(0)\right)1_d$ +
$\left(C_1(t)X(0)+S_1(t)X'(0)\right)A$, where
\begin{gather*}
C_0(t)=\frac{1}{2}\left(\cosh(t)+\cos(t)\right),\quad
S_0(t)=\frac{1}{2}\left(\sinh(t)+\sin(t)\right),\\
C_1(t)=\frac{1}{2}\left(\cosh(t)-\cos(t)\right),\quad
S_1(t)=\frac{1}{2}\left(\sinh(t)-\sin(t)\right).
\end{gather*}
\end{example}


\subsection{Higher order linear differential equations of Apostol type}
In this Subsection we extend results of Subsection
\ref{Subsection-3.1} to the class of {\it Apostol} linear matrix
differential equations of order $p\geq 2$,
\begin{equation}\label{Equadiff-p}
X^{(p)}(t)=AX(t),
\end{equation}
whose prescribed initial conditions are $X(0)$, $X'(0)$, $\dots$,
$X^{(p-1)}(0)$, where $A \in \mathcal{A}_d$ satisfying the
polynomial equation $P(A)=\Theta_d$, where
$P(z)=z^r-a_0z^{r-1}-\dots-a_{r-1}$. Consider the column vector
$Z(t)=^t(X^{(p-1)}(t), \dots, X(t))$ and the square matrix of size
$dp\times dp$,
\begin{equation}\label{matrix-B1}
 B= \begin{pmatrix}
\Theta_d         & \Theta_d      &    \dots & \Theta_d & A\\
1_d & \Theta_d   &  \dots &     &\Theta_d \\
\Theta_d         & 1_d    &\Theta_d     &\dots   &\Theta_d\\
\vdots     &\ddots &\ddots &\ddots    &\vdots\\
\Theta_d  &\dots &\Theta_d  &1_d      &\Theta_d
\end{pmatrix}.
\end{equation}
It is well known that the solution of the linear matrix differential
equation  \eqref{Equadiff-p} can be derived from the usual solution,
\begin{equation}\label{solu-matrix}
Z(t)=e^{tB}Z(0)\quad \text{where }
Z(0)=^t(X^{(p-1)}(0),\dots,X'(0), X(0)),
\end{equation}
of the first order linear system (\ref{Equadiff-12}) defined by
the matrix (\ref{matrix-B1}). That is, we focus our task on the
computation of ${e^{tB}}$, which is essentially
based on the explicit formula of the powers $B^n$ ($n\geq 0$).
Indeed, by induction we verify that, for every $n\geq 0$, we have
\begin{equation}\label{matrix-B2}
B^{pn}= \begin{pmatrix}
A^n    &\Theta_d   &\dots     & \Theta_d\\
\Theta_d      &\ddots &\ddots     &\vdots\\
\vdots &\ddots &\ddots     &\Theta_d\\
\Theta_d     &\dots& \Theta_d &A^n
\end{pmatrix},\quad
  B^{pn+i}= \begin{pmatrix}
\Theta_d      & \dots & A^{n+1} &\dots  & \Theta_d\\ \vdots
&\ddots &
\ddots  &\ddots  &\vdots\\ \Theta_d     & \Theta_d      &\dots   &\Theta_d &A^{n+1}\\
A^n    &\Theta_d       &\ddots   &\ddots  &\vdots\\ \vdots &\ddots
&\ddots        &\ddots  &\vdots\\
\Theta_d      & \dots &A^n      &\dots  &\Theta_d
\end{pmatrix},
\end{equation}
for $1\leq i\leq p-1$, where the power $A^{n+1}$ of the first line
is located at the $(p-i+1)^{th}$ column and the power $A^n$ of the
first column is located at the $(i+1)^{th}$ line. That is, the
matrix $B^{np+i}=(E_{k,j})_{1\leq k,j\leq p}$, with $1\leq i\leq
p-1$ and $E_{k,j}\in \mathcal{A}_d$, is described as follows:
$E_{j,p-i+j}=A^{n+1}$ for $j=1,\dots ,i$, $E_{i+j,j}=A^{n}$ for
$j=1,\dots ,p-i$ and $E_{k,j}=\Theta_d$ otherwise. The solution
$X(t)$ of the matrix differential equation \eqref{Equadiff-p}, is
derived by a direct computation. More precisely, the natural
generalization of Proposition \ref{prop-31} is formulated as
follows.

\begin{theorem}\label{thm-34}
Let $A$ be in $\mathcal{A}_d$. Then, the linear matrix
differential equation \eqref{Equadiff-p} of order $p\geq 2$, with
the prescribed initial conditions $X(0)$, $X'(0)$, $\dots$,
$X^{(p-1)}(0)$ has a unique solution $X(t)$ satisfying
$X(t)=C_0(t,A)X(0)+C_1(t,A)X'(0)+\dots +C_{p-1}(t, A)X^{(p-1)}(0)$,
where
\begin{equation}\label{solu-Cj-p}
C_j(t,A)=\sum_{n=0}^{+\infty}\frac{t^{pn+j}}{(np+j)!}A^n,\quad
\text{for every } j \, (0\leq j\leq p-1).
\end{equation}
\end{theorem}

Expression (\ref{matrix-B2}) of the powers of the matrix
(\ref{matrix-B1}) can be easily derived using the general method
of Section \ref{section4}, the latter being based on the
extension of the technique of \cite{br4, mrw}. On the other hand,
properties of the family of the functions $C_j(t,A)$ ($0\leq j\leq
p-1$), may be obtained from (\ref{solu-Cj-p}). Indeed, a simple
verification gives the corollary.

\begin{corollary}\label{coro-35}
Under the hypothesis of Theorem \ref{thm-34}, the functions $C_j(t)$
($0\leq j\leq p-1$) defined by (\ref{solu-Cj-p}) satisfy the
following differential relations,
$$
D^kC_j(t,A)=C_{j-k}(t,A), \; \text{for }\; 0\leq k\leq j\leq p-1,\;
\text{and }\;D^kC_j(0)=\delta_{j,k},
$$
where $D=d/dt$. Moreover, for every $j$ $(0\leq j\leq p-1)$ we have
$C_j(t)=D^{p-j-1}C_{p-1}(t)$.
\end{corollary}
A direct application of (\ref{powers-1})-\eqref{rho-2} and Theorem
\ref{thm-34}, permits us to establish that solutions of the linear
matrix differential equation \eqref{Equadiff-p} are expressed in
terms of the coefficients $a_j$ ($0\leq j\leq r-1$) of the
polynomial $P(z)=z^r-a_0z^{r-1}-\dots-a_{r-1}$ satisfying
$P(A)=\Theta_d$. More precisely, replacing $A^n$ in
(\ref{solu-Cj-p}) by its  expression given in (\ref{powers-1})
yields the following proposition.

\begin{proposition}\label{prop-36}
Let $A$ be in $\mathcal{A}_d$ such that $P(A)=\Theta_d$, where
$P(z)=z^r-a_0z^{r-1}-\dots-a_{r-1}$. Then, the solution of the
linear differential equation \eqref{Equadiff-p}, with the prescribed
initial conditions $X(0)$, $X'(0)$, $\dots$, $X^{(p-1)}(0)$, is
\[
X(t)=\sum_{k=0}^{r-1}\big[
\sum_{j=0}^{p-1}C_{j,k}(t)X^{(j)}(0)\big]A^k
\]
such that
\[
C_{j,k}(t)=\frac{t^{pk+j}}{(pk+j)!}+\sum_{n=r}^{+\infty}
\frac{t^{pn+j}}{(pn+j)!}\rho_k(n),
\]
with $\rho_k(n)=\sum_{j=0}^ka_{r-k+j-1}\rho(n-j,r)$, where
$\rho(n,r)$ is given by \eqref{rho-2}.
\end{proposition}

Now we  obtain an explicit formula for the solution of the
linear matrix differential equation \eqref{Equadiff-p}, in terms
of the roots of the polynomial
$P(z)=z^r-a_0z^{r-1}-\dots-a_{r-1}$ satisfying $P(A)=\Theta_d$.
To this aim, in Expression (\ref{solu-Cj-p}) we substitute for
$A^n$ ($n\geq r$) its expression set forth in Proposition
\ref{prop-23}. Then, a hard straightforward computation leads us
to the following result.

\begin{theorem}\label{thm-37}
Let $A$ be in $\mathcal{A}_d$ such that
$P(A)=\prod_{j=1}^s(A-\lambda_j1_d)^{m_j}=\Theta_d$
($\lambda_i\neq \lambda_j$ for $i\neq  j$). Then, the unique
solution of the linear
matrix differential equation \eqref{Equadiff-p}, with the prescribed
initial conditions $X(0)$, $X'(0)$, $\dots $, $X^{(p-1)}(0)$, is
$  X(t)= \sum_{i=0}^{p-1}C_{i}(t,A)X^{(i)}(0)$ such
that
$$
C_{i}(t,A)=\sum_{j=1}^s\varphi_j(t; A),\quad\text{with }
\varphi_j(t; A)=\sum_{k=0}^{m_j-1}
\frac{d^kV_i}{dz^k}(t,\lambda_j)\rho_{j,k}(A),
$$
$$
where $V_i(t,z)=\sum_{n=0}^{+\infty}\frac{t^{pn+i}}{(pn+i)!}z^n$ and
$$
\rho_{j,k}(z)=\frac{1}{k!}\Big(\sum_{d=0}^{m_j-k-1}
\alpha_{d,j}(z-\lambda_j)^{k+d}\Big)
\Big(\prod_{t=1, t\neq j}^s\frac{(z-\lambda_t)^{m_t}}
{(\lambda_j-\lambda_t)^{m_t}}\Big),
$$
with  $\alpha_{d,j}$ given by \eqref{alpha-3}.
\end{theorem}

The expression of the $C_i(t,A)$ ($0\leq i\leq p-1$) given in
Theorem \ref{thm-37} seems quite complicated, yet its application
is a powerful tool in many important practical situations, as shown
in the following corollaries and Example \ref{example-2}; it also
leads to explicit formulas. Indeed, when
$p(A)=\prod_{j=1}^r(A-\lambda_j1_d)=\Theta_d$ ($\lambda_i\neq
\lambda_j$ for $i\neq j$), then we show that $\rho_{j0}=\alpha_{0j}=1$, and the
following corollary is derived.

\begin{corollary}\label{coro-38}
Let $A$ be in $\mathcal{A}_d$ such that
$p(A)=\prod_{j=1}^r(A-\lambda_j1_d)=\Theta_d$ ($\lambda_i\neq
\lambda_j$ for $i\neq  j$). Then, the unique solution of Equation
\eqref{Equadiff-p}, with the prescribed initial conditions $X(0)$,
$X'(0)$, $\dots$, $X^{(p-1)}(0)$ is $
X(t)=\sum_{i=0}^{p-1}C_{i}(t,A)X^{(i)}(0)$, where
\[
C_i(t,z)=\sum_{j=1}^r \Big(\sum_{n=0}^{+\infty}
\frac{t^{pn+i}}{(pn+i)!}\lambda_j^n\Big)\prod_{t=1,
t\neq j}^r\frac{(z-\lambda_t)}{(\lambda_j-\lambda_t)}.
\]
\end{corollary}

In the particular case where $p=2$, the result of Apostol (see
\cite{ap}) is derived as a simple consequence of the above
corollary. Another important result of Theorem \ref{thm-37} can be
established when $p(A)=(A-\lambda_11_d)^r=\Theta_d$. Indeed, as it
was noticed above, in this case we have $\alpha_{01}=1$ and
$\alpha_{ij}=0$ for each $i\neq 0$, whence
$\rho_{1,k}(z)=\frac{1}{k!}(z-\lambda_1)^{k}$.
\begin{corollary}\label{coro-39}
Suppose that $A\in\mathcal{A}_d$  satisfies
$p(A)=(A-\lambda_11_d)^r=\Theta_d$. Then, the unique solution of
Equation \eqref{Equadiff-p}, with the prescribed initial conditions
$X(0)$, $X'(0)$, $\dots$, $X^{(p-1)}(0)$, is $ X(t)=
\sum_{i=0}^{p-1}C_{i}(t,A)X^{(i)}(0)$, where
\[
C_{i}(t,A)=\sum_{j=1}^r\frac{1}{(j-1)!}
\frac{d^{j-1}V_i}{d\lambda^{j-1}}(t,\lambda_1)(A-\lambda_11_d)^{j-1}.
\]
\end{corollary}

\begin{example}\label{example-2} \rm
Let $A\in \mathcal{A}_d$  such that $P(A)=\Theta_d$, where
$P(z)=(z-\lambda)^2(z-\mu)$ (with $\lambda \neq  \mu$). Then, the
solution of the third-order matrix differential equation
$X'''(t)=AX(t)$, with the prescribed initial conditions $X(0)$,
$X'(0)$ and $X''(0)$, is given by
$X(t)=C_0(t,A)X(0)+C_1(t,A)X'(0)+C_2(t,A)X''(0)$. And by Corollary
\ref{coro-35} we have  $C_1(t,A)=DC_2(t,A)$, $C_0(t,A)$
=$D^2C_2(t,A)$ ($D=d/dt$), where
$$
C_2(t,A)=V_2(t,\lambda)\big[1_d+\frac{A-\lambda1_d
}{\mu-\lambda}\big]\phi_1(A)+\frac{dV_2}{dz}(t,\lambda)(A-\lambda1_d
)\phi_1(t, \lambda)+V_2(t,\mu)\phi_2(A),
$$
with
\[
V_2(t,z)=\sum_{n=0}^{+\infty}\frac{t^{3n+2}}{(3n+2)!}z^n,\quad
\phi_1(z)=\frac{z-\mu}{\lambda-\mu}
\]
and
${\phi_2(z)=\frac{\left(z-\lambda\right)^2}{\left(\mu-\lambda\right)^2}}$.
\end{example}

\begin{remark} \label{rmk3.11}
As shown before, our method for studying these kinds of matrix
differential equations is more direct and does not appeal to other
techniques. Meanwhile, it seems to us that the method of Verde Star,
based on the techniques of divided differences, can also be applied
for studying such equations (see \cite{vs}).
\end{remark}

\section{Solutions of Equation \eqref{Equadiff-1}:
 combinatorial setting}\label{section4}

We are interested here in the combinatorial solutions of  \eqref{Equadiff-1}, where the
exponential generating function of the family of sequences
$\{\rho(n-j,r)\}_{n\geq 0}$ ($0\leq j\leq r-1$) is defined by
\eqref{rho-1}.

Let $A_0,\dots, A_{r-1}$ and $S_0,\dots, S_{r-1}$ be two finite
sequences of $\mathcal{A}_d$ such that $A_{r-1}\neq  0$. Let
${\mathcal{C}}^{\infty}(\mathbb{R}; \mathcal{A}_d)$ be the
$\mathbb{C}$-vector space of functions of class
${\mathcal{C}}^{\infty}$. Consider the class of linear matrix
differential equations of higher-order \eqref{Equadiff-1}, with solutions
$X\in {\mathcal{C}}^{\infty}(\mathbb{R}; \mathcal{A}_d)$ and subject to the
initial conditions, $X(0)$,
$X'(0)$, $\dots $, $X^{(r-1)}(0)$. Set $Z(t)=^t(X^{(r-1)}(t),
\dots, X(t))$ and $Z(0)=^t(X^{(r-1)}(0), \dots, X(0))$. A
standard computation shows that \eqref{Equadiff-1} is equivalent
to the following first-order matrix differential equation,
\begin{equation}\label{comp-equadiff-1}
Z'(t)=BZ(t),
\end{equation}
where $B \in \mathcal{M}(r,\;\mathcal{A}_d)$ is the
companion matrix
\begin{equation}\label{comp-matr}
B= \begin{pmatrix} A_0         & A_1      &    \dots &  & A_{r-1}\\
1_d & \Theta_d   &  \dots &     &\Theta_d \\
\Theta_d         & 1_d    &\Theta_d     &\dots   &\Theta_d\\
\vdots     &\ddots &\ddots &\ddots    &\vdots\\
\Theta_d  &\dots &\Theta_d  &1_d      &\Theta_d
\end{pmatrix}.
\end{equation}
It is well known that the solution of the linear matrix differential
equation \eqref{Equadiff-1} is
 $ Z(t)=e^{tB}Z(0)$, where the expression of $ e^{tB}$ depends on the
computation of the powers $B^n$ with the aid of $A_0,\dots, A_{r-1}$.
To this aim, we apply the recent technique for computing
the powers of the usual companion matrix (\ref{comp-matr}) (see
\cite{br4, mrw}). Indeed, let $\{Y_{n,s}\}_{n\geq 0}$ ($0\leq s\leq
r-1$) be the class of sequences \eqref{recursive-1} in
$\mathcal{A}_d$ such that $ Y_{n,s}= \delta_{n,s}1_d$
($\delta_{n,s}= 1$ if $n=s$ and 0 if not) for $0\leq n\leq r-1$ and
\begin{equation}\label{recursive-s}
Y_{n+1,s}= A_0Y_{n,s}+A_1
Y_{n-1,s}+\dots+A_{r-1} Y_{n-r+1,s}, \quad \text{for } n\geq r-1.
\end{equation}

\begin{lemma}\label{lemm-41}
Let $A_0,\dots, A_{r-1}$ be in $\mathcal{A}_d$ with $A_{r-1}\neq 0$.
Then
\begin{equation}\label{comp-power}
B^n= \begin{pmatrix}
Y_{n+r-1,r-1}     & Y_{n+r-1,r-2}     & \dots  & Y_{n+r-1, 0} \\
Y_{n+r-2,r-1}   & Y_{n+r-2,r-2}   & \dots  & Y_{n+r-2,0} \\
\vdots          &\vdots
&\ddots   &\vdots\\
Y_{n,r-1} & Y_{n,r-2} & \dots  & Y_{n,0} \\
\end{pmatrix},\quad \text{for every } n\geq 0.
\end{equation}
\end{lemma}

Expression (\ref{comp-power}) implies that
$e^{tB}=(C_{i,j}(t))_{0\leq i,\; j\leq r-1}$, where $C_{i,j}(t)$
($0\leq i,\; j\leq r-1$) is the exponential generating functions of
the sequence $\{Y_{n+r-1-i,r-1-j}\}_{n\geq 0}$ ($0\leq i,\; j\leq
r-1$). Thus, the characterization of solutions of the linear matrix
differential equation \eqref{Equadiff-1}, can be formulated as
follows.

\begin{theorem}\label{thm-42}
Let $A_0,\dots, A_{r-1}$ be in $\mathcal{A}_d$ such that
$A_{r-1}\neq  0$. Then, the solution of the linear matrix
differential equation \eqref{Equadiff-1} is
$X(t)=\sum_{j=0}^{r-1}C_j(t)X^{(j)}(0)$, where
$C_j(t)=\sum_{n=0}^{+\infty}Y_{n,j}t^n/n!$ ($0\leq j\leq r-1$) is
the exponential generating function  of the sequences
$\{Y_{n,j}\}_{n\geq 0}$ ($0\leq j\leq r-1$) defined by
\eqref{recursive-s}.
\end{theorem}

A result similar to Theorem \ref{thm-42} has been given by
Verde-Star under another form, by using the divided difference method
for solving linear differential equations \eqref{Equadiff-1} (see
Section 5 of \cite{vs2}). Indeed, Equation \eqref{Equadiff-1} is
analogous to Equation (5.6) given by Verde-Star whose
solutions (5.13), submitted to the initial conditions
$C_0,C_1,\dots , C_s$, are expressed in terms of a sequence
$\{P_{k+j}\}_{{0\leq j\leq s},k\geq 0}$ (see \cite{vs2}),
satisfying a linear recursive relation analogous to
\eqref{recursive-s}, moreover $\{P_{k+j}\}_{{0\leq j\leq s},k\geq
0}$ is nothing else but the sequence $\{Y_{k,s-j}\}_j$ defined
previously by \eqref{recursive-s}. Comparison of our procedure
with Verde-Star's method leads us to infer that the functions
$C_0(t), C_1(t),\dots, C_{r-1}(t)$ of Theorem \ref{thm-42} are
identical to the functions $G_{0,1}(t), G_{0,2}(t),\dots,
G_{0,s}(t)$ considered in \cite{vs2}.

Furthermore, when the condition $A_jA_k=A_kA_j$ ($0\leq j,k\leq
r-1$) is satisfied, expressions \eqref{combinat-1}--\eqref{rho-1}
show that the combinatorial formula of the sequences
\eqref{recursive-s} can be written explicitly as follows,
\begin{equation}\label{combinat-2}
Y_{n,j}=\sum_{k=0}^jA_{r-j+k-1}\rho(n-k,r),\quad\text{for } n\geq
r,
\end{equation}
where the $\rho(n,r)$ are defined in \eqref{rho-1} for $n\geq r$,
with $\rho(r,r)=1_d$ and $\rho(n,r)=\Theta_d$ for $n\leq r-1$.
Therefore, Expression \eqref{combinat-2} implies that
$C_j(t)=\frac{t^j}{j!}1_d+\sum_{k=0}^jA_{r-j+k-1}g_k(t)$, where
$g_k(t)=\sum_{n=0}^{+\infty}\rho(n-k,r)t^n/n!$ is the exponential
generating function of the sequence $\{\rho(n-k,r)\}_{n\geq 0}$.
Moreover, we verify easily that the functions $g_j(t)$ satisfy
$g_j^{(j-k)}(t)=\frac{d^{j-k}g_j}{dt^{j-k}}(t)=g_k(t)$ for
$0\leq k\leq j$. Hence, an extension of Proposition \ref{prop-31} and
Theorem \ref{thm-34} can be formulated as follows.

\begin{proposition}\label{prop-43}
Let $A_0,\dots, A_{r-1}$ be in $\mathcal{A}_d$ such that
$A_iA_k=A_kA_i$ for $0\leq i$, $k\leq r-1$.
Then the solution of the
linear matrix differential equation \eqref{Equadiff-1}, subject to
the prescribed initial values $X(0), X'(0), \dots ,$
$X^{(r-1)}(0)$, is the following
\begin{equation}\label{solu-g2}
X(t)=\sum_{j=0}^{r-1}\Delta_j(t)X^{(j)}(0)=
\sum_{j=0}^{r-1}\Big(\frac{t^j}{j!}+\Pi_j(D)g_j(t)\Big)X^{(j)}(0),
\end{equation}
such that
\[
\Delta_j(t)=\frac{t^j}{j!}+\sum_{k=0}^jA_{r-j+k-1}g_k(t),\quad
\Pi_j(D)=\sum_{k=0}^jA_{r-j+k-1}D^{j-k},
\]
 where
$D=d/dt$ and $ g_j(t)$ is the exponential generating function of the
sequence $\{\rho(n-j,r)\}_{n\geq 0}$.
\end{proposition}

Proposition \ref{prop-43} shows that solutions of the linear
matrix differential equation \eqref{Equadiff-1} may be given in terms
of the  exponential generating function of the class of
sequences $\{Y_{n,j}\}_{n\geq 0}$ ($0\leq j\leq r-1$) defined by
\eqref{recursive-s}. In the same way, expressions
\eqref{combinat-1}--\eqref{rho-1} may be used to obtain the
combinatorial formulas of the sequences in \eqref{recursive-s}. Moreover,
solutions of the linear matrix differential equation
\eqref{Equadiff-1}, subject to the prescribed initial values
$X(0), X'(0), \dots ,$ $X^{(r-1)}(0)$, can be expressed in terms of
the exponential generating functions of the class of sequences
$\{\rho(n-k,r)\}_{n\geq 0}$ ($0\leq k\leq r-1$).

\begin{remark}\label{remark-31}\rm
 Consider a unitary topological
$\mathbb{C}$-algebra ${\mathcal{A}}$ instead of the $\mathbb{C}$-algebra of the
square matrices  $\mathcal{A}_d$. Suppose that $A\in
{\mathcal{A}}$ is an algebraic element satisfying $P(A)=0$, where
$P(z)=z^r-a_0z^{r-1}-\dots-a_{r-1}$ ($a_j\in \mathbb{C}$ for $0\leq j\leq
r-1$). Then, all results of the preceding Sections are still
valid. On the other hand, it is easy to show that Theorem
\ref{thm-34} and Corollary \ref{coro-35}, on the solutions of the
linear matrix differential equation \eqref{Equadiff-p}, do not
depend on the condition that $A$ is algebraic.
\end{remark}

\begin{remark}[Future problem] \label{rmk4.5} \rm
Solutions of the linear matrix differential equation
\eqref{Equadiff-3}, can be also described using some recursive
relations and the exponential generating functions of the
combinatorial sequences \eqref{recursive-1}. One of the main
problems is to study the spectral aspect of solutions of these
classes of differential equations. More precisely, suppose that
the two (non trivial) matrices $A_0$, $A_1$  appearing in
\eqref{Equadiff-3} satisfy the following polynomial equations
$P_1(A_0)=P_2(A_1)=\Theta_d$, where
$P_1(z)=\prod_{j=0}^{s_1}(z-\lambda_j)^{p_j}$ and
$P_2(z)=\prod_{j=0}^{s_2}(z-\mu_j)^{q_j}$. The problem can be
formulated as follows: \emph{study the solutions of the linear
matrix differential equation \eqref{Equadiff-3}, in terms of the
$\lambda_j$ ($0\leq j\leq s_1$) and $\mu_j$ ($0\leq j\leq s_2$)}.
Some investigations are currently being carried out for this purpose.
\end{remark}

\subsection*{Acknowledgments}
 The authors would like to express
their sincere gratitude to Professor L. Verde-Star for his helpful
suggestions and fruitful correspondence that improved this paper.
They are also grateful to the anonymous referee for his/her valuable
comments on an earlier version of this paper.

\begin{thebibliography}{99}

\bibitem{ap} T.M. Apostol;
\emph{Explicit formulas for solutions of the second order matrix differential equation
$Y''=AY$,} Amer. Math. Monthly 82 (1975), pp. 159-162.

\bibitem{br1} R. Ben Taher and M. Rachidi;
 \emph{Linear recurrence relations in the algebra of matrices and applications,}
 Linear Algebra and Its Applications, Vol. 330 (1-3) (2001), pp. 15-24.

\bibitem{br2} R. Ben Taher and M. Rachidi;
 \emph{Some explicit formulas for the polynomial decomposition of the matrix
exponential and applications,} Linear Algebra and Its Applications,
350, Issue 1-3 (2002), pp. 171-184.

\bibitem{br3} R. Ben Taher, I. Bensaoud and M. Rachidi;
 \emph{Dynamic solutions and computation of the powers and
exponential of matrix,} International Journal of Mathematics, Game
Theory, and Algebra 13 (2003), No. 6, pp. 455-463.

\bibitem{br4} R. Ben Taher and M. Rachidi;
 \emph{On the  matrix  powers and exponential by the  $r$-generalized
Fibonacci sequences methods. The companion matrix case}, Linear
Algebra and Applications, 370 (2003), pp. 341-353.

\bibitem{cy} H.W. Cheng and S.S.-T. Yau;
 \emph{On more explicit formulas for matrix exponential,}
Linear Algebra Appl. 262 (1997), pp. 131-163.

\bibitem{mrw} B. El Wahbi, M. Mouline and M. Rachidi;
 \emph{Solving nonhomogeneous recurrence relations
by matrix methods,} The Fibonacci Quarterly, 40-2 (2002), pp.
109-117.

\bibitem{ga} F. R. Gantmacher;
 \emph{Theory of matrices}, Chelsea Publishing Company, New York (1959).

\bibitem{ko} I. I. Kolodner;
 \emph{On $\exp(tA)$ with $A$ satisfying a polynomial},
J. Math. Anal. Appl. 52 (1975), pp. 514-524.

\bibitem{le} C. Levesque;
 \emph{On the $m$-th order linear recurrences}, Fibonacci Quarterly 23 (4),
(1985), pp. 290-295.

\bibitem{leo} I. E. Leonard;
 \emph{The matrix exponential}, SIAM Review Vol. 38, No. 3 (1996),
 pp. 507-512.

\bibitem{mv} C. Moler and C. Van Loan;
 \emph{Nineteen dubious ways to compute the exponential of a matrix,
twenty-five years later,} SIAM Review Vol. 45 (1), (2003), pp.
3-49.

\bibitem{mr1} M. Mouline and M. Rachidi;
 \emph{Application of Markov Chains properties to $r$-Generalized
Fibonacci Sequences,} Fibonacci Quarterly 37 (1999), pp. 34-38.

\bibitem{mr2} M. Mouline and M. Rachidi;
\emph{Suites de Fibonacci g\'en\'eralis\'ees, Th\'eor\`eme de Cayley-Hamilton
et cha\^{\i}nes de Markov,} Rendiconti del Seminario Matematico di
Messina  Serie II, Tomo XIX, No. 4  (1996/97), pp. 107-115.

\bibitem{pu} E. J. Putzer;
 \emph{Avoiding the Jordan canonical form in the discussion of linear systems with constant
coefficients,} Amer. Math. Monthly 73 (1966), pp. 2-7.

\bibitem{rp} Hans-J. Runckel and U. Pitelkow;
 \emph{Practical Computation of matrix functions,} Linear Algebra Appl.
49 (1983), pp. 161-178.

\bibitem{vs} L. Verde-Star;
\emph{Operator identities and the solution of linear matrix difference
and differential equations,} Studies in Applied Mathematics 91 (1994), pp.
153-177.

\bibitem{vs1} L. Verde-Star;
 \emph{Divided differences and linearly recurrent sequences,}
Stud. Appl. Math. 95 (1995), pp. 433-456.

\bibitem{vs2} L. Verde-Star;
 \emph{Solutions of linear differential equations by the
method of divided differences,} Adv. in Applied Math. 16 (1995), pp.
484-508.

\end{thebibliography}

\end{document}
