
\documentclass[reqno]{amsart}
\usepackage{amscd}

\AtBeginDocument{{\noindent\small
2004 Conference on Diff. Eqns. and Appl. in Math. Biology,  Nanaimo, BC, Canada.\newline
{\em Electronic Journal of Differential Equations},
Conference 12, 2005, pp. 103--116.\newline
ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or
http://ejde.math.unt.edu
\newline ftp ejde.math.txstate.edu  (login: ftp)}
\thanks{\copyright 2005 Texas State University - San Marcos.}
\vspace{9mm}}
\setcounter{page}{103}

\begin{document}

\title[\hfilneg EJDE/Conf/12 \hfil Pointwise representation method]
{Pointwise representation method}

\author[V. M. Osipov, V. V. Osipov \hfil EJDE/Conf/12 \hfilneg]
{Vladimir Mihajlovich Osipov, Vladimir Vladimirovich Osipov}  

\address{Krasnoyarsk State Academy for Non-Ferrous Metals and Gold\\
95 ``Krasnoyarskiy rabochiy'' prospect, Krasnoyarsk 660025, Russia}
\email{gafur@rol.ru, ffomia@rol.ru,  Osipova@color.krasnoyarsk.su}

\date{}
\thanks{Published April 20, 2005.}
\subjclass[2000]{93C05, 37M05, 37N35, 62J10}
\keywords{Modeling; linear dynamic system; point representation}

\begin{abstract}
 This article suggests an approximate analytical apparatus for modeling 
 linear dynamic systems of various types. The apparatus uses spline 
 step models and pointwise representations of functions and operators.
\end{abstract}

\maketitle
\numberwithin{equation}{section}
\newtheorem{statement}{Statement}[section]


\section{Introduction}

The method of pointwise representations is rather efficient,
analytically powerful, and constructive for mathematical modelling
of dynamical systems. This is due to special  algebraic properties
of the analytic methods used for the description of
pointwise representations as finite dimensional models of
functions and operators.

The method is based on the following simple idea. To any function
$f(\tau)$ continuous on $[0,1]$, regarded as an element of the
Hilbert space $L_2(0,1)$, the following $N$-dimensional vector is assigned:
\begin{equation} \label{e1}
f_T=\mathop{\rm col}[f(\tau^{(N)}_1),\dots, f(\tau^{(N)}_\nu),\dots,
 f(\tau^{(N)}_N)]
\end{equation}
which consists of the samples of this function at the nodes of an
orthogonal $N$-grid:
\begin{equation} \label{e2}
\big\{\tau^{(N)}_\nu : \cos \big(N\pi\tau^{(N)}_\nu\big)=0  \big\}\,.
\end{equation}
Note that $\tau^{(N)}_\nu=\frac{2\nu-1}{2N}$ $(\nu=\overline{1,N})$.
The vector $f_{T}$ is called a pointwise representation vector of the
function $f(\tau )$, associated with the $N$-grid (\ref{e2}) which is
the Chebyshev grid.
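
As a minimal computational illustration (a sketch in Python with NumPy;
the names are ours and not part of the formal apparatus), the grid
(\ref{e2}) and the vector (\ref{e1}) can be formed as follows:
\begin{verbatim}
import numpy as np

def chebyshev_grid(N):
    # nodes tau_nu = (2*nu - 1)/(2N), nu = 1..N: the zeros of
    # cos(N*pi*tau) on [0, 1]
    nu = np.arange(1, N + 1)
    return (2 * nu - 1) / (2 * N)

def pointwise_vector(f, N):
    # pointwise representation N-vector f_T: samples of f at the nodes
    return f(chebyshev_grid(N))

f_T = pointwise_vector(np.cos, 8)   # e.g. the 8-vector for f = cos
\end{verbatim}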

It is known that such a grid is the best among all possible
orthogonal grids from many points of view. This means that, using
various types of interpolation,
the function $f(\tau )$ can be restored from its pointwise representation
$N$-vector (\ref{e1}) with the highest accuracy.

Let us now consider the space $M(0,1)$ of all piecewise continuous
functions defined on $[0,1]$. We norm it by introducing the sup-norm:
\begin{equation} \label{e3}
\|f\|= \sup _{\tau \in[0,1]} |f(\tau)|, \quad  f(\tau)\in M(0,1)
\end{equation}
Then the space $C(0,1)$ of all continuous functions on [0,1]  becomes
a subspace in $M(0,1)$, and
\begin{equation} \label{e4}
\|\varphi\|=  \sup_{\tau \in[0,1]} |\varphi(\tau)|
= \max_{\tau \in[0,1]} |\varphi(\tau)|, \quad
\varphi(\tau)\in C(0,1)\subset M(0,1)
\end{equation}


With this norm $M(0,1)$ and $C(0,1)$ are complete, i.e., Banach
spaces. We note that
$M(0,1)$ is also a subset of the Hilbert space $L_2(0,1)$. Since the
product of two
piecewise continuous bounded functions on $[0,1]$ is again a
piecewise continuous function bounded on the same interval,
the set $M(0,1)$ is closed with respect to the
operation of multiplication. Due to the  properties of the norm
(\ref{e3}),
\begin{gather} \label{e5}
\|f\varphi\| \leq \|f\|\cdot \|\varphi\|,\quad  f(\tau),
\varphi(\tau)\in M(0,1); \\
\label{e6}
\|1\|=1,
\end{gather}
it is not only a Banach space, but also a commutative
Banach algebra with an identity. Let us denote it by
$AM(0,1)$.
Obviously $AC(0,1)$, the Banach algebra of
all functions continuous on $[0,1]$, is a subalgebra of this algebra.

\section{Results}

Let us define the value at a discontinuity point as an average of left
and right limits.
Then any function in
$M(0,1)$ is defined on any orthogonal $N$-grid and, in particular,
on the Chebyshev grid:
\begin{equation}
\label{e7}
\tau^{(N)}_\nu=\frac{2\nu-1}{2N}\quad  (\nu=\overline{1,N}).
\end{equation}
Hence its pointwise representation $N$-vector is also determined:
\begin{equation}
\label{e8}
f_T=\mathop{\rm col}[f(\tau^{(N)}_1),\dots, f(\tau^{(N)}_\nu),\dots,
f(\tau^{(N)}_N)]\to
f(\tau)\in M(0,1)
\end{equation}
The set of all pointwise images defined on the $N$-grid (\ref{e7}) is
the linear $N$-dimensional space $R^N_T$, which is complete in any norm.
Let us equip it with the norm
\begin{equation} \label{e9}
\|f_T\|= \sup_{\nu} |f(\tau^{(N)}_\nu)|\leq \|f\|= \sup_{\tau \in[0,1]} |f(\tau)|
\end{equation}
This $N$-vector can be presented  in the form of the integral
transformation
\begin{equation} \label{e10}
T_Nf(\tau)=\int_0^1 f(\tau)\delta_T(\tau)d \tau= f_T, \quad
f(\tau)\in M(0,1)
\end{equation}
where the $\delta$-kernel is defined by the $N$-grid (\ref{e7})
\begin{equation} \label{e11}
\delta_T (\tau)=\mathop{\rm col}[\delta(\tau - \tau^{(N)}_1),\dots, \delta(\tau -
\tau^{(N)}_\nu),\dots, \delta(\tau - \tau^{(N)}_N)]
\end{equation}
The homomorphism $T_N:M(0,1) \to R^N_T$ means that the pointwise
representation $N$-vector $f_T$ is the image not of a single function
$f(\tau )$ in $M(0,1)$, but of a whole class of functions, and the
difference of any two functions of this class is a function of the type
\begin{equation} \label{e12}
r_T (\tau)=\alpha_N (\tau)\cos (N \pi \tau), \quad  \alpha _N
(\tau) \in M(0,1),
\end{equation}
with zeroes at the nodes of the $N$-grid (\ref{e7}); therefore, their
pointwise transformations have the zero image in $R^N_T$. The set of
functions (\ref{e12}) forms the kernel $\ker T_N$ of the homomorphism
$T_{N}$:
\begin{equation} \label{e13}
\ker T_N =\{r_T(\tau)\in M(0,1): T_N r_T (\tau)=0\}
\end{equation}

Any function $f(\tau)$ in $M(0,1)$, extended by some method to an even
periodic function, has an approximate model $M_N(f;\tau)$ in the form
of the discrete Fourier $N$-sum constructed from the values
$f(\tau ^{(N)} _\nu)$ at the
nodes of the $N$-grid (\ref{e7}); therefore
the pointwise representation $N$-vectors of the function $f(\tau)\in M(0,1)$
and of its model are identical,
i.e., the transformation $T_{N}$ maps them into the same
element $f_T \in R^N_T$.
Their difference belongs to the kernel (\ref{e13}) of the homomorphism
$T_{N}$:
 \[
 f(\tau)-M_N(f;\tau)=r_N(\tau)\in \ker T_N,
 \]
and, as a result,
\begin{equation} \label{e14}
f(\tau)=M_N(f;\tau)+r_N(\tau),
\end{equation}
so any function in $M(0,1)$ is represented as the sum of its
interpolation model and an element of $\ker T_N$. The latter plays the
role of the approximation error of the interpolation model.

As $N\to \infty$ the error tends to zero in
the $ L_{2}(0,1)$ norm, since
\begin{equation} \label{e15}
\lim_{N\to\infty}\int_0^1 [f(\tau)-M_N(f;\tau)]^2
d \tau = \lim_{N\to \infty}\int_0^1 [r_N(\tau)]^2 d
\tau =0,
\end{equation}
and, hence, we have the convergence
\begin{equation} \label{e16}
\lim_{N\to\infty}M_N(f;\tau)=f(\tau)\quad\mbox{and}\quad
 \lim_{N\to \infty}r_N(\tau)=0,
\end{equation}
almost everywhere in $[0,1]$ (by the Carleson theorem). The set
$S_N(0,1)$ of interpolation models $M_N(f;\tau)$ is an
$N$-dimensional subspace of $M (0,1)$. The mapping
$P_N:M(0,1)\to S_N(0,1)$ is a homomorphism
with the kernel (\ref{e13}).
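
As an illustration of the interpolation model $M_N(f;\tau)$, here is a
sketch (Python; our reading of the discrete Fourier cosine $N$-sum on
the grid (\ref{e7}), with the coefficient normalization being our
assumption):
\begin{verbatim}
import numpy as np

def cosine_model(f, N):
    # discrete cosine N-sum M_N(f; .) built from the samples of f
    # at the Chebyshev nodes; returns the model as a callable
    nu = np.arange(1, N + 1)
    tau = (2 * nu - 1) / (2 * N)
    fT = f(tau)
    k = np.arange(N)
    a = 2.0 / N * np.cos(np.outer(k, np.pi * tau)) @ fT
    a[0] /= 2.0
    return lambda t: np.cos(np.outer(np.atleast_1d(t), np.pi * k)) @ a

M = cosine_model(np.exp, 16)
err = abs(M(0.3)[0] - np.exp(0.3))   # modest error, decaying with N
\end{verbatim}
The model reproduces the samples $f(\tau^{(N)}_\nu)$ exactly at the
nodes, so its pointwise image coincides with $f_T$, as stated above.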

Sets $S_N(0,1)$ and $R^N_T$  are equivalent,
since there is a one-to-one correspondence of their elements.
Moreover, they are isometrically isomorphic. Thus, it is
possible to illustrate the relation of these spaces by the
diagram
\begin{equation} \label{e17}
 \begin{CD}
M(0,1)     @>T_N>>  R_T^N \\
@V{P_N}VV      @|\\
S_N (0,1) @=  R_T^N
\end{CD}
\end{equation}
The arrows $T_N$ and $P_N$ denote homomorphisms. The double lines mark
the isometric isomorphism of the spaces $S_N(0,1)$ and $R^N_T$.

As $N$ grows, due to the convergence (\ref{e16}), these homomorphisms
tend to isometric isomorphisms. This, however, does not describe all
the algebraic properties of pointwise
vector images of functions in $M(0,1)$, nor all the relations of the
corresponding spaces as sets.
As already noted, the space $M(0,1)$ is also a
commutative Banach algebra $AM(0,1)$ under the usual operation of
multiplication.

The space $R_T^N$ of pointwise vector images, as a homomorphic image of
the space $M(0,1)$, has the following property.
Let us define in $R_T^N$ a commutative pointwise operation of vector
multiplication.
Let $f(\tau)$ and $\varphi(\tau)$ be two functions in $M(0,1)$
and $f_T$ and $\varphi_T$ be their pointwise vector images
in $R_T^N$, i.e.,
\begin{equation} \label{e18}
\begin{gathered}
f(\tau) \stackrel{T_N}{\to}
f_T=\mathop{\rm col}\big[f(\tau^{(N)}_1),\dots, f(\tau^{(N)}_\nu),\dots,
f(\tau^{(N)}_N)\big] \\
\varphi(\tau) \stackrel{T_N}{\to}
\varphi_T=\mathop{\rm col}\big[\varphi(\tau^{(N)}_1),\dots, \varphi(\tau^{(N)}_\nu),
\dots, \varphi(\tau^{(N)}_N)\big]
\end{gathered}
\end{equation}
Then the $N$-vector
\begin{equation} \label{e19}
\Phi_T=\mathop{\rm col}\big[f(\tau^{(N)}_1) \cdot
\varphi(\tau^{(N)}_1),\dots, f(\tau^{(N)}_\nu) \cdot
\varphi(\tau^{(N)}_\nu),\dots, f(\tau^{(N)}_N) \cdot
\varphi(\tau^{(N)}_N)\big],
\end{equation}
whose coordinates are the products of the respective coordinates of
the vectors (\ref{e18}), and which is written symbolically as $f_T
\otimes \varphi _T$,
is the pointwise representation vector of the product
$f(\tau)\varphi(\tau)$ of functions in $M(0,1)$:
\begin{equation}
\label{e20}
f(\tau) \varphi(\tau) \stackrel{T_N}{\to} f_T \otimes \varphi _T =
\Phi_T \in R^N_T,
\end{equation}
and, according to (\ref{e9})
\begin{equation} \label{e21}
\|\Phi_T\|=\|f_T \otimes \varphi _T\| \leq \|f_T\| \cdot \|\varphi
_T\| \leq \|f\| \cdot \|\varphi\|.
\end{equation}
In $R_T^N$ the identity element is defined as
\begin{equation} \label{e22}
1_T=\mathop{\rm col}[1,\dots, 1,\dots, 1] \to 1 \in M(0,1)
\end{equation}
with the unit norm $\|1_T\|=1$; it enjoys the property
\begin{equation}
\label{e23}
f_T \otimes 1_T= 1_T \otimes f_T=f_T, \quad  f_T \in R_T^N
\end{equation}


Thus, the set $R_T^N$ of pointwise vector images, with the introduced
operation of multiplication and with the sup-norm (\ref{e9}),
is a commutative Banach algebra with an
identity for any $N$. Let us denote it by $AR_T^N$.
Since for any $f(\tau)$ and $\varphi (\tau)$ in $M(0,1)$ the equation
\begin{equation} \label{e24}
T_N[f(\tau) \varphi (\tau)]=T_N [f(\tau)] \cdot T_N [\varphi
(\tau)]=f_T \otimes \varphi _T
\end{equation}
is valid, the pointwise transformation $T_{N}$ is, for any $N$, a
continuous homomorphism not only of the space $M(0,1)$ to the space
$R_T^N$,
but also of the Banach algebra $AM$ to the algebra $AR_T^N$:
 \[
 AM \stackrel{T_N}{\to} AR_T^N
 \]
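
A numerical sketch of this homomorphism property (Python, names ours):
\begin{verbatim}
import numpy as np

N = 16
tau = (2 * np.arange(1, N + 1) - 1) / (2 * N)

def T_N(f):                         # pointwise image of a function
    return f(tau)

otimes = lambda a, b: a * b         # coordinatewise product in AR_T^N
sup = lambda v: np.max(np.abs(v))   # the sup-norm on R_T^N

f_T, phi_T = T_N(np.sin), T_N(np.exp)
assert np.allclose(T_N(lambda t: np.sin(t) * np.exp(t)),
                   otimes(f_T, phi_T))   # T_N(f*phi) = f_T (x) phi_T
assert sup(otimes(f_T, phi_T)) <= sup(f_T) * sup(phi_T)
assert np.all(otimes(np.ones(N), f_T) == f_T)   # 1_T is the identity
\end{verbatim}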
However, the mapping $P_N : M(0,1) \to S_N (0,1)$ does not enjoy this
property, since $S_N (0,1)$, the space of
$N$-dimensional interpolation models of functions in $M(0,1)$
(quadrature cosine Fourier sums), is not an algebra
under the usual multiplication.

Indeed,
 \[
 P_N [f(\tau) \varphi (\tau)] \neq P_N [f(\tau)] \cdot P_N [\varphi
 (\tau)],
 \]
so the relation between $M(0,1)$ and $S_N (0,1)$ can apparently be
described comprehensively only as a
homomorphism of linear spaces. If we consider
zero-degree splines as approximation models for the functions in $M(0,1)$,
the situation changes significantly.

The point is that the set of spline models
\begin{equation}
\label{e25}
Sp^0_N (f_T ; \tau) = \sum_{\nu =1}^N f(\tau _ \nu ^{(N)}) \pi _N
(\tau - \tau _ \nu ^{(N)}),\quad  f(\tau) \in M(0,1)
\end{equation}
with interpolation elements
\begin{equation} \label{e26}
\pi _N (\tau - \tau _ \nu ^{(N)})
=\begin{cases}
 1 & \tau \in ( \tau _ \nu ^{(N)} - \frac{1}{2N} , \tau
 _ \nu ^{(N)} + \frac{1}{2N} ) \\
  0 & \tau \notin ( \tau _ \nu ^{(N)} - \frac{1}{2N} , \tau
 _ \nu ^{(N)} + \frac{1}{2N} )
\end{cases}
\quad (\nu =\overline{1,N})
\end{equation}
which are rectangular pulses of unit height, is not only a
sup-normed $N$-dimensional subspace of step interpolation forms
of $M(0,1)$, but also a commutative Banach algebra
$ASp_N^0$ with an identity,
under the usual operation of multiplication. It is a subalgebra of the
algebra $AM$. In fact, the following property of the elements
(\ref{e26}),
\begin{equation} \label{e27}
\pi _N (\tau - \tau _ \nu ^{(N)}) \cdot \pi _N (\tau - \tau _ m ^{(N)})
= \begin{cases}
\pi _N (\tau - \tau _ \nu ^{(N)}) &  \nu = m\\
0 &  \nu \neq m
\end{cases} \quad  (\nu ,m =\overline{1,N})
\end{equation}
implies
\begin{equation*} %\label{e28}
Sp^0_N (f_T ; \tau) \cdot Sp^0_N (\varphi_T ; \tau) = \sum_{\nu =1}^N
f(\tau _ \nu ^{(N)}) \varphi(\tau _ \nu ^{(N)})  \pi _N
(\tau - \tau _ \nu ^{(N)})
= Sp^0_N (f_T \otimes \varphi_T ; \tau)
\end{equation*}
i.e., the product of two step spline models of dimension $N$
of functions $f(\tau)$ and $\varphi (\tau)$ from $M(0,1)$ is the spline
model, of the same dimension, of the
product of these functions. In other words, the space $Sp_N^0 (0,1)$,
as the set of step
interpolation forms, is closed with respect to the operation of
multiplication.
Furthermore,
\begin{equation} \label{e29}
Sp^0_N (1_T ; \tau)=1  \in M(0,1)
\end{equation}
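
A sketch of the step spline model and of this closure under
multiplication (Python, names ours; values at the cell boundaries, a
set of measure zero, are treated arbitrarily):
\begin{verbatim}
import numpy as np

def spline0(f_T, t):
    # zero-degree spline model Sp0_N(f_T; t): the step function equal
    # to f_T[nu] on the cell of width 1/N around the nu-th node
    N = len(f_T)
    idx = np.clip(np.floor(np.asarray(t) * N).astype(int), 0, N - 1)
    return f_T[idx]

N = 8
tau = (2 * np.arange(1, N + 1) - 1) / (2 * N)
f_T, g_T = np.sin(tau), np.cos(tau)
t = np.linspace(0, 1, 1000)
# the product of two step models is the step model of f_T (x) g_T:
assert np.allclose(spline0(f_T, t) * spline0(g_T, t),
                   spline0(f_T * g_T, t))
\end{verbatim}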
Thus, the homomorphic mapping $\pi _N$ of the space $M(0,1)$ to its
subspace $Sp^0_N (0,1)$ of spline
models can be considered as a homomorphism of the algebra $AM$
to the algebra $ASp_N^0$, and the latter is
isometrically isomorphic to the algebra $AR_T^N$ for any $N$:
\begin{equation} \label{e30}
\begin{CD}
AM     @>T_N>>  AR_T^N \\
@V{\pi_N}VV      @|\\
ASp_N^0  @= AR_T^N
\end{CD}
\end{equation}
As $N\to \infty$ the sequence $\{Sp_N^0 (f_T;\tau )\}$
of step interpolation forms converges almost everywhere
to the modeled function $f(\tau) \in M(0,1)$, and if the latter is
continuous, then the convergence is uniform.

At the same time the functions in $M(0,1)$ which form the kernel
$\ker T_N$ of $T_{N}$
converge almost everywhere to zero, and the homomorphisms $\pi _{N}$ and
$T_{N}$ become isometric isomorphisms of algebras.

Now let us consider a linear bounded operator $A_{\tau}$ acting
from $M(0,1)$ to $M(0,1)$ or to some subspace $M_y(0,1)
\subseteq M(0,1)$; in particular, the range may lie in the space
$C(0,1)$ of functions continuous on $[0,1]$, and it may be
finite dimensional. The operator $A _ \tau$,
\begin{equation}\label{e31}
A_{\tau}x(\tau)=y(\tau); \quad  x(\tau) \in M(0,1);\quad
 y(\tau) \in M_y(0,1) \subseteq M(0,1),
\end{equation}
is linear,
\[
A_{\tau}[\alpha x_1 (\tau) + \beta x_2 (\tau)]
= \alpha \cdot A_{\tau} x_1 (\tau)
+ \beta \cdot A_{\tau} x_2 (\tau); \quad
 x_1 (\tau), x_2 (\tau) \in M(0,1);\;  \alpha ,\beta \in \mathbb{R}
\]
and its boundedness means the following inequality for sup-norms holds:
\begin{equation} \label{e32}
\|y\|=\|A_\tau x\| \leq C \cdot \|x\|,
\end{equation}
where the least possible value of a positive constant $C$ is the
norm $\|A_\tau\|$ of the operator $A_\tau$:
\begin{equation} \label{e33}
\|A_ \tau\|= \sup_{x \neq 0} \frac {\|A_ \tau x\|}{\|x\|}.
\end{equation}
The boundedness of the linear operator is equivalent to its continuity
in the following sense: the images $A_ \tau x_1 (\tau)$ and $A_ \tau x_2
(\tau)$ of
two close elements $x_1 (\tau)$ and $x_2 (\tau)$ of $M(0,1)$ are also
close, i.e., for every $\varepsilon >0$
there is a $\delta >0$ such that
$\|x_1 - x_2\|<\delta$ implies
$\|A_ \tau x_1 - A_ \tau x_2\| <\varepsilon$.

The homomorphisms $T_{N}$ and $\pi_N$ introduced above,
\begin{equation} \label{e34}
\begin{gathered}
T_N x(\tau)=X_T;\quad  x(\tau) \in M(0,1); \quad  X_T \in R_T^N \\
\pi_N x(\tau)=Sp_N^0 (X_T ; \tau);\quad  Sp_N^0 (X_T ; \tau) \in Sp_N^0
(0,1)
\end{gathered}
\end{equation}
are linear bounded operators. Their domain is the space $M(0,1)$.

The operator $T_{N}$ maps functions of $M(0,1)$ into their pointwise
representation vectors in $R_T^N$, associated with the Chebyshev $N$-grid
(\ref{e7}),
and the operator $\pi _N$ maps them into interpolation spline models
constructed on the same $N$-grid,
which form the $N$-dimensional subspace
$Sp_N^0 (0,1)$ of step forms of the space $M(0,1)$.

Let us note that because of the obvious equation
\begin{equation} \label{e35}
\pi _N [\pi _N x(\tau)]=\pi _N ^2 x(\tau) = \pi _N Sp _N ^0 (X_T
; \tau) = Sp_N^0 (X_T ; \tau) \Rightarrow \pi_N^2 = \pi_N
\end{equation}
the operator $\pi_N$ is a projection operator.
The above-mentioned inequalities for sup-norms,
\begin{equation*} %\label{e36}
\|\pi _N x(\tau)\|=\|Sp _N ^0 (X_T ; \tau)\| = \|X_T\|
=\|T_N x(\tau)\| \leq \|x(\tau)\|; \ \ \ \ x(\tau) \in M(0,1)
\end{equation*}
imply the boundedness of the operators $T_{N}$ and $\pi _N$;
their norms equal one: $\|T_N\|=\|\pi _N\|=1$ for any $N$.

Let us apply the pointwise transformation operator $T_{N}$ to the
operator equation (\ref{e31}):
\begin{equation} \label{e37}
T_N [A_\tau x(\tau)]=T_N y(\tau) =Y_T^N
\end{equation}
As a result we obtain the vector-matrix equation
\begin{equation}\label{e38}
A_T^{(N)} X_T^{(N)} = Y_T^{(N)},
\end{equation}
generally speaking approximate, which is a homomorphic image
of equation (\ref{e31}) in $R_T^N$ (the $N$-dimensional space of
pointwise images).
More than one pointwise matrix representation $A_T^{(N)}$
$(N \times N)$ may be assigned to the
linear operator $A_\tau$ of $M(0,1)$.

The problem is to find a general method for the explicit definition
(choice) of a matrix pointwise representation, from the set of all
possible representations, for any linear bounded
operator $A_\tau$ mapping $M(0,1)$ into some subspace $M_y (0,1)$ of it.

In this connection, let us note the following important property. The
$N$-dimensional space $Sp_N^0 (0,1)$ of approximating spline models of
functions in $M(0,1)$ has a basis of $N$ rectangular impulse functions
\begin{equation} \label{e39}
\pi _N (\tau - \tau _ \nu ^{(N)})
=\begin{cases}
 1 &  \tau \in ( \tau _ \nu ^{(N)} - \frac{1}{2N} , \tau
 _ \nu ^{(N)} + \frac{1}{2N} ) \\
  0 & \tau \notin ( \tau _ \nu ^{(N)} - \frac{1}{2N} , \tau
 _ \nu ^{(N)} + \frac{1}{2N} )
\end{cases}
\quad (\nu =\overline{1,N})
\end{equation}
of unit height, with supports of length $\frac{1}{N}$ and axes of
symmetry at the nodes of the Chebyshev $N$-grid (\ref{e7}).

Any element of $Sp_N^0 (0,1)$ can be represented as a linear combination
of the basis elements, with the
components of the pointwise representation $N$-vector of the
modeled function in $M(0,1)$ as coefficients:
\begin{equation} \label{e40}
Sp^0_N (X_T ; \tau) = \sum_{\nu =1}^N x(\tau _ \nu ^{(N)}) \pi _N
(\tau - \tau _ \nu ^{(N)}), \quad  x(\tau) \in M(0,1).
\end{equation}
We form a basis $N$-vector using basis elements (\ref{e39}) as
components:
\[
\Pi_N (\tau) = \mathop{\rm col}[\pi _N(\tau - \tau _1 ^{(N)}),\dots, \pi _N(\tau -
\tau _ \nu ^{(N)}),\dots, \pi _N(\tau - \tau _N  ^{(N)})].
\]
Then the spline model (\ref{e40}) of any function (i.e., any element of
the space $Sp_N^0 (0,1)$) can be written as the inner product of the
pointwise representation vector $X_{T}$ of the function $x(\tau)$ with
the basis vector $\Pi_N (\tau)$:
\begin{equation} \label{e41}
x(\tau) \stackrel{\pi _N}{\to} Sp^0_N (X_T ; \tau) = \sum_{\nu =1}^N
x(\tau _ \nu ^{(N)}) \pi _N (\tau - \tau _ \nu ^{(N)}) =
(X_T, \Pi _N (\tau)) = X_T^+ \Pi_N (\tau)
\end{equation}
In particular, let us find the spline models of the functions
$A_\tau \pi _N(\tau - \tau _\nu ^{(N)})$
$(\nu=\overline{1,N})$, which for any linear bounded
operator $A_\tau$ acting in $M(0,1)$ are also elements of this space.
Using the operator $\pi _N$ we project them onto the $N$-dimensional
space of spline models $Sp_N^0 (0,1)$, i.e.,
we represent them as combinations of the basis elements (\ref{e39}).
Thus we obtain
\begin{equation} \label{e42}
\pi _N A_\tau \pi _N (\tau - \tau _\nu ^{(N)}) = \sum_{k=1}^N
\alpha_{k\nu} \pi _N (\tau - \tau _ k ^{(N)}) \quad (\nu=\overline{1,N})
\end{equation}
The coefficients of these decompositions are the components of the
pointwise vector images of the functions
$A_\tau \pi _N (\tau - \tau _\nu ^{(N)})$
$(\nu=\overline{1,N})$.

It should be noted that the first $\nu -1$ coefficients are equal to
zero and the stepwise representation (\ref{e42}) begins with the
$\nu$-th step, since the original function
$A_\tau \pi _N (\tau - \tau _\nu ^{(N)})$ is equal to zero up
to the moment when the $\nu$-th rectangular finite impulse
$\pi _N (\tau- \tau _\nu ^{(N)})$ occurs.
Thus, the decomposition has the form
\begin{equation} \label{e43}
\begin{aligned}
\pi _N A_\tau \pi _N (\tau - \tau _\nu ^{(N)})
&= \sum_{k=1}^N \alpha_{k\nu} \pi _N (\tau - \tau _ k ^{(N)})\\
&=[0,\dots, 0, \alpha_{\nu\nu},\dots, \alpha_{k
\nu},\dots, \alpha_{N\nu}]
\cdot \Pi_N (\tau)
\end{aligned}
\end{equation}
$(\nu=\overline{1,N})$,
which is the inner product of an $N$-vector of coefficients, whose
first $\nu-1$ components are zero, with the basis vector $\Pi_N (\tau)$.

This implies the  vector-matrix representation for a vector
function,
\begin{equation} \label{e44}
\begin{aligned}
&\pi _N A_\tau \Pi_N (\tau)\\
&=\pi_N\begin{bmatrix}
A_{\tau} \pi_N (\tau - \tau _1^{(N)})  \\
\vdots \\
A_{\tau} \pi_N (\tau - \tau _\nu^{(N)}) \\
\vdots \\
A_{\tau} \pi_N (\tau - \tau _N^{(N)})
\end{bmatrix}
= \begin{bmatrix}
\alpha_{11} & \cdots & \alpha_{\nu 1} & \cdots & \alpha _{N1} \\
            & \ddots &  \vdots        &        &  \vdots      \\
            &        & \alpha_{\nu\nu}& \cdots & \alpha_{N \nu} \\
            &   0    &                & \ddots &   \vdots  \\
            &        &                &        &  \alpha_{NN}
\end{bmatrix}
\Pi_N (\tau)\\
&=(A_T^{(N)})^+ \Pi_N (\tau)
\end{aligned}
\end{equation}
The symbol $(A_T^{(N)})^+$ denotes the upper triangular matrix with the
coefficients of the decompositions (\ref{e43}) as entries:
\begin{equation}\label{e45}
(A_T^{(N)})^+ =\begin{bmatrix}
\alpha_{11} & \cdots & \alpha_{\nu 1} & \cdots & \alpha _{N1} \\
            & \ddots &  \vdots        &        &  \vdots      \\
            &        & \alpha_{\nu\nu}& \cdots & \alpha_{N \nu} \\
            &   0    &                & \ddots &   \vdots  \\
            &        &                &        &  \alpha_{NN}
\end{bmatrix}
\end{equation}
This $N \times N$ matrix is the result of transposing the lower triangular matrix
\begin{equation} \label{e46}
A_T^{(N)} =\begin{bmatrix}
\alpha_{11}   &               &                &        &              \\
\vdots        & \ddots        &                &        &              \\
\alpha_{\nu 1}& \cdots        & \alpha_{\nu\nu}&        &              \\
\vdots        &               &  \vdots        & \ddots &              \\
\alpha _{N1}  & \cdots        & \alpha_{N \nu} &\cdots  &  \alpha_{NN}
\end{bmatrix}
\end{equation}
Now let us find the spline representation $\pi _N A_ \tau Sp_N^0
(X_T,\tau)$ as an approximate model of the function
$A_ \tau Sp_N^0 (X_T,\tau) \in M(0,1)$.

Since $\|A_ \tau Sp_N^0 (X_T,\tau) - \pi _N A_ \tau Sp_N^0 (X_T,\tau)\|\to 0$
as $N \to \infty$, for $N$ large enough the values of
these functions will differ by less than any prescribed positive value,
for any linear bounded operator $A_\tau$.
Taking into
account (\ref{e41}) and (\ref{e44}), and also the properties of the inner
product, we obtain:
\begin{equation}\label{e47}
 \begin{aligned}
A_ \tau Sp_N^0 (X_T,\tau)
&\approx \pi _N A_ \tau Sp_N^0 (X_T,\tau) =
(X_T , \pi_N A_\tau \Pi_N (\tau)) \\
&= (X_T ,  (A_T^{(N)})^+ \Pi_N
(\tau))= (A_T^{(N)} X_T , \Pi_N (\tau)) \\
&= Sp_N^0 (A_T^{(N)} X_T,\tau)
\end{aligned}
\end{equation}
where $A_T^{(N)}$ is the matrix in (\ref{e46}).
It remains to make the final step. By the properties of the norm, the
inequality
\begin{equation}\label{e48}
\|A_ \tau x(\tau) - A_ \tau Sp_N^0 (X_T,\tau)\| \leq \| A_ \tau\|
\cdot \|x (\tau) - Sp_N^0 (X_T,\tau)\|
\end{equation}
holds for any bounded (continuous) linear operator $A_ \tau$,
any $x(\tau)$ in $M(0,1)$, and any $N$.
Since
\begin{equation} \label{e49}
\|x (\tau) - Sp_N^0 (X_T,\tau)\| \to 0 \quad\mbox{as } N \to \infty,
\end{equation}
i.e., the sequence $\{Sp_N^0 (X_T,\tau)\}$
of spline approximation models converges in norm to the modeled function
$x(\tau)$ in $M(0,1)$ (and the convergence is even uniform
if $x(\tau)$ is a continuous function), then
the inequality (\ref{e48}) implies
\begin{equation} \label{e50}
\|A_ \tau x(\tau) - A_ \tau Sp_N^0 (X_T,\tau)\| \to 0 \quad
\mbox{as } N \to \infty\,.
\end{equation}
The latter convergence implies that, for $N$ large enough and taking
into account (\ref{e47}), the approximating spline models attain any
prescribed accuracy:
\begin{equation} \label{e51}
\begin{aligned}
y(\tau)&=A_\tau x(\tau) \approx A_ \tau Sp_N^0 (X_T,\tau) \approx \pi
_N A_ \tau Sp_N^0 (X_T,\tau) \\
&=(A_T^{(N)} X_T , \Pi_N (\tau)) = Sp_N^0 (A_T^{(N)} X_T,\tau)
\end{aligned}
\end{equation}
Hence for pointwise images we obtain
\begin{equation} \label{e52}
T_N y(\tau)=Y_T^{(N)}=[A_ \tau x(\tau)]_T = A_T^{(N)} X_T^{(N)}; \quad
 X_T^{(N)} = T_N x(\tau).
\end{equation}
Thus, any linear bounded operator acting from $M(0,1)$ to any of
its subspaces $M_y(0,1) \subseteq M(0,1)$, under the homomorphic
mapping into the $N$-dimensional subspace $Sp_N^0 (0,1) \subset M(0,1)$ of
spline models (with basis (\ref{e39})), has a pointwise representation
by a lower triangular matrix (\ref{e46}). The equality (\ref{e52}) for
pointwise images, generally speaking approximate, corresponds to
the operator equation (\ref{e31}).
In practice the components of the pointwise matrix
representation of a linear operator $A_\tau$ can be found by
projecting the
functions $A_\tau \pi_N (\tau - \tau_\nu ^{(N)})$
$(\nu = \overline{1,N})$ onto the subspace of spline models and
decomposing them as linear combinations of the
basis elements (\ref{e39}) (decompositions of the type (\ref{e43})).
The coefficients of these
decompositions form the rows of the matrix which, after transposition,
is the matrix of the pointwise representation $A_T^{(N)}$ of the
operator $A_\tau$.
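
As a computational sketch of this recipe (Python; the operator is taken
to be integration, implemented here by a simple midpoint quadrature,
and the projection onto spline models is realized by sampling at the
nodes; the names and the quadrature are our illustrative assumptions):
\begin{verbatim}
import numpy as np

def pointwise_matrix(A, N):
    # column nu: apply A to the nu-th basis pulse pi_N(. - tau_nu) and
    # read off the spline-projection coefficients, i.e. the values at
    # the Chebyshev nodes; these are the alpha_{k,nu}
    tau = (2 * np.arange(1, N + 1) - 1) / (2 * N)
    AT = np.empty((N, N))
    for nu in range(N):
        lo, hi = tau[nu] - 1 / (2 * N), tau[nu] + 1 / (2 * N)
        pulse = lambda t: ((t > lo) & (t < hi)).astype(float)
        AT[:, nu] = A(pulse)(tau)
    return AT

def J(x, M=4000):
    # integration operator (J x)(t) = int_0^t x(s) ds, midpoint rule
    s = (np.arange(M) + 0.5) / M
    return lambda t: np.array([x(s[s < ti]).sum() / M
                               for ti in np.atleast_1d(t)])

N = 5
print(N * pointwise_matrix(J, N))   # 1/2 on the diagonal, 1 below it:
                                    # N times the integration matrix
                                    # derived below
\end{verbatim}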

In what follows this method is used to find pointwise matrix
representations of
various linear operators, which are necessary, in particular, for the
solution of linear differential equations of various types; these are
transformed into algebraic (vector-matrix) equations by pointwise
representations. This can be treated as a special operator
calculus.

In particular, the pointwise matrix representation is found for the
operator which shifts the function $x(\tau) \in M(0,1)$ along the axis
``$\tau$'' by a fixed step
equal to the distance between adjacent Chebyshev nodes of the grid
(\ref{e2}), i.e., by $\frac {1}{N}$.

Let us call this operator the pointwise shift operator and
denote it by $Z_\tau$.
For any bounded function $x(\tau)$ with
support in $[0,1]$
(obviously $x(\tau) \in M(0,1)$), its image under this operator is the
function with its argument shifted by $\frac {1}{N}$:
\begin{equation} \label{e53}
Z_\tau x(\tau) = x\big(\tau - \frac{1}{N}\big), \quad \tau \in
\big(\frac{1}{N}, 1+\frac{1}{N}\big)
\end{equation}
By the general method described above for obtaining pointwise matrix
representations of linear bounded operators, the linear pointwise shift
operator $Z_\tau$ has, in the space $R_T^N$ of pointwise images,
the matrix representation
\begin{equation} \label{e54}
Z =\begin{bmatrix}
0   &               &                &        &              \\
1   &      0        &                &   0    &              \\
    &      1        &        0       &        &              \\
0   &               &  \ddots        & \ddots &              \\
    &               &                &     1  &        0
\end{bmatrix}
\end{equation}
This $N \times N$ matrix is called the canonical right shift matrix.
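
In computational terms (a Python sketch, names ours):
\begin{verbatim}
import numpy as np

N = 6
Z = np.eye(N, k=-1)           # ones on the first subdiagonal
x_T = np.arange(1.0, N + 1)   # some pointwise image
print(Z @ x_T)                # [0. 1. 2. 3. 4. 5.]: every component is
                              # moved one node down, a delay by 1/N
\end{verbatim}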

The powers of the shift matrix,
\begin{equation} \label{e55}
E=Z^0, Z^1, Z^2, \dots, Z^k, \dots, Z^{N-1},
\end{equation}
form a linearly independent system of matrices, since their linear
combinations, i.e., matrix polynomials of degree $N-1$ with real
coefficients,
\begin{equation} \label{e56}
P_{N-1} (Z) = \sum_{k=0}^{N-1} A_k Z^k =
\begin{bmatrix}
A_0     &               &                &        &              \\
A_1     &     A_0       &                &   0    &              \\
A_2     &     A_1       &       A_0      &        &              \\
\vdots  &   \vdots      &  \ddots        & \ddots &              \\
A_{N-1} &   \cdots      &  \cdots        &   A_1  &       A_0
\end{bmatrix}
\end{equation}
are lower triangular $N\times N$ matrices which can be identically equal
to zero only if all the coefficients are equal to zero.

The Toeplitz-type matrix (\ref{e56}) is called a polynomial
shift matrix.

First of all, let us note that the matrix polynomial
(\ref{e56}) is a homomorphic
image in $R_T^N$ of the polynomial operator
\begin{equation} \label{e57}
P_{N-1} (Z_ \tau) = \sum _{k=0}^{N-1} A_k Z_\tau ^k
\end{equation}
with the shift operator $Z_{\tau }$ as a variable, which acts as
\begin{equation} \label{e58}
P_{N-1} (Z_ \tau) x(\tau) = \sum _{k=0}^{N-1} A_k x\big(\tau - \frac
{k}{N}\big); \quad  x(\tau) \in M(0,1),
\end{equation}
and thus sums up all successive shifts of a finite
function $x(\tau )$ in $M(0,1)$ with the appropriate weight coefficients
$\{ A_{k}\}$.
Obviously, we have
\begin{equation}
\label{e59}
P_{N-1} (Z_\tau) x(\tau) \stackrel{T_N}{\to} P_{N-1} (Z) X_T
\end{equation}
The set $\{P_{N-1} (Z)\}$ of all possible polynomial shift matrices is
a subspace of the
linear space of lower triangular $N\times N$ matrices; it is an
$N$-dimensional
linear space with the basis (\ref{e55}) of the first $N$ powers of
the $N\times N$ matrix $Z$.
A wide class of such matrices arises as the set of functions of the
matrix $Z$, the canonical shift matrix.

Formally, polynomial shift matrices appear after replacing the complex
variable $z$ of an entire rational function (polynomial) $P_{N - 1} (z)$ of
degree $N-1$ by the matrix argument $Z$ $(N\times N)$:
\begin{equation} \label{e60}
P_{N-1} (z) = \sum _{k=0}^{N-1} A_k z^k \stackrel{z \to Z}{\to}
\sum _{k=0}^{N-1} A_k Z^k = P_{N-1} (Z) \quad (N\times N)
\end{equation}
Obviously, we have a one-to-one correspondence between the above
polynomials and the
matrices (\ref{e60}). The polynomial $P_{N - 1} (z)$ of a complex
variable $z$ will be called
a generating polynomial of the matrix $P_{N - 1} (Z)$ $(N\times N)$.

In what follows we call polynomial shift matrices $P$-matrices
($N\times N$).

Every $P$-matrix is completely defined by an ordered set of $N$ real
numbers, the coefficients of its generating polynomial. Evidently the
properties of these sets of numbers define both the properties of the
generating polynomials and the properties of the corresponding
$P$-matrices.

Furthermore, if we assume that all powers of the variable $z$ exceeding
$N-1$ vanish (these are the powers $z^{N}, z^{N+1}, \dots$),
i.e., impose the condition that $z$ is nilpotent
with index $N$, as is valid for the matrix
argument $Z$, then we obtain one more binary operation
(in addition to summation) in the space of polynomials of degree at
most $N-1$, with respect to which this space is closed.
This is the operation of polynomial multiplication, which
satisfies all the usual axioms of multiplication and,
in particular, is commutative.

Thus the set of generating polynomials is a more complicated algebraic
structure than a linear space.
It is a commutative algebra with an identity. The
set of corresponding $P$-matrices $(N\times N)$ is the same algebra, since
for any pair of these matrices
a commutative operation of multiplication,
with a $P$-matrix $(N\times N)$ as the result, is defined. Besides,
the set of $P$-matrices is a linear space. The identity matrix $E$
$(N\times N)$ is the identity of the matrix algebra.

Obviously these algebras are isomorphic (as sets they are
simply equivalent), all operations follow the same rules
and reduce to operations on the coefficients of the generating
polynomials (elements of one algebra), which are at the same time the
entries of the corresponding $P$-matrices (elements of the other algebra).
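
A sketch of this isomorphism (Python, names ours): multiplying
generating polynomials under the condition $z^N=0$, i.e., truncating
the convolution of coefficients at degree $N-1$, matches the product of
the corresponding $P$-matrices.
\begin{verbatim}
import numpy as np

def P(coeffs, N):
    # P-matrix of the generating polynomial with coefficients
    # A_0, ..., A_{N-1}: a lower triangular Toeplitz polynomial in Z
    Z = np.eye(N, k=-1)
    M, Zk = np.zeros((N, N)), np.eye(N)
    for c in coeffs:
        M += c * Zk
        Zk = Zk @ Z
    return M

N = 5
a, b = [1.0, 2, 0, 1, 3], [4.0, 0, 1, 0, 2]
ab = np.convolve(a, b)[:N]    # polynomial product, truncated at
                              # degree N-1 (z nilpotent of index N)
assert np.allclose(P(a, N) @ P(b, N), P(ab, N))
\end{verbatim}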

It is also possible to prove the following three statements.

\begin{statement} \label{stmt1}
 The set of functions of a
complex variable $z$ which are defined and continuous in the unit
disc $|z| \le 1$ and analytic inside it forms a
Banach algebra with identity, denoted $AF$, with the norm
\begin{equation} \label{e61}
\|\varphi (z)\|= \max_{|z| \le 1} |\varphi (z)|,\quad \varphi (z) \in AF,
\end{equation}
which coincides with the $l_{1}$-norm of the appropriate power
series of functions in $AF$:
\begin{equation} \label{e62}
\|\varphi (z)\| = \|\sum _{k=0}^{\infty} \varphi _k z^k\| =
\max_{|z| \le 1}\vert \sum _{k=0}^{\infty} \varphi _k z^k \vert
= \sum  _{k=0}^{\infty}| \varphi _k|
\end{equation}
\end{statement}

The set of such power series is also a Banach algebra with identity,
denoted $AGF$, which is isometrically isomorphic to the algebra $AF$.
This statement is illustrated by the diagram
\begin{equation} \label{e63}
\begin{CD}
AF     @=  AGF
\end{CD}
\end{equation}


\begin{statement} \label{stmnt2}
There exists a projector $\Pi ^{(N)}$ which is a homomorphism of the
normed algebras
$AF$ and $AGF$ onto the $l_{1}$-normed $N$-algebra $AGF^{(N)}$ of partial
sums of
power series of degree $N$, treated as elements of the algebra
$AGF$.
\end{statement}
This statement is illustrated by the diagram
\begin{equation}  \label{e64}
\begin{CD}
AGF^{(N)}     @=  AGF^{(N)} \\
@A \Pi^{(N)} AA      @A \Pi^{(N)} AA\\
AGF  @= AF
\end{CD}
\end{equation}

\begin{statement} \label{stmnt3}
 Replacing the variable $z$ by the canonical shift matrix $Z$ $(N\times N)$
leads to a homomorphism of the algebra $AF$ of functions analytic in the
disc
$|z|\le 1$ onto the algebra $AGF^{(N)}(Z)$ of polynomial shift
$N\times N$ matrices ($P$-matrices), and also to an isometric isomorphism
of the $N$-algebra $AGF^{(N)}$ of generating polynomials onto the matrix
algebra $AGF^{(N)}(Z)$.
\end{statement}
The diagram has the final form
\begin{equation} \label{e65}
\begin{CD}
AGF^{(N)}    @= AGF^{(N)} (Z)   \\
@A \Pi^{(N)} AA      @A \Pi^{(N)} AA\\
AGF  @= AF
\end{CD}
\end{equation}

Besides, using the above general method of obtaining pointwise
matrix representations of linear operators, it is possible to deduce
the $P$-matrix representation of the integral operator $J_{\tau }$
defined as
\begin{equation} \label{e66}
y(\tau)=J_\tau x(\tau) = \int  _{0}^{\tau} x(s)\, ds, \quad
\tau \in [0,1].
\end{equation}
It is a linear bounded operator. Its domain is the space $M (0,1)$;
its range is a subset of the space $C (0,1)$ of continuous functions
vanishing at $\tau = 0$. The operator $J_{\tau }$
has in the space $R_T^N$ of pointwise vector images the matrix
representation $J_{T}$ $(N\times N)$:
\begin{equation}\label{e67}
y(\tau)=J_\tau x(\tau) = \int  _{0}^{\tau} x(s)\, ds
\stackrel{T_N} {\to} Y_T = J_T X_T.
\end{equation}
The representation $J_{T}$ corresponds to the two-step mapping
\begin{equation} \label{e68}
y(\tau)=J_\tau x(\tau)\stackrel{\pi _N} {\to} Sp_N (J_T X_T ; \tau)
\stackrel{T_N} {\to} Y_T = J_T X_T,
\end{equation}
in which the first step is an approximation.

The matrix representation  $J_{T}$ of the integration operator
has the form:
\begin{equation} \label{e69}
J_ \tau \stackrel{T_N} {\to} J_T = \frac {1}{N}
\begin{bmatrix}
1/2    &       &            &        &  \\
1      & 1/2   &            &   0    &  \\
\vdots & 1     &  1/2       &        &  \\
1      &       &  \ddots    & \ddots &  \\
1      & 1     &  \cdots    &   1    &  1/2
\end{bmatrix}
\quad (N \times N)
\end{equation}
It is necessary to note that any method which improves the accuracy of
the approximate equations
changes significantly the structure of the representing matrix of the
integration operator.
Compared to (\ref {e69}), this not only makes the structure more
complicated, but
also essentially affects the analytical structure and efficiency
of the applied theory developed on the basis of pointwise
representations.

Mainly this is due to the fact
that the matrix of integration
(\ref{e69}) is a polynomial shift matrix ($P$-matrix) and consequently
for any $N$ it can be presented as a linear combination of the first $N$
powers of the canonical
shift matrix $Z$ $(N\times N)$, which reduces to a
rational function of the matrix variable $Z$:
\begin{equation}\label{e70}
\begin{aligned}
J_T &= \frac {1}{N} \big[ \frac {1} {2} E + \sum  _{k=1}^{N-1} Z^k\big]
= \frac {1}{2N} \big[ E + 2 \sum  _{k=1}^{N-1} Z^k \big] \\
&= \frac {1}{2N} \big[ E + 2Z \sum  _{k=0}^{N-2} Z^k \big]
= \frac {1}{2N} \big[ E + 2Z (E-Z)^{-1} \big]\\
&= \frac {1}{2N} (E-Z)^{-1} (E+Z),
\end{aligned}
\end{equation}
where we used that $Z$ is nilpotent with index $N$, i.e., $Z^N=0$.
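
This identity is immediate to verify numerically (a sketch, names ours):
\begin{verbatim}
import numpy as np

N = 6
E, Z = np.eye(N), np.eye(N, k=-1)
J_T = (np.tril(2 * np.ones((N, N)), -1) + E) / (2 * N)  # 1/2 diag, 1 below
closed = np.linalg.solve(E - Z, E + Z) / (2 * N)  # (E-Z)^{-1}(E+Z)/(2N)
assert np.allclose(J_T, closed)
\end{verbatim}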

The functions of the dimensionless
variable $\tau$ considered above stand for functions of the time
variable $t$
defined on a finite interval $[0, T]$. Under the substitution
$t=T\tau$ the equation is
transformed so that $[0,1]$ is the domain of the variable $\tau$, while
$T$ is a parameter.
The notation $x(\tau ) \in M(0,1)$ does not include this parameter
explicitly. However we assume
\begin{equation}\label{e71}
x(\tau) = x (T \tau) = x(t), \quad  t \in [0,T],
\end{equation}
\end{equation}
and the components $x (\tau _ {\nu}^{(N)})$ $(\nu = \overline {1,N})$
of the pointwise representation vector $X_{T}$ of the function $x (\tau)$
are the function values $x (T \tau _{\nu}^{(N)}) = x (t _{\nu}^{(N)})$
$(\nu = \overline {1,N})$ at the nodes of the Chebyshev time
$N$-grid
\begin{equation} \label{e72}
t^{(N)}_\nu = T \tau^{(N)}_\nu=\frac{T(2\nu-1)}{2N}\quad (\nu = \overline{1,N}).
\end{equation}
The operator of integration with respect to the time variable $t$ also
acquires the factor $T$, since
\begin{equation} \label{e73}
J_t x(t) =\int  _{0}^{t} x(s)\, ds = T \int  _{0}^{t/T}
 x(T \sigma)\, d \sigma = T \int  _{0}^{\tau} x(\sigma)\, d \sigma
= T J_{\tau} x(\tau).
\end{equation}
For the pointwise matrix of integration we have
\begin{equation} \label{e74}
\begin{aligned}
T J_T &= \frac {T}{N}
\begin{bmatrix}
1/2    &      &         &        &  \\
1      & 1/2  &         &   0    &  \\
\vdots &  1   &  1/2    &        &  \\
1      &      &  \ddots & \ddots &  \\
1      &  1   &  \cdots &     1  &  1/2
\end{bmatrix}\\
&= \frac {T}{2N} (E-Z)^{-1} (E+Z)
= \lambda_0 (E-Z)^{-1} (E+Z)
\quad (N \times N)
\end{aligned}
\end{equation}
The scalar factor
\begin{equation} \label{e75}
\lambda _0 = \frac {T}{2N}
\end{equation}
is half the time distance between two adjacent nodes of the
$N$-grid \eqref{e72} and at the same time an eigenvalue of
multiplicity $N$ of the
matrix \eqref{e74}, whose determinant equals $\lambda _0 ^N$.

The parameter (\ref{e75}) plays an important role in the study of time
processes
by the method of pointwise representations: it connects the width of
the spectral characteristic (the frequency $\omega_{cp}$) and the
characteristic
time $T$ of the process with the dimension of these
representations. Indeed, by the
Kotelnikov theorem we have
\begin{equation} \label{e76}
\lambda _0 = \frac {T}{2N} = \frac {1}{2} \frac {\pi}{\omega_{cp}}
\Rightarrow \lambda_0 \omega_{cp} = \frac {\pi}{2}
\end{equation}
For a fixed frequency $\omega_{cp}$ the parameter $\lambda _{0}$
should also
be fixed by the relation (\ref{e76}). Thus any change
of $T$ should lead to a change of the dimension $N$ such
that the ratio $\frac {T}{N}$ stays constant. Then the
Chebyshev nodes of the time $N$-grids are also fixed,
\begin{equation} \label{e77}
t^{(N)}_\nu = \frac{T(2\nu-1)}{2N} = \lambda_0 (2\nu - 1) \quad
(\nu = \overline{1,N}),
\end{equation}
as are the values of the function $x(t)$, $t \in [0,T]$, the
components of its pointwise representation vector $X_{T}$.

Consequently, an increase of the dimension $N$ (with
an increase of $T$) means the
addition of new components to the pointwise representation vector without
any change of the previous components, i.e., a property
well known for Fourier coefficients.
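
A small sketch of this invariance (Python, names ours), with
$\lambda_0$ held fixed while $T$, and hence $N$, grows:
\begin{verbatim}
import numpy as np

lam0 = 0.05                              # lambda_0 = T/(2N), held fixed
def time_grid(T):
    N = int(round(T / (2 * lam0)))       # the dimension follows T
    return lam0 * (2 * np.arange(1, N + 1) - 1)

g1, g2 = time_grid(1.0), time_grid(2.0)  # N = 10 and N = 20
assert np.allclose(g1, g2[:len(g1)])     # old nodes unchanged; growing
                                         # T only appends new nodes
\end{verbatim}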
Let us introduce a polynomial shift matrix, as a function of matrix
variable $Z$:
\begin{equation} \label{e78}
 J(Z) = (E-Z)^{-1} (E+Z) =E + 2 \sum_{k=1}^{N-1} Z^k
\end{equation}
Then the matrix of integration (\ref{e74}) can be rewritten as
\begin{equation} \label{e79}
\begin{aligned}
T J_T &=\lambda_0 J(Z) = \lambda_0 (E-Z)^{-1} (E+Z) \\
&=\lambda_0\big[ E + 2 \sum_{k=1}^{N-1} Z^k \big]
= \lambda_0 \begin{bmatrix}
1       &     &           &        &  \\
2       &  1  &           &        &  \\
\vdots  &  2  & 1         &        &  \\
2       &     &  \ddots   & \ddots &  \\
2       &  2  &  \cdots   &     2  &  1
\end{bmatrix}
\end{aligned}
\end{equation}

In the framework of the general approach, the problem of
the pointwise image of the convolution operator is investigated. The
latter is treated as a compact
integration operator with a difference kernel and as a commutative
binary
operation, closed in the $L_{1}$-norm, which transforms
$M(0,1)$ into a
normed convolution algebra with an identity (with the
$\delta$-function as the identity).

In the space of pointwise images $R_T^N$ a convolution of functions is
mapped into a convolution of the vector images of these functions.
The latter convolution is closed in the $l_1$-norm, which makes $R_T^N$
a convolution algebra, and $ASR_T^N$ becomes a homomorphic image of the
functional convolution algebra $ASM$. As $N$ grows, the
homomorphism tends to an isomorphism.

Convolution operators are very important for the theory of linear
dynamical systems, since they connect an input and an output. Therefore
the
following fact is significant. Pointwise modelling of convolution
operators leads to the use of the customary tools of dynamic
systems: transfer
functions, i.e., Laplace transforms of the kernels of convolution
operators, in the role of impulse
transfer characteristics of the corresponding dynamical systems.

Connections of convolution algebras with algebraic
structures in the sets of functions of a complex variable
play an essential
role when studying properties of linear dynamical systems by their
pointwise models.
It is proved that
\begin{equation} \label{e80}
g \ast x = \int _{0}^{t} g(t- \eta) x(\eta) d \eta \stackrel{T_N}
{\to} Y_T =W _ g^ \ast (Z) X_T,
\end{equation}
while the $P$-matrix $W _ g^ \ast (Z)$ can be explicitly determined from
the Laplace transform $G(p)$
of the kernel $g(t)$ via the substitution $p \to 1/\lambda$:
\begin{equation}\label{e81}
G(p) \stackrel{p \to \frac {1}{\lambda}} {\to} G^ \ast (\lambda)
\stackrel{\lambda \to TJ_T} {\to} G^\ast (TJ_T) = G^\ast \big[ \lambda_0
(E-Z)^{-1} (E+Z) \big] = W_g^ \ast (Z).
\end{equation}
Here $TJ_T$ is the pointwise representation of the Volterra integration
operator, which is a
$P$-matrix. Thus the input-output connection of a linear
(stationary) dynamical system is modeled in the pointwise representation
by the vector-matrix equation
\begin{equation} \label{e82}
Y_T =W _ g^ \ast (Z) X_T.
\end{equation}
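
As an illustrative sketch of this scheme (our construction, under the
stated correspondence): for the kernel $g(t)=e^{-at}$ one has
$G(p)=1/(p+a)$, hence $G^\ast(\lambda)=\lambda/(1+a\lambda)$ and
$W_g^\ast(Z)=(E+aTJ_T)^{-1}(TJ_T)$; the pointwise output can then be
compared with the directly computed convolution integral.
\begin{verbatim}
import numpy as np

a, T, N = 2.0, 1.0, 400
E, Z = np.eye(N), np.eye(N, k=-1)
TJ = T / (2 * N) * np.linalg.solve(E - Z, E + Z)  # time integration
W = np.linalg.solve(E + a * TJ, TJ)               # W_g*(Z), a P-matrix

t = T * (2 * np.arange(1, N + 1) - 1) / (2 * N)   # Chebyshev time grid
x_T = np.sin(5 * t)                               # input samples X_T
y_T = W @ x_T                                     # pointwise output Y_T

y_ref = []                # reference: convolution by the midpoint rule
for ti in t:
    s = (np.arange(2000) + 0.5) * ti / 2000
    y_ref.append(np.sum(np.exp(-a * (ti - s)) * np.sin(5 * s)) * ti / 2000)
print(np.max(np.abs(y_T - y_ref)))                # small; decays with N
\end{verbatim}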

\begin{thebibliography}{00}

\bibitem{o1}
V. M. Osipov; \emph{Principles of the method of representing vectors}, Omsk
State University, 1983.

\bibitem{o2}
V. V. Osipov; \emph{Modelling of linear dynamic systems by the method of
pointwise representations}. Master's thesis, Krasnoyarsk, 2000.

\bibitem{n1}
M. A. Naimark; \emph{Normed rings}. Nauka, Moscow, 1968.
\end{thebibliography}

\medskip

Vladimir Mihajlovich Osipov, born 1929,
graduated from Tomsk Polytechnical University in 1953;
doctor of physical and mathematical sciences;
professor at the department of higher mathematics of the State University
of Non-Ferrous Metals and Gold.

\smallskip

Vladimir Vladimirovich Osipov, born 1976,
graduated from the Krasnoyarsk State Academy of Non-Ferrous Metals and
Gold in 1997;
candidate of physical and mathematical sciences;
senior lecturer at the department of higher mathematics of the State
University of Non-Ferrous Metals and Gold.

\end{document}
