\documentclass[reqno]{amsart} 
\begin{document} 
{\noindent\small {\em Electronic Journal of Differential Equations},
Vol.~2000(2000), No.~54, pp.~1--10.\newline
ISSN: 1072-6691. URL: http://ejde.math.swt.edu or http://ejde.math.unt.edu
\newline ftp ejde.math.swt.edu \quad ejde.math.unt.edu (login: ftp)}
\thanks{\copyright 2000 Southwest Texas State University  and 
University of North Texas.} 
\vspace{1cm}

\title[\hfilneg EJDE--2000/54\hfil Smoothness of solutions]
{Smoothness of solutions of conjugate boundary-value problems on a measure chain } 

\author[Eric R. Kaufmann\hfil EJDE--2000/54\hfilneg]
{Eric R. Kaufmann }

\address{Eric R. Kaufmann \hfill\break\indent
Department of Mathematics and Statistics\\
        University of Arkansas at Little Rock\hfill\break\indent
        Little Rock, Arkansas 72204-1099 USA}
\email{erkaufmann@athena.ualr.edu}

\date{}
\thanks{Submitted June 27, 2000. Published July 14, 2000.}
\subjclass{34B15, 34B99, 39A10, 34A99 }
\keywords{ Measure chain, initial-value problem, boundary-value problem}


\begin{abstract}
In this paper we consider the $n^{th}$ order $\Delta$-differential equation
(often referred to as a differential equation on a measure chain)
$$u^{\Delta_n}(t) = f(t, u(\sigma(t)),\dots, u^{\Delta_{n-1}}(\sigma(t)))$$ 
satisfying $n$-point conjugate boundary conditions.  We show that solutions 
depend continuously and smoothly on the boundary values.
\end{abstract}

\maketitle

\newtheorem{theorem}{Theorem}[section]
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{definition}[theorem]{Definition}


\section{Introduction}

Differential equations on a measure chain (also called differential
equations on time scales) have received much attention since Hilger's
\cite{sh} work unifying continuous and discrete calculus. Subsequent works
by Agarwal and Bohner \cite{rpamb}, Aulbach and Hilger \cite{bash}, Erbe
and Hilger \cite{lhesh}, and Kaymakcalan, {\it et al.} \cite {kls} have
furthered the development of calculus on measure chains. Many recent papers
consider a variety of problems for differential equations on a measure
chain; see \cite{da, cdhy, lheap1, lheap2}, for example.

In this paper we are concerned with the continuous dependence and
smoothness of solutions of  differential equations on a measure chain with
respect to boundary values. The results of this paper are patterned after
those found in Henderson and Lee \cite{jhll} and Henderson \cite{jh}. In
\cite{jhll}, the authors considered the continuous dependence and
smoothness of solutions of conjugate boundary-value problems for difference
equations with respect to boundary conditions, and in \cite{jh}, the author
established the corresponding results for ordinary differential equations.
Other works devoted to continuous dependence and smoothness of solutions
with respect to boundary values include \cite{adjh, je, jejh, dh, jhmhlh,
jherk} and the references therein.

Let $T$ be a nonempty closed subset of ${\mathbb R}$, and let $T$ have the
subspace topology inherited from the Euclidean topology on ${\mathbb R}$. Then
$T$ is called a {\em measure chain} (in some of the literature $T$ is
called a {\em time scale}).

\begin{definition}
For  $t < \sup T$ and $r > \inf T$, we define the {\em forward jump
operator}, $\sigma,$ and the {\em backward jump operator}, $\rho,$
respectively, by 
\begin{gather*}
	\sigma(t) = \inf \{\tau \in T \ |\ \tau > t \} \in T, \\
	\rho(r) = \sup \{\tau \in T \ |\ \tau < r \} \in T,
\end{gather*}
for all $t, r \in T.$ If $\sigma(t) > t, \ t$ is said to be {\em right
scattered}, and if $\sigma(t) = t, \ t$ is said to be {\em right dense}. If
$\rho(r) < r, \ r$ is said to be {\em left scattered}, and if $\rho(r) = r,
\ r$ is said to be {\em left dense}.
\end{definition}
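
To fix ideas, consider for example the measure chain $T = [0,1] \cup \{2\}$.
Every point $t \in [0,1)$ is right dense, since $\sigma(t) = t$; the point
$t = 1$ is right scattered, since $\sigma(1) = 2$; the point $r = 2$ is left
scattered, since $\rho(2) = 1$; and every point $r \in (0,1]$ is left dense.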

\begin{definition}
For $x:T \to \mathbb{R}$ and $t \in T$ (assume $t$ is not left scattered if $t
= \sup T$), we define the {\em delta derivative} of $x(t)$, $x^\Delta (t)$,
to be the number (when it exists) with the property that, for each
$\varepsilon > 0$, there is a neighborhood $U$ of $t$ such that
\begin{displaymath}
	\Big\vert[x(\sigma(t)) - x(s)] - x^\Delta (t) [\sigma(t) - s]\Big\vert
\leq \varepsilon \Big\vert\sigma(t) - s\Big\vert,
\end{displaymath}
for all $s \in U$. Higher order delta derivatives are defined recursively by
\begin{displaymath}
	x^{\Delta_n}(t) = (x^{\Delta_{n-1}})^\Delta(t).
\end{displaymath}
\end{definition}
For convenience, we will use the notation $x^{\Delta_0}(t)$ to represent
the function $x(t)$. That is, $x^{\Delta_0}(t) = x(t)$.

{\bf Remarks:} If $x:T \to \mathbb{R}$ is continuous at $t \in T,$ $t < \sup
T,$ and $t$ is right scattered, then 
\begin{displaymath}
	x^\Delta (t) = \frac {x(\sigma (t)) - x(t)}{\sigma (t) - t}.
\end{displaymath}
In particular, if $T = \mathbb{Z},$ the integers, then
\begin{displaymath}
	x^\Delta (t) = \Delta x(t) = x(t+1) - x(t),
\end{displaymath}
whereas, if $t$ is right dense, then
\begin{displaymath}
	x^\Delta (t) = x'(t).
\end{displaymath}
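
For another example, on the measure chain $T = q^{{\mathbb N}_0} = \{1, q,
q^2, \dots\}$ with $q > 1$, every point is right scattered with $\sigma(t) =
qt$, and the delta derivative reduces to the $q$-difference quotient
\begin{displaymath}
	x^\Delta (t) = \frac{x(qt) - x(t)}{(q-1)t}.
\end{displaymath}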

Let $a, b \in T$. We define the closed interval $[a, b]$ by $[a,b] = \{ t
\in T| a \leq t \leq b\}$. Other closed, open, and half-open intervals in
$T$ are similarly defined.
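For example, if $T = {\mathbb Z}$ and $a \leq b$ are integers, then $[a, b] =
\{a, a+1, \dots, b\}$.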

We consider solutions of the $\Delta$-differential equation
\begin{equation}\label{de}
	u^{\Delta_n}(t) = f(t, u(\sigma(t)), u^{\Delta}(\sigma(t)), \dots ,
u^{\Delta_{n-1}}(\sigma(t))), \quad t \in T.
\end{equation}
We will assume throughout that
\begin{itemize}
\item[(A)] $f(t, x_1, x_2, \dots , x_n)\!: T \times {\mathbb R}^n \to {\mathbb
R}$ is continuous.
\end{itemize}
At times we will need to assume that
\begin{itemize}
\item[(B)] $\frac{\partial f}{\partial x_i}(t, x_1, x_2, \dots , x_n)\!: T
\times {\mathbb R}^n \to {\mathbb R}$ is continuous, $1 \leq i \leq n$. 
\end{itemize}
Given a solution, $u(t)$, of (\ref{de}), we will also have need of the {\em
variational equation along $u(t)$},
\begin{equation}\label{ve}
	z^{\Delta_n}(t) = \sum_{i=1}^{n} \frac{\partial f}{\partial x_i}(t,
u(\sigma(t)), u^{\Delta}(\sigma(t)), \dots , u^{\Delta_{n-1}}(\sigma(t)))
z^{\Delta_{i-1}}(\sigma(t)).
\end{equation}
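
For example, if $n = 2$ and $f(t, x_1, x_2) = x_1^2 + x_2$, then (\ref{de})
reads $u^{\Delta_2}(t) = (u(\sigma(t)))^2 + u^{\Delta}(\sigma(t))$, and the
variational equation along a solution $u(t)$ is
\begin{displaymath}
	z^{\Delta_2}(t) = 2\, u(\sigma(t))\, z(\sigma(t)) + z^{\Delta}(\sigma(t)).
\end{displaymath}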

In Section 2 we state two results for solutions of initial-value problems
of (\ref{de}). The first result is that solutions of initial-value problems
depend continuously on initial data provided condition (A) holds. The
second result states that if conditions (A) and (B) hold, then solutions of
initial-value problems can be differentiated with respect to initial values.

In Section 3 we state our main results, which are analogues of the theorems
in Section 2 for $n$-point conjugate boundary-value problems. The proofs of
these theorems depend on the uniqueness of solutions of conjugate
boundary-value problems.

\section{Smoothness with Respect to Initial Values}


In this section we present theorems on continuous dependence and smoothness
of solutions of initial-value problems with respect to initial values. The
$\Delta$-differential equation (\ref{de}) together with the conditions
\begin{equation}\label{ic}
	u^{\Delta_i}(t_0) = c_{i+1}, \, 0 \leq i \leq n-1,
\end{equation}
where $t_0 \in T$, is called an initial-value problem. The authors in
\cite{kls} have shown that, under a condition weaker than (A), initial-value
problems of the form (\ref{de}), (\ref{ic}) have unique solutions.
Furthermore, they have shown that the solution of the initial-value problem
(\ref{de}), (\ref{ic}) depends continuously on the initial values under this
weaker condition. Theorem \ref{cdic} is similar to the theorem on continuous
dependence presented in \cite{kls}.

\begin{theorem}[Continuous Dependence on Initial Values]\label{cdic}
Suppose that condition (A) is satisfied. Let $u(t; t_0, c_1, c_2, \dots,
c_n)$ be the solution of (\ref{de}), (\ref{ic}) where $t_0 \in T$ and $c_1,
c_2, \dots, c_n \in {\mathbb R}$. Then for each $\varepsilon > 0$ and each
$\tau$ such that $t_0 + \tau \in T$, there exists a $\delta(\varepsilon, t_0,
\tau, c_1, \dots, c_n) > 0$ such that if $|c_i - d_i| < \delta$, $1 \leq i
\leq n$, then
\begin{displaymath}
	|u(t; t_0, c_1, c_2, \dots, c_n) - u(t; t_0, d_1, d_2, \dots, d_n)| <
\varepsilon
\end{displaymath}
for all $t \in [t_0, t_0 + \tau]$.
\end{theorem}

\begin{theorem}\label{scic}
	Assume that conditions (A) and (B) are satisfied. Let $u(t) = u(t; t_0,
c_1, c_2, \dots, c_n)$ denote the solution of the initial-value problem
(\ref{de}), (\ref{ic}) where $t_0 \in T$ and $c_1, c_2, \dots, c_n \in {\mathbb
R}$. Then, given $\gamma_1, \dots , \gamma_n \in {\mathbb R}$, for each $1
\leq j \leq n$
\begin{displaymath}
	\beta_j(t) = \frac{\partial u}{\partial c_j}(t;t_0, \gamma_1, \dots,
\gamma_n)
\end{displaymath}
exists and is the solution of the variational equation
\begin{eqnarray*}
	\beta_j^{\Delta_n}(t) & = & \sum_{i=1}^{n} \frac{\partial f}{\partial
x_i}(t, u(\sigma(t); t_0, \gamma_1, \dots, \gamma_n), u^{\Delta}(\sigma(t);
t_0, \gamma_1, \dots, \gamma_n),\\
	&& \quad \quad \quad \quad \dots , u^{\Delta_{n-1}}(\sigma(t); t_0,
\gamma_1, \dots, \gamma_n)) \beta_j^{\Delta_{i-1}}(\sigma(t))
\end{eqnarray*}
and satisfies
\begin{displaymath}
	\beta_j^{\Delta_{i-1}}(t_0) = \delta_{i,j}, \quad 1 \leq i \leq n,
\end{displaymath}
where
\begin{displaymath}
	\delta_{i,j} = \left \{ \begin{array}{cc}
			1, & i = j,\\
			0, & i \neq j.
		\end{array} \right . 
\end{displaymath}
\end{theorem}
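
As a simple illustration of Theorem \ref{scic}, take $T = {\mathbb Z}$, $n =
1$, and $f(t, x_1) = a x_1$ with $a \neq 1$ a constant. The initial-value
problem $u^{\Delta}(t) = a\, u(\sigma(t))$, $u(t_0) = c_1$, has the solution
$u(t; t_0, c_1) = c_1 (1-a)^{-(t - t_0)}$ for $t \geq t_0$, and
\begin{displaymath}
	\beta_1(t) = \frac{\partial u}{\partial c_1}(t; t_0, c_1) =
(1-a)^{-(t - t_0)},
\end{displaymath}
which is again a solution of the (linear) equation and satisfies
$\beta_1(t_0) = 1$.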

\section{Smoothness with Respect to Boundary Values}

In this section we state and prove analogues to Theorems \ref{cdic} and
\ref{scic} for $n$-point  conjugate boundary-value problems.

\begin{definition} Let $t_1 < t_2 < \cdots < t_n \in T$ and let $u_1, u_2,
\dots, u_n \in {\mathbb R}$. The problem of finding a solution of (\ref{de}) satisfying
\begin{equation}\label{bc}
	u(t_i) = u_i, \, 1 \leq i \leq n,
\end{equation}
is called an {\em $n$-point conjugate boundary-value problem}.
\end{definition}
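
For example, when $T$ is a closed interval of real numbers, $\sigma(t) = t$
for every $t < \sup T$, so that the delta derivatives are the ordinary
derivatives, and for $n = 2$ the problem (\ref{de}), (\ref{bc}) is the
familiar two-point conjugate problem
\begin{displaymath}
	u'' = f(t, u, u'), \quad u(t_1) = u_1, \quad u(t_2) = u_2.
\end{displaymath}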

We give some conditions characterizing disconjugacy for linear
$\Delta$-differential equations in terms of {\em generalized zeros}. These
conditions parallel those given by Hartman \cite{PH} for the disconjugacy
of difference equations.

\begin{definition} Let $u\!: T \to {\mathbb R}$. We say that $u$ has a {\em
generalized zero} at $t_0$ if either $u(t_0) = 0$ or if there is a $k \in
{\mathbb N}$ such that $(-1)^k u(\rho^k(t_0)) u(t_0) > 0$ and $u(\rho(t_0)) =
u(\rho^2(t_0)) = \cdots = u(\rho^{k-1}(t_0)) = 0$.
\end{definition}
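
For example, let $T = {\mathbb Z}$, so that $\rho(t_0) = t_0 - 1$. If
$u(t_0 - 1) > 0$ and $u(t_0) < 0$, then taking $k = 1$ we have
$(-1)^1 u(\rho(t_0)) u(t_0) = -u(t_0 - 1) u(t_0) > 0$, and so $u$ has a
generalized zero at $t_0$ even though $u(t_0) \neq 0$.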

\vspace{0.1in}

\begin{definition}\label{def1} The nonlinear $\Delta$-differential equation
(\ref{de}) is said to be $n$-point {\em disconjugate on $T$} provided that
whenever $u(t)$ and $v(t)$ are solutions of (\ref{de}) such that $u(t) -
v(t)$ has $n$ generalized zeros at $t_1 < t_2 < \cdots < t_n \in T$, then
$u(t) - v(t) \equiv 0$ on $[t_1, +\infty)$.
\end{definition}

In the case when (\ref{de}) is linear, say
\begin{equation}\label{altde}
	v^{\Delta_n}(t) = \sum_{i=1}^n \alpha_i(t) v^{\Delta_{i-1}}(\sigma(t)),
\end{equation}
where $\alpha_i: T \to {\mathbb R}, \, 1 \leq i \leq n$, we may reformulate
Definition \ref{def1} as follows.

\begin{definition}
The linear equation (\ref{altde}) is said to be {\em $n$-point disconjugate
on $T$} provided no nontrivial solution $u$ of (\ref{altde}) has
$n$ generalized zeros on $T$.
\end{definition}
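
For example, let $T = {\mathbb Z}$ and $n = 2$, and consider the linear
equation $v^{\Delta_2}(t) = 0$, whose solutions are $v(t) = A + Bt$. A
nontrivial solution of this form crosses zero at most once, and a direct
check of the definition shows that it has at most one generalized zero on
${\mathbb Z}$; hence $v^{\Delta_2}(t) = 0$ is $2$-point disconjugate on
${\mathbb Z}$.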

We adopt the following notation to distinguish initial-value problems from
boundary-value problems. Given $t_0 \in T$ and $c_1, \dots, c_n \in {\mathbb
R}$, let $v(t) = v(t; t_0, c_1, \dots, c_n)$ denote the solution of the
initial-value problem (\ref{de}), (\ref{ic}). Given $t_1, \dots, t_n \in T$
and $u_1, \dots, u_n \in {\mathbb R}$, let $u(t) = u(t; t_1, \dots, t_n, u_1,
\dots, u_n)$ denote the solution of the boundary-value problem (\ref{de}),
(\ref{bc}).

We will use the Brouwer Theorem on Invariance of Domain, Theorem \ref{bt}
below, to prove that solutions of (\ref{de}) depend continuously on the
boundary values when (\ref{de}) is $n$-point disconjugate. To show that
solutions of (\ref{de}) depend smoothly on the boundary values, we must
further assume that the variational equation, (\ref{ve}), is $n$-point
disconjugate.

\begin{theorem}\label{bt}
If $U$ is an open subset of ${\mathbb R}^n$, $n$ dimensional Euclidean space,
and $\varphi: U \to {\mathbb R}^n$ is one-to-one and continuous on $U$ then
$\varphi$ is a homeomorphism and $\varphi(U)$ is an open subset of ${\mathbb
R}^n$.
\end{theorem}

\vspace{0.1in}


\begin{theorem}[Continuous Dependence on Boundary Values]\label{cdbc}
Suppose that condition (A) is satisfied and that (\ref{de}) is $n$-point
disconjugate on $T$. Let $y(t)$ be a solution of (\ref{de}) on $[t_1,
+\infty)$ and let $t_1 < t_2 < \cdots < t_n \in T$ be given. Then there
exists an $\varepsilon > 0$ such that if $\gamma_i \in {\mathbb R}$ with
$|\gamma_i| < \varepsilon$, $1 \leq i \leq n$, then the
boundary-value problem (\ref{de}) satisfying
\begin{displaymath}
	u(t_i) = y(t_i) + \gamma_i, \quad 1 \leq i \leq n, 
\end{displaymath}
has a unique solution $u(t; t_1, \dots, t_n, y(t_1) + \gamma_1, \dots,
y(t_n) + \gamma_n)$. Furthermore, $u(t; t_1, \dots, t_n$, $y(t_1) +
\gamma_1, \dots, y(t_n) + \gamma_n)$ converges to $y(t)$ as $\gamma_i \to 0$,
$1 \leq i \leq n$.
\end{theorem}

\noindent
{\bf Proof:} Let $t_1 < t_2 < \cdots < t_n \in T$ be given and define a
mapping $\varphi: {\mathbb R}^n \to {\mathbb R}^n$ by $\varphi(c_1, c_2, \dots,
c_n) = ( v(t_1), v(t_2), \dots , v(t_n) )$ where $v(t) = v(t; t_1, c_1,
\dots, c_n)$ is the solution of (\ref{de}) satisfying the initial
conditions
\begin{displaymath}
	v^{\Delta_{i-1}}(t_1) = c_i, \quad 1 \leq i \leq n.
\end{displaymath}
We will show that $\varphi$ is one-to-one and continuous. It will then
follow from Theorem \ref{bt} that $\varphi$ is a homeomorphism.

Suppose that $\varphi(c_1, c_2, \dots, c_n) = \varphi(c'_1, c'_2, \dots,
c'_n)$. Then,
\begin{eqnarray*}
	&&(v(t_1; t_1, c_1, \dots , c_n), v(t_2; t_1, c_1, \dots , c_n), \dots ,
v(t_n; t_1, c_1, \dots , c_n)) \\
	&& \quad \quad = (v(t_1; t_1, c'_1, \dots , c'_n), v(t_2; t_1, c'_1, \dots
, c'_n), \dots , v(t_n; t_1, c'_1, \dots , c'_n)).
\end{eqnarray*}
Now, equation (\ref{de}) is $n$-point disconjugate on $T$, and hence
solutions of (\ref{de}), (\ref{bc}) are unique. Thus, for all $t \in
[t_1, +\infty)$ we have
\begin{displaymath}
	v(t;t_1, c_1, \dots, c_n) = v(t;t_1, c'_1, \dots, c'_n).
\end{displaymath}
In particular,
\begin{displaymath}
	v^{\Delta_{i-1}}(t_1;t_1, c_1, \dots, c_n) = v^{\Delta_{i-1}}(t_1;t_1,
c'_1, \dots, c'_n), \, 1 \leq i \leq n.
\end{displaymath}
Recalling our notation, we see that $(c_1, c_2, \dots, c_n) = (c'_1, c'_2,
\dots, c'_n)$. Hence $\varphi$ is one-to-one.

To show that $\varphi$ is continuous we consider a sequence
$\{\!(c^{\ell}_1, c^{\ell}_2, \dots, c^{\ell}_n)\!\}_{\ell = 1}^{\infty}$
which converges to $(c_1, c_2, \dots, c_n)$ as $\ell \to \infty$. By the
continuous dependence on initial values, Theorem \ref{cdic}, $v(t;t_1,
c^{\ell}_1, \dots, c^{\ell}_n) \to v(t;t_1, c_1, \dots, c_n)$ for all $t
\in [t_1, +\infty)$ as $\ell \to \infty$. That is,
\begin{displaymath}
	\lim_{\ell \to \infty} v(t;t_1, c^{\ell}_1, \dots, c^{\ell}_n) = v(t;t_1,
c_1, \dots, c_n).
\end{displaymath}
Thus, $\{\!\varphi(c^{\ell}_1, c^{\ell}_2, \dots, c^{\ell}_n)\!\}$
converges to $\varphi(c_1, c_2, \dots, c_n)$ as $\ell \to \infty$ and so
$\varphi$ is continuous. By the Brouwer Theorem on Invariance of Domain,
$\varphi$ is a homeomorphism onto its range, $\varphi({\mathbb R}^n)$, and
$\varphi({\mathbb R}^n)$ is open in ${\mathbb R}^n$.

Let $y(t)$ be a solution of (\ref{de}). Then $(y(t_1), \dots, y(t_n)) \in
\varphi({\mathbb R}^n)$. Since $\varphi({\mathbb R}^n)$ is open, there exists an
$\varepsilon > 0$ such that if $|\gamma_i| < \varepsilon, \, 1 \leq i \leq
n,$ then $(y(t_1) + \gamma_1, \dots, y(t_n) + \gamma_n) \in \varphi({\mathbb
R}^n)$. Since $\varphi$ is one-to-one there exists a unique $r = (r_1,
\dots , r_n) \in {\mathbb R}^n$ such that $\varphi(r_1, \dots , r_n) = (y(t_1)
+ \gamma_1, \dots, y(t_n) + \gamma_n)$. By our definition,
\begin{displaymath}
	\varphi(r_1, \dots , r_n) = (v(t_1;t_1, r_1, \dots, r_n), \dots,
v(t_n;t_1, r_1, \dots, r_n))
\end{displaymath}
where $v(t;t_1, r_1, \dots, r_n)$ is the solution of (\ref{de}) satisfying
the initial conditions
\begin{displaymath}
	v^{\Delta_{i-1}}(t_1) = r_{i}, \quad 1 \leq i \leq n.
\end{displaymath}
Thus,
\begin{displaymath}
	(y(t_1) + \gamma_1, \dots, y(t_n) + \gamma_n) = (v(t_1;t_1, r_1, \dots,
r_n), \dots, v(t_n;t_1, r_1, \dots, r_n)).
\end{displaymath}
That is, $v(t;t_1, r_1, \dots, r_n)$ is the solution of (\ref{de})
satisfying the boundary conditions,
\begin{displaymath}
	v(t_i;t_1, r_1, \dots, r_n) = y(t_i) + \gamma_i, \quad 1 \leq i \leq n.
\end{displaymath}

Now consider a sequence $\{(y(t_1) + \gamma^{\ell}_1, \dots, y(t_n) +
\gamma^{\ell}_n)\}_{\ell = 1}^{\infty} \subset \varphi( {\mathbb R}^n)$ where
$|\gamma^{\ell}_i| < \varepsilon$, $1 \leq i \leq n$ and
\begin{displaymath}
	\lim_{\ell \to \infty} (y(t_1) + \gamma^{\ell}_1, \dots, y(t_n) +
\gamma^{\ell}_n) = (y(t_1), \dots, y(t_n)).
\end{displaymath}
Let
\begin{displaymath}
	u_{\ell}(t) = u(t; t_1, \dots, t_n, y(t_1) + \gamma^{\ell}_1, \dots,
y(t_n) + \gamma^{\ell}_n).
\end{displaymath}
Since $\varphi$ is a homeomorphism, $\varphi^{-1}$ is continuous, and so,
\begin{eqnarray*}
	\lim_{\ell \to \infty} \varphi^{-1}( u_{\ell}(t_1), \dots , u_{\ell}(t_n))
	& = & \lim_{\ell \to \infty} \varphi^{-1}(y(t_1) + \gamma^{\ell}_1, \dots,
y(t_n) + \gamma^{\ell}_n)\\
	& = & \varphi^{-1}(\lim_{\ell \to \infty} y(t_1) + \gamma^{\ell}_1, \dots,
\lim_{\ell \to \infty} y(t_n) + \gamma^{\ell}_n)\\ 
	& = & \varphi^{-1}(y(t_1), \dots,  y(t_n)).
\end{eqnarray*}

That is, the initial values of $u_{\ell}(t)$ converge to the initial values
of $y(t)$. By Theorem \ref{cdic}, $u_{\ell}(t)$ converges uniformly to
$y(t)$ on each compact subset of $[t_1, +\infty)$. Thus, $u(t; t_1, \dots,
t_n, y(t_1) + \gamma^{\ell}_1, \dots, y(t_n) + \gamma^{\ell}_n)$ converges
to $y(t)$ as $\ell \to \infty$, and the proof is complete.

\begin{theorem}
Assume that $f$ satisfies (A) and (B), that (\ref{de}) is $n$-point
disconjugate on $T$, and that the variational equation (\ref{ve}) is
$n$-point disconjugate along all solutions of (\ref{de}). Let $u(t) = u(t;
t_1, \dots, t_n, u_1, \dots, u_n)$ be the solution of (\ref{de}), (\ref{bc})
on $[t_1, +\infty)$. Then, for $1 \leq j \leq n$, $\frac{\partial u}{\partial
u_j}$ exists on $[t_1, +\infty)$ and $z_j(t) = \frac{\partial u}{\partial
u_j}$ is the solution of the variational equation (\ref{ve}) along $u(t)$
and satisfies
\begin{displaymath}
	z_j(t_i) = \delta_{ij}, \quad 1 \leq i \leq n.
\end{displaymath}
\end{theorem}

\noindent
{\bf Proof:} Fix $j$, $1 \leq j \leq n$. Let $\varepsilon > 0$ be as in
Theorem \ref{cdbc} and let $h$ be such that $0 < |h| < \varepsilon$. Define
the difference quotient 
\begin{displaymath}
	z_{jh}(t) = \frac{1}{h}[ u(t; t_1, \dots, t_n, u_1, \dots, u_j + h, \dots,
u_n) - u(t; t_1, \dots, t_n, u_1, \dots, u_n)].
\end{displaymath}
It suffices to show that $\lim_{h \to 0} z_{jh}(t)$ exists on $[t_1,
+\infty)$. Note that for all $h \neq 0$,
\begin{displaymath}
	z_{jh}(t_i) = \delta_{ij}, \quad 1 \leq i \leq n.
\end{displaymath}

For each $2 \leq i \leq n$, define $\alpha_i = u^{\Delta_{i-1}}(t_j; t_1,
\dots , t_n, u_1, \dots, u_n)$ and $\varepsilon_i = \varepsilon_i(h) =
u^{\Delta_{i-1}}(t_j; t_1, \dots, t_n, u_1, \dots, u_j + h, \dots, u_n) -
\alpha_i$. Recalling our notation, we see that $u(t_j; t_1, \dots, t_n,$
$u_1, \dots, u_j + h, \dots, u_n) = u_j + h$ and $u(t_j; t_1, \dots, t_n,
u_1, \dots, u_n) = u_j$. As a consequence of Theorem \ref{cdbc},
$\varepsilon_i \to 0$ as $h \to 0$ for $2 \leq i \leq n$.

Recall that $v(t; t_j, v_1, v_2, \dots, v_n)$ is the solution of (\ref{de})
satisfying the initial conditions
\begin{displaymath}
	v^{\Delta_{i-1}}(t_j) = v_i, \quad 1 \leq i \leq n.
\end{displaymath}
In particular, $v(t; t_j, u_j, \alpha_2, \dots, \alpha_n)$ is the solution
of (\ref{de}) satisfying $v(t_j) = u_j$ and for $2 \leq i \leq n, \,
v^{\Delta_{i-1}}(t_j) = \alpha_i$. Likewise $v(t; t_j, u_j + h, \alpha_2 +
\varepsilon_2, \dots, \alpha_n + \varepsilon_n)$ is the solution of
(\ref{de}) satisfying $v(t_j) = u_j + h$ and for $2 \leq i \leq n, \,
v^{\Delta_{i-1}}(t_j) = \alpha_i  + \varepsilon_i$. Since solutions of
initial-value problems are unique, $v(t; t_j, u_j, \alpha_2, \dots,
\alpha_n) = u(t; t_1, \dots, t_n, u_1, \dots, u_n)$. Similarly, we have
$v(t; t_j, u_j + h, \alpha_2 + \varepsilon_2, \dots, \alpha_n +
\varepsilon_n) = u(t; t_1, \dots, t_n,$ $u_1, \dots, u_j + h, \dots, u_n)$.

Using a telescoping sum, we have
\begin{eqnarray*}
\lefteqn{z_{jh}(t)}\\
 & = & \frac{1}{h}[u(t; t_1, \dots, t_n, u_1, \dots, u_j + h,
\dots, u_n) - u(t; t_1, \dots, t_n, u_1, \dots, u_n)]\\
		& = & \frac{1}{h}[v(t; t_j, u_j + h, \alpha_2 + \varepsilon_2, \dots,
\alpha_n + \varepsilon_n) - v(t; t_j, u_j, \alpha_2, \dots, \alpha_n)]\\
		& = & \frac{1}{h}\big[[v(t; t_j, u_j + h, \alpha_2 + \varepsilon_2, \dots,
\alpha_n + \varepsilon_n) - v(t; t_j, u_j, \alpha_2 + \varepsilon_2, \dots,
\alpha_n + \varepsilon_n)]\\
		&   & + [v(t; t_j, u_j, \alpha_2 + \varepsilon_2, \dots, \alpha_n +
\varepsilon_n) - v(t; t_j, u_j, \alpha_2, \alpha_3 + \varepsilon_3, \dots,
\alpha_n + \varepsilon_n)]\\
		&   & + \dots
		 + [v(t; t_j, u_j, \alpha_2, \dots, \alpha_{n-1}, \alpha_n +
\varepsilon_n) - v(t; t_j, u_j, \alpha_2 , \dots, \alpha_{n-1},
\alpha_n)]\big].
\end{eqnarray*}
By Theorem \ref{scic}, solutions of (\ref{de}) can be differentiated with
respect to initial values. That is $\beta_1 = \frac{\partial v}{\partial
v_1}, \beta_2 = \frac{\partial v}{\partial v_2}, \dots, \beta_n =
\frac{\partial v}{\partial v_n}$ exist. By Theorem \ref{scic} and the Mean
Value Theorem, we see that
\begin{eqnarray}
	z_{jh}(t) & = & \frac{1}{h}[\beta_1(t; v(t; t_j, u_j + \bar{h}, \alpha_2 +
\varepsilon_2, \dots, \alpha_n + \varepsilon_n))h \label{eq1}\\
		&   & + \, \, \beta_2(t; v(t; t_j, u_j, \alpha_2 + \bar{\varepsilon}_2,
\dots, \alpha_n + \varepsilon_n))\varepsilon_2\nonumber\\
		&   & + \, \dots \, + \, \, \beta_n(t; v(t; t_j, u_j, \alpha_2, \dots,
\alpha_n + \bar{\varepsilon}_n))\varepsilon_n]\nonumber
\end{eqnarray}
where
\begin{eqnarray*}
	&&\beta_1(t; v(t; t_j, u_j + \bar{h}, \alpha_2 + \varepsilon_2, \dots,
\alpha_n + \varepsilon_n))\\
	&& \hspace{1.6in} = \frac{\partial v}{\partial v_1} (t; t_j, u_j +
\bar{h}, \alpha_2 + \varepsilon_2, \dots, \alpha_n + \varepsilon_n)\\
	&&\hspace{0.4in} \vdots \\
	&&\beta_n(t; v(t; t_j, u_j, \alpha_2, \dots, \alpha_n +
\bar{\varepsilon}_n)) = \frac{\partial v}{\partial v_n} (t; t_j, u_j,
\alpha_2, \dots, \alpha_n + \bar{\varepsilon}_n)
\end{eqnarray*}
and $\bar{h}$ is between $0$ and $h$ and $\bar{\varepsilon}_{\ell}$ is
between $0$ and $\varepsilon_{\ell}$, $2 \leq \ell \leq n$. That is,
$\beta_1(t; v(t; t_j, u_j + \bar{h}, \alpha_2 + \varepsilon_2, \dots,
\alpha_n + \varepsilon_n))$ is the solution of the variational equation
(\ref{ve}) along $v(t; t_j, u_j + \bar{h}, \alpha_2 + \varepsilon_2, \dots,
\alpha_n + \varepsilon_n)$ satisfying $\beta_1^{\Delta_{i-1}}(t_j) =
\delta_{i1}$, $1 \leq i \leq n$. For $2 \leq \ell \leq n$, $\beta_\ell(t;
v(t; t_j, u_j, \alpha_2, \dots, \alpha_{\ell-1}, \alpha_\ell +
\bar{\varepsilon}_\ell, \alpha_{\ell+1} + \varepsilon_{\ell+1}, \dots,
\alpha_n + \varepsilon_n))$ is the solution of the variational equation
(\ref{ve}) along $v(t; t_j, u_j, \alpha_2, \dots, \alpha_{\ell-1},
\alpha_\ell + \bar{\varepsilon}_\ell, \alpha_{\ell+1} +
\varepsilon_{\ell+1}, \dots, \alpha_n + \varepsilon_n)$ satisfying
$\beta_\ell^{\Delta_{i-1}}(t_j) = \delta_{i\ell}$, $1 \leq i \leq n$. In
particular, note that
\begin{displaymath}
	\beta_2(t_j) = \cdots = \beta_n(t_j) = 0.
\end{displaymath}

Distributing the factor $\frac{1}{h}$ in equation (\ref{eq1}), we obtain
\begin{eqnarray}
	z_{jh}(t) & = & \beta_1(t; v(t; t_j, u_j + \bar{h}, \alpha_2 +
\varepsilon_2, \dots, \alpha_n + \varepsilon_n))\label{eq2}\\
		&   & + \, \beta_2(t; v(t; t_j, u_j, \alpha_2 + \bar{\varepsilon}_2,
\dots, \alpha_n + \varepsilon_n))\frac{\varepsilon_2}{h}\nonumber\\
		&   & + \dots + \beta_n(t; v(t; t_j, u_j, \alpha_2, \dots, \alpha_n +
\bar{\varepsilon}_n))\frac{\varepsilon_n}{h}.\nonumber
\end{eqnarray}
To show that $\lim_{h \to 0} z_{jh}(t)$ exists, it suffices to show that
$\lim_{h \to 0} \frac{\varepsilon_\ell}{h}$ exists for $2 \leq \ell \leq n$.

Recall that $z_{jh}(t_1) = \cdots = z_{jh}(t_{j-1}) = z_{jh}(t_{j+1}) =
\cdots = z_{jh}(t_n) = 0$. Evaluate (\ref{eq2}) at $t_\ell$, $1 \leq \ell
\leq n$, $\ell \neq j$, to obtain the system of equations
\begin{eqnarray*}
	&&-\beta_1(t_\ell; v(t; t_j, u_j + \bar{h}, \alpha_2 + \varepsilon_2,
\dots, \alpha_n + \varepsilon_n))\\
	&&\quad \quad \quad = \beta_2 (t_\ell; v(t; t_j, u_j, \alpha_2 +
\bar{\varepsilon}_2, \dots, \alpha_n +
\varepsilon_n))\frac{\varepsilon_2}{h}\\
	&&\quad \quad \quad + \cdots + \beta_n (t_\ell; v(t; t_j, u_j, \alpha_2,
\dots, \alpha_n +  \bar{\varepsilon}_n))\frac{\varepsilon_n}{h}, \quad 1 \leq
\ell \leq n, \, \ell \neq j.
\end{eqnarray*}
This is a system of $n-1$ equations in the $n-1$ unknowns
$\frac{\varepsilon_2}{h}, \frac{\varepsilon_3}{h}, \dots ,
\frac{\varepsilon_n}{h}$.

By Cramer's rule we have (after suppressing the variable dependence in $v(
\cdot )$),
\begin{eqnarray*}
	\frac{\varepsilon_2}{h} & = & \frac{\left | \begin{array}{cccc}
	-\beta_1(t_1; v(\cdot)) & \beta_3(t_1; v(\cdot)) & \cdots & \beta_n(t_1;
v(\cdot))\\
	\vdots & \vdots & & \vdots\\
	-\beta_1(t_{j-1}; v(\cdot)) & \beta_3(t_{j-1}; v(\cdot)) & \cdots &
\beta_n(t_{j-1}; v(\cdot))\\
	-\beta_1(t_{j+1}; v(\cdot)) & \beta_3(t_{j+1}; v(\cdot)) & \cdots &
\beta_n(t_{j+1}; v(\cdot))\\
	\vdots & \vdots & & \vdots\\
	-\beta_1(t_n; v(\cdot)) & \beta_3(t_n; v(\cdot)) & \cdots & \beta_n(t_n;
v(\cdot))\\
		\end{array} \right |}{D(h)},\\
	\\
	& \vdots &\\
	\\
	\frac{\varepsilon_n}{h} & = & \frac{\left | \begin{array}{cccc}
	\beta_2(t_1; v(\cdot)) & \beta_3(t_1; v(\cdot)) & \cdots & -\beta_1(t_1;
v(\cdot))\\
	\vdots & \vdots & & \vdots\\
	\beta_2(t_{j-1}; v(\cdot)) & \beta_3(t_{j-1}; v(\cdot)) & \cdots &
-\beta_1(t_{j-1}; v(\cdot))\\
	\beta_2(t_{j+1}; v(\cdot)) & \beta_3(t_{j+1}; v(\cdot)) & \cdots &
-\beta_1(t_{j+1}; v(\cdot))\\
	\vdots & \vdots & & \vdots\\
	\beta_2(t_n; v(\cdot)) & \beta_3(t_n; v(\cdot)) & \cdots & -\beta_1(t_n;
v(\cdot))\\
		\end{array} \right |}{D(h)},
\end{eqnarray*}
provided that 
\begin{displaymath}
	D(h) \equiv \left | \begin{array}{cccc}
	\beta_2(t_1; v(\cdot)) & \beta_3(t_1; v(\cdot)) & \cdots & \beta_n(t_1;
v(\cdot))\\
	\vdots & \vdots & & \vdots\\
	\beta_2(t_{j-1}; v(\cdot)) & \beta_3(t_{j-1}; v(\cdot)) & \cdots &
\beta_n(t_{j-1}; v(\cdot))\\
	\beta_2(t_{j+1}; v(\cdot)) & \beta_3(t_{j+1}; v(\cdot)) & \cdots &
\beta_n(t_{j+1}; v(\cdot))\\
	\vdots & \vdots & & \vdots\\
	\beta_2(t_n; v(\cdot)) & \beta_3(t_n; v(\cdot)) & \cdots & \beta_n(t_n;
v(\cdot))\\
		\end{array} \right | \, \neq \, 0.
\end{displaymath}
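
For example, when $n = 2$ the system consists of the single equation
\begin{displaymath}
	-\beta_1(t_\ell; v(\cdot)) = \beta_2(t_\ell; v(\cdot))
\frac{\varepsilon_2}{h},
\end{displaymath}
where $\ell$ is the one index in $\{1, 2\}$ with $\ell \neq j$, so that
$\frac{\varepsilon_2}{h} = -\beta_1(t_\ell; v(\cdot))/\beta_2(t_\ell;
v(\cdot))$ provided $D(h) = \beta_2(t_\ell; v(\cdot)) \neq 0$.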

To see that $D(h) \neq 0$ for small values of $h$, consider the determinant
\begin{displaymath}
	D = \left | \begin{array}{ccc}
	\beta_2(t_1; v(t;t_j, u_j, \alpha_2, \dots, \alpha_n)) & \cdots &
\beta_n(t_1; v(t;t_j, u_j, \alpha_2, \dots, \alpha_n))\\
	\vdots & & \vdots\\
	\beta_2(t_{j-1}; v(t;t_j, u_j, \alpha_2, \dots, \alpha_n)) & \cdots &
\beta_n(t_{j-1}; v(t;t_j, u_j, \alpha_2, \dots, \alpha_n))\\
	\beta_2(t_{j+1}; v(t;t_j, u_j, \alpha_2, \dots, \alpha_n)) & \cdots &
\beta_n(t_{j+1}; v(t;t_j, u_j, \alpha_2, \dots, \alpha_n))\\
	\vdots & & \vdots\\
	\beta_2(t_n; v(t;t_j, u_j, \alpha_2, \dots, \alpha_n)) & \cdots &
\beta_n(t_n; v(t;t_j, u_j, \alpha_2, \dots, \alpha_n))\\
		\end{array} \right |.
\end{displaymath}
If $D = 0$ then there exists a set of numbers $r_2, \dots, r_n$, at least
one of which is nonzero, such that 
\begin{displaymath}
	\gamma(t) = \sum_{\ell = 2}^n r_\ell \beta_\ell(t; v(t;t_j, u_j, \alpha_2,
\dots, \alpha_n))
\end{displaymath}
is a nontrivial solution of (\ref{ve}) along $v(t;t_j, u_j, \alpha_2,
\dots, \alpha_n)$ that vanishes at $t = t_1, \dots, t_{j-1}, t_{j+1},
\dots, t_n$. Since $\beta_\ell(t_j) = 0$ for $2 \leq \ell \leq n$, we have
$\gamma(t_j) = 0$. That is, $\gamma(t)$ is a nontrivial solution of
(\ref{ve}) that has $n$ generalized zeros in $T$, contradicting the
$n$-point disconjugacy of the variational equation.

Consequently $D \neq 0$. By continuity, $D(h) \neq 0$ for $h$ sufficiently
small. Thus $\lim_{h \to 0} \frac{\varepsilon_\ell}{h}$ exists for each $2
\leq \ell \leq n$. Let
\begin{displaymath}
	\lim_{h \to 0} \frac{\varepsilon_\ell}{h} = k_\ell, \, 2 \leq \ell \leq n.
\end{displaymath}
Then,
\begin{eqnarray*}
	z_j(t) & = & \lim_{h \to 0} z_{jh}(t)\\
		& = & \beta_1(t; v(t; t_j, u_j, \alpha_2, \dots , \alpha_n)) + 
		\sum_{\ell = 2}^n k_\ell \beta_\ell(t; v(t; t_j, u_j, \alpha_2, \dots ,
\alpha_n))
\end{eqnarray*}
exists. That is, $\frac{\partial u}{\partial u_j}(t; t_1, \dots, t_n, u_1,
\dots, u_n)$ exists and $z_j(t) = \frac{\partial u}{\partial u_j}$.
Furthermore, since each $\beta_\ell(t; v(t; t_j, u_j, \alpha_2, \dots ,
\alpha_n)), \, 1 \leq \ell \leq n$ is a solution of the variational
equation (\ref{ve}) along $v(t; t_j, u_j, \alpha_2, \dots , \alpha_n) =
u(t; t_1, \dots, t_n, u_1, \dots, u_n)$ then $z_j(t) = \frac{\partial
u}{\partial u_j}$ is also a solution of (\ref{ve}) along $u(t;t_1, \dots,
t_n, u_1, \dots, u_n)$. Finally we note that
\begin{displaymath}
	z_j(t_i) = \lim_{h \to 0} z_{jh}(t_i) = \delta_{ij}, \, 1 \leq i \leq n
\end{displaymath}
and the proof is complete.

\begin{thebibliography}{99}

\bibitem {rpamb} R. P. Agarwal and M. Bohner, Basic calculus on time scales
and some of its applications, {\it Results Math.} {\bf 35} (1999), 3--22.

\bibitem {da} D. Anderson, Positivity of Green's functions for $n$-point
right focal boundary-value problem on a measure chain, preprint.

\bibitem {bash} B. Aulbach and S. Hilger, Linear dynamic processes with
inhomogeneous time scale, Nonlinear Dynamics and Quantum Dynamical Systems,
{\it Math. Res.} {\bf 59}, Akademie Verlag, Berlin, 1990.

\bibitem {cdhy} C. J. Chyan, J. M. Davis, J. Henderson and W. K. C. Yin,
Eigenvalue comparisons for differential equations on a measure chain, {\it
Electronic Jour. Diff. Equns.} {\bf 35} (1998), 1--7. 

\bibitem{adjh} A. Datta and J. Henderson, Differentiation of solutions of
difference equations with respect to right focal boundary values, {\em
PanAmer. Math. J.} {\bf 2} (1992), 1--16.

\bibitem{je} J. Ehme and D. Brewley, Continuous data dependence for a
class of nonlinear boundary value problems, {\em Comm. Appl. Nonlinear
Anal.} {\bf 3} No. 2 (1996), 59--65.

\bibitem{jejh} J. Ehme and J. Henderson, Differentiation of solutions of
boundary-value problems with respect to boundary conditions, {\it Appl.
Anal.} {\bf 46} (1992), 175--194.

\bibitem {lhesh} L.H. Erbe and S. Hilger, Sturmian theory on measure
chains, {\em Differential Equations Dynam. Systems} {\bf 1} (1993), 223--246.

\bibitem {lheap1} L.H. Erbe and A. Peterson, Green's functions and
comparison theorems for differential equations on measure chains, {\em
Dynam. Contin. Discrete Impuls. Systems}, {\bf 6} (1999), 121--137.

\bibitem {lheap2} L.H. Erbe and A. Peterson, Positive solutions for a
nonlinear differential equation on a measure chain, preprint.

\bibitem {dh} D. Hankerson, {\it Boundary Value Problems for $n^{th}$ Order
Difference Equations}, Ph.D. dissertation, University of Nebraska-Lincoln,
1986.

\bibitem{PH} P. Hartman, Difference equations: disconjugacy, principal
solutions, Green's functions, complete monotonicity, {\it Trans. Amer.
Math. Soc.} {\bf 246} (1978), 1--30.

\bibitem{jh} J. Henderson, Disconjugacy, disfocality, and differentiation
with respect to boundary conditions, {\it J. Math. Anal. Appl.} {\bf 121}
No. 1 (1987), 1--9.

\bibitem{jhmhlh} J. Henderson, M. Horn and L. Howard, Differentiation of
solutions with respect to boundary values and parameters, {\em Comm. Appl.
Nonlin. Anal.} {\bf 1} (1994), 47--60.

\bibitem{jherk} J. Henderson and E. R. Kaufmann, Multipoint boundary value
problems with parameter for a system of difference equations, {\it Journal
of Difference Equations and Applications}, {\bf 1} (1995), 163--172.

\bibitem{jhll} J. Henderson and L. Lee, Continuous dependence and
differentiation of solutions of finite difference equations, {\it Internat.
J. Math. and Math. Sci.} {\bf 14} No. 4 (1991), 747--756.

\bibitem{sh} S. Hilger, Analysis on measure chains -- a unified approach to
continuous and discrete calculus, {\em Results Math.} {\bf 18} (1990), 18--56.

\bibitem{kls} V. Lakshmikantham, B. Kaymakcalan and S. Sivasundaram, {\em
Dynamical Systems on Measure Chains}, Kluwer Academic Publishers, Boston,
1996.

\bibitem{acp} A. C. Peterson, Comparison theorems and existence theorems
for ordinary differential equations, {\it J. Math. Anal. Appl.} {\bf 55}
No. 3 (1976), 773--784.

\end{thebibliography}

\end{document}

%---------- End Tex File ---------------
