\documentclass[reqno]{amsart} \AtBeginDocument{{\noindent\small {\em Electronic Journal of Differential Equations}, Vol. 2001(2001), No. 08, pp. 1--8.\newline ISSN: 1072-6691. URL: http://ejde.math.swt.edu or http://ejde.math.unt.edu \newline ftp ejde.math.swt.edu \quad ejde.math.unt.edu (login: ftp)} \thanks{\copyright 2001 Southwest Texas State University.} \vspace{1cm}} \begin{document} \title[\hfilneg EJDE--2001/08\hfil Nonsmoothing in a single conservation law] { Nonsmoothing in a single conservation law with memory } \author[G. Gripenberg\hfil EJDE--2001/08\hfilneg] { G. Gripenberg } \address{ Institute of Mathematics\\ Helsinki University of Technology, P.O. Box 1100\\ FIN-02015 HUT\\ Finland } \email{gustaf.gripenberg@hut.fi, www.math.hut.fi/$\sim$ggripenb } \date{} \thanks{Submitted September 11, 2000. Published January 11, 2001.} \subjclass{35L65, 35L67, 45K05} \keywords{conservation law, discontinuous solution, memory} \begin{abstract} It is shown that, provided the nonlinearity $\sigma$ is strictly convex, a discontinuity in the initial value $u_0(x)$ of the solution of the equation $$\frac{\partial}{\partial t} \Big( u(t,x) + \int_0^t k(t-s) (u(s,x)-u_0(x))\,\textup{d}s\Big) + \sigma(u)_x(t,x) = 0,$$ where $t>0$ and $x\in \mathbb{R}$, is not immediately smoothed out even if the memory kernel $k$ is such that the solution of the problem where $\sigma$ is a linear function is continuous for $t>0$. 
\end{abstract} \maketitle \makeatletter \numberwithin{equation}{section} \makeatother \newcommand{\defeq}{\buildrel \textup{def}\over =} % \newcommand{\aeeq}{\mathrel {\lower 1pt\hbox{\smash{$\buildrel \hbox{{\small \textup{a.e.}}}\over =$}}}} % \newcommand{\aeleq}{\mathrel {\lower 1pt\hbox{\smash{$\buildrel \hbox{{\small \textup{a.e.}}}\over \leq$}}}} % \newcounter{ConstCount} \setcounter{ConstCount}{0} \newcommand{\Const}[2]{\stepcounter{ConstCount}\xdef#2{#1_{\theConstCount}}#2} \newcommand{\abs}[1]{\mathopen\vert #1 \mathclose\vert} \newcommand{\norm}[1]{\mathopen\Vert #1 \mathclose\Vert} \newcommand{\bigpar}[1]{\bigl( #1 \bigr)} \newcommand{\Bigpar}[1]{\Bigl( #1 \Bigr)} \newcommand{\Par}[1]{\left( #1 \right)} \newcommand{\set}[1]{\lbrace\, #1 \,\rbrace} \newcommand{\nbrace}[1]{\lbrace #1 \rbrace} \newcommand{\Bigbrace}[1]{\Bigl\lbrace #1 \Bigr\rbrace} % \newtheorem{theorem}{Theorem} \section{Introduction and statement of results}\label{S:Intro} % The purpose of this paper is to show that, provided the nonlinearity $\sigma$ is strictly convex, a discontinuity in the initial value $u(0,x)=u_0(x)$ of the solution of the equation %% $$\label{E:MainEq} \frac{\partial}{\partial t} \Par{ u(t,x) + \int_0^t k(t-s)\bigpar{u(s,x)-u_0(x)}\,\textup{d} s} + \sigma(u)_x(t,x) = 0,$$ %% where $t > 0$ and $x\in \mathbb{R}$, is not immediately smoothed out. Recall that if the equation is linear, i.e., $\sigma(r)= r$, and if $k$ is nonnegative, nonincreasing, and locally integrable with $\lim_{t\downarrow 0} k(t)= +\infty$, then the solution $u$ is continuous when $t> 0$; see \cite[Thm.~3]{Pruss87}, (where the notation differs somewhat from the one used here) or see the argument in the proofs below. 
If one has $k=0$, then it is well known and easy to check that discontinuities in the initial value for the conservation law $u_t(t,x) + \sigma(u)_x(t,x) = 0$ are not smoothed out and that in fact discontinuities can appear in the solution even if the initial value is smooth provided $\sigma$ is not linear. (But there is of course no smoothing effect in the linear case.) These results can also be established in the case where $k$ is assumed to be of bounded variation (and thus bounded). On the other hand, it follows from \cite[Thm.~1, 2]{SmoConvLaw00} that the solution of the equation %% $$\label{E:OtherEq} \frac{\partial}{\partial t} \int_0^t k(t-s)\bigpar{u(s,x)-u_0(x)}\,\textup{d} s + \sigma(u)_x(t,x) = 0,$$ %% is continuous when $t> 0$ provided the initial value has bounded variation (but is not necessarily continuous), $k$ is locally integrable, nonnegative, non-increasing, log-convex, and $\lim_{t\downarrow 0}k(t)=+\infty$ and provided $\sigma$ is continuous and strictly increasing. Thus one sees that for equation \eqref{E:OtherEq}, in contrast to equation \eqref{E:MainEq}, there is not much difference between the continuity properties of the solution in the linear and the nonlinear case. This paper does not answer the question whether equation \eqref{E:MainEq} has discontinuous solutions with continuous initial values in the case $\lim_{t\downarrow 0} k(t) = +\infty$ too. Note also that the problem considered here is to a large extent only a model problem; the more interesting cases are the ones where one considers systems of conservation laws and wants to investigate the effect of memory terms. 
In particular, in certain cases these systems can be written as higher order equations and then one is led to consider the question of how the qualitative properties of the solutions change when one goes from the wave equation $u_{tt} = \sigma(u_x)_x$ to the diffusion equation $u_t=\sigma(u_x)_x$ by considering equations of the form $D_t^\alpha(u_t)= \sigma(u_x)_x$ (where $D_t^\alpha$ is a fractional derivative of order $\alpha\in (0,1)$) or of the form $\gamma u_{tt}+ D_t^\alpha(u_t)= \sigma(u_x)_x$ which can be written in the form $\frac{\partial}{\partial t} \Bigpar{\gamma u_t(t,x)+\int_0^t k(t-s)\bigpar{u_t(s,x)-u_1(x)}\,\textup{d} s} = \sigma(u_x)_x(t,x)$. For some further results on equation \eqref{E:MainEq}, see for example \cite{FeierPetze97} and \cite{FeierPetze98}. Here we prove the following result.
% NOTE(review): the statement of Theorem~\ref{T:MainThm} and the beginning of
% Theorem~\ref{T:BasicRayleighEstim} were damaged in this source file; the
% hypotheses on $\sigma$ and $u_0$, the conclusion of Theorem~\ref{T:MainThm},
% and item (\ref{I:qHypA}) below were reconstructed from the proofs in
% Section 2 -- verify against the published version.
\begin{theorem}\label{T:MainThm}
Assume that \makeatletter \renewcommand{\theenumi}{\Roman{enumi}} \makeatother
\begin{enumerate}
\item\label{I:kHyp} $k\in L^1_{\textup {loc}}(\mathbb{R}^+;\mathbb{R})$ is nonnegative, non-increasing, and absolutely continuous on $(0,\infty)$ with
\begin{equation*}
\sup_{0< t\leq 1} t^{\alpha+1}\abs{k'(t)} < \infty
\end{equation*}
for some $\alpha\in (0,1)$;
\item\label{I:sigmaHyp} $\sigma\in \mathcal{C}^1(\mathbb{R};\mathbb{R})$ is strictly convex and strictly increasing on $[0,1]$;
\item\label{I:uZeroHyp} $u_0 = \chi_{\mathbb{R}^-}$.
\end{enumerate}
%
Then there are an entropy solution $u$ of \eqref{E:MainEq} and a number $t_0> 0$ such that the function $x\mapsto u(t,x)$ is discontinuous for each $t\in [0,t_0]$.
\end{theorem}
The proof of this result relies on the following estimate.
\begin{theorem}\label{T:BasicRayleighEstim}
Assume that \makeatletter \renewcommand{\theenumi}{\Roman{enumi}} \makeatother
\begin{enumerate}
\item\label{I:qHypA} $\psi$ and $\Psi$ are nonnegative, non-increasing functions on $\mathbb{R}^+$ such that $\psi(t)\leq \Psi(t)$ for $t> 0$;
\item \label{I:qHypB}$\int_0^\infty\frac 1{1+t} \Psi(t)\,\textup{d} t < \infty$;
\item \label{I:qHypC}$\sup_{t>0} \sup_{s\geq t/2} \frac {\Psi(s)}{\Psi(t)} <\infty$;
\item \label{I:uHyp}$\phi$ is a nonnegative measurable function on $\mathbb{R}^+$ such that $\int_0^\infty \phi(t)\,\textup{d} t \leq 1$ and
%%
$$t\phi(t) = \psi (t) \eta + \int_0^t \psi (t-s)\phi(s)\,\textup{d} s,\quad t > 0,$$
%%
where $\eta \in [0,1]$.
\end{enumerate}
%
Then there are positive constants $C$ and $T$, which depend only on $\Psi$, such that
$$\phi(t) \leq C\frac {\Psi(t)} t,\quad t\geq T.$$
\end{theorem}
Note that it would be sufficient to assume that the equation holds almost everywhere, if one may change the values of $u$ on a set of measure zero.
As one easily sees the claim of this theorem is intuitively quite obvious, but unfortunately I have not been able to find a proof that does not involve too many technicalities. \section{Proofs} \begin{proof}[Proof of Theorem \ref{T:MainThm}] We may without loss of generality assume that $\sigma(0)=0$, and since we will show that $0\leq u(t,x)\leq 1$ for $t\geq 0$ and $x\in \mathbb{R}$ we may assume that $\sigma$ is strictly increasing on $\mathbb{R}$, and not just on $[0,1]$. In order to show that there is an entropy solution $u$ of \eqref{E:MainEq} such that $u(t,x) = 1$ for $t > 0$ and $x\leq 0$ and $u$ is nondecreasing in its first variable and non-increasing in its second one, we argue as follows, see \cite{CockburnGGSOL} and \cite{GGSOL95}: If we let $\mathcal{D}(A) = \set{v\in L^1(\mathbb{R}^+;\mathbb{R}) \mid \sigma(v)\in AC(\mathbb{R}^+;\mathbb{R}), v(0) = 1,\,\, \sigma(v)' \in L^1(\mathbb{R}^+;\mathbb{R})}$ and define $A(v) = \sigma(v)'$ for $v\in \mathcal{D}(A)$, then $A$ is a closed, m-accretive operator in $L^1(\mathbb{R}^+;\mathbb{R})$, that in addition satisfies certain natural monotonicity properties, see \cite[Lemma 3]{GGSOL95}. By \cite[Thm.~1]{CockburnGGSOL} there is a generalized solution $u\in \mathcal{C}(\mathbb{R}^+;L^1(\mathbb{R}^+;\mathbb{R}))$ of the equation $u'(t) + \frac {\textup{d}}{\textup{d} t}\int_0^t k(t-s) u(s)\, \textup{d} s + A(u(t)) \ni 0$, $t\geq 0$, $u(0) = 0$. By the argument used in the proof of \cite[Thm.~5]{GGSOL95} one sees that this function $u(t,x)$ is nondecreasing in its first and non-increasing in its second variable. We extend this solution as $1$ for $x\leq 0$ and then use the same argument as in the proof of \cite[Thm.~7]{CockburnGGSOL} to show that we have an entropy solution. If we replace $u$ by the function $(t,x)\mapsto u(t,\lambda x)$ we get a solution of equation \eqref{E:MainEq} where $\sigma$ has been replaced by $\frac 1\lambda \sigma$. Thus we may without loss of generality assume that $\sigma(1) < 1 < \sigma'(1)$.
Since $\sigma$ is continuously differentiable it follows that there are constants $\mu$ and $\eta\in (0,1)$ such that
%%
$$\label{E:BoundOnSigma} \sigma(y) \leq 1-\mu \leq 1\leq \sigma'(y),\quad y\in [1-\eta,1].$$
Suppose that
$$\label{E:SmoothAss} k\in \mathcal{C}^2(\mathbb{R}^+;\mathbb{R}).$$
%%
Next we shall show that in this case the solution has a discontinuity (just as in the case where $k=0$). We write \eqref{E:MainEq} as
%%
$$\label{E:MainEqNew} u_t(t,x) + \sigma(u)_x(t,x) + g(u(t,x),t,x) =0,$$
%%
where
%%
$$\label{E:DefOfG} g(v,t,x) = k(0)v + \int_0^t k'(t-s) u(s,x)\,\textup{d} s,$$
%%
and where we thus assume that the function $u$ appearing in the integral is a known function. It is clear that
%%
$$\label{E:BoundOnG} \abs{g(v,t,x)}\leq k(0),\quad g(u(t,x),t,x)\geq 0, \quad v\in [0,1],\quad t\geq 0,\quad x\in \mathbb{R}.$$
%%
If one uses the fact that $u$ is a weak solution of \eqref{E:MainEqNew}, then one sees that the function $x\mapsto \sigma\bigpar{u(t,x)}$ is Lipschitz continuous in $L^1([0,T];\mathbb{R})$ for each $T> 0$. Since we are going to show that there is a point $t_0$ such that $u(t,x)\in \nbrace{0}\cup [1-\eta,1]$ when $0\leq t\leq t_0$ and $x\in \mathbb{R}$, we may without loss of generality assume that $\sigma'(0) > 0$ and it follows that the function $x\mapsto u(t,x)$ is Lipschitz continuous in $L^1([0,T];\mathbb{R})$ and therefore $g$ is Lipschitz continuous. Since the function $x\mapsto u(t,x)$ is non-increasing, we can apply the results from \cite{Dafermos77} and we recall that a generalized characteristic satisfies the equation
%%
\begin{equation*} \xi'(t) = \begin{cases} \sigma'\bigpar{u(t,\xi(t))}, & \text{if $u(t,\xi(t)+)=u(t,\xi(t)-)$},\\ \frac{\sigma\bigpar{u(t,\xi(t)+)}-\sigma\bigpar{u(t,\xi(t)-)}}{ u(t,\xi(t)+)-u(t,\xi(t)-)},& \text{otherwise.} \end{cases} \end{equation*}
%%
(Note that in \cite{Dafermos77} it is assumed that the function $g$ is continuously differentiable, but it is sufficient for the conclusions needed here that it is Lipschitz continuous.)
Observe that if $u(t,x) = 0$, then $g\bigpar{u(t,x),t,x}=0$ as well and therefore it follows from \eqref{E:BoundOnG} and from \cite[Thm.~3.2, 3.3]{Dafermos77} that if one follows a maximal backward characteristic from a point $(t,x)$ where $t < \frac 1{k(0)}$ and $u(t,x) > 0$, then one can conclude that $u(t,x) > 1-tk(0)$. Thus we see that if we define the function $\varphi$ by
%%
\begin{equation*} \varphi(t)\defeq \inf\set{x> 0 \mid u(t,x) = 0}, \end{equation*}
%%
then we have $u(t,\varphi(t)-) \geq 1-tk(0)$ when $t< \frac 1{k(0)}$. There is, by \cite[Thm.~4.1]{Dafermos77}, a unique characteristic starting at $(0,0)$ and it must be $\varphi$. We may and will assume that $u$ is continuous from the left in its second variable and thus we conclude that for almost every $t>0$ we have
%%
$$\label{E:RankHug} \varphi'(t) = \frac {\sigma\bigpar{u(t,\varphi(t))}}{u(t,\varphi(t))}.$$
%%
We let $h$ be the inverse of $\varphi$ and hence we have for almost every $x$:
%%
$$\label{E:RankHugB} h'(x) = \frac {u(h(x),x)} {\sigma\bigpar{u(h(x),x)}}.$$
%%
Now we must show that there is a positive number $t_0$, independent of the assumption \eqref{E:SmoothAss}, such that
%%
$$\label{E:LowerBound} u(t,x) \geq 1-\eta,\quad x\leq \varphi(t),\quad 0\leq t\leq t_0.$$
%%
(Recall that $\eta$ was defined in \eqref{E:BoundOnSigma}.) Once we have done this, the desired conclusion follows from \cite[Thm.~1, 7.(f)]{CockburnGGSOL}. Let $v$ be the solution of the linear problem
%%
\begin{equation*} \frac{\partial}{\partial t} \Par{ v(t,x) + \int_0^t k(t-s)\bigpar{v(s,x)-v_0(x)}\,\textup{d} s} + v_x(t,x) = 0, \end{equation*}
%%
for $t > 0$ and $x\in \mathbb{R}$ where $v_0 = \chi_{\mathbb{R}^-}$. Equivalently one can consider the problem for $x > 0$ with boundary value $v(t,0) = 1$. Thus $v$ is nondecreasing in its first variable and non-increasing in its second variable by the result given above for the nonlinear case.
The Laplace transform of $v$ (with respect to its first variable) is given by
%%
\begin{equation*} \hat v(z,x) = \frac 1{z} \textup{e}^{- {zx} - {z\hat k(z)x}},\quad x\geq 0,\ \Re z > 0. \end{equation*}
%%
The derivative of $v$ with respect to $t$ is thus a measure that has Laplace-Stieltjes transform $\textup{e}^{- {zx} - {z\hat k(z)x}}$ and we see that it consists of a translation of a nonnegative measure with Laplace-Stieltjes transform $\textup{e}^{- z\hat k(z)x}$, see \cite[Prop.~4.2, 4.3]{PrussBook93}. This measure in turn has a point-mass of size $\textup{e}^{-k(0)x}$ at $0$ and therefore we consider the measure $w$ whose Laplace transform is given by
%%
\begin{equation*} \hat w(z,x) = \textup{e}^{-z\hat k(z)x} - \textup{e}^{-k(0)x},\quad x\geq 0. \end{equation*}
%%
It is clear that $w$ does not have a nonzero point-mass at $0$. Next we differentiate both sides of this equation with respect to $z$ and use the fact that $\frac {\textup{d}}{\textup{d} z} (z\hat k(z))$ is the Laplace transform of the function $P(t)\defeq -tk'(t)$. Since the convolution of a locally integrable function and a measure is a locally integrable function we conclude that $tw(\textup{d} t,x)$ is given by a locally integrable function and we get (with a slight abuse of notation) that
%%
$$\label{E:FundSolEq} tw(t,x) = x P(t)\textup{e}^{-k(0)x} + x \int_0^t P(t-s)w(s,x)\,\textup{d} s.$$
%%
The conclusion we can draw is that
%%
$$\label{E:ExprForV} v_t(\textup{d} t,x)= w(t-x,x)\,\textup{d} t + \textup{e}^{-k(0)x} \delta_{x}(\textup{d} t).$$
%%
When we restrict ourselves to $x > 0$ we can rewrite equation \eqref{E:MainEq} as
%%
\begin{equation*} \frac{\partial}{\partial t} \Par{ u(t,x) + \int_0^t k(t-s)u(s,x)\,\textup{d} s} + u_x(t,x) = F(t,\textup{d} x), \end{equation*}
%%
where $F= \frac{\partial}{\partial x}\bigpar{ u -\sigma(u)}$. Take Laplace transforms (with respect to $t$) and solve the differential equation with respect to $x$ that one gets.
Then one sees that $u$ can be written in the form
%%
\begin{equation*} u(t,x) = v(t,x) + \int_{[0,x]}\int_{[0,t]} v_t(\textup{d} s,x-y)F(t-s,\textup{d} y). \end{equation*}
%%
We take $t$ to be so small that $u(t,x) > 1-\eta$ when $x \leq \varphi (t)$. We note that $F$ consists of two parts, one nonnegative coming from the points where $h(x) < t$ so that $\sigma'(u(t,x)) \geq 1$ and another non-positive point-mass when $h(x)=t$. The absolute value of this point-mass is clearly less than $1$. Since $v$ is nondecreasing in its first variable, we get
%%
$$\label{E:UGeqVA} u(t,x) \geq v(t,x) - \int_{[(t-h(x))_+,t]} v_t(\textup{d} s,x-\varphi (t-s)),$$
%%
because if $s< t-h(x)$, then $\varphi (t-s) > x$. By \eqref{E:RankHugB} we have $h'(x) > 1$ so that if $t> h(x)$ then $t-h(x-s)> h(x)-h(x-s) > s$ when $s\in [0,x]$ and it follows that $\varphi (t-s) > x-s$. That is, we have
%%
\begin{equation*} s > {x-\varphi (t-s)},\quad 0\leq s\leq t, \quad t> h(x). \end{equation*}
%%
By \eqref{E:ExprForV} and \eqref{E:UGeqVA} we therefore get
%%
\begin{equation*} u(t,x) \geq v(t,x) - \int_{t-h(x)}^t w\Bigpar{s-\bigpar{x-\varphi (t-s)}, x-\varphi (t-s)}\,\textup{d} s, \quad t> h(x). \end{equation*}
%%
Since we assume that $u$ is continuous from the left and nonincreasing in its second variable and since the function $t\mapsto u(t,x)$ is continuous in $L^1(\mathbb{R}^+;\mathbb{R})$, we conclude that $\lim_{t\downarrow h(x)} u(t,x) = u(h(x),x)$. Moreover, because $h(x) > x$ we have $\lim_{t\downarrow h(x)} v(t,x) = v(h(x),x)$. Thus we conclude after changing variables $s=h(x)-h(y)$ that
%%
$$\label{E:UGeqVB} u(h(x),x)\geq v(h(x),x) - \int_0^x h'(y) w\Bigpar{h(x)-h(y)-(x-y), x-y}\,\textup{d} y.$$
%%
Let us now return to equation \eqref{E:FundSolEq}. Since we only consider small values of $t$ we may without loss of generality assume that there is a constant $\Const c\COne$ (independent of the assumption \eqref{E:SmoothAss}), such that
%%
$$\label{E:BoundOnkP} \abs{k'(t)}\leq \COne t^{-\alpha-1},\quad t> 0.$$
%%
Let $x> 0$ and replace $t$ by $tx^{\frac 1\alpha}$ in \eqref{E:FundSolEq}.
After a change of variables in the integral we get the equation
%%
\begin{equation*} t\phi(t) = xP(tx^{\frac 1\alpha})\textup{e}^{-k(0)x} + \int_0^t xP\bigpar{(t-s)x^{\frac 1\alpha}}\phi(s)\,\textup{d} s, \end{equation*}
%%
where we have defined $\phi(t) = x^{\frac 1\alpha}w(tx^{\frac 1\alpha},x)$. Now $0\leq xP(tx^{\frac 1\alpha}) \leq \COne t^{-\alpha}$ and $\int_0^\infty \phi(t)\,\textup{d} t = \hat {w}(0,x)=1-\textup{e}^{-k(0)x}\leq 1$ and therefore it follows from Theorem \ref{T:BasicRayleighEstim} that there is a constant $\Const c\CTwo$ depending only on $\COne$ and $\alpha$ such that $\phi(t) \leq \CTwo t^{-\alpha-1}$ when $t\geq \CTwo$. By the definition of $\phi$ this implies that
%%
$$\label{E:EstimOnW} w(t,x) \leq \CTwo t^{-\alpha-1}x,\quad t\geq \CTwo x^{\frac 1\alpha}.$$
%%
By \eqref{E:ExprForV} we have
%%
$$\label{E:IntOfW} v(t,x) = 1-\int_t^\infty w(s-x,x)\,\textup{d} s, \quad t> x> 0.$$
%%
By \eqref{E:RankHugB} and \eqref{E:LowerBound} we have
%%
$$\label{E:BoundOnHp} \frac1{1-\mu}\leq h'(x)\leq \frac 1{\sigma(1-\eta)},\quad x\leq \varphi(t_0),$$
so that $h(x) \geq \frac x{1-\mu}$ when $x\leq \varphi(t_0)$. If, in addition, $x < (\frac {1-\mu}\mu\CTwo)^{-\frac \alpha{1-\alpha}}$ and $s \geq h(x)$, then $s-x > \CTwo x^{\frac 1\alpha}$. Thus we get from \eqref{E:IntOfW}
%%
\begin{multline}\label{E:LowBouV} v(t,x) \geq 1 -\CTwo \int_t^\infty (s-x)^{-\alpha-1}x\,\textup{d} s = 1 - \frac\CTwo \alpha (t-x)^{-\alpha}x \\ \geq 1- \frac \CTwo\alpha \Bigpar{\frac{1-\mu}\mu}^\alpha x^{1-\alpha},\quad t \geq h(x),\quad x\leq \min\Bigbrace{\varphi(t_0),(\tfrac {1-\mu}\mu\CTwo)^{-\frac \alpha{1-\alpha}}}.
\end{multline}
%%
With the aid of \eqref{E:BoundOnHp} we conclude that
%%
\begin{multline}\label{E:EstimInt} \int_0^x h'(y) w\Bigpar{h(x)-h(y)-(x-y), x-y}\,\textup{d} y \\ \leq \frac 1{\sigma(1-\eta)}\int_0^x \CTwo \Bigpar{\frac{\mu}{1-\mu}}^{-\alpha-1} y^{-\alpha}\,\textup{d} y = \frac 1{(1-\alpha)\sigma(1-\eta)} \CTwo \Bigpar{\frac{\mu}{1-\mu}}^{-\alpha-1} x^{1-\alpha},\\ x\leq \min\Bigbrace{\varphi(t_0),(\tfrac {1-\mu}\mu\CTwo)^{-\frac \alpha{1-\alpha}}}. \end{multline}
%%
When we combine \eqref{E:LowBouV} and \eqref{E:EstimInt} we conclude from \eqref{E:UGeqVB} that $u(h(x),x) >1-\eta$ for sufficiently small values of $x$, that is, we have established \eqref{E:LowerBound} with $t_0$ independent of $k(0)$ and the smoothness assumptions on $k$. This completes the proof. \end{proof}
\begin{proof}[Proof of Theorem \ref{T:BasicRayleighEstim}] Let
%%
\begin{equation*} f_1(t) = \frac {\Psi(t)}t + \frac 1t \int_0^1 \Psi(t-s)\phi(s)\,\textup{d} s, \quad t\geq 1. \end{equation*}
%%
It follows that $f_1\in L^1([1,\infty);\mathbb{R})$, with an upper bound on the norm depending on $\Psi$ only. Let $\varphi$ be the solution to the equation
%%
$$\label{E:NewSingEq} \varphi(t) \aeeq f_1(t) + \frac 1t \int_1^t \Psi(t-s)\varphi(s)\,\textup{d} s, \quad t\geq 1.$$
%%
Such a solution $\varphi\in L^1([1,\infty);\mathbb{R})$ exists by \cite[Thm.~9.3.6, Cor.~9.3.14]{GGSOLOS} when one observes that $\sup_{s\geq \tau} \int_s^\infty \frac 1t \abs{\Psi(t-s)}\,\textup{d} t < 1$ for sufficiently large values of $\tau$. Moreover, it follows from \cite[Cor.~9.3.18]{GGSOLOS} and the fact that $\Psi$ is nonnegative that we have
%%
$$\label{E:FirstUpperBound} \phi(t)\aeleq \varphi(t),\quad t\geq 1.$$
%%
Now there is a point $t_* \in [2,3]$ (in fact a set with positive measure of such points) so that $\varphi(t_*) \leq \norm{\varphi}_{L^1([1,\infty))}$, and it follows from \eqref{E:NewSingEq} that
%%
\begin{equation*} \frac 1{t_*} \int_0^{t_*} \Psi(t_*-s)\varphi(s)\,\textup{d} s \leq \norm{\varphi}_{L^1([1,\infty))}.
\end{equation*}
%%
Using \eqref{I:qHypC} we see that there is a constant $\Const c\RPOne$ such that
%%
\begin{equation*} f_2(t) \defeq f_1(t) + \frac 1t \int_1^{t_*} \Psi(t-s)\varphi(s)\,\textup{d} s \leq \RPOne, \quad t\geq t_*. \end{equation*}
%%
Thus we have
%%
$$\label{E:EqAfterTStar} \varphi(t)\aeeq f_2(t) + \frac 1t \int_{t_*}^t \Psi(t-s)\varphi(s)\,\textup{d} s, \quad t\geq t_*,$$
%%
and it follows from \cite[Thm.~9.3.6, Cor.~9.3.14]{GGSOLOS} (where we now use the fact that $\sup_{t\geq \tau} \frac 1t\int_\tau^t \abs{\Psi(t-s)}\,\textup{d} s < 1$ for sufficiently large values of $\tau$) that there is a constant $\Const c\RPTwo$ such that
%%
$$\label{E:wBounded} \varphi(t) \aeleq \RPTwo,\quad t\geq t_*.$$
%%
Thus we see that the convolution term is continuous, at least when $t\geq t_*$ and therefore we may assume that equation \eqref{E:NewSingEq} and inequalities \eqref{E:FirstUpperBound} and \eqref{E:wBounded} hold for all values $t> t_*$. Since $\lim_{t\to\infty} \frac 1t \int_0^t \Psi(s)\,\textup{d} s =0$ by \eqref{I:qHypB} we can choose a number $T > t_*$ such that
%%
$$\label{E:ChooseT} \frac 1t \int_0^{t} \Psi(s)\,\textup{d} s < \frac 14 \frac 1{\sup_{t>0} \sup_{s\geq t/2} \frac {\Psi(s)}{\Psi(t)}},\quad t\geq T.$$
%%
Now it is clear that there is a constant $\Const c\RPThree$ such that
%%
\begin{equation*} f_2(t) \leq \RPThree \frac {\Psi(t)}t,\quad t\geq T, \end{equation*}
%%
and therefore we have by \eqref{E:EqAfterTStar},
%%
\begin{multline}\label{E:InEqA} \varphi(t) \leq \RPThree \frac {\Psi(t)}t + \frac 1t\int_{t_*}^{\max\nbrace{T,t/2}} \Psi(t-s)\varphi(s)\,\textup{d} s \\+ \frac 1t \int_{\max\nbrace{T,t/2}}^t \Psi(t-s)\varphi(s)\,\textup{d} s,\quad t\geq T.
\end{multline}
%%
Using the fact that $\varphi$ is integrable one sees by \eqref{I:qHypC} that there is a constant $\Const c\RPFour$ such that
%%
$$\label{E:InEqB} \frac 1t\int_{t_*}^{\max\nbrace{T,t/2}} \Psi(t-s)\varphi(s)\,\textup{d} s \leq \RPFour \frac {\Psi(t)}t,\quad t \geq T.$$
%%
Define $v(t)= t\varphi(t)/\Psi(t)$. Then one has by our choice of $T$ in \eqref{E:ChooseT} and by inequalities \eqref{E:InEqA} and \eqref{E:InEqB} that
%%
\begin{equation*} v(t) \leq \RPThree + \RPFour + \frac 12\max_{s\in [T,t]} v(s),\quad t\geq T, \end{equation*}
%%
and this inequality implies that
%%
\begin{equation*} \varphi(t) \leq 2(\RPThree + \RPFour)\frac {\Psi(t)}t,\quad t\geq T. \end{equation*}
%%
The proof is complete. \end{proof}
\bibliographystyle{amsplain} \providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace}
\begin{thebibliography}{0}
\bibitem{CockburnGGSOL} B.~Cockburn, G.~Gripenberg, and S-O. Londen, \emph{On convergence to entropy solutions of a single conservation law}, J. Differential Equations \textbf{128} (1996), 206--251.
\bibitem{Dafermos77} C.M. Dafermos, \emph{Generalized characteristics and the structure of solutions of hyperbolic conservation laws}, Indiana Univ. Math. J. \textbf{26} (1977), 1097--1119.
\bibitem{FeierPetze97} E.~Feireisl and H.~Petzeltov\'a, \emph{On the long-time behaviour of solutions to a conservation law with memory}, Math. Methods Appl. Sci. \textbf{20} (1997), 569--581.
\bibitem{FeierPetze98} \bysame, \emph{On compactness of bounded solutions to multidimensional conservation laws with memory}, NoDEA Nonlinear Differential Equations Appl. \textbf{5} (1998), 193--204.
\bibitem{SmoConvLaw00} G.~Gripenberg, Ph. Cl\'ement, and S-O. Londen, \emph{Smoothness in fractional evolution equations and conservation laws}, Ann. Scuola Norm. Sup. Pisa Cl. Sci. (4) \textbf{29} (2000), 231--251.
\bibitem{GGSOL95} G.~Gripenberg and S-O.
Londen, \emph{Fractional derivatives and smoothing in nonlinear conservation laws}, Differential Integral Equations \textbf{8} (1995), 1961--1976.
\bibitem{GGSOLOS} G.~Gripenberg, S-O. Londen, and O.~Staffans, \emph{Volterra integral and functional equations}, Cambridge University Press, Cambridge, 1990.
\bibitem{Pruss87} J.~Pr{\"u}ss, \emph{Positivity and regularity of hyperbolic {V}olterra equations in {B}anach spaces}, Math. Ann. \textbf{279} (1987), 317--344.
\bibitem{PrussBook93} \bysame, \emph{Evolutionary integral equations and applications}, Birkh{\"a}user, Basel, 1995.
\end{thebibliography}
%\bibliography{ggbiblio,publgus}
\end{document}