\documentclass[reqno]{amsart} \usepackage{graphicx} \usepackage{hyperref} \AtBeginDocument{{\noindent\small {\em Electronic Journal of Differential Equations}, Monograph 08, 2007, (101 pages).\newline ISSN: 1072-6691. URL: http://ejde.math.txstate.edu or http://ejde.math.unt.edu \newline ftp ejde.math.txstate.edu (login: ftp)} \thanks{\copyright 2007 Texas State University - San Marcos.} \vspace{9mm}} \begin{document} \title[\hfilneg EJDE-2007/Mon. 08\hfil Lyapunov functions] {An algorithm for constructing\\ Lyapunov functions} \author[S. F. Hafstein\hfil EJDE-2007/Mon. 08\hfilneg] {Sigurdur Freyr Hafstein} \address{Sigurdur Freyr Hafstein \newline School of Science and Engineering\\ Reykjavik University\\ Reykjavik, Iceland} \email{sigurdurh@ru.is} \thanks{Submitted August 29, 2006. Published August 15, 2007.} \subjclass[2000]{35J20, 35J25} \keywords{Lyapunov functions; switched systems; converse theorem; \hfill\break\indent piecewise affine functions} \begin{abstract} In this monograph we develop an algorithm for constructing Lyapunov functions for arbitrary switched dynamical systems $\dot{\mathbf{x}} = \mathbf{f}_\sigma(t,\mathbf{x})$, possessing a uniformly asymptotically stable equilibrium. Let $\dot{\mathbf{x}}=\mathbf{f}_p(t,\mathbf{x})$, $p\in\mathcal{P}$, be the collection of the ODEs, to which the switched system corresponds. The number of the vector fields $\mathbf{f}_p$ on the right-hand side of the differential equation is assumed to be finite and we assume that their components $f_{p,i}$ are $\mathcal{C}^2$ functions and that we can give some bounds, not necessarily close, on their second-order partial derivatives. The inputs of the algorithm are solely a finite number of the function values of the vector fields $\mathbf{f}_p$ and these bounds. The domain of the Lyapunov function constructed by the algorithm is only limited by the size of the equilibrium's region of attraction. Note, that the concept of a Lyapunov function for the arbitrary switched system $\dot{\mathbf{x}} = \mathbf{f}_\sigma(t,\mathbf{x})$ is equivalent to the concept of a common Lyapunov function for the systems $\dot{\mathbf{x}}=\mathbf{f}_p(t,\mathbf{x})$, $p\in\mathcal{P}$, and that if $\mathcal{P}$ contains exactly one element, then the switched system is just a usual ODE $\dot{\mathbf{x}}=\mathbf{f}(t,\mathbf{x})$. We give numerous examples of Lyapunov functions constructed by our method at the end of this monograph. \end{abstract} \maketitle \tableofcontents \numberwithin{equation}{section} \newtheorem{theorem}{Theorem}[section] \newtheorem{corollary}[theorem]{Corollary} \newtheorem{lemma}[theorem]{Lemma} \newtheorem{definition}[theorem]{Definition} \newtheorem{SwS}[theorem]{Switched System} \newtheorem{procedure}[theorem]{Procedure} \allowdisplaybreaks \newcommand{\diff}[2]{\frac{d{#1}}{d{#2}}} \newcommand{\pdiff}[2]{\frac{\partial{#1}}{\partial{#2}}} \section{Introduction} Let $\mathcal{P}$ be a nonempty set and equip it with the discrete metric, let $\mathcal{U}\subset \mathbb{R}^n$ be a domain containing the origin, and let $\|\cdot\|$ be a norm on $\mathbb{R}^n$. For every $p\in\mathcal{P}$ assume that $\mathbf{f}_p: \mathbb{R}_{\geq0}\times \mathcal{U} \to \mathbb{R}^n$ satisfies the local Lipschitz condition: for every compact $\mathcal{C}\in\mathbb{R}_{\geq0}\times \mathcal{U}$ there is a constant $L_{p,\mathcal{C}}$ such that $(t,\mathbf{x}),(t,\mathbf{y})\in \mathcal{C}$ implies $\|\mathbf{f}_p(t,\mathbf{x}) - \mathbf{f}_p(t,\mathbf{y})\|\leq L_{p,\mathcal{C}}\|\mathbf{x}-\mathbf{y}\|$. Define $\mathcal{B}_{\|\cdot\|,R}:= \{\mathbf{x}\in\mathbb{R}^n : \|\mathbf{x}\|0$. We consider the switched system $\dot{\mathbf{x}}=\mathbf{f}_\sigma(t,\mathbf{x})$, where $\sigma$ is an arbitrary right-continuous mapping $\mathbb{R}_{\geq0} \to\mathcal{P}$ of which the discontinuity-points form a discrete set. In this monograph we establish the claims made in the abstract in the following three steps: First, we show that the origin is a uniformly asymptotically stable equilibrium of the arbitrary switched system $\dot{\mathbf{x}}=\mathbf{f}_\sigma(t,\mathbf{x})$, whenever there exists a common Lyapunov function for the systems $\dot{\mathbf{x}}=\mathbf{f}_p(t,\mathbf{x})$, $p\in\mathcal{P}$, and we show how to derive a lower bound on the equilibrium's region of attraction from such a Lyapunov function. Second, we show that if $\mathcal{B}_{\|\cdot\|,R}\subset\mathcal{U}$ is a subset of the region of attraction of the arbitrary switched system $\dot{\mathbf{x}}=\mathbf{f}_\sigma(t,\mathbf{x})$ and the vector fields $\mathbf{f}_p$, $p\in\mathcal{P}$, satisfy the Lipschitz condition: there exists a constant $L$ such that for every $p\in\mathcal{P}$ and every $(s,\mathbf{x}),(t,\mathbf{y})\in \mathbb{R}_{\geq 0} \times \mathcal{B}_{\|\cdot\|,R}$ the inequality $\|\mathbf{f}_p(s,\mathbf{x}) - \mathbf{f}_p(t,\mathbf{y})\|\leq L(|s-t|+\|\mathbf{x}-\mathbf{y}\|)$ holds; then for every $00} \times \mathcal{U}$. Further, note that if $\sigma,\varsigma \in \mathcal{S}_\mathcal{P}$, $\sigma \neq \varsigma$, then in general $\boldsymbol{\phi}_\sigma(t,t_0,\boldsymbol{\xi})$ is not equal to $\boldsymbol{\phi}_\varsigma(t,t_0,\boldsymbol{\xi})$ and that if the Switched System \ref{POLYSYS} is autonomous, that is, none of the vector fields $\mathbf{f}_p$, $p\in\mathcal{P}$, does depend on the time $t$, then $$\boldsymbol{\phi}_\sigma(t,t',\mathbf{x}) = \boldsymbol{\phi}_\gamma(t-t',0,\boldsymbol{\xi}),\quad \text{where \gamma(s) := \sigma(s+t') for all s\geq 0},$$ for all $t\geq t'\geq0$ and all $\boldsymbol{\xi}\in\mathcal{U}$. Therefore, we often suppress the middle argument of the solution to an autonomous system and simply write $\boldsymbol{\phi}_\sigma(t,\boldsymbol{\xi})$. We later need the following generalization of Theorem \ref{APPLEMMA} to switched systems. \begin{theorem} \label{APPLEMMASW} Consider the Switched System \ref{POLYSYS}, let $\|\cdot\|$ be a norm on $\mathbb{R}^n$, and assume that the functions $\mathbf{f}_p$ satisfy the Lipschitz condition: there exists a constant $L$ such that $$\|\mathbf{f}_p(t,\mathbf{x})-\mathbf{f}_p(t,\mathbf{y})\| \leq L\|\mathbf{x}-\mathbf{y}\|$$ for all $t\geq 0$, all $\mathbf{x},\mathbf{y}\in\mathcal{U}$, and all $p\in\mathcal{P}$. Let $t_0 \geq 0$, let $\boldsymbol{\xi},\boldsymbol{\eta}\in\mathcal{U}$, let $\sigma,\varsigma\in\mathcal{S}_\mathcal{P}$, and assume there is a constant $\delta\geq 0$ such that $$\|\mathbf{f}_{\sigma(t)}(t,\mathbf{x})-\mathbf{f}_{\varsigma(t)}(t,\mathbf{x})\| \leq \delta$$ for all $t\geq 0$ and all $\mathbf{x}\in\mathcal{U}$. Denote the solution to the initial value problem $$\dot{\mathbf{x}} = \mathbf{f}_{\sigma}(t,\mathbf{x}),\quad \mathbf{x}(s_0) = \boldsymbol{\xi},$$ by $\mathbf{y}:\mathcal{I}_\mathbf{y} \to \mathbb{R}^n$ and denote the solution to the initial value problem $$\dot{\mathbf{x}} = \mathbf{f}_{\varsigma}(t,\mathbf{x}),\quad \mathbf{x}(s_0) = \boldsymbol{\eta},$$ by $\mathbf{z}:\mathcal{I}_\mathbf{z}\to \mathbb{R}^n$. Set $\mathcal{J} := \mathcal{I}_\mathbf{y} \cap \mathcal{I}_\mathbf{z}$ and set $\gamma := \|\boldsymbol{\xi} - \boldsymbol{\eta}\|$. Then the inequality $$\label{APPLEMMASWIE1} \|\mathbf{y}(t) - \mathbf{z}(t)\| \leq \gamma e^{L|t-s_0|} + \frac{\delta}{L}(e^{L|t-s_0|}-1)$$ holds for all $t\in\mathcal{J}$. \end{theorem} \begin{proof} We prove only inequality (\ref{APPLEMMASWIE1}) for $t\geq s_0$, the case $t 0 \quad \text{for all$x\in \mathcal{I} \setminus \mathcal{C}$}, &&\text{implies that } \text{$g$is a strictly monotonically }\\ & &&\text{increasing function on$\mathcal{I}$.}\\ &D^*g(x) \leq 0 \quad \text{for all$x\in \mathcal{I} \setminus \mathcal{C}$}, &&\text{implies that } \text{$g$is a monotonically }\\ & &&\text{decreasing function on$\mathcal{I}$.}\\ &D^*g(x) < 0 \quad \text{for all$x\in \mathcal{I} \setminus \mathcal{C}$}, &&\text{implies that } \text{$g$is a strictly monotonically }\\ & && \text{decreasing function on$\mathcal{I}.} \end{align*} \end{corollary} \subsection{Stability of arbitrary switched systems} The concepts equilibrium point and stability are motivated by the desire to keep a dynamical system in, or at least close to, some desirable state. The term {\it equilibrium} or {\it equilibrium point} of a dynamical system, is used for a state of the system that does not change in the course of time, that is, if the system is at an equilibrium at timet=0$, then it will stay there for all times$t > 0$. \begin{definition}[Equilibrium point] \rm A state$\mathbf{y}$in the state-space of the Switched System \ref{POLYSYS} is called an equilibrium or an equilibrium point of the system, if and only if$\mathbf{f}_p(t,\mathbf{y})=\boldsymbol{0}$for all$p\in\mathcal{P}$and all$t\geq0$. \end{definition} If$\mathbf{y}$is an equilibrium point of Switched System \ref{POLYSYS}, then obviously the initial value problem $$\dot{\mathbf{x}} = \mathbf{f}_\sigma(t,\mathbf{x}),\quad \mathbf{x}(0) = \mathbf{y}$$ has the solution$\mathbf{x}(t) = \mathbf{y}$for all$t\geq 0$regardless of the switching signal$\sigma \in \mathcal{S}$. The solution with$\mathbf{y}$as an initial value in the state-space is thus a constant vector and the state does not change in the course of time. By a translation in the state-space one can always reach that$\mathbf{y} = \boldsymbol{0}$without affecting the dynamics. Hence, there is no loss of generality in assuming that a particular equilibrium point is at the origin. A real-world system is always subject to some fluctuations in the state. There are some external effects that are unpredictable and cannot be modelled, some dynamics that have (hopefully) very little impact on the behavior of the system are neglected in the modelling, etc. Even if the mathematical model of a physical system would be perfect, which hardly seems possible, the system state would still be subject to quantum mechanical fluctuations. The concept of local stability in the theory of dynamical systems is motivated by the desire, that the system state stays at least close to an equilibrium point after small fluctuations in the state. Any system that is expected to do something useful must have a predictable behavior to some degree. This excludes all equilibria that are not locally stable as usable working points for a dynamical system. Local stability is thus a minimum requirement for an equilibrium. It is, however, not a very strong property. It merely states, that there are disturbances that are so small, that they do not have a great effect on the system in the long run. In this monograph we will concentrate on {\it uniform asymptotic stability on a set} containing the equilibrium. This means that we are demanding that the {\it uniform asymptotic stability} property of the equilibrium is not merely valid for some, possibly arbitrary small, neighborhood of the origin, but this property must hold on a a\,priori defined neighborhood of the origin. This is a much more robust and powerful concept. It denotes, that all disturbances up to a certain known degree are ironed out by the dynamics of the system, and, because the domain of the Lyapunov functions is only limited by the size of the equilibriums' region of attraction, that we can get a reasonable lower bound on the region of attraction. The common stability concepts are most practically characterized by the use of so-called$\mathcal{K}$,$\mathcal{L}$, and$\mathcal{K}\mathcal{L}$functions. \begin{definition}[Comparison functions$\mathcal{K}$,$\mathcal{L}$, and$\mathcal{K}\mathcal{L}$] \rm The function classes$\mathcal{K}$,$\mathcal{L}$, and$\mathcal{K}\mathcal{L}$of comparison functions are defined as follows: \begin{itemize} \item[(i)] A continuous function$\alpha:\mathbb{R}_{\geq0} \to \mathbb{R}_{\geq0}$is said to be of class$\mathcal{K}$, if and only if$\alpha(0)=0$, it is strictly monotonically increasing, and$\lim_{r \to +\infty}\alpha(r) = +\infty$. \item[(ii)] A continuous function$\beta:\mathbb{R}_{\geq0} \to \mathbb{R}_{\geq0}$is said to be of class$\mathcal{L}$, if and only if it is strictly monotonically decreasing and$\lim_{s \to +\infty}\beta(s) = 0$. \item[(iii)] A continuous function$\varsigma:\mathbb{R}_{\geq 0} \times \mathbb{R}_{\geq 0} \to \mathbb{R}_{\geq 0}$is said to be of class$\mathcal{K}\mathcal{L}$, if and only if for every fixed$s\in\mathbb{R}_{\geq0}$the mapping$r \mapsto \varsigma(r,s)$is of class$\mathcal{K}$and for every fixed$r\in\mathbb{R}_{\geq0}$the mapping$s \mapsto \varsigma(r,s)$is of class$\mathcal{L}$. \end{itemize} \end{definition} Note that some authors make a difference between strictly monotonically increasing functions that vanish at the origin and strictly monotonically increasing functions that vanish at the origin and additionally asymptotically approach infinity at infinity. They usually denote the functions of the former type as class$\mathcal{K}$functions and the functions of the latter type as class$\mathcal{K}_\infty$functions. We are not interested in functions of the former type and in this work$\alpha\in\mathcal{K}$always implies$\lim_{r \to +\infty} \alpha(r) = +\infty$. We now define various stability concepts for equilibrium points of switched dynamical systems with help of the comparison functions. \begin{definition}[Stability concepts for equilibria] \label{STABDEFS} \rm Assume that the origin is an equilibrium point of the Switched System \ref{POLYSYS}, denote by$\boldsymbol{\phi}$the solution to the system, and let$\|\cdot\|$be an arbitrary norm on$\mathbb{R}^n$. \begin{itemize} \item[(i)] The origin is said to be a uniformly stable equilibrium point of the Switched System \ref{POLYSYS} on a neighborhood$\mathcal{N}\subset\mathcal{U}$of the origin, if and only if there exists an$\alpha\in \mathcal{K}$such that for every$\sigma\in\mathcal{S}_\mathcal{P}$, every$t\geq t_0\geq0$, and every$\boldsymbol{\xi}\in\mathcal{N}$the following inequality holds $$\|\boldsymbol{\phi}_\sigma(t,t_0,\boldsymbol{\xi})\| \leq \alpha(\|\boldsymbol{\xi}\|).$$ \item[(ii)] The origin is said to be a uniformly asymptotically stable equilibrium point of the Switched System \ref{POLYSYS} on the neighborhood$\mathcal{N}\subset\mathcal{U}$of the origin, if and only if there exists a$\varsigma\in \mathcal{K}\mathcal{L}$such that for every$\sigma\in\mathcal{S}_\mathcal{P}$, every$t\geq t_0\geq0$, and every$\boldsymbol{\xi}\in\mathcal{N}$the following inequality holds $$\label{UAS1} \|\boldsymbol{\phi}_\sigma(t,t_0,\boldsymbol{\xi})\| \leq \varsigma(\|\boldsymbol{\xi}\|,t-t_0).$$ \item[(iii)] The origin is said to be a uniformly exponentially stable equilibrium point of the Switched System \ref{POLYSYS} on the neighborhood$\mathcal{N}\subset\mathcal{U}$of the origin, if and only if there exist constants$k>0$and$\gamma > 0$, such that for every$\sigma\in\mathcal{S}_\mathcal{P}$, every$t\geq t_0\geq0$, and every$\boldsymbol{\xi}\in\mathcal{N}$the following inequality holds \begin{equation*} \|\boldsymbol{\phi}_\sigma(t,t_0,\boldsymbol{\xi})\| \leq ke^{-\gamma (t-t_0)}\|\boldsymbol{\xi}\|. \end{equation*} \end{itemize} \end{definition} The stability definitions above imply, that if the origin is a uniformly exponentially stable equilibrium of the Switched System \ref{POLYSYS} on the neighborhood$\mathcal{N}$, then the origin is a uniformly asymptotically stable equilibrium on$\mathcal{N}$as well, and, if the origin is a uniformly asymptotically stable equilibrium of the Switched System \ref{POLYSYS} on the neighborhood$\mathcal{N}$, then the origin is a uniformly stable equilibrium on$\mathcal{N}$. If the Switched System \ref{POLYSYS} is autonomous, then the stability concepts presented above for the systems equilibria are {\it uniform} in a canonical way, that is, independent of$t_0$, and the definitions are somewhat more simple. \begin{definition} \label{STABDEFS2} \rm (Stability concepts for equilibria of autonomous systems)\quad Assume that the origin is an equilibrium point of the Switched System \ref{POLYSYS}, denote by$\boldsymbol{\phi}$the solution to the system, let$\|\cdot\|$be an arbitrary norm on$\mathbb{R}^n$, and assume that the system is autonomous. \begin{itemize} \item[(i)] The origin is said to be a stable equilibrium point of the autonomous Switched System \ref{POLYSYS} on a neighborhood$\mathcal{N}\subset\mathcal{U}$of the origin, if and only if there exists an$\alpha\in \mathcal{K}$such that for every$\sigma\in\mathcal{S}_\mathcal{P}$, every$t\geq0$, and every$\boldsymbol{\xi}\in\mathcal{N}$the following inequality holds $$\|\boldsymbol{\phi}_\sigma(t,\boldsymbol{\xi})\| \leq \alpha(\|\boldsymbol{\xi}\|).$$ \item[(ii)] The origin is said to be an asymptotically stable equilibrium point of the autonomous Switched System \ref{POLYSYS} on the neighborhood$\mathcal{N}\subset\mathcal{U}$of the origin, if and only if there exists a$\varsigma\in \mathcal{K}\mathcal{L}$such that for every$\sigma\in\mathcal{S}_\mathcal{P}$, every$t\geq0$, and every$\boldsymbol{\xi}\in\mathcal{N}$the following inequality holds \begin{equation*} \|\boldsymbol{\phi}_\sigma(t,\boldsymbol{\xi})\| \leq \varsigma(\|\boldsymbol{\xi}\|,t). \end{equation*} \item[(iii)] The origin is said to be an exponentially stable equilibrium point of the Switched System \ref{POLYSYS} on the neighborhood$\mathcal{N}\subset\mathcal{U}$of the origin, if and only if there exist constants$k>0$and$\gamma > 0$, such that for every$\sigma\in\mathcal{S}_\mathcal{P}$, every$t\geq0$, and every$\boldsymbol{\xi}\in\mathcal{N}$the following inequality holds \begin{equation*} \|\boldsymbol{\phi}_\sigma(t,\boldsymbol{\xi})\| \leq ke^{-\gamma t}\|\boldsymbol{\xi}\|. \end{equation*} \end{itemize} \end{definition} The set of those points in the state-space of a dynamical system, that are attracted to an equilibrium point by the dynamics of the system, is of great importance. It is called the {\it region of attraction} of the equilibrium. Some authors prefer {\it domain}, {\it basin}, or even {\it bassin} instead of {\it region}. For nonautonomous systems it might depend on the initial time. \begin{definition}[Region of attraction] \rm Assume that$\mathbf{y}=\boldsymbol{0}$is an equilibrium point of the Switched System \ref{POLYSYS} and let$\boldsymbol{\phi}$be the solution to the system. For every$t_0\in\mathbb{R}_{\geq 0}$the set $$\mathcal{R}_{\it Att}^{t_0} := \{\boldsymbol{\xi} \in \mathcal{U} : \ \limsup_{t \to +\infty} \boldsymbol{\phi}_\sigma(t,t_0,\boldsymbol{\xi}) = \boldsymbol{0}\quad \text{for all \sigma\in\mathcal{S}_\mathcal{P}}\}$$ is called the region of attraction with respect to$t_0$of the equilibrium at the origin. The region of attraction$\mathcal{R}_{\it Att}$of the equilibrium at the origin is defined by $$\mathcal{R}_{\it Att} := \bigcap_{t_0 \geq 0}\mathcal{R}_{\it Att}^{t_0}.$$ \end{definition} Thus, for the Switched System \ref{POLYSYS},$\boldsymbol{\xi} \in \mathcal{R}_{\it Att}$implies$\lim_{t \to +\infty} \boldsymbol{\phi}_\sigma(t,t_0,\boldsymbol{\xi}) = \boldsymbol{0}$for all$\sigma\in\mathcal{S}_\mathcal{P}$and all$t_0\geq 0$. \subsection{Three useful lemmas} It is often more convenient to work with smooth rather that merely continuous functions and later on we need estimates by convex$\mathcal{C}^\infty\cap\mathcal{K}$functions. The two next lemmas state some useful facts in this regard. \begin{lemma}\label{SOBLEMMA} Let$f:\mathbb{R}_{>0} \to \mathbb{R}_{\geq 0}$be a monotonically decreasing function. Then there exists a function$g:\mathbb{R}_{>0} \to \mathbb{R}_{> 0}$with the following properties: \begin{enumerate} \item[(i)]$g\in\mathcal{C}^\infty(\mathbb{R}_{>0})$. \item[(ii)]$g(x) > f(x)$for all$x\in\mathbb{R}_{>0}$. \item[(iii)]$g$is strictly monotonically decreasing. \item[(iv)]$\lim_{x \to 0+} g(x)= +\infty$and$\lim_{x \to +\infty} g(x) = \lim_{x \to +\infty} f(x)$. \item[(v)]$g$is invertible and$g^{-1} \in \mathcal{C}^\infty(g(\mathbb{R}_{>0}))$. \end{enumerate} \end{lemma} \begin{proof} We define the function$\widetilde{h}: \mathbb{R}_{>0} \to \mathbb{R}_{>0}$by, $$\widetilde{h}(x) :=\begin{cases} f\big(\frac{1}{n+1}\big)+\frac{1}{x}, &\text{if x\in[\frac{1}{n+1},\frac{1}{n}[\ for some n\in\mathbb{N}_{>0},}\\ f(n)+\frac{1}{x}, &\text{if x\in [n,n+1[\ for some n\in\mathbb{N}_{>0},} \end{cases}$$ and the function$h: \mathbb{R}_{>0} \to \mathbb{R}_{> 0}$by $$h(x) := \widetilde{h}(x-\tanh(x)).$$ Then$h$is a strictly monotonically decreasing measurable function and because$\widetilde{h}$is, by its definition, strictly monotonically decreasing and larger than$f$, we have $$h(x+\tanh(x)) =\widetilde{h}(x+\tanh(x) - \tanh(x+\tanh(x))) > \widetilde{h}(x) > f(x)$$ for all$x\in\mathbb{R}_{>0}$. Let$\rho\in\mathcal{C}^\infty(\mathbb{R})$such that$\rho(x) \geq 0$for all$x\in\mathbb{R}$,$\operatorname{supp}(\rho) \subset\, ]-1,1[$, and$\int_\mathbb{R}\rho(x)dx=1$. We claim that the function$g:\mathbb{R}_{>0} \to \mathbb{R}_{>0}, $$g(x):= \int_{x-\tanh(x)}^{x+\tanh(x)} \rho\big(\frac{x-y}{\tanh(x)}\big) \frac{h(y)}{\tanh(x)}dy = \int_{-1}^1 \rho_1(y)h(x-y\tanh(x))dy,$$ fulfills the properties (i)--(v). Proposition (i) follows from elementary Lebesgue integration theory. Proposition (ii) follows from \begin{align*} g(x) &= \int_{-1}^1 \rho(y)h(x-y\tanh(x))dy\\ & > \int_{-1}^1 \rho(y)h(x+\tanh(x))dy\\ & > \int_{-1}^1 \rho(y)f(x)dy = f(x). \end{align*} To see thatg$is strictly monotonically decreasing let$t>s>0$and consider that $$\label{HHHHH} t-y\tanh(t) > s-y\tanh(s)$$ for all$y$in the interval$[-1,1]. Inequality (\ref{HHHHH}) follows from \begin{align*} t-y\tanh(t) - [s-y\tanh(s)] &= t-s -y[\tanh(t)-\tanh(s)]\\ &= t-s - y(t-s)(1-\tanh^2(s + \vartheta_{t,s}(t-s))) > 0, \end{align*} for some\vartheta_{t,s} \in [0,1]$, where we used the Mean-value theorem. But then $$h(t-y\tanh(t)) < h(s-y\tanh(s))$$ for all$y\in[-1,1]$and the definition of$g$implies that$g(t)0}))$, that is, proposition (v). \end{proof} \begin{lemma} \label{CONVLEMMA} Let$\alpha\in\mathcal{K}$. Then, for every$R>0$, there is a function$\beta_R \in \mathcal{K}$, such that: \begin{itemize} \item[(i)]$\beta_R$is a convex function. \item[(ii)]$\beta_R$restricted to$\mathbb{R}_{>0}$is infinitely differentiable. \item[(iii)] For all$0\leq x \leq R$we have$\beta_R(x) \leq \alpha(x)$. \end{itemize} \end{lemma} \begin{proof} By Lemma \ref{SOBLEMMA} there is a function$g$, such that$g\in\mathcal{C}^\infty(\mathbb{R}_{>0})$,$g(x) > 1/\alpha(x)$for all$x>0$,$\lim_{t \to 0+} g(x) = +\infty$, and$g$is strictly monotonically decreasing. Then the function$\beta_R:\mathbb{R}_{\geq0} \to \mathbb{R}_{\geq 0}$, defined through $$\beta_R(x) := \frac{1}{R}\int_0^x \frac{d\tau}{g(\tau)},$$ has the desired properties. First,$\beta_R(0)=0$and for every$0 < x\leq R$we have $$\beta_R(x) = \frac{1}{R}\int_0^x\frac{d\tau}{g(\tau)} \leq \frac{1}{g(x)} < \alpha(x).$$ Second, to prove that$\beta_R$is a convex$\mathcal{K}$function is suffices to prove that the second derivative of$\beta_R$is strictly positive. But this follows immediately because for every$x >0$we have$g'(x) <0$, which implies $$\frac{d^{\,2}\beta_R}{dx^2}(x) = \frac{-g'(x)}{R[g(x)]^2} > 0.$$ \end{proof} The third existence lemma is the well known and very useful Massera's lemma \cite{massera}. \begin{lemma}[Massera's lemma] \label{MLEMMA} Let$f\in\mathcal{L}$and$\lambda\in \mathbb{R}_{>0}$. Then there is a function$g\in\mathcal{C}^{1}(\mathbb{R}_{\geq 0})$, such that$g,g'\in\mathcal{K}$,$g$restricted to$\mathbb{R}_{>0}$is a$\mathcal{C}^{\infty}(\mathbb{R}_{>0})$function, $$\int_0^{+\infty} g(f(t))dt < +\infty,\quad \text{and}\quad \int_0^{+\infty}g'(f(t))e^{\lambda t}dt < +\infty.$$ \end{lemma} Note, that because$g,g'\in\mathcal{K}$in Massera's lemma above, we have for every measurable function$u:\mathbb{R}_{\geq 0} \to \mathbb{R}_{\geq 0}$, such that$u(t) \leq f(t)$for all$t\in\mathbb{R}_{\geq 0}$, that $$\int_0^{+\infty} g(u(t))dt \leq \int_0^{+\infty} g(f(t))dt \quad \text{and}\quad \int_0^{+\infty}g'(u(t))e^{\lambda t}dt \leq \int_0^{+\infty}g'(f(t))e^{\lambda t}dt.$$ It is further worth noting that Massera's lemma can be proved quite simply by using Lemma \ref{SOBLEMMA}, which implies that there is a strictly monotonically decreasing$\mathcal{C}^{\infty}(\mathbb{R}_{>0})$bijective function$h:\mathbb{R}_{>0}\to\mathbb{R}_{>0}$such that$h(x) > f(x)$for all$x >0$and$h^{-1}\in\mathcal{C}^{\infty}(\mathbb{R}_{>0})$. The function$g:\mathbb{R}_{\geq 0} \to \mathbb{R}_{\geq 0}$, $$g(t) := \int_0^t e^{-(1+\lambda)h^{-1}(\tau)}d\tau,$$ then fulfills the claimed properties. \subsection{Linear programming} For completeness we spend a few words on linear programming problems. A linear programming problem is a set of linear constraints, under which a linear function is to be minimized. There are several equivalent possibilities to state a linear programming problem, one of them is $$\label{DEFLINP} \begin{gathered} \text{minimize}\quad g(\mathbf{x}) := \mathbf{c}\cdot\mathbf{x},\\ \text{given}\quad C\mathbf{x} \leq \mathbf{b},\quad \mathbf{x} \geq \boldsymbol{0}, \end{gathered}$$ where$r,s>0$are integers,$C \in \mathbb{R}^{s\times r}$is a matrix,$\mathbf{b} \in \mathbb{R}^s$and$\mathbf{c} \in \mathbb{R}^r$are vectors, and$\mathbf{x} \leq \mathbf{y}$denotes$x_i \leq y_i$for all$i$. The function$g$is called the objective of the linear programming problem and the conditions$C\mathbf{x} \leq \mathbf{b}$and$\mathbf{x} \geq \boldsymbol{0}$together are called the constraints. A feasible solution to the linear programming problem is a vector$\mathbf{x}' \in \mathbb{R}^r$that satisfies the constraints, that is,$\mathbf{x}' \geq \boldsymbol{0}$and$C\mathbf{x}' \leq\mathbf{b}$. There are numerous algorithms known to solve linear programming problems, the most commonly used being the simplex method (see, for example, \cite{TLIP}) or interior point algorithms, for example, the primal-dual logarithmic barrier method (see, for example, \cite{Roos97}). Both need a starting feasible solution for initialization. A feasible solution to (\ref{DEFLINP}) can be found by introducing slack variables$\mathbf{y} \in \mathbb{R}^s$and solving the linear programming problem: $$\label{SLACK} \begin{gathered} \text{minimize}\quad g(\begin{bmatrix}\mathbf{x} \\ \mathbf{y} \end{bmatrix}) := \sum_{i=1}^s y_i ,\\ \text{given }\quad \begin{bmatrix}C & -I_s \end{bmatrix} \begin{bmatrix}\mathbf{x} \\ \mathbf{y} \end{bmatrix} \leq \mathbf{b},\quad \begin{bmatrix}\mathbf{x} \\ \mathbf{y} \end{bmatrix} \geq \boldsymbol{0}, \end{gathered}$$ which has the feasible solution$\mathbf{x}=\boldsymbol{0}$and$\mathbf{y} = (|b_1|,|b_2|,\dots,|b_s|)$. If the linear programming problem (\ref{SLACK}) has the solution$g([\mathbf{x}'\ \mathbf{y}']) = 0$, then$\mathbf{x}'$is a feasible solution to (\ref{DEFLINP}), if the minimum of$g$is strictly larger than zero, then (\ref{DEFLINP}) does not have any feasible solution. \section{Lyapunov's Direct Method for Switched Systems} \label{SECLDM} The Russian mathematician and engineer Alexandr Mikhailovich Lyapunov published a revolutionary work in 1892 on the stability of motion, where he introduced two methods to study the stability of general continuous dynamical systems. An English translation of this work can be found in \cite{lya1}. The more important of these two methods, known as {\it Lyapunov's second method} or {\it Lyapunov's direct method}, enables one to prove the stability of an equilibrium of (\ref{NNSYSTEM}) without integrating the differential equation. It states, that if$\mathbf{y}=\boldsymbol{0}$is an equilibrium point of the system,$V \in \mathcal{C}^1(\mathbb{R}_{\geq 0}\times\mathcal{U})$is a {\it positive definite function}, that is, there exist functions$\alpha_1,\alpha_2\in\mathcal{K}$such that $$\alpha_1(\|\mathbf{x}\|_2) \leq V(t,\mathbf{x})\leq \alpha_2(\|\mathbf{x}\|_2)$$ for all$\mathbf{x}\in\mathcal{U}$and all$t\in\mathbb{R}_{\geq 0}$, and$\boldsymbol{\phi}$is the solution to the ODE (\ref{NNSYSTEM}). Then the equilibrium is uniformly asymptotically stable, if there is an$\omega \in \mathcal{K}such that the inequality \label{UAS2} \begin{aligned} \diff{}{t}V(t,\boldsymbol{\phi}(t,t_0,\boldsymbol{\xi})) & = [\nabla_\mathbf{x} V](t,\boldsymbol{\phi}(t,t_0,\boldsymbol{\xi}))\cdot \mathbf{f}(t,\boldsymbol{\phi}(t,t_0,\boldsymbol{\xi})) + \pdiff{V}{t}(t,\boldsymbol{\phi}(t,t_0,\boldsymbol{\xi})) \\ & \leq -\omega(\|\boldsymbol{\phi}(t,t_0,\boldsymbol{\xi})\|_2) \end{aligned} holds for all\boldsymbol{\phi}(t,t_0,\boldsymbol{\xi})$in an open neighborhood$\mathcal{N}\subset \mathcal{U}$of the equilibrium$\mathbf{y}$. In this case the equilibrium is uniformly asymptotically stable on a neighborhood, which depends on$V$, of the origin. The function$V$satisfying (\ref{UAS2}) is said to be a {\it Lyapunov function} for (\ref{NNSYSTEM}). The direct method of Lyapunov is covered in practically all modern textbooks on nonlinear systems and control theory. Some good examples are \cite{hahn,hirsch04,NS,NSASAC,vidyasagar,bhatiaszegoe,willems70}. We will prove, that if the time-derivative in the inequalities above is replaced with a Dini derivative with respect to$t$, then the assumption$V\in\mathcal{C}^1(\mathbb{R}_{\geq 0}\times\mathcal{U})$can be replaced with the less restrictive assumption, that$V$is merely continuous. The same is done in Theorem 42.5 in \cite{hahn}, but a lot of details are left out. Further, we generalize the results to arbitrary switched systems. Before we state and prove the direct method of Lyapunov for switched systems, we prove a lemma that we use in its proof. \begin{lemma} \label{DMLLEMMA} Assume that the origin is an equilibrium of the Switched System \ref{POLYSYS} and let$\|\cdot\|$be a norm on$\mathbb{R}^n$. Further, assume that there is a function$\alpha\in \mathcal{K}$, such that for all$\sigma\in\mathcal{S}_\mathcal{P}$and all$t \geq t_0 \geq0$the inequality $$\label{GBSR} \|\boldsymbol{\phi}_\sigma(t,t_0,\boldsymbol{\xi})\| \leq \alpha(\|\boldsymbol{\xi}\|)$$ holds for all$\boldsymbol{\xi}$in some bounded neighborhood$\mathcal{N} \subset \mathcal{U}$of the origin. Under these assumptions the following two propositions are equivalent: \begin{enumerate} \item[(i)] There exists a function$\beta \in \mathcal{L}$, such that $$\|\boldsymbol{\phi}_\sigma(t,t_0,\boldsymbol{\xi})\| \leq \sqrt{\alpha(\|\boldsymbol{\xi}\|)}\beta(t-t_0)$$ for all$\sigma \in \mathcal{S}_\mathcal{P}$, all$t\geq t_0\geq 0$, and all$\boldsymbol{\xi}\in\mathcal{N}$. \item[(ii)] For every$\varepsilon >0$there exists a$T>0$, such that for every$t_0 \geq 0$, every$\sigma\in\mathcal{S}_\mathcal{P}$, and every$\boldsymbol{\xi}\in \mathcal{N}$the inequality $$\|\boldsymbol{\phi}_\sigma(t,t_0,\boldsymbol{\xi})\| \leq \varepsilon$$ holds for all$t\geq T + t_0$. \end{enumerate} \end{lemma} \begin{proof} Let$R>0$be so large that$\mathcal{N} \subset \mathcal{B}_{\|\cdot\|,R}$and set$C:= \max\{1,\alpha(R)\}$. Note that Proposition (i) implies proposition (ii): For every$\varepsilon >0$we set$T:= \beta^{-1}(\varepsilon/\sqrt{\alpha(R)})$and proposition (ii) follows immediately. Proposition (ii) implies proposition (i): For every$\varepsilon >0$define$\widetilde{T}(\varepsilon)$as the infimum of all$T>0$with the property, that for every$t_0 \geq 0$, every$\sigma\in\mathcal{S}_\mathcal{P}$, and every$\boldsymbol{\xi}\in \mathcal{N}$the inequality $$\|\boldsymbol{\phi}_\sigma(t,t_0,\boldsymbol{\xi})\| \leq \varepsilon$$ holds for all$t\geq T+t_0$. Then$\widetilde{T}$is a monotonically decreasing function$\mathbb{R}_{>0} \to \mathbb{R}_{\geq 0}$and, because of (\ref{GBSR}),$\widetilde{T}(\varepsilon)= 0$for all$\varepsilon > \alpha(R)$. By Lemma \ref{SOBLEMMA} there exists a strictly monotonically decreasing$\mathcal{C}^{\infty}(\mathbb{R}_{>0})$bijective function$g:\mathbb{R}_{>0} \to \mathbb{R}_{>0}$, such that$g(\varepsilon) > \widetilde{T}(\varepsilon)$for all$\varepsilon >0$. Now, for every pair$t>t_0\geq0$set$\varepsilon' := g^{-1}(t-t_0)$and note that because$t = g(\varepsilon') + t_0 \geq \widetilde{T}(\varepsilon') + t_0$we have $$g^{-1}(t-t_0) = \varepsilon' \geq \|\boldsymbol{\phi}_\sigma(t,t_0,\boldsymbol{\xi})\|.$$ But then $$\beta(s) := \begin{cases} \sqrt{2C - C/g(1)\cdot s},\quad &\text{if s\in[0,g(1)],} \\ \sqrt{Cg^{-1}(s)}, &\text{if s> g(1),} \end{cases}$$ is an$\mathcal{L}$function such that $$\sqrt{\|\boldsymbol{\phi}_\sigma(t,t_0,\boldsymbol{\xi})\|} \leq \beta(t-t_0),$$ for all$t\geq t_0\geq 0$and all$\boldsymbol{\xi}\in\mathcal{N}$, and therefore $$\|\boldsymbol{\phi}_\sigma(t,t_0,\boldsymbol{\xi})\| \leq \sqrt{\alpha(\|\boldsymbol{\xi}\|)}\beta(t-t_0).$$ \end{proof} We come to the main theorem of this section: The Lyapunov's direct method for arbitrary switched systems. \begin{theorem} \label{TDMOL} Assume that the Switched System \ref{POLYSYS} has an equilibrium at the origin. Let$\|\cdot\|$be a norm on$\mathbb{R}^n$and let$R>0$be a constant such that the closure of the ball$\mathcal{B}_{\|\cdot\|,R}$is a subset of$\mathcal{U}$. Let$V:\mathbb{R}_{\geq 0}\times\mathcal{B}_{\|\cdot\|,R}\to \mathbb{R}$be a continuous function and assume that there exist functions$\alpha_1,\alpha_2\in\mathcal{K}$such that $$\alpha_1(\|\boldsymbol{\xi}\|)\leq V(t,\boldsymbol{\xi}) \leq \alpha_2(\|\boldsymbol{\xi}\|)$$ for all$t\geq 0$and all$\boldsymbol{\xi}\in\mathcal{B}_{\|\cdot\|,R}$. Denote the solution to the Switched System \ref{POLYSYS} by$\boldsymbol{\phi}$and set$d:= \alpha_2^{-1}(\alpha_1(R))$. Finally, let$D^* \in \{D^+,D_+,D^-,D_-\}$be a Dini derivative with respect to the time$t$, which means, for example with$D^* = D^+$, that $$D^+[V(t,\boldsymbol{\phi}_\sigma(t,t_0,\boldsymbol{\xi}))] := \limsup_{h \to 0+}\frac{V(t+h,\boldsymbol{\phi}_\sigma(t+h,t_0,\boldsymbol{\xi})) - V(t,\boldsymbol{\phi}_\sigma(t,t_0,\boldsymbol{\xi}))}{h}.$$ Then the following propositions are true: \begin{enumerate} \item[(i)] If for every$\sigma\in\mathcal{S}_\mathcal{P}$, every$\boldsymbol{\xi}\in\mathcal{U}$, and every$t\geq t_0 \geq 0$, such that$\boldsymbol{\phi}_\sigma(t,t_0,\boldsymbol{\xi})\in \mathcal{B}_{\|\cdot\|,R}$, the inequality $$\label{TDMOLIE1} D^*[V(t,\boldsymbol{\phi}_\sigma(t,t_0,\boldsymbol{\xi}))] \leq 0$$ holds, then the origin is a uniformly stable equilibrium of the Switched System \ref{POLYSYS} on$\mathcal{B}_{\|\cdot\|,d}$. \item[(ii)] If there exists a function$\psi \in \mathcal{K}$, with the property that for every$\sigma\in\mathcal{S}_\mathcal{P}$, every$\boldsymbol{\xi}\in\mathcal{U}$, and every$t\geq t_0 \geq 0$, such that$\boldsymbol{\phi}_\sigma(t,t_0,\boldsymbol{\xi})\in \mathcal{B}_{\|\cdot\|,R}$, the inequality $$\label{TDMOLIE2} D^*[V(t,\boldsymbol{\phi}_\sigma(t,t_0,\boldsymbol{\xi}))] \leq -\psi(\|\boldsymbol{\phi}_\sigma(t,t_0,\boldsymbol{\xi})\|)$$ holds, then the origin is a uniformly asymptotically stable equilibrium of the Switched System \ref{POLYSYS} on$\mathcal{B}_{\|\cdot\|,d}$. \end{enumerate} \end{theorem} \begin{proof} Proposition (i): Let$t_0\geq0$,$\boldsymbol{\xi}\in \mathcal{B}_{\|\cdot\|,d}$, and$\sigma\in\mathcal{S}_\mathcal{P}$all be arbitrary but fixed. By the note after the definition of Switched System \ref{POLYSYS} either$\boldsymbol{\phi}_\sigma(t,t_0,\boldsymbol{\xi}) \in \mathcal{B}_{\|\cdot\|,R}$for all$t\geq t_0$or there is a$t^* > t_0$such that$\boldsymbol{\phi}_\sigma(s,t_0,\boldsymbol{\xi}) \in \mathcal{B}_{\|\cdot\|,R}$for all$s\in[t_0,t^*[$and$\boldsymbol{\phi}_\sigma(t^*,t_0,\boldsymbol{\xi}) \in \partial\mathcal{B}_{\|\cdot\|,R}$. Assume that the second possibility applies. Then, by inequality (\ref{TDMOLIE1}) and Corollary \ref{TEMP51} $$\alpha_1(R) \leq V(t^*,\boldsymbol{\phi}_\sigma(t^*,t_0,\boldsymbol{\xi})) \leq V(t_0,\boldsymbol{\xi}) \leq \alpha_2(\|\boldsymbol{\xi}\|) < \alpha_2(d),$$ which is contradictory to$d = \alpha_2^{-1}(\alpha_1(R))$. Therefore$\boldsymbol{\phi}_\sigma(t,t_0,\boldsymbol{\xi}) \in \mathcal{B}_{\|\cdot\|,R}$for all$t\geq t_0$. But then it follows by inequality (\ref{TDMOLIE1}) and Corollary \ref{TEMP51} that $$\alpha_1(\|\boldsymbol{\phi}_\sigma(t,t_0,\boldsymbol{\xi})\|) \leq V(t,\boldsymbol{\phi}_\sigma(t,t_0,\boldsymbol{\xi})) \leq V(t_0,\boldsymbol{\xi}) \leq \alpha_2(\|\boldsymbol{\xi}\|),$$ for all$t\geq t_0$, so $$\|\boldsymbol{\phi}_\sigma(t,t_0,\boldsymbol{\xi})\| \leq \alpha_1^{-1}(\alpha_2(\|\boldsymbol{\xi}\|))$$ for all$t \geq t_0$. Because$\alpha_1^{-1}\circ\alpha_2$is a class$\mathcal{K}$function, it follows, because$t_0\geq0$,$\boldsymbol{\xi}\in \mathcal{B}_{\|\cdot\|,d}$, and$\sigma\in\mathcal{S}_\mathcal{P}$were arbitrary, that the equilibrium at the origin is a uniformly stable equilibrium point of the Switched System \ref{POLYSYS} on$\mathcal{B}_{\|\cdot\|,d}$. Proposition (ii): Inequality (\ref{TDMOLIE2}) implies inequality (\ref{TDMOLIE1}) so Lemma \ref{DMLLEMMA} applies and it suffices to show that for every$\varepsilon>0$there is a finite$T>0$, such that $$\label{UATT} t \geq T + t_0 \quad \text{implies} \quad \|\boldsymbol{\phi}_\sigma(t,t_0,\boldsymbol{\xi})\| \leq \varepsilon$$ for all$t_0 \geq 0$, all$\boldsymbol{\xi} \in \mathcal{B}_{\|\cdot\|,d}$, and all$\sigma\in\mathcal{S}_\mathcal{P}$. To prove this choose an arbitrary$\varepsilon>0$and set $$\delta^* := \min\{d,\alpha_2^{-1}(\alpha_1(\varepsilon))\}\quad \text{and}\quad T := \frac{\alpha_2(d)}{\psi(\delta^*)}.$$ We first prove that for every$\sigma\in\mathcal{S}_\mathcal{P}$the following proposition: $$\label{UATT2} \boldsymbol{\xi} \in \mathcal{B}_{\|\cdot\|,d}\quad \text{and}\quad t_0 \geq 0 \quad \text{implies} \quad \|\boldsymbol{\phi}_\sigma(t^*,t_0,\boldsymbol{\xi})\| < \delta^*$$ for some$t^*\in [t_0,T+t_0]$. We prove (\ref{UATT2}) by contradiction. Assume that $$\label{ANNAHME1} \|\boldsymbol{\phi}_\sigma(t,t_0,\boldsymbol{\xi})\| \geq \delta^*$$ for all$t\in [t_0,T+t_0]$. Then $$\label{CONT1} 0 < \alpha_1(\delta^*) \leq \alpha_1(\|\boldsymbol{\phi}_\sigma(T+t_0,t_0,\boldsymbol{\xi})\|) \leq V(T+t_0,\boldsymbol{\phi}_\sigma(T+t_0,t_0,\boldsymbol{\xi})).$$ By Theorem \ref{MEANVT} and the assumption (\ref{ANNAHME1}), there is an$s\in[t_0,T+t_0], such that \begin{align*} \frac{V(T+t_0,\boldsymbol{\phi}_\sigma(T+t_0,t_0,\boldsymbol{\xi})) - V(t_0,\boldsymbol{\xi})}{T} &\leq [D^*V](s,\boldsymbol{\phi}(s,t_0,\boldsymbol{\xi}))] \\ &\leq -\psi(\|\boldsymbol{\phi}_\sigma(s,t_0,\boldsymbol{\xi})\|) \\ &\leq -\psi(\delta^*), \end{align*} that is \begin{align*} V(T+t_0,\boldsymbol{\phi}_\sigma(T+t_0,t_0,\boldsymbol{\xi})) &\leq V(t_0,\boldsymbol{\xi})-T\psi(\delta^*) \\ &\leq \alpha_2(\|\boldsymbol{\xi}\|) -T\psi(\delta^*) \\ &< \alpha_2(d) -T\psi(\delta^*) \\ &= \alpha_2(d) - \frac{\alpha_2(d)}{\psi(\delta^*)}\psi(\delta^*) = 0, \end{align*} which is contradictory to (\ref{CONT1}). Therefore proposition (\ref{UATT2}) is true. Now, lett^*$be as in (\ref{UATT2}) and let$t>T+t_0be arbitrary. Then, because $$s\mapsto V(s,\boldsymbol{\phi}_\sigma(s,t_0,\boldsymbol{\xi})),\quad s\geq t_0,$$ is strictly monotonically decreasing by inequality (\ref{TDMOLIE2}) and Corollary \ref{TEMP51}, we get by (\ref{UATT2}), that \begin{align*} \alpha_1(\|\boldsymbol{\phi}_\sigma(t,t_0,\boldsymbol{\xi})\|) &\leq V(t,\boldsymbol{\phi}_\sigma(t,t_0,\boldsymbol{\xi})) \\ & \leq V(t^*,\boldsymbol{\phi}_\sigma(t^*,t_0,\boldsymbol{\xi})) \\ &\leq \alpha_2(\|\boldsymbol{\phi}_\sigma(t^*,t_0,\boldsymbol{\xi})\|) \\ &< \alpha_2(\delta^*) \\ &= \min\{\alpha_2(d),\alpha_1(\varepsilon)\} \\ &\leq \alpha_1(\varepsilon), \end{align*} and we have proved (\ref{UATT}). The proposition (ii) follows.\\ \end{proof} The functionV$in the last theorem is called a Lyapunov function for the Switched System \ref{POLYSYS}. \begin{definition}[Lyapunov function] \label{DEFLYAFUNC} \rm Assume that the Switched System \ref{POLYSYS} has an equilibrium at the origin. Denote the solution to the Switched System \ref{POLYSYS} by$\boldsymbol{\phi}$and let$\|\cdot\|$be a norm on$\mathbb{R}^n$. Let$R>0$be a constant such that the closure of the ball$\mathcal{B}_{\|\cdot\|,R}$is a subset of$\mathcal{U}$. A continuous function$V:\mathbb{R}_{\geq 0}\times\mathcal{B}_{\|\cdot\|,R}\to \mathbb{R}$is called a Lyapunov function for the Switched System \ref{POLYSYS} on$\mathcal{B}_{\|\cdot\|,R}$, if and only if there exists a Dini derivative$D^* \in \{D^+,D_+,D^-,D_-\}$with respect to the time$t$and functions$\alpha_1,\alpha_2,\psi\in\mathcal{K}$with the properties that: \begin{enumerate} \item[{\bf (L1)}] $$\alpha_1(\|\boldsymbol{\xi}\|)\leq V(t,\boldsymbol{\xi}) \leq \alpha_2(\|\boldsymbol{\xi}\|)$$ for all$t\geq 0$and all$\boldsymbol{\xi}\in\mathcal{B}_{\|\cdot\|,R}$. \item[{\bf (L2)}] $$D^*[V(t,\boldsymbol{\phi}_\sigma(t,t_0,\boldsymbol{\xi}))] \leq -\psi(\|\boldsymbol{\phi}_\sigma(t,t_0,\boldsymbol{\xi})\|)$$ for every$\sigma\in\mathcal{S}_\mathcal{P}$, every$\boldsymbol{\xi}\in\mathcal{U}$, and every$t\geq t_0 \geq 0$,\\ such that$\boldsymbol{\phi}_\sigma(t,t_0,\boldsymbol{\xi})\in \mathcal{B}_{\|\cdot\|,R}$. \end{enumerate} \end{definition} The Direct Method of Lyapunov (Theorem \ref{TDMOL}) can thus, by Definition \ref{DEFLYAFUNC}, be rephrased as follows: \begin{quote} Assume that the Switched System \ref{POLYSYS} has an equilibrium point at the origin and that there exists a Lyapunov function defined on the ball$\mathcal{B}_{\|\cdot\|,R}$, of which the closure is a subset of$\mathcal{U}$, for the system. Then there is a$d$,$00$, and let$\varsigma \in \mathcal{K}\mathcal{L}$be such that$\|\boldsymbol{\phi}_\sigma(t,t_0,\boldsymbol{\xi})\| \leq \varsigma(\|\boldsymbol{\xi}\|,t-t_0)$for all$\sigma\in\mathcal{S}_\mathcal{P}$, all$\boldsymbol{\xi} \in \mathcal{B}_{\|\cdot\|,R}$, and all$t\geq t_0 \geq 0$. Assume further, that there exists a constant$L$for the functions$\mathbf{f}_p$, such that $$\|\mathbf{f}_p(t,\mathbf{x}) - \mathbf{f}_p(t,\mathbf{y})\| \leq L\|\mathbf{x} - \mathbf{y}\|$$ for all$t\geq 0$, all$\mathbf{x},\mathbf{y} \in \mathcal{B}_{\|\cdot\|,R}$, and all$p\in\mathcal{P}$. By Massera's lemma (Lemma \ref{MLEMMA}) there exists a function$g\in\mathcal{C}^{1}(\mathbb{R}_{\geq 0})$, such that$g,g'\in\mathcal{K}$,$g$is infinitely differentiable on$\mathbb{R}_{>0}$, $$\int_0^{+\infty}g(\varsigma(R,\tau))d\tau < +\infty,\quad \text{and}\quad \int_0^{+\infty}g'(\varsigma(R,\tau))e^{L\tau}d\tau < +\infty.$$ \begin{itemize} \item[(i)] For every$\sigma\in\mathcal{S}_\mathcal{P}$we define the function$W_\sigma$for all$t\geq 0$and all$\boldsymbol{\xi} \in\mathcal{B}_{\|\cdot\|,R}$by $$W_\sigma(t,\boldsymbol{\xi}):= \int_t^{+\infty} g(\|\boldsymbol{\phi}_\sigma(\tau,t,\boldsymbol{\xi})\|)d\tau.$$ \item[(ii)] We define the function$W$for all$t\geq 0$and all$\boldsymbol{\xi} \in\mathcal{B}_{\|\cdot\|,R}$by $$W(t,\boldsymbol{\xi}) := \sup_{\sigma \in\mathcal{S}_\mathcal{P}} W_\sigma(t,\boldsymbol{\xi}).$$ Note, that if the Switched System \ref{POLYSYS} is autonomous, then$W$does not depend on$t$, that is, it is time-invariant. \end{itemize} \end{definition} The function$W$from the definition above (Definition \ref{WSDEF}) is a Lyapunov function for the Switched System \ref{POLYSYS} used in its construction. This is proved in the next theorem. \begin{theorem}[Converse theorem for switched systems] \label{LL} The function$W$in Definition \ref{WSDEF} is a Lyapunov function for the Switched System \ref{POLYSYS} used in its construction. Further, there exists a constant$L_W>0$such that $$\label{WLL} |W(t,\boldsymbol{\xi}) - W(t,\boldsymbol{\eta})| \leq L_W\|\boldsymbol{\xi} - \boldsymbol{\eta}\|$$ for all$t\geq 0$and all$\boldsymbol{\xi},\boldsymbol{\eta} \in \mathcal{B}_{\|\cdot\|,R}$, where the norm$\|\cdot\|$and the constant$R$are the same as in Definition \ref{WSDEF}. \end{theorem} \begin{proof} We have to show that the function$W$complies to the conditions {\bf(L1)} and {\bf(L2)} of Definition \ref{DEFLYAFUNC}. Because $$\boldsymbol{\phi}_\sigma(u,t,\boldsymbol{\xi}) = \boldsymbol{\xi} +\int_t^u\mathbf{f}_{\sigma(\tau)}(\tau,\boldsymbol{\phi}_\sigma(\tau,t,\boldsymbol{\xi})) d\tau,$$ and$\|\mathbf{f}_{\sigma(s)}(s,\mathbf{y})\| \leq LR$for all$s\geq0$and all$\mathbf{y} \in \mathcal{B}_{\|\cdot\|,R}$, we conclude$\|\boldsymbol{\phi}_\sigma(u,t,\boldsymbol{\xi})\| \geq \|\boldsymbol{\xi}\| - (u-t)LR$for all$u\geq t\geq0$,$\boldsymbol{\xi} \in \mathcal{B}_{\|\cdot\|,R}$, and all$\sigma\in\mathcal{S}_\mathcal{P}$. Therefore, $$\|\boldsymbol{\phi}_\sigma(u,t,\boldsymbol{\xi})\| \geq \frac{\|\boldsymbol{\xi}\|}{2}\quad \text{whenever}\ t \leq u \leq t+\frac{\|\boldsymbol{\xi}\|}{2LR},$$ which implies \begin{equation*} W_\sigma(t,\boldsymbol{\xi}) := \int_t^{+\infty} g(\|\boldsymbol{\phi}_\sigma(\tau,t,\boldsymbol{\xi})\|)d\tau \geq \frac{\|\boldsymbol{\xi}\|}{2LR}g(\|\boldsymbol{\xi}\|/2) \end{equation*} and then$\alpha_1(\|\boldsymbol{\xi}\|) \leq W(t,\boldsymbol{\xi})$for all$t\geq 0$and all$\boldsymbol{\xi} \in \mathcal{B}_{\|\cdot\|,R}$, where$\alpha_1(x) := x/(2LR)g(x/2)$is a$\mathcal{K}$function. By the definition of$W, $$W(t,\boldsymbol{\xi}) \geq \int_t^{t+h}g(\|\boldsymbol{\phi}_\sigma(\tau,t,\boldsymbol{\xi})\|)d\tau + W(t+h,\boldsymbol{\phi}_\sigma(t+h,t,\boldsymbol{\xi}))$$ \begin{align*} &\text{(reads: supremum over all trajectories emerging from\boldsymbol{\xi}$}\\ &\text{at time$t$is not less than over any particular trajectory}\\ &\text{emerging from$\boldsymbol{\xi}$at time$t)} \end{align*} for all\boldsymbol{\xi}\in \mathcal{B}_{\|\cdot\|,R}$, all$t\geq 0$, all small enough$h>0$, and all$\sigma \in \mathcal{S}_\mathcal{P}$, from which $$\limsup_{h\to 0+}\frac{W(t+h,\boldsymbol{\phi}_\sigma(t+h,t,\boldsymbol{\xi})) - W(t,\boldsymbol{\xi}) }{h} \leq - g(\|\boldsymbol{\xi}\|)$$ follows. Because$g\in\mathcal{K}$this implies that the condition {\bf(L2)} from Definition \ref{DEFLYAFUNC} holds for the function$W$. Now, assume that there is an$L_W>0$such that inequality (\ref{WLL}) holds. Then$W(t,\boldsymbol{\xi}) \leq \alpha_2(\|\boldsymbol{\xi}\|)$for all$t\geq 0$and all$\boldsymbol{\xi} \in \mathcal{B}_{\|\cdot\|,R}$, where$\alpha_2(x) := L_Wx$is a class$\mathcal{K}$function. Thus, it only remains to prove inequality (\ref{WLL}). However, as this inequality is a byproduct of the next lemma, we spare us the proof here. \end{proof} The results of the next lemma are needed in the proof of our converse theorem on uniform asymptotic stability of a switched system's equilibrium and as a convenient side effect it completes the proof of Theorem \ref{LL}. \begin{lemma} \label{WWLEMMA} The function$W$in Definition \ref{WSDEF} satisfies for all$t\geq s \geq 0$, all$\boldsymbol{\xi},\boldsymbol{\eta}\in \mathcal{B}_{\|\cdot\|,R}$, and all$\sigma\in\mathcal{S}_\mathcal{P}$the inequality $$\label{WUNGL} W(t,\boldsymbol{\xi}) - W(s,\boldsymbol{\eta}) \leq C\|\boldsymbol{\xi} - \boldsymbol{\phi}_\sigma(t,s,\boldsymbol{\eta})\| - \int_s^tg(\|\boldsymbol{\phi}_\sigma(\tau,s,\boldsymbol{\eta})\|)d\tau,$$ where $$C:= \int_0^{+\infty} g'(\varsigma(R,\tau))e^{L\tau}d\tau < +\infty.$$ Especially, $$\label{WUNGL2} |W(t,\boldsymbol{\xi}) - W(t,\boldsymbol{\eta})| \leq C\|\boldsymbol{\xi} - \boldsymbol{\eta}\|$$ for all$t\geq 0$and all$\boldsymbol{\xi},\boldsymbol{\eta}\in \mathcal{B}_{\|\cdot\|,R}$. The norm$\|\cdot\|$, the constants$R,L$, and the functions$\varsigma$and$gare, of course, the same as in Definition \ref{WSDEF}. \end{lemma} \begin{proof} By the Mean-value theorem and Theorem \ref{APPLEMMASW} we have \begin{align} \label{WWLEMMAIE1} &W_\sigma(t,\boldsymbol{\xi}) - W_\sigma(s,\boldsymbol{\eta}) \\ &= \int_t^{+\infty} g(\|\boldsymbol{\phi}_\sigma(\tau,t,\boldsymbol{\xi})\|)d\tau - \int_s^{+\infty} g(\|\boldsymbol{\phi}_\sigma(\tau,s,\boldsymbol{\eta})\|)d\tau \nonumber\\ & \leq \int_t^{+\infty} \big{|}g(\|\boldsymbol{\phi}_\sigma(\tau,t,\boldsymbol{\xi})\|) - g(\|\boldsymbol{\phi}_\sigma(\tau,s,\boldsymbol{\eta})\|)\big{|} d\tau - \int_s^tg(\|\boldsymbol{\phi}_\sigma(\tau,s,\boldsymbol{\eta})\|)d\tau \nonumber \\ & = \int_t^{+\infty} \big{|}g(\|\boldsymbol{\phi}_\sigma(\tau,t,\boldsymbol{\xi})\|) - g(\|\boldsymbol{\phi}_\sigma(\tau,t,\boldsymbol{\phi}_\sigma(t,s,\boldsymbol{\eta}))\|)\big{|} d\tau - \int_s^tg(\|\boldsymbol{\phi}_\sigma(\tau,s,\boldsymbol{\eta})\|)d\tau \nonumber \\ & \leq \int_t^{+\infty} g'(\varsigma(R,\tau-t))\|\boldsymbol{\phi}_\sigma(\tau,t,\boldsymbol{\xi}) - \boldsymbol{\phi}_\sigma(\tau,t,\boldsymbol{\phi}_\sigma(t,s,\boldsymbol{\eta}))\| d\tau - \int_s^tg(\|\boldsymbol{\phi}_\sigma(\tau,s,\boldsymbol{\eta})\|)d\tau \nonumber \\ &\leq \int_t^{+\infty} g'(\varsigma(R,\tau-t))e^{L(\tau-t)}\|\boldsymbol{\xi} - \boldsymbol{\phi}_\sigma(t,s,\boldsymbol{\eta})\| d\tau - \int_s^tg(\|\boldsymbol{\phi}_\sigma(\tau,s,\boldsymbol{\eta})\|)d\tau \nonumber \\ & = C\|\boldsymbol{\xi} - \boldsymbol{\phi}_\sigma(t,s,\boldsymbol{\eta})\| - \int_s^tg(\|\boldsymbol{\phi}_\sigma(\tau,s,\boldsymbol{\eta})\|)d\tau. \nonumber \end{align} We now show that we can replaceW_\sigma(t,\boldsymbol{\xi}) - W_\sigma(s,\boldsymbol{\eta})$by$W(t,\boldsymbol{\xi}) - W(s,\boldsymbol{\eta})$on the leftmost side of inequality (\ref{WWLEMMAIE1}) without violating the$\leq$relations. That this is possible might seem a little surprising at first sight. However, a closer look reveals that this is not surprising at all because the rightmost side of inequality (\ref{WWLEMMAIE1}) only depends on the values of$\sigma(z)$for$s\leq z\leq t$and because$W_\sigma(t,\boldsymbol{\xi}) - W(s,\boldsymbol{\eta}) \leq W_\sigma(t,\boldsymbol{\xi}) - W_\sigma(s,\boldsymbol{\eta})$, where the left-hand side only depends on the values of$\sigma(z)$for$z\geq t$, . To rigidly prove the validity of this replacement let$\delta >0 $be an arbitrary constant and choose a$\gamma \in \mathcal{S}_\mathcal{P}$, such that $$\label{SSS1} W(t,\boldsymbol{\xi}) - W_\gamma(t,\boldsymbol{\xi}) < \frac{\delta}{2},$$ and a$u>0$so small that $$\label{SSS2} ug(\varsigma(\|\boldsymbol{\xi}\|,0)) + 2CR(e^u-1) < \frac{\delta}{2}.$$ We define$\theta\in\mathcal{S}_\mathcal{P}by $$\theta(\tau) := \begin{cases} \sigma(\tau),& \text{if 0 \leq \tau 0 was arbitrary we have proved inequality (\ref{WUNGL}). Inequality (\ref{WUNGL2}) is a trivial consequence of inequality (\ref{WUNGL}), just set s=t and note that \boldsymbol{\xi} and \boldsymbol{\eta} can be reversed. \end{proof} Finally, we come to the central theorem of this section. It is the promised converse Lyapunov theorem for a uniformly asymptotically stable equilibrium of the Switched System \ref{POLYSYS}. \begin{theorem}[Smooth converse theorem for switched systems] \label{CONVLYA} Assume that the origin is a uniformly asymptotically stable equilibrium point of the Switched System \ref{POLYSYS} on the ball \mathcal{B}_{\|\cdot\|,R}\subset\mathcal{U}, R>0, where \|\cdot\| is a norm on \mathbb{R}^n. Assume further, that the functions \mathbf{f}_p, p\in\mathcal{P}, satisfy the common Lipschitz condition: there exists a constant L>0 such that $$\label{CTA2} \|\mathbf{f}_p(s,\mathbf{x}) - \mathbf{f}_p(t,\mathbf{y})\| \leq L(|s-t| + \|\mathbf{x} - \mathbf{y}\|)$$ for all s,t\geq 0, all \mathbf{x},\mathbf{y} \in \mathcal{B}_{\|\cdot\|,R}, and all p\in\mathcal{P}. Then, for every 00 be constants such that$$ \|\mathbf{x}\|_2 \leq m\|\mathbf{x}\|\quad \text{and}\quad \|\mathbf{x}\| \leq M\|\mathbf{x}\|_2 $$for all \mathbf{x}\in \mathbb{R}^n and let a be a constant such that$$ a > 2m \quad \text{and set}\quad y^* := \frac{mR}{a}. $$Define$$ K := \frac{g(y^*)}{a} L\Big(C\big[m(1+M)R + mR\big(\frac{4}{3}LR+M\big) \big] + g(4R/3) mR\Big), $$and set $$\label{EPSMIN} \epsilon := \min\left\{\frac{a}{3g(y^*)},\frac{a(R-R^*)}{R^* g(y^*)},\frac{a}{2mRL g(y^*)},\frac{1}{K}\right\}.$$ Note that \epsilon is a real-valued constant that is strictly larger than zero. We define the function \varepsilon:\mathbb{R}_{\geq0} \to \mathbb{R}_{\geq 0} by $$\label{DEFVAREPS} \varepsilon(x) := \epsilon \int_0^{\frac{x}{a}} g(z)dz.$$ The definition of \varepsilon implies $$\label{CT1} \varepsilon(x) \leq \epsilon g(x/a)\frac{x}{a} \leq \frac{a}{3g(y^*)}\cdot g(x/a)\frac{x}{a} \leq \frac{x}{3}$$ for all 0\leq x\leq mR and $$\label{CT2} \varepsilon'(x) = \frac{\epsilon}{a}g(x/a)$$ for all x\geq 0. Define the function \vartheta by \vartheta(x) := g(2x/3) - g(x/2) for all x\geq 0. Then \vartheta(0)=0 and for every x>0 we have$$ \vartheta'(x) = \frac{2}{3}g'(2x/3) - \frac{1}{2} g'(x/2) > 0 because g'\in\mathcal{K}, that is \vartheta\in \mathcal{K}. \smallskip \noindent\textbf{Part II:} Let \rho\in\mathcal{C}^\infty(\mathbb{R}) be a nonnegative function with \operatorname{supp}(\rho) \subset\,]-1,0[ and \int_\mathbb{R}\rho(x) =1 and let \varrho\in\mathcal{C}^\infty(\mathbb{R}^n) be a nonnegative function with \operatorname{supp}(\varrho) \subset \mathcal{B}_{\|\cdot\|_2,1} and \int_{\mathbb{R}^n}\varrho(\mathbf{x})d^nx = 1. Extend W on \mathbb{R}\times\mathbb{R}^n by setting it equal to zero outside of \mathbb{R}_{\geq 0} \times \mathcal{B}_{\|\cdot\|,R}. We claim that the function V:\mathbb{R}_{\geq 0} \times \mathcal{B}_{\|\cdot\|,R^*} \to \mathbb{R}_{\geq 0}, V(t,\boldsymbol{0}):=0 for all t\geq 0, and \begin{align*} V(t,\boldsymbol{\xi}) &:= \int_\mathbb{R} \int_{\mathbb{R}^n} \rho \Big(\frac{t-\tau}{\varepsilon(\|\boldsymbol{\xi}\|_2)}\Big) \varrho\Big(\frac{\boldsymbol{\xi}-\mathbf{y}}{\varepsilon(\|\boldsymbol{\xi}\|_2)}\Big) \frac{W[\tau,\mathbf{y}]}{\varepsilon^{n+1}(\|\boldsymbol{\xi}\|_2)}d^ny d\tau \\ &= \int_\mathbb{R} \int_{\mathbb{R}^n}\rho(\tau) \varrho(\mathbf{y})W[t - \varepsilon(\|\boldsymbol{\xi}\|_2)\tau ,\boldsymbol{\xi}-\varepsilon(\|\boldsymbol{\xi}\|_2)\mathbf{y} ]d^ny d\tau \end{align*} for all t\geq0 and all \boldsymbol{\xi}\in\mathcal{B}_{\|\cdot\|,R^*}\setminus \{\boldsymbol{0}\}, is a \mathcal{C}^{\infty}(\mathbb{R}_{\geq 0} \times \left[\mathcal{B}_{\|\cdot\|,R^*}\setminus \{\boldsymbol{0}\}\right]) Lyapunov function for the switched system. Note, that if the Switched System \ref{POLYSYS} in question is autonomous, then W is time-invariant, which implies that V is time-invariant too. Because, for every \|\mathbf{y}\|_2 \leq 1 and every \|\boldsymbol{\xi}\| < R^*, we have by (\ref{CT1}) and (\ref{EPSMIN}), that \begin{align*} \|\boldsymbol{\xi}-\varepsilon(\|\boldsymbol{\xi}\|_2)\mathbf{y}\| &\leq \Big(1+\frac{\varepsilon(\|\boldsymbol{\xi}\|_2)}{\|\boldsymbol{\xi}\|_2}\Big)\|\boldsymbol{\xi}\| \\ & \leq \Big(1+\frac{\epsilon g(\|\boldsymbol{\xi}\|_2/a)}{\|\boldsymbol{\xi}\|_2} \cdot\frac{\|\boldsymbol{\xi}\|_2}{a}\Big)\|\boldsymbol{\xi}\| \\ & < \Big(1 +\frac{a(R-R^*)g(y^*)}{R^* g(y^*)a} \Big)R^*\\ &= R, \end{align*} so V is properly defined on \mathbb{R}_{\geq 0}\times \mathcal{B}_{\|\cdot\|,R^*}. But then, by construction, V\in\mathcal{C}^{\infty}(\mathbb{R}_{\geq 0} \times \left[\mathcal{B}_{\|\cdot\|,R^*}\setminus \{\boldsymbol{0}\}\right]). It remains to be shown that V fulfills the conditions {\bf (L1)} and {\bf (L2)} in Definition \ref{DEFLYAFUNC} of a Lyapunov function. By Theorem \ref{LL} and Lemma \ref{WWLEMMA} there is a function \alpha_1\in\mathcal{K} and a constant L_W>0, such that \alpha_1(\|\boldsymbol{\xi}\|) \leq W(t,\boldsymbol{\xi}) \leq L_W \|\boldsymbol{\xi}\| for all t\geq0 and all \boldsymbol{\xi} \in \mathcal{B}_{\|\cdot\|,R}. By inequality (\ref{CT1}) we have for all \boldsymbol{\xi} \in \mathcal{B}_{\|\cdot\|,R} and all \|\mathbf{y}\|_2 \leq 1, that \begin{gather} \label{NORMABSX1} \|\boldsymbol{\xi} - \varepsilon(\|\boldsymbol{\xi}\|_2)\mathbf{y}\| \geq \|\boldsymbol{\xi} - \frac{\|\boldsymbol{\xi}\|_2}{3} \frac{\boldsymbol{\xi}}{\|\boldsymbol{\xi}\|_2}\| = \frac{2}{3}\|\boldsymbol{\xi}\|,\\ \label{NORMABSX2} \|\boldsymbol{\xi} - \varepsilon(\|\boldsymbol{\xi}\|_2)\mathbf{y}\| \leq \|\boldsymbol{\xi} + \frac{\|\boldsymbol{\xi}\|_2}{3} \frac{\boldsymbol{\xi}}{\|\boldsymbol{\xi}\|_2}\| = \frac{4}{3}\|\boldsymbol{\xi}\|. \end{gather} Hence \label{ULYA} \begin{aligned} \alpha_1(2\|\boldsymbol{\xi}\|/3) &= \int_\mathbb{R} \int_{\mathbb{R}^n}\rho(\tau) \varrho(\mathbf{y})\alpha_1(2\|\boldsymbol{\xi}\|/3)d^ny d\tau\\ &\leq \int_\mathbb{R} \int_{\mathbb{R}^n}\rho(\tau) \varrho(\mathbf{y})\alpha_1(\|\boldsymbol{\xi}-\varepsilon(\|\boldsymbol{\xi}\|_2) \mathbf{y}\|)d^ny d\tau \\ &\leq \int_\mathbb{R} \int_{\mathbb{R}^n}\rho(\tau) \varrho(\mathbf{y})W[t - \varepsilon(\|\boldsymbol{\xi}\|_2)\tau ,\boldsymbol{\xi} -\varepsilon(\|\boldsymbol{\xi}\|_2)\mathbf{y} ]d^ny d\tau \\ &= V(t,\boldsymbol{\xi}) \\ &\leq \int_\mathbb{R} \int_{\mathbb{R}^n}\rho(\tau) \varrho(\mathbf{y})L_W\|\boldsymbol{\xi}-\varepsilon(\|\boldsymbol{\xi}\|_2) \mathbf{y}\|d^ny d\tau \\ &\leq \frac{4L_W}{3}\|\boldsymbol{\xi}\|, \end{aligned} and the function V fulfills the condition {\bf (L1)}. We now prove that V fulfills the condition {\bf (L2)}. To do this let t\geq 0, \boldsymbol{\xi}\in\mathcal{B}_{\|\cdot\|,R^*}, and \sigma\in\mathcal{S}_\mathcal{P} be arbitrary, but fixed throughout the rest of the proof. Denote by \mathcal{I} the maximum interval in \mathbb{R}_{\geq 0} on which s \mapsto \boldsymbol{\phi}_\sigma(s,t,\boldsymbol{\xi}) is defined and set q(s,\tau) := s-\varepsilon(\|\boldsymbol{\phi}_\sigma(s,t,\boldsymbol{\xi})\|_2)\tau for all s\in\mathcal{I} and all -1\leq \tau \leq 0 and define \begin{align*} D(h,\mathbf{y},\tau) &:=W[q(t+h,\tau),\boldsymbol{\phi}_\sigma(t+h,t,\boldsymbol{\xi})-\varepsilon(\|\boldsymbol{\phi}_\sigma(t+h,t,\boldsymbol{\xi})\|_2)\mathbf{y}]\\ &\quad \quad \quad \quad \ - W[q(t,\tau),\boldsymbol{\xi}-\varepsilon(\|\boldsymbol{\xi}\|_2)\mathbf{y}] \end{align*} for all h such that t+h\in\mathcal{I}, all \|\mathbf{y}\|_2 \leq 1, and all -1\leq\tau\leq 0. Then V(t+h,\boldsymbol{\phi}_\sigma(t+h,t,\boldsymbol{\xi})) - V(t,\boldsymbol{\xi}) = \int_\mathbb{R}\int_{\mathbb{R}^n} \rho(\tau) \varrho(\mathbf{y})D(h,\mathbf{y},\tau)d^ny d\tau for all h such that t+h \in\mathcal{I}, especially this equality holds for all h in an interval of the form [0,h'[, where 0t that is smaller than any switching-time (discontinuity-point) of \sigma larger than t, and because of (\ref{EPSMIN}) and (\ref{CTA2}), we have \begin{align*} \frac{dq}{ds}(s,\tau) &= 1 - \frac{\epsilon g(\|\boldsymbol{\phi}_\sigma(s,t,\boldsymbol{\xi})\|_2/a)}{a} \frac{\boldsymbol{\phi}_\sigma(s,t,\boldsymbol{\xi})}{\|\boldsymbol{\phi}_\sigma(s,t,\boldsymbol{\xi})\|_2}\cdot\mathbf{f}_{\sigma(s)}(s,\boldsymbol{\phi}_\sigma(s,t,\boldsymbol{\xi}))\tau\\ &\geq 1 -\epsilon \, \frac{g(y^*)LmR}{a}\\ &\geq \frac{1}{2}, \end{align*} so q(t+h,\tau) \geq q(t,\tau) \geq 0 for all small enough h\geq 0. Now, denote by \gamma the constant switching signal \sigma(t) in \mathcal{S}_\mathcal{P}, that is \gamma(s) := \sigma(t) for all s\geq 0, and consider that by Lemma \ref{WWLEMMA} \begin{align*} \frac{D(h,\mathbf{y},\tau)}{h} &\leq \frac{C}{h} \Big{\|}\boldsymbol{\phi}_\sigma(t+h,t,\boldsymbol{\xi})-\varepsilon(\|\boldsymbol{\phi}_\sigma(t+h,t,\boldsymbol{\xi})\|_2)\mathbf{y} \\ &\quad - \boldsymbol{\phi}_\gamma(q(t+h,\tau),q(t,\tau),\boldsymbol{\xi}-\varepsilon(\|\boldsymbol{\xi}\|_2)\mathbf{y})\Big{\|} \\ &\quad -\frac{1}{h}\int_{q(t,\tau)}^{q(t+h,\tau)} g(\|\boldsymbol{\phi}_\gamma(s,q(t,\tau),\boldsymbol{\xi}-\varepsilon(\|\boldsymbol{\xi}\|_2)\mathbf{y})\|)ds \\ &= C\Big{\|}\frac{\boldsymbol{\phi}_\sigma(t+h,t,\boldsymbol{\xi})-\boldsymbol{\xi}}{h} -\frac{\varepsilon(\|\boldsymbol{\phi}_\sigma(t+h,t,\boldsymbol{\xi})\|_2) - \varepsilon(\|\boldsymbol{\xi}\|_2)}{h}\mathbf{y} \\ &\quad - \frac{\boldsymbol{\phi}_\gamma(q(t+h,\tau),q(t,\tau),\boldsymbol{\xi}-\varepsilon(\|\boldsymbol{\xi}\|_2)\mathbf{y}) - [\boldsymbol{\xi}-\varepsilon(\|\boldsymbol{\xi}\|_2)\mathbf{y}]}{h} \Big{\|} \\ &\quad -\frac{1}{h}\int_{q(t,\tau)}^{q(t+h,\tau)} g(\|\boldsymbol{\phi}_\gamma(s,q(t,\tau),\boldsymbol{\xi}-\varepsilon(\|\boldsymbol{\xi}\|_2)\mathbf{y})\|)ds. \end{align*} For the next calculations we need s \mapsto q(s,\tau) to be differentiable at t. If it is not, which might be the case if t is a switching time of \sigma, we replace \sigma with \sigma^*\in\mathcal{S}_\mathcal{P} where \sigma^*(s) := \begin{cases} \sigma(t), &\text{if0\leq s\leq t$},\\ \sigma(s), &\text{if$s\geq t}. \end{cases} $$Note that this does not affect the numerical value$$ \limsup_{h\to 0+}\frac{D(h,\mathbf{y},\tau)}{h} because \sigma^*(t+h) = \sigma(t+h) for all h\geq 0. Hence, with p:= \sigma(t), and by (\ref{CTA2}), the chain rule, (\ref{NORMABSX1}), and (\ref{NORMABSX2}), \begin{align*} &\limsup_{h\to 0+}\frac{D(h,\mathbf{y},\tau)}{h}\\ &\leq C\Big{\|}\mathbf{f}_p(t,\boldsymbol{\xi}) - \mathbf{f}_p(q(t,\tau),\boldsymbol{\xi}-\varepsilon(\|\boldsymbol{\xi}\|_2)\mathbf{y})\cdot\frac{dq}{dt'}(t',\tau)\Big|_{t'=t} \\ &\quad -\varepsilon'(\|\boldsymbol{\xi}\|_2)\cdot\frac{d}{dt'}\|\boldsymbol{\phi}_{\sigma}(t',t,\boldsymbol{\xi})\|_2 \Big|_{t'=t}\mathbf{y}\Big{\|} \\ &\quad - g(\|\boldsymbol{\phi}_\gamma(q(t,\tau),q(t,\tau),\boldsymbol{\xi} -\varepsilon(\|\boldsymbol{\xi}\|_2)\mathbf{y})\|)\cdot\frac{dq}{dt'}(t',\tau)\Big|_{t'=t}\\ & = C\Big{\|}\mathbf{f}_p(t,\boldsymbol{\xi}) - \mathbf{f}_p(q(t,\tau),\boldsymbol{\xi}-\varepsilon(\|\boldsymbol{\xi}\|_2)\mathbf{y}) \Big[1-\varepsilon'(\|\boldsymbol{\xi}\|_2)\frac{\boldsymbol{\xi}}{\|\boldsymbol{\xi}\|_2}\cdot \mathbf{f}_p(t,\boldsymbol{\xi})\tau\Big]\\ &\quad -\varepsilon'(\|\boldsymbol{\xi}\|_2)[\frac{\boldsymbol{\xi}}{\|\boldsymbol{\xi}\|_2}\cdot \mathbf{f}_p(t,\boldsymbol{\xi})]\mathbf{y}\Big{\|}\\ &\quad - g(\|\boldsymbol{\xi}-\varepsilon(\|\boldsymbol{\xi}\|_2)\mathbf{y}\|)\left[1-\varepsilon'(\|\boldsymbol{\xi}\|_2)\frac{\boldsymbol{\xi}}{\|\boldsymbol{\xi}\|_2}\cdot \mathbf{f}_p(t,\boldsymbol{\xi})\right]\\ & \leq C \big{\|}\mathbf{f}_p(t,\boldsymbol{\xi}) - \mathbf{f}_p(q(t,\tau),\boldsymbol{\xi}-\varepsilon(\|\boldsymbol{\xi}\|_2)\mathbf{y})\big{\|} \\ & \quad + C\varepsilon'(\|\boldsymbol{\xi}\|_2)\|\mathbf{f}_p(t,\boldsymbol{\xi})\|_2 \big[\|\mathbf{f}_p(q(t,\tau),\boldsymbol{\xi}-\varepsilon(\|\boldsymbol{\xi}\|_2)\mathbf{y})\| + \|\mathbf{y}\|\big]\\ & \quad - g(2\|\boldsymbol{\xi}\|/3 ) + g(4\|\boldsymbol{\xi}\|/3 ) \varepsilon'(\|\boldsymbol{\xi}\|_2)\|\mathbf{f}_p(t,\boldsymbol{\xi})\|_2\\ &\leq C L \big[ |t - q(t,\tau)| + \varepsilon(\|\boldsymbol{\xi}\|_2)\|\mathbf{y}\| ) \big{[]} \\ & \quad +C\varepsilon'(\|\boldsymbol{\xi}\|_2)mLR \big[L\|\boldsymbol{\xi}-\varepsilon(\|\boldsymbol{\xi}\|_2)\mathbf{y})\| + M\|\mathbf{y}\|_2 \big]\\ & \quad - g(2\|\boldsymbol{\xi}\|/3 ) + g(4\|\boldsymbol{\xi}\|/3 ) \varepsilon'(\|\boldsymbol{\xi}\|_2)mLR\\ & \leq C\big[ L (1+M) \varepsilon(\|\boldsymbol{\xi}\|_2)+ \varepsilon'(\|\boldsymbol{\xi}\|_2)mLR \big\{L\frac{4}{3}\|\boldsymbol{\xi}\| + M \big\}\big] \\ & \quad - g(2\|\boldsymbol{\xi}\|/3 ) + g(4\|\boldsymbol{\xi}\|/3 ) \varepsilon'(\|\boldsymbol{\xi}\|_2)mLR. \end{align*} Therefore, by (\ref{CT1}), (\ref{CT2}), and (\ref{EPSMIN}), and with x:=\|\boldsymbol{\xi}\|, we can further simplify, \begin{align*} &\limsup_{h\to 0+}\frac{D(h,\mathbf{y},\tau)}{h} \\ &\leq -g(2x/3) +\frac{\epsilon}{a} g(mx/a) L\Big(C\big[m(1+M)x + mR\big(\frac{4}{3}Lx+M\big) \big] + g(4x/3) mR\Big)\\ & \leq - g(2x/3) + K \epsilon g(x/2) \\ & \leq -\vartheta(x), \end{align*} and because t\geq 0, \boldsymbol{\xi}\in\mathcal{B}_{\|\cdot\|,R^*}, and \sigma\in\mathcal{S}_\mathcal{P} were arbitrary, we have proved that V is a Lyapunov function for the system. \end{proof} Now, we have proved the main theorem of this section, our much wanted converse theorem for the arbitrary Switched System \ref{POLYSYS}. \section{Construction of Lyapunov Functions} \label{SECCLF} In this section we present a procedure to construct Lyapunov functions for the Switched System \ref{POLYSYS}. After a few preliminaries on piecewise affine functions we give an algorithmic description of how to derive a linear programming problem from the Switched System \ref{POLYSYS} (Definition \ref{LP}), and we prove that if the linear programming problem possesses a feasible solution, then it can be used to parameterize a Lyapunov function for the system. Then, in Section \ref{SECALG} and after some preparation in Section \ref{SECCCT}, we present an algorithm that systematically generates linear programming problems for the Switched System \ref{POLYSYS} and we prove, that if the switched system possesses a Lyapunov function at all, then the algorithm generates, in a finite number of steps, a linear programming problem that has a feasible solution. Because there are algorithms that always find a feasible solution to a linear programming problem if one exists, this implies that we have derived an algorithm for constructing Lyapunov functions, whenever one exists. Further, we consider the case when the Switched System \ref{POLYSYS} is autonomous separately, because in this case it is possible to parameterize a time-independent Lyapunov function for the system. Let us be a little more specific on these points before we start to derive the results: To construct a Lyapunov function with a linear programming problem, one needs a class of continuous functions that are easily parameterized. That is, we need a class of functions that is general enough to be used as a search-space for Lyapunov functions, but it has to be a finite-dimensional vector space so that its functions are uniquely characterized by a finite number of real numbers. The class of the continuous piecewise affine functions \operatorname{CPWA} is a well suited candidate. The algorithm for parameterizing a Lyapunov function for the Switched System \ref{POLYSYS} consists roughly of the following steps: \begin{itemize} \item[(i)] Partition a neighborhood of the equilibrium under consideration in a family \mathfrak{S} of simplices. \item[(ii)] Limit the search for a Lyapunov function V for the system to the class of continuous functions that are affine on any S \in \mathfrak{S}. \item[(iii)] State linear inequalities for the values of V at the vertices of the simplices in \mathfrak{S}, so that if they can be fulfilled, then the function V, which is uniquely determined by its values at the vertices, is a Lyapunov function for the system in the whole area. \end{itemize} We first partition \mathbb{R}^n into n-simplices and use this partition to define the function spaces \operatorname{CPWA} of continuous piecewise affine functions \mathbb{R}^n\to\mathbb{R}. A function in \operatorname{CPWA} is uniquely determined by its values at the vertices of the simplices in \mathfrak{S}. Then we present a linear programming problem, algorithmically derived from the Switched System \ref{POLYSYS}, and prove that a \operatorname{CPWA} Lyapunov function for the system can be parameterized from any feasible solution to this linear programming problem. Finally, in Section \ref{SECCCT}, we prove that if the equilibrium of the Switched System \ref{POLYSYS} is uniformly asymptotically stable, then any simplicial partition with small enough simplices leads to a linear programming problem that does have a feasible solution. Because, by Theorem \ref{TDMOL} and Theorem \ref{CONVLYA}, a Lyapunov function exists for the Switched System \ref{POLYSYS} exactly when the equilibrium is uniformly asymptotically stable, and because it is always possible to algorithmically find a feasible solution if at least one exists, this proves that the algorithm we present in Section \ref{SECALG} can parameterize a Lyapunov function for the Switched System \ref{POLYSYS} if the system does possess a Lyapunov functions at all. \subsection{Continuous piecewise affine functions} \label{SUBSECCPWAL} To construct a Lyapunov function by linear programming, one needs a class of continuous functions that are easily parameterized. Our approach is a simplicial partition of \mathbb{R}^n, on which we define the finite dimensional \mathbb{R}-vector space \operatorname{CPWA} of continuous functions, that are affine on every of the simplices. We first discuss an appropriate simplicial partition of \mathbb{R}^n and then define the function space \operatorname{CPWA}. The same is done in considerable more detail in Chapter 4 in \cite{Marinosson:02a}. The simplices S_\sigma, where \sigma\in\operatorname{Perm}[\{1,2,\dots,n\}], will serve as the atoms of our partition of \mathbb{R}^n. They are defined in the following way: \begin{definition}[The simplices S_\sigma] \rm For every \sigma \in \operatorname{Perm}[\{1,2,\dots,n\}] we define the n-simplex \begin{equation*} S_\sigma := \{ \mathbf{y} \in \mathbb{R}^n : \ 0 \leq y_{\sigma(1)} \leq y_{\sigma(2)}\leq \dots \leq y_{\sigma(n)} \leq 1 \}, \end{equation*} where y_{\sigma(i)} is the \sigma(i)-th component of the vector \mathbf{y}. An equivalent definition of the n-simplex S_\sigma is \begin{align*} S_\sigma &= \operatorname{con}\Big\{\sum_{j=1}^n \mathbf{e}_{\sigma(j)}, \sum_{j=2}^n \mathbf{e}_{\sigma(j)},\dots,\sum_{j=n+1}^n \mathbf{e}_{\sigma(j)}\Big\} \\ &=\Big\{ \sum_{i=1}^{n+1}\lambda_i \sum_{j=i}^{n+1}\mathbf{e}_{\sigma(j)}: 0\leq \lambda_i\leq 1\quad \text{for i=1,2,\dots,n+1 and } \sum_{i=1}^{n+1}\lambda_i=1 \Big\} , \end{align*} where \mathbf{e}_{\sigma(i)} is the \sigma(i)-th unit vector in \mathbb{R}^n. \end{definition} For every \sigma \in \operatorname{Perm}[\{1,2,\dots,n\}] the set S_\sigma is an n-simplex with the volume 1/n! and, more importantly, if \alpha,\beta\in \operatorname{Perm}[\{1,2,\dots,n\}], then $$\label{SIMSCHNITT} S_\alpha \cap S_\beta = \operatorname{con}\left\{ \mathbf{x} \in\mathbb{R}^n : \text{\mathbf{x} is a vertex of S_\alpha and \mathbf{x} is a vertex of S_\beta}\right\}.$$ Thus, we can define a continuous function p:[0,1]^n\to \mathbb{R} that is affine on every S_\sigma, \sigma \in \operatorname{Perm}[\{1,2,\dots,n\}], by just specifying it values at the vertices of the hypercube [0,1]^n. That is, if \mathbf{x} \in S_\sigma, then \mathbf{x} = \sum_{i=1}^{n+1}\lambda_i \sum_{j=i}^{n+1}\mathbf{e}_{\sigma(j)} $$where 0\leq \lambda_i\leq 1 for i=1,2,\dots,n+1 and \sum_{i=1}^{n+1}\lambda_i=1, Then we set$$ p(\mathbf{x}) = p\Big(\sum_{i=1}^{n+1}\lambda_i \sum_{j=i}^{n+1}\mathbf{e}_{\sigma(j)}\Big) = \sum_{i=1}^{n+1}\lambda_i\, p\Big(\sum_{j=i}^{n+1}\mathbf{e}_{\sigma(j)}\Big). $$The function p is now well defined and continuous because of (\ref{SIMSCHNITT}). We could now proceed by partitioning \mathbb{R}^n into the simplices (\mathbf{z} + S_\sigma)_{\mathbf{z}\in\mathbb{Z}^n,\sigma\in\operatorname{Perm}[\{1,2,\dots,n\}]}, but we prefer a simplicial partition of \mathbb{R}^n that is invariable with respect to reflections through the hyperplanes \mathbf{e}_i \cdot \mathbf{x} = 0, i=1,2,\dots,n, as a domain for the function space \operatorname{CPWA}. We construct such a partition by first partitioning \mathbb{R}^n_{\geq 0} into the family (\mathbf{z}+S_\sigma)_{\mathbf{z}\in\mathbb{Z}^n_{\geq 0},\ \sigma\in\operatorname{Perm}[\{1,2,\dots,n\}]} and then we extend this partition on \mathbb{R}^n by use of the reflection functions \mathbf{R}^\mathcal{J}, where \mathcal{J} \in \mathfrak{P}( \{1,2,\dots,n\}). \begin{definition}[Reflection functions \mathbf{R}^\mathcal{J}] \label{Refdef} \rm For every \mathcal{J} \in \mathfrak{P}( \{1,2,\dots,n\}), we define the reflection function \mathbf{R}^\mathcal{J}:\mathbb{R}^n \to \mathbb{R}^n, \begin{equation*} \mathbf{R}^\mathcal{J} (\mathbf{x}) := \sum_{i=1}^n (-1)^{\chi_{_{\mathcal{J}}}(i)}x_i \mathbf{e}_i \end{equation*} for all \mathbf{x} \in \mathbb{R}^n, where \chi_{_{\mathcal{J}}}:\{1,2,\dots,n\}\to \{0,1\} is the characteristic function of the set \mathcal{J}. \end{definition} Clearly \mathbf{R}^\mathcal{J}, where \mathcal{J}:=\{j_1,j_2,\dots,j_k\}, represents reflections through the hyperplanes \mathbf{e}_{j_1} \cdot \mathbf{x} = 0, \mathbf{e}_{j_2} \cdot \mathbf{x} = 0, \dots,\mathbf{e}_{j_k} \cdot \mathbf{x} = 0 in succession. The simplicial partition of \mathbb{R}^n that we use for the definition of the function spaces \operatorname{CPWA} of continuous piecewise affine functions is$$ (\mathbf{R}^\mathcal{J}(\mathbf{z}+S_\sigma))_{\mathbf{z}\in\mathbb{Z}_{\geq 0}^n,\ \mathcal{J} \in \mathfrak{P} (\{1,2,\dots,n\}),\ \sigma\in\operatorname{Perm}[\{1,2,\dots,n\}]}. $$Similar to (\ref{SIMSCHNITT}), this partition has the advantageous property, that from$$ S,S^* \in \left\{\mathbf{R}^\mathcal{J}(\mathbf{z}+S_\sigma) : \ \mathbf{z}\in\mathbb{Z}_{\geq 0}^n,\ \mathcal{J} \in \mathfrak{P} (\{1,2,\dots,n\}),\ \sigma\in\operatorname{Perm}[\{1,2,\dots,n\}] \right\} $$follows, that S\cap S^* is the convex hull of the vertices that are common to S and S^*. This leads to the following theorem: \begin{theorem} \label{RNZERL} Let (q_\mathbf{z})_{\mathbf{z}\in\mathbb{Z}^n} be a collection of real numbers. Then there is exactly one continuous function p:\mathbb{R}^n \to \mathbb{R} with the following properties: \begin{itemize} \item[(i)] p(\mathbf{z}) = q_\mathbf{z} for every \mathbf{z}\in\mathbb{Z}^n. \item[(ii)] For every \mathcal{J}\in \mathfrak{P}( \{1,2,\dots,n\}), every \sigma\in\operatorname{Perm}[\{1,2,\dots,n\}], and every \mathbf{z}\in\mathbb{Z}^n_{\geq 0}, the restriction of the function p to the simplex \mathbf{R}^\mathcal{J}(\mathbf{z} + S_\sigma) is affine. \end{itemize} \end{theorem} \begin{proof} See, for example, Corollary 4.12 in \cite{Marinosson:02a}. \end{proof} A \operatorname{CPWA} space is a set of continuous affine functions from a subset of \mathbb{R}^n into \mathbb{R} with a given boundary configuration. If the subset is compact, then the boundary configuration makes it possible to parameterize the functions in the respective \operatorname{CPWA} space with a finite number of real-valued parameters. Further, the \operatorname{CPWA} spaces are vector spaces over \mathbb{R} in a canonical way. They are thus well suited as a foundation, in the search of a Lyapunov function with a linear programming problem. We first define the function spaces \operatorname{CPWA} for subsets of \mathbb{R}^n that are the unions of n-dimensional cubes. \begin{definition}[\operatorname{CPWA} function on a simple grid] \rm Let \mathcal{Z}\subset\mathbb{Z}^n, \mathcal{Z} \neq \emptyset, be such that the interior of the set$$ \mathcal{N} := \bigcup_{\mathbf{z}\in\mathcal{Z}} (\mathbf{z} +[0,1]^n), $$is connected. The function space \operatorname{CPWA}[\mathcal{N}] is then defined as follows. A function p: \mathcal{N} \to \mathbb{R} is in \operatorname{CPWA}[\mathcal{N}], if and only if: \begin{enumerate} \item[(i)] p is continuous. \item[(ii)] For every simplex \mathbf{R}^\mathcal{J}(\mathbf{z} +S_\sigma)\subset \mathcal{N}, where \mathbf{z} \in \mathbb{Z}_{\geq 0}^n, \mathcal{J} \in\mathfrak{P}(\{1,2,\dots,n\}), and \sigma \in \operatorname{Perm}[\{1,2,\dots\}], the restriction p|_{\mathbf{R}^\mathcal{J}(\mathbf{z}+S_\sigma)} is affine. \end{enumerate} \end{definition} We will need continuous piecewise affine functions, defined by their values on grids with smaller grid steps than one, and we want to use grids with variable grid steps. We achieve this by using images of \mathbb{Z}^n under mappings \mathbb{R}^n \to \mathbb{R}^n, of which the components are continuous and strictly increasing functions \mathbb{R}\to\mathbb{R}, affine on the intervals [m,m+1] for all integers m, and map the origin on itself. We call such \mathbb{R}^n\to \mathbb{R}^n mappings {\it piecewise scaling functions}. \label{PSdef} Note that if y_{i,j}, i=1,2,\dots,n and j\in\mathbb{Z}, are real numbers such that y_{i,j} < y_{i,j+1} and y_{i,0}=0 for all i=1,2,\dots,n and all j\in\mathbb{Z}, then we can define a piecewise scaling function \mathbf{PS}:\mathbb{R}^n\to\mathbb{R}^n by \widetilde{\rm PS_i}(j):=y_{i,j} for all i=1,2,\dots,n and all j\in\mathbb{Z}. Moreover, the piecewise scaling functions \mathbb{R}^n\to\mathbb{R}^n are exactly the functions, that can be constructed in this way. In the next definition we use piecewise scaling functions to define general \operatorname{CPWA} spaces. \begin{definition}[\operatorname{CPWA} function, general] \label{NN10000} \rm Let \mathbf{PS}:\mathbb{R}^n\to\mathbb{R}^n be a piecewise scaling function and let \mathcal{Z}\subset\mathbb{Z}^n, \mathcal{Z} \neq \emptyset, be such that the interior of the set$$ \mathcal{N} := \bigcup_{\mathbf{z}\in\mathcal{Z}} (\mathbf{z} +[0,1]^n) $$is connected. The function space \operatorname{CPWA}[\mathbf{PS},\mathcal{N}] is defined as \begin{equation*} \operatorname{CPWA}[\mathbf{PS},\mathcal{N}] := \{p \circ \mathbf{PS}^{-1}\ : \, p \in \operatorname{CPWA}[\mathcal{N}]\} \end{equation*} and we denote by \mathfrak{S}[\mathbf{PS},\mathcal{N}] the set of the simplices in the family$$ (\mathbf{PS}(\mathbf{R}^\mathcal{J}(\mathbf{z}+S_\sigma)))_{\mathbf{z}\in\mathbb{Z}_{\geq 0}^n,\ \mathcal{J} \in \mathfrak{P} (\{1,2,\dots,n\}),\ \sigma\in\operatorname{Perm}[\{1,2,\dots,n\}]} $$that are contained in the image \mathbf{PS}(\mathcal{N}) of \mathcal{N} under \mathbf{PS}. \end{definition} Clearly \begin{equation*} \{ \mathbf{x}\in \mathbb{R}^n : \mathbf{x} \text{ is a vertex of a simplex in } \mathfrak{S}[\mathbf{PS},\mathcal{N}]\} = \mathbf{PS}(\mathcal{N}\cap\mathbb{Z}^n) \end{equation*} and every function in \operatorname{CPWA}[\mathbf{PS},\mathcal{N}] is continuous and is uniquely determined by its values on the grid \mathbf{PS}(\mathcal{N}\cap\mathbb{Z}^n). We use functions from \operatorname{CPWA}[\mathbf{PS},\mathcal{N}] to approximate functions in \mathcal{C}^2(\mathbf{PS}(\mathcal{N})), that have bounded second-order derivatives. The next lemma gives an upper bound of the approximation error of such a linearization. \begin{lemma} \label{FABSZ} Let \sigma\in\operatorname{Perm}[\{1,2,\dots,n\}], let \mathcal{J}\in\mathfrak{P} (\{1,2,\dots,n\}), let \mathbf{z}\in\mathbb{Z}_{\geq 0}^n, let \mathbf{R}^\mathcal{J} be a reflection function, and let \mathbf{PS} be a piecewise scaling function. Denote by S the n-simplex that is the convex combination of the vertices$$ \mathbf{y}_i := \mathbf{PS}(\mathbf{R}^\mathcal{J}(\mathbf{z}+\sum_{j=i}^{n+1} \mathbf{e}_{\sigma(j)})),\quad i=1,2,\dots,n+1, $$and let f\in \mathcal{C}^2(\mathcal{U}) be a function defined on a domain S\subset\mathcal{U}\subset\mathbb{R}^n. For every i=1,2,\dots,n+1 and every k=1,2,\dots,n define the constant$$ A_{k,i} := |\mathbf{e}_k\cdot(\mathbf{y}_i - \mathbf{y}_{n+1})| $$and for every r,s=1,2,\dots,n let B_{rs} be a constant, such that$$ B_{rs} \geq \max_{\mathbf{x}\in S} \Big|\frac{\partial^2f}{\partial x_r \partial x_s}(\mathbf{x})\Big|. $$Define for every i=1,2,\dots,n+1 the constant$$ E_i:=\frac{1}{2} \sum_{r,s=1}^nB_{rs}A_{r,i}(A_{s,1} + A_{s,i}). $$Then for every convex combination $$\label{FFIT1} \mathbf{y}:=\sum_{i=1}^{n+1}\lambda_i \mathbf{y}_i,$$ of the vertices of the simplex S we have$$ \Big|f(\mathbf{y}) - \sum_{i=1}^{n+1}\lambda_i f(\mathbf{y}_i)\Big| \leq \sum_{i=1}^{n+1}\lambda_i E_i. \end{lemma} \begin{proof} Let \mathbf{y} be as in equation (\ref{FFIT1}). Then, by Taylor's theorem, there is a vector \mathbf{y}_\mathbf{x} on the line-segment between \mathbf{y}_{n+1} and \mathbf{y}, such that \begin{align*} f(\mathbf{y}) &= f(\mathbf{y}_{n+1}) + \nabla f(\mathbf{y}_{n+1})\cdot \left(\mathbf{y}-\mathbf{y}_{n+1}\right)\\ & \quad + \frac{1}{2}\sum_{r,s=1}^n [\mathbf{e}_r\cdot(\mathbf{y}-\mathbf{y}_{n+1})][\mathbf{e}_s\cdot(\mathbf{y}-\mathbf{y}_{n+1})]\frac{\partial^2f}{\partial x_r \partial x_s}\left(\mathbf{y}_\mathbf{x}\right) \\ &=\sum_{i=1}^{n+1}\lambda_i \Big(f(\mathbf{y}_{n+1}) + \nabla f(\mathbf{y}_{n+1})\cdot \left(\mathbf{y}_i-\mathbf{y}_{n+1}\right) \\ & \quad + \frac{1}{2}\sum_{r,s=1}^n [\mathbf{e}_r\cdot(\mathbf{y}_i-\mathbf{y}_{n+1})][\mathbf{e}_s\cdot(\mathbf{y}-\mathbf{y}_{n+1})]\frac{\partial^2f}{\partial x_r \partial x_s}\left(\mathbf{y}_\mathbf{x}\right)\Big) \end{align*} and for every i=1,2,\dots,n there is a vector \mathbf{y}_{i,\mathbf{x}} on the line-segment between \mathbf{y}_i and \mathbf{y}_{n+1} such that \begin{align*} f(\mathbf{y}_i) &= f(\mathbf{y}_{n+1}) + \nabla f(\mathbf{y}_{n+1})\cdot \left(\mathbf{y}_i-\mathbf{y}_{n+1}\right)\\ &\quad + \frac{1}{2}\sum_{r,s=1}^n [\mathbf{e}_r\cdot(\mathbf{y}_i-\mathbf{y}_{n+1})][\mathbf{e}_s\cdot(\mathbf{y}_i-\mathbf{y}_{n+1})]\frac{\partial^2f}{\partial x_r \partial x_s}\left(\mathbf{y}_{i,\mathbf{x}}\right). \end{align*} Further, because a simplex is a convex set, the vectors \mathbf{y}_\mathbf{x} and \mathbf{y}_{1,\mathbf{x}},\mathbf{y}_{2,\mathbf{x}},\dots,\mathbf{y}_{n,\mathbf{x}} are all in S. But then \begin{align*} &\Big|f(\mathbf{y}) - \sum_{i=1}^{n+1}\lambda_i f(\mathbf{y}_i)\Big|\\ & \leq \frac{1}{2} \sum_{i=1}^{n+1}\lambda_i\sum_{r,s=1}^n |\mathbf{e}_r\cdot(\mathbf{y}_i-\mathbf{y}_{n+1})|\left(|\mathbf{e}_s\cdot(\mathbf{y}-\mathbf{y}_{n+1})|+ |\mathbf{e}_s\cdot(\mathbf{y}_i-\mathbf{y}_{n+1})|\right) B_{rs} \\ & =\frac{1}{2} \sum_{i=1}^{n+1}\lambda_i\sum_{r,s=1}^n B_{rs} A_{r,i}\left(|\mathbf{e}_s\cdot(\mathbf{y}-\mathbf{y}_{n+1})|+ A_{s,i}\right) \end{align*} and because |\mathbf{e}_s\cdot(\mathbf{y}-\mathbf{y}_{n+1})| \leq \sum_{i=1}^{n+1}\lambda_i|\mathbf{e}_s \cdot(\mathbf{y}_i-\mathbf{y}_{n+1})| \leq |\mathbf{e}_s\cdot(\mathbf{y}_1-\mathbf{y}_{n+1})| = A_{s,1} $$it follows that$$ \Big|f(\mathbf{y}) - \sum_{i=1}^{n+1}\lambda_i f(\mathbf{y}_i)\Big| \leq \frac{1}{2} \sum_{i=1}^{n+1}\lambda_i \sum_{r,s=1}^nB_{rs}A_{r,i}(A_{s,1} + A_{s,i})= \sum_{i=1}^{n+1}\lambda_i E_i. \end{proof} An affine function p, defined on a simplex S\subset\mathbb{R}^n and with values in \mathbb{R}, has the algebraic form p(\mathbf{x})=\mathbf{w}\cdot\mathbf{x} +q, where \mathbf{w} is a constant vector in \mathbb{R}^n and q is constant in \mathbb{R}. Another characterization of p is given by specifying its values at the vertices as stated. The next lemma gives a formula for the components of the vector \mathbf{w} when the values of p at the vertices of S are known and S is a simplex in \mathfrak{S}[\mathbf{PS},\mathcal{N}]. \begin{lemma} \label{WLEMMA} Let \mathbf{PS}:\mathbb{R}^n\to\mathbb{R}^n be a piecewise scaling function, let \mathbf{z} \in \mathbb{Z}^n_{\geq 0}, let \mathcal{J} \in{\mathfrak P}(\{1,2,\dots,n\}), let \sigma \in \operatorname{Perm}[\{1,2,\dots,n\}], and let p(\mathbf{x}):=\mathbf{w}\cdot\mathbf{x}+q be an affine function defined on the n-simplex with the vertices \begin{equation*} \mathbf{y}_i := \mathbf{PS}(\mathbf{R}^\mathcal{J}(\mathbf{z} + \sum_{j=i}^n \mathbf{e}_{\sigma(j)})),\quad i=1,2,\dots,n. \end{equation*} Then \begin{equation*} \mathbf{w} = \sum_{i=1}^n \frac{p(\mathbf{y}_i)-p(\mathbf{y}_{i+1})}{\mathbf{e}_{\sigma(i)} \cdot(\mathbf{y}_i - \mathbf{y}_{i+1})}\mathbf{e}_{\sigma(i)}. \end{equation*} \end{lemma} \begin{proof} For any i\in\{1,2,\dots,n\} we have \begin{align*} p(\mathbf{y}_i)-p(\mathbf{y}_{i+1}) &=\mathbf{w}\cdot(\mathbf{y}_i-\mathbf{y}_{i+1}) \\ &= \sum_{k=1}^n w_{\sigma(k)}[\mathbf{e}_{\sigma(k)} \cdot(\mathbf{y}_i - \mathbf{y}_{i+1})]\\ &= w_{\sigma(i)}[\mathbf{e}_{\sigma(i)} \cdot(\mathbf{y}_i - \mathbf{y}_{i+1})] \end{align*} because the components of the vectors \mathbf{y}_i and \mathbf{y}_{i+1} are all equal with except of the \sigma(i)-th one. But then \begin{equation*} w_{\sigma(i)} = \frac{p(\mathbf{y}_i)-p(\mathbf{y}_{i+1})}{\mathbf{e}_{\sigma(i)} \cdot(\mathbf{y}_i-\mathbf{y}_{i+1})} \end{equation*} and we have finished the proof. \end{proof} Now, that we have defined the function spaces \operatorname{CPWA} we are ready to state our linear programming problem, of which every feasible solution parameterizes a \operatorname{CPWA} Lyapunov function for the Switched System \ref{POLYSYS} used in the derivation of its linear constraints. \subsection{The linear programming problem} \label{LINPROBCHAP} We come to the linear programming problem, of which every feasible solution parameterizes a Lyapunov function for the Switched System \ref{POLYSYS}. The Lyapunov function is of class \operatorname{CPWA}. We first define the linear programming problem in Definition \ref{LP}. In the definition the linear constraints are grouped into four classes, (LC1), (LC2), (LC3), and (LC4), for linear constraints 1, 2, 3, and 4 respectively. Then we show how the variables of the linear programming problem that fulfill these constraints can be used to parameterize functions that meet the conditions {\bf (L1)} and {\bf (L2)} of Definition \ref{DEFLYAFUNC}, the definition of a Lyapunov function. Then we state and discuss the results in Section \ref{CONC}. Finally, we consider a more simple linear programming, defined in Definition \ref{LPA}, for autonomous systems and we show that it is equivalent to the linear programming problem in Definition \ref{LP} with additional constraints that force the parameterized \operatorname{CPWA} Lyapunov function to be time-invariant. The next definition plays a central role in this work. It is generalization of the linear programming problems presented in \cite{Marinosson:02a}, \cite{Marinosson:02b}, \cite{Hafstein:04}, and \cite{Hafstein:04b} to serve the nonautonomous Switched System \ref{POLYSYS}. \begin{definition} \label{LP} \rm (Linear programming problem {\bf LP} (\{\mathbf{f}_p : p\in\mathcal{P}\},\mathcal{N},\mathbf{PS}, \mathbf{t},\mathcal{D},\|\cdot\|))\quad Consider the Switched System \ref{POLYSYS} where the set \mathcal{P} has a finite number of elements. Let T' and T'' be constants such that 0\leq T'< T'' and let \mathbf{PS}:\mathbb{R}^n\to\mathbb{R}^n be a piecewise scaling function and \mathcal{N}\subset\mathcal{U} be such that the interior of the set \mathcal{M} := \bigcup_{\mathbf{z}\in\mathbb{Z}^n,\; \mathbf{PS}(\mathbf{z} + [0,1]^n) \subset \mathcal{N}} \mathbf{PS}(\mathbf{z}+[0,1]^n) $$is a connected set that contains the origin. Let$$ \mathcal{D}:=\mathbf{PS}(\,]d^-_1,d^+_1[\,\times\,]d^-_2,d^+_2\,[\times\, \dots \, \times\, ]d^-_n,d^+_n[\,) be a set, of which the closure is contained in the interior of \mathcal{M}, and either \mathcal{D}=\emptyset or d^-_i and d^+_i are integers such that d^-_i\leq -1 and 1\leq d^+_i for every i=1,2,\dots,n. Finally, let \|\cdot\| be an arbitrary norm on \mathbb{R}^n and \mathbf{t}:=(t_0,t_1,\dots,t_M) \in \mathbb{R}^{M+1}, M\in\mathbb{N}_{>0}, be a vector such that T'=:t_0 0 and \delta >0 be arbitrary constants. \end{enumerate} The variables of the linear programming problem are: \begin{align*} &\Upsilon,\\ &\Psi[y], \quad \text{for all y\in \mathcal{X}^{\|\cdot\|}},\\ &\Gamma[y], \quad \text{for all y\in \mathcal{X}^{\|\cdot\|}},\\ &V[\tilde{\mathbf{x}}], \quad \text{for all \tilde{\mathbf{x}}\in \mathcal{G}},\\ &C[\{\tilde{\mathbf{x}},\tilde{\mathbf{y}}\}], \quad \text{for all \{\tilde{\mathbf{x}},\tilde{\mathbf{y}}\}\in \mathcal{Y}}. \end{align*} Considering Definition \ref{DEFLYAFUNC}, the definition of a Lyapunov function, the variables \Psi[y] correspond to the function \alpha_1, the variables \Gamma[y] to the function \psi, and the variables V[\tilde{\mathbf{x}}] to the Lyapunov function V, the \tilde{x}_0 component representing the time t. The variables C[\{\tilde{\mathbf{x}},\tilde{\mathbf{y}}\}] are local bounds on the gradient \nabla_{\tilde{\mathbf{x}}} V^{\it Lya} of the Lyapunov function V^{\it Lya} to be constructed and \Upsilon is a corresponding global bound. \smallskip The linear constraints of the linear programming problem are: \begin{enumerate} \item[{\bf (LC1)}] Let y_0,y_1,\dots,y_K be the elements of \mathcal{X}^{\|\cdot\|} in an increasing order. Then \begin{gather*} \Psi[y_0] = \Gamma[y_0] = 0 ,\\ \varepsilon y_1 \leq \Psi[y_1] ,\\ \varepsilon y_1 \leq \Gamma[y_1], \end{gather*} and for every i=1,2,\dots,K-1: \begin{gather*} \frac{\Psi[y_i]-\Psi[y_{i-1}]}{y_i-y_{i-1}} \leq \frac{\Psi[y_{i+1}]-\Psi[y_i]}{y_{i+1}-y_i}, \\ \frac{\Gamma[y_i]-\Gamma[y_{i-1}]}{y_i-y_{i-1}} \leq \frac{\Gamma[y_{i+1}]-\Gamma[y_i]}{y_{i+1}-y_i}. \end{gather*} \item[{\bf (LC2)}] For every \tilde{\mathbf{x}}\in \mathcal{G}: \begin{equation*} \Psi[\|\tilde{\mathbf{x}}\|_*] \leq V[\tilde{\mathbf{x}}]. \end{equation*} If \mathcal{D}=\emptyset, then, whenever \|\tilde{\mathbf{x}}\|_* = 0: \begin{equation*} V[\tilde{\mathbf{x}}] =0. \end{equation*} If \mathcal{D} \neq \emptyset, then, whenever (\tilde{x}_1,\tilde{x}_2,\dots,\tilde{x}_2) \in \mathbf{PS}(\mathbb{Z}^n)\cap\partial\mathcal{D}: V[\tilde{\mathbf{x}}] \leq \Psi[x_{\min,\partial\mathcal{M}}]-\delta. Further, if \mathcal{D} \neq \emptyset, then for every i=1,2,\dots,n and every j=0,1,\dots,M: \begin{gather*} V[{\rm PS}_0(j)\mathbf{e}_0 + {\rm PS}_i(d_i^-)\mathbf{e}_i] \leq -\Upsilon \cdot{\rm PS}_i(d_i^-), \\ V[{\rm PS}_0(j)\mathbf{e}_0 + {\rm PS}_i(d_i^+)\mathbf{e}_i] \leq \Upsilon \cdot{\rm PS}_i(d_i^+). \end{gather*} \item[{\bf (LC3)}] For every \{\tilde{\mathbf{x}},\tilde{\mathbf{y}}\} \in \mathcal{Y}: \begin{equation*} -C[\{\tilde{\mathbf{x}},\tilde{\mathbf{y}}\}]\cdot \|\tilde{\mathbf{x}} - \tilde{\mathbf{y}}\|_{\infty} \leq V[\tilde{\mathbf{x}}]-V[\tilde{\mathbf{y}}] \leq C[\{\tilde{\mathbf{x}},\tilde{\mathbf{y}}\}]\cdot \|\tilde{\mathbf{x}} - \tilde{\mathbf{y}}\|_{\infty} \leq \Upsilon\cdot \|\tilde{\mathbf{x}} - \tilde{\mathbf{y}}\|_{\infty}. \end{equation*} \item[{\bf (LC4)}] For every p\in\mathcal{P}, every (\mathbf{z},\mathcal{J}) \in \mathcal{Z}, every \sigma \in \operatorname{Perm}[\{0,1,\dots,n\}], and every i=0,1,\dots,n+1: \begin{align*} &-\Gamma\big[\|\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}\|_*\big]\\ & \geq \sum_{j=0}^n\Big(\frac{V[\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}]- V[\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1}]} {\mathbf{e}_{\sigma(j)}\cdot(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}- \mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1})}\tilde{f}_{p,\sigma(j)}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}) + E^{(\mathbf{z},\mathcal{J})}_{p,\sigma,i} C[\{\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j},\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1}\}]\Big). \end{align*} \end{enumerate} As the objective of the linear programming problem is not needed to parameterize a \operatorname{CPWA} Lyapunov function we do not define it here. \end{definition} Note that the values of the constants \varepsilon>0 and \delta>0 do not affect whether there is a feasible solution to the linear program or not. If there is a feasible solution for \varepsilon := \varepsilon'>0 and \delta:=\delta'>0, then there is a feasible solution for all \varepsilon := \varepsilon^*>0 and \delta:=\delta^*>0. Just multiply the numerical values of all variables of the feasible solution with \max\{\frac{\varepsilon^*}{\varepsilon'},\frac{\delta^*}{\delta'}\}. $$Further note that if \|\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}\|_*=0, then \tilde{f}_{p,\sigma(j)}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i})=0 for all j\in\{0,1,\dots,n\} such that \sigma(j) \neq 0 and if \sigma(j)=0, then V[\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}]- V[\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1}] = 0. Thus, the constraints (LC4) reduce to $$\label{CONTRAXXX} 0\geq\sum_{j=0}^n E^{(\mathbf{z},\mathcal{J})}_{p,\sigma,i} C[\{\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j},\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1}\}],$$ which looks contradictory at first glance. However, if \|\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}\|_*=0 then necessarily i=n+1 and$$ E^{(\mathbf{z},\mathcal{J})}_{p,\sigma,n+1} := \frac{1}{2}\sum_{r,s=0}^n B^{(\mathbf{z},\mathcal{J})}_{p,rs} A^{(\mathbf{z},\mathcal{J})}_{\sigma,r,n+1}(A^{(\mathbf{z},\mathcal{J})}_{\sigma,s,n+1}+A^{(\mathbf{z},\mathcal{J})}_{\sigma,s,0}) =0 $$because A^{(\mathbf{z},\mathcal{J})}_{\sigma,r,n+1}=0 for all r=0,1,\dots,n, so (\ref{CONTRAXXX}) is not contradictory and even trivially fulfilled. Finally, if the Switched System \ref{POLYSYS} is autonomous, then we know by Theorem \ref{CONVLYA} that there exists a time-invariant Lyapunov function for the system. To reflect this fact one is tempted to additionally include the constraints V[\tilde{\mathbf{x}}] = V[\tilde{\mathbf{y}}] for every pair \tilde{\mathbf{x}},\tilde{\mathbf{y}}\in\mathcal{G} such that \|\tilde{\mathbf{x}}-\tilde{\mathbf{y}}\|_* = 0 in the linear programming problem to limit the search to time-invariant Lyapunov functions. However, as we will show in Theorem \ref{SIMPLLP}, this is equivalent to a more simple linear programming problem if the Switched System \ref{POLYSYS} is autonomous, namely, the linear programming problem defined in Definition \ref{LPA}. In the next sections we prove that a feasible solution to the linear programming problem defined in Definition \ref{LP} parameterizes a \operatorname{CPWA} Lyapunov function for the Switched System \ref{POLYSYS} used for its construction. For this proof the variable \Upsilon is not needed. However, it will be needed for the analysis in Section \ref{CONC}. \subsection{Definition of the functions \psi, \gamma,\ \text{and}\ V^{\it L\lowercase{ya}}} \label{SecFun} Let y_0,y_1,\dots,y_K be the elements of \mathcal{X}^{\|\cdot\|} in an increasing order. We define the piecewise affine functions \psi,\gamma :\mathbb{R}_{\geq 0} \to \mathbb{R}, \begin{gather*} \psi(y) := \Psi[y_i]+\frac{\Psi[y_{i+1}]-\Psi[y_i]}{y_{i+1}-y_i}(y-y_i), \\ \gamma(y) := \Gamma[y_i]+\frac{\Gamma[y_{i+1}]-\Gamma[y_i]}{y_{i+1}-y_i}(y-y_i), \end{gather*} for all y\in [y_i,y_{i+1}] and all i=0,1,\dots,K-1. The values of \psi and \gamma on ]y_K,+\infty[ do not really matter, but to have everything properly defined, we set \begin{gather*} \psi(y) := \Psi[y_{K-1}]+\frac{\Psi[y_K] -\Psi[y_{K-1}]}{y_K-y_{K-1}}(y-y_{K-1}),\\ \gamma(y) := \Gamma[y_{K-1}]+\frac{\Gamma[y_K] -\Gamma[y_{K-1}]}{y_K-y_{K-1}}(y-y_{K-1}) \end{gather*} for all y > y_K. Clearly the functions \psi and \gamma are continuous. The function V^{\it Lya} \in \operatorname{CPWA}[\widetilde{\mathbf{PS}},\widetilde{\mathbf{PS}}^{-1}\big([T',T'']\times\big(\mathcal{M}\setminus\mathcal{D}\big)\big)] is defined by assigning \begin{equation*} V^{\it Lya}(\tilde{\mathbf{x}}) := V[\tilde{\mathbf{x}}] \end{equation*} for all \tilde{\mathbf{x}}\in\mathcal{G}. We will sometimes write V^{\it Lya}(t,\mathbf{x}) for V^{\it Lya}(\tilde{\mathbf{x}}) and V[t,\mathbf{x}] for V[\tilde{\mathbf{x}}]. It is then to be understood that t:=\tilde{x}_0 and \mathbf{x} := (\tilde{x}_1,\tilde{x_2},\dots,\tilde{x}_n). In the next four sections we will successively derive the implications the linear constraints (LC1), (LC2), (LC3), and (LC4) have on the functions \psi, \gamma, and V^{\it Lya}. \subsection{Implications of the constraints (LC1)} Let y_0,y_1,\dots,y_K be the elements of \mathcal{X}^{\|\cdot\|} in an increasing order. We are going to show that the constraints (LC1) imply, that the functions \psi and \gamma are convex and strictly increasing on [0,+\infty[\,. Because y_0=0, \psi(y_0)=\Psi[y_0]=0, and \gamma(y_0)=\Gamma[y_0]=0, this means that they are convex \mathcal{K} functions. The constraints are the same for \Psi and \Gamma, so it suffices to show this for the function \psi. From the definition of \psi, it is clear that it is continuous and that $$\label{LC1-6} \frac{\psi(x)-\psi(y)}{x-y} = \frac{\Psi[y_{i+1}]-\Psi[y_i]}{y_{i+1}-y_i}$$ for all x,y \in [y_i,y_{i+1}] and all i=0,1,\dots,K-1. From y_0=0, \Psi[y_0]=0, and \varepsilon y_1 \leq \Psi[y_1] we get \begin{equation*} \varepsilon \leq \frac{\Psi[y_1]-\Psi[y_0]}{y_1-y_0} \leq \frac{\Psi[y_2]-\Psi[y_1]}{y_2-y_1} \leq \dots \leq \frac{\Psi[y_K]-\Psi[y_{K-1}]}{y_K-y_{K-1}}. \end{equation*} But then D^+\psi is a positive and increasing function on \mathbb{R}_{\geq 0} and it follows from Corollary \ref{TEMP51}, that \psi is a strictly increasing function. The function \psi is {\it convex}, if and only if for every y \in \mathbb{R}_{>0} there are constants a_y,b_y\in \mathbb{R}, such that$$ a_y y+b_y = \psi(y)\quad \text{and}\quad a_y x+b_y\leq \psi(x) $$for all x\in \mathbb{R}_{\geq0} (see, for example, Section 17 in Chapter 11 in \cite{AI}). Let y\in \mathbb{R}_{>0}. Because the function D^+\psi is increasing, it follows by Theorem \ref{MEANVT}, that for every x \in \mathbb{R}_{\geq0}, there is a c_{x,y}\in \mathbb{R}, such that \begin{equation*} \psi(x)=\psi(y)+ c_{x,y}(x-y) \end{equation*} and c_{x,y} \leq D^+\psi(y) if xy. This means that \begin{equation*} \psi(x)=\psi(y)+ c_{x,y}(x-y) \geq D^+\psi(y)x +\psi(y)-D^+\psi(y)y \end{equation*} for all x \in \mathbb{R}_{\geq 0}. Because y was arbitrary, the function \psi is convex. \subsection{Implications of the constraints (LC2)} \label{SECLC2} Define the constant$$ V^{\it Lya}_{\partial\mathcal{M},\min}:= \min_{\mathbf{x} \in \partial\mathcal{M} \atop t\in[T',T'']} V^{\it Lya}(t,\mathbf{x}) $$and if \mathcal{D} \neq \emptyset the constant$$ V^{\it Lya}_{\partial\mathcal{D},\max}:= \max_{\mathbf{x} \in \partial\mathcal{D} \atop t\in[T',T'']} V^{\it Lya}(t,\mathbf{x}). $$We are going to show that the constraints (LC2) imply, that $$\label{LC2UNG1} \psi(\|\mathbf{x}\|) \leq V^{\it Lya}(t,\mathbf{x})$$ for all t\in[T',T''] and all \mathbf{x} \in \mathcal{M}\setminus\mathcal{D} and that$$ V^{\it Lya}_{\partial\mathcal{D},\max} \leq V^{\it Lya}_{\partial\mathcal{M},\min}-\delta if \mathcal{D} \neq \emptyset. We first show that they imply, that \begin{equation*} \psi(\|\tilde{\mathbf{x}}\|_*) \leq V^{\it Lya}(\tilde{\mathbf{x}}) \end{equation*} for all \tilde{\mathbf{x}}\in\mathcal{G}, which obviously implies (\ref{LC2UNG1}). Let \tilde{\mathbf{x}}\in\mathcal{G}. Then there is a (\mathbf{z},\mathcal{J}) \in \mathcal{Z}, a \sigma \in \operatorname{Perm}[\{0,1,\dots,n\}], and constants \lambda_0,\lambda_1,\dots,\lambda_{n+1}\in[0,1], such that \begin{equation*} \tilde{\mathbf{x}} = \sum_{i=0}^{n+1} \lambda_i \mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}\quad \text{and}\quad \sum_{i=0}^{n+1}\lambda_i=1. \end{equation*} Then \begin{align*} \psi(\|\tilde{\mathbf{x}}\|_*) &= \psi(\| \sum_{i=0}^{n+1} \lambda_i \mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}\|_*) \leq \psi( \sum_{i=0}^{n+1} \lambda_i\|\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}\|_*) \\ &\leq \sum_{i=0}^{n+1} \lambda_i \psi(\|\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}\|_*) = \sum_{i=0}^{n+1} \lambda_i \Psi[\|\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}\|_*] \leq \sum_{i=0}^{n+1} \lambda_i V[\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}] \\ &= \sum_{i=0}^{n+1} \lambda_i V^{\it Lya}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}) = V^{\it Lya}( \sum_{i=0}^{n+1} \lambda_i \mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}) = V^{\it Lya}(\tilde{\mathbf{x}}). \end{align*} Now consider the case \mathcal{D} \neq \emptyset. From the definition of V^{\it Lya} and the constants V^{\it Lya}_{\partial\mathcal{D},\max} and V^{\it Lya}_{\partial\mathcal{M},\min} it is clear, that \begin{gather*} V^{\it Lya}_{\partial\mathcal{D},\max} = \max_{\mathbf{x} \in \partial\mathcal{D} \cap\mathbf{PS}(\mathbb{Z}^n) \atop u=0,1,\dots,M} V[t_u,\mathbf{x}],\\ V^{\it Lya}_{\partial\mathcal{M},\min} = \min_{\mathbf{x} \in \partial\mathcal{M} \cap\mathbf{PS}(\mathbb{Z}^n) \atop u=0,1,\dots,M} V[t_u,\mathbf{x}]. \end{gather*} Let \mathbf{x}\in\partial\mathcal{M}\cap\mathbf{PS}(\mathbb{Z}^n) and u\in\{0,1,\dots,M\} be such that V[t_u,\mathbf{x}]=V^{\it Lya}_{\partial\mathcal{M},\min}, then \begin{align*} V^{\it Lya}_{\partial\mathcal{D},\max} &\leq \Psi[x_{\min,\partial\mathcal{M}}]-\delta =\psi(x_{\min,\partial\mathcal{M}})-\delta \\ &\leq \psi(\|\mathbf{x}\|)-\delta \leq V[t_u,\mathbf{x}]-\delta\\ &= V^{\it Lya}_{\partial\mathcal{M},\min}-\delta. \end{align*} \subsection{Implications of the constraints (LC3)} The constraints (LC3) imply that \begin{equation*} \left|\frac{V[\tilde{\mathbf{x}}]-V[\tilde{\mathbf{y}}]}{\|\tilde{\mathbf{x}} - \tilde{\mathbf{y}}\|_{\infty}}\right| \leq C[\{\tilde{\mathbf{x}},\tilde{\mathbf{y}}\}] \leq \Upsilon \end{equation*} for every \{\tilde{\mathbf{x}},\tilde{\mathbf{y}}\} \in \mathcal{Y} and these local bounds C[\{\tilde{\mathbf{x}},\tilde{\mathbf{y}}\}] on the gradient \nabla_{\tilde{\mathbf{x}}} V^{\it Lya} will be used in the next section. \subsection{Implications of the constraints (LC4)} We are going to show that the constraints (LC4) and (LC3) together imply that \begin{align} \label{LC4MU} -\gamma(\|\boldsymbol{\phi}_\varsigma(t,t',\boldsymbol{\xi})\|) \geq \limsup_{h\to 0+}\frac{V^{\it Lya}(t+h,\boldsymbol{\phi}_\varsigma(t+h,t',\boldsymbol{\xi})) - V^{\it Lya}(t,\boldsymbol{\phi}_\varsigma(t,t',\boldsymbol{\xi}))}{h} \end{align} for all \varsigma\in\mathcal{S}_\mathcal{P} and all (t,\boldsymbol{\phi}_\varsigma(t,t',\boldsymbol{\xi})) in the interior of [T',T'']\times(\mathcal{M}\setminus\mathcal{D}). Let \varsigma\in\mathcal{S}_\mathcal{P} and \tilde{\mathbf{x}} := (t,\boldsymbol{\phi}_\varsigma(t,t',\boldsymbol{\xi})) in the interior of [T',T'']\times(\mathcal{M}\setminus\mathcal{D}) be arbitrary, but fixed throughout this section, and set \mathbf{x} := \boldsymbol{\phi}_\varsigma(t,t',\boldsymbol{\xi}) and p:=\varsigma(t). We claim that there is a (\mathbf{z},\mathcal{J}) \in \mathcal{Z}, a \sigma \in \operatorname{Perm}[\{0,1,\dots,n\}], and constants \lambda_0,\lambda_1,\dots,\lambda_{n+1}\in[0,1], such that $$\label{LC4CON1} \begin{gathered} \tilde{\mathbf{x}} = \sum_{i=0}^{n+1} \lambda_i \mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i},\quad \sum_{i=0}^{n+1}\lambda_i=1,\\ \tilde{\mathbf{x}} + h \tilde{\mathbf{f}}_p(\tilde{\mathbf{x}}) \in \operatorname{con}\{\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,0}, \mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,1},\dots, \mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,n+1}\} \end{gathered}$$ for all h\in [0,a], where a>0 is some constant. We prove this claim by a contradiction. Assume that it does not hold true. The vector \tilde{\mathbf{x}} is contained in some of the simplices in the simplicial partition of [T',T'']\times \big{(}\mathcal{M}\setminus\mathcal{D}\big{)}, say S_1,S_2,\dots,S_k. Simplices are convex sets so we necessarily have \big\{ \tilde{\mathbf{x}} + \frac{1}{j}\tilde{\mathbf{f}}_p(\tilde{\mathbf{x}}) : j \in \mathbb{N}_{>0} \big\} \cap S_i = \emptyset $$for every i=1,2,\dots,k. But then there must be a simplex S in the simplicial partition, different to the simplices S_1,S_2,\dots,S_k, such that the intersection$$ \big\{ \tilde{\mathbf{x}} + \frac{1}{j}\tilde{\mathbf{f}}_p(\tilde{\mathbf{x}}): j \in \mathbb{N}_{>0} \big\}\cap S contains an infinite number of elements. This implies that there is a sequence in S that converges to \tilde{\mathbf{x}}, which is a contradiction, because S is a closed set and \tilde{\mathbf{x}}\notin S. Therefore (\ref{LC4CON1}) holds. Because \gamma is a convex function, we have $$\label{III1} -\gamma(\|\tilde{\mathbf{x}}\|_*) \geq - \sum_{i=0}^{n+1} \lambda_i \Gamma\big[\|\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}\|_*\big]$$ as was shown in Section \ref{SECLC2}. From the definition of V^{\it Lya} it follows, that there is a vector \mathbf{w}\in\mathbb{R}\times\mathbb{R}^n, such that $$\label{LC4AD} V^{\it Lya}(\tilde{\mathbf{y}}) = \mathbf{w}\cdot(\tilde{\mathbf{y}} - \mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,n+1}) + V^{\it Lya}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,n+1})$$ for all \tilde{\mathbf{y}} \in \operatorname{con}\{\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,0},\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,1},\dots,\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,n+1}\}. It follows by H\"older's inequality, that \label{LC4-10} \begin{aligned} \mathbf{w}\cdot \tilde{\mathbf{f}}_p(\tilde{\mathbf{x}}) &= \mathbf{w}\cdot \sum_{i=0}^{n+1} \lambda_i \tilde{\mathbf{f}}_p(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}) +\mathbf{w}\cdot\Big(\tilde{\mathbf{f}}_p(\tilde{\mathbf{x}})- \sum_{i=0}^{n+1} \lambda_i \tilde{\mathbf{f}}_p(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i})\Big) \\ &\leq \sum_{i=0}^{n+1} \lambda_i \mathbf{w}\cdot \tilde{\mathbf{f}}_p (\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i})+ \|\mathbf{w}\|_1\|\tilde{\mathbf{f}}_p(\tilde{\mathbf{x}})- \sum_{i=0}^{n+1} \lambda_i \tilde{\mathbf{f}}_p(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i})\|_\infty\,. \end{aligned} By Lemma \ref{FABSZ} and the assignment in (\ref{ERRFORM}), \begin{align*} \|\tilde{\mathbf{f}}_p(\tilde{\mathbf{x}})-\sum_{i=0}^{n+1} \lambda_i \tilde{\mathbf{f}}_p(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i})\|_\infty &=\max_{j=0,1,\dots,n} \big|\tilde{f}_{p,j}(\tilde{\mathbf{x}}) - \sum_{i=0}^{n+1} \lambda_i \tilde{f}_{p,j}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i})\big| \\ &\leq\frac{1}{2}\sum_{i=0}^{n+1}\lambda_i\sum_{r,s=0}^n B^{(\mathbf{z},\mathcal{J})}_{p,rs}A^{(\mathbf{z},\mathcal{J})}_{\sigma,r,i} ( A^{(\mathbf{z},\mathcal{J})}_{\sigma,s,0}+ A^{(\mathbf{z},\mathcal{J})}_{\sigma,s,i} )\\ &\leq \sum_{i=0}^{n+1}\lambda_i E^{(\mathbf{z},\mathcal{J})}_{p,\sigma,i}, \end{align*} which implies that we have derived the inequality $$\label{III} \mathbf{w}\cdot \tilde{\mathbf{f}}_p(\tilde{\mathbf{x}}) \leq \sum_{i=0}^{n+1} \lambda_i \left( \mathbf{w}\cdot \tilde{\mathbf{f}}_p(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}) +\|\mathbf{w}\|_1 E^{(\mathbf{z},\mathcal{J})}_{p,\sigma,i} \right).$$ We come to the vector \mathbf{w}. By Lemma \ref{WLEMMA}, the constraints (LC3), and because \left|\mathbf{e}_{\sigma(j)}\cdot(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j} -\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1})\right| = \|\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j} -\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1}\|_\infty for all j=0,1,\dots,n, we obtain the inequality \begin{align*} \|\mathbf{w}\|_1 = \sum_{j=0}^n \left| \frac{ V[\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}]-V[\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1}] } { \|\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j} -\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1}\|_\infty }\right| \leq \sum_{j=0}^n C[\{\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j},\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1}\}]. \end{align*} This inequality combined with (\ref{III}) gives \label{LC4UG1} \begin{aligned} \mathbf{w}\cdot \tilde{\mathbf{f}}_p(\tilde{\mathbf{x}}) &\leq \sum_{i=0}^{n+1} \lambda_i \sum_{j=0}^n\Bigl( \frac{ V[\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}]-V[\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1}] }{ \mathbf{e}_{\sigma(j)}\cdot(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j} -\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1}) } \tilde{f}_{p,\sigma(j)}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i})\\ & \quad + E^{(\mathbf{z},\mathcal{J})}_{p,\sigma,i} C[\{\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}, \mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1}\}] \Bigr). \end{aligned} We are going to show that inequality (\ref{LC4UG1}) together with the constraints (LC4) imply that inequality (\ref{LC4MU}) holds. First, note that because V^{\it Lya} satisfies a Lipschitz condition with Lipschitz constant, say L_V>0 with respect to the norm \|\cdot\|, we have \begin{align*} &\limsup_{h\to 0+} \left| \frac{V^{\it Lya}(t+h,\boldsymbol{\phi}_\varsigma(t+h,t',\boldsymbol{\xi})) - V^{\it Lya}(t+h,\mathbf{x} + h\mathbf{f}_p(t,\mathbf{x}))}{h}\right| \\ & \leq \limsup_{h\to 0+} L_V\big\|\frac{\boldsymbol{\phi}_\varsigma(t+h,t',\boldsymbol{\xi}) - \mathbf{x}}{h} -\mathbf{f}_p(t,\mathbf{x})\big\| \\ & = L_V\|\mathbf{f}_p(t,\mathbf{x}) -\mathbf{f}_p(t,\mathbf{x})\| = 0. \end{align*} Hence, by Lemma \ref{LSLIM} and the representation (\ref{LC4AD}) of V^{\it Lya}, \begin{align*} &\limsup_{h\to 0+}\frac{V^{\it Lya}(t+h,\boldsymbol{\phi}_\varsigma(t+h,t',\boldsymbol{\xi})) - V^{\it Lya}(t,\boldsymbol{\phi}_\varsigma(t,t'´,\boldsymbol{\xi}))}{h} \\ & =\limsup_{h\to 0+}\frac{V^{\it Lya}(\tilde{\mathbf{x}} + h \tilde{\mathbf{f}}_p(\tilde{\mathbf{x}})) -V^{\it Lya}(\tilde{\mathbf{x}})}{h} \nonumber \\ & =\limsup_{h\to 0+}\frac{h \mathbf{w}\cdot \tilde{\mathbf{f}}_p(\tilde{\mathbf{x}})}{h} \nonumber \\ & = \mathbf{w}\cdot\tilde{\mathbf{f}}_p(\tilde{\mathbf{x}}),\nonumber \end{align*} and we obtain by (\ref{LC4UG1}), (LC4), and (\ref{III1}) that \begin{align} &\limsup_{h\to 0+}\frac{V^{\it Lya}(t+h,\boldsymbol{\phi}_\varsigma(t+h,t',\boldsymbol{\xi})) - V^{\it Lya}(t,\boldsymbol{\phi}_\varsigma(t,t',\boldsymbol{\xi}))}{h} \nonumber \\ & =\mathbf{w}\cdot\tilde{\mathbf{f}}_p(\tilde{\mathbf{x}}) \nonumber \\ & \leq \sum_{i=0}^{n+1} \lambda_i \sum_{j=0}^n \Bigl(\frac{ V[\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}]-V[\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1}] }{ \mathbf{e}_{\sigma(j)}\cdot(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j} -\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1}) } \tilde{f}_{p,\sigma(j)}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}) + E^{(\mathbf{z},\mathcal{J})}_{p,\sigma,i} C[\{\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j},\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1}\}] \Bigr) \nonumber \\ & \leq -\sum_{i=1}^{n+1} \lambda_i \Gamma\big[\|\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}\|\big] \nonumber \\ & \leq -\gamma(\|\boldsymbol{\phi}_\varsigma(t,t',\boldsymbol{\xi})\|).\nonumber \end{align} Hence, inequality (\ref{LC4MU}) holds for all \varsigma\in\mathcal{S}_\mathcal{P} and all (t,\boldsymbol{\phi}_\varsigma(t,t',\boldsymbol{\xi})) in the interior of [T',T'']\times(\mathcal{M}\setminus\mathcal{D}). \subsection{Summary of the results and their consequences} \label{CONC} We start by summing up the results we have proved after the definition of the linear programming problem in a theorem. \begin{theorem}[\operatorname{CPWA} Lyapunov functions by linear programming] \label{PARAML} \quad \\ Consider the linear programming problem {\bf LP}(\{\mathbf{f}_p : p\in\mathcal{P}\},\mathcal{N},\mathbf{PS},\mathbf{t},\mathcal{D},\|\cdot\|) in Definition \ref{LP} and assume that it possesses a feasible solution. Let the functions \psi, \gamma,\ \text{and}\ V^{\it Lya} be defined as in Section \ref{SecFun} from the numerical values of the variables \Psi[y], \Gamma[y], and V[\tilde{\mathbf{x}}] from a feasible solution. Then the inequality \psi(\|\mathbf{x}\|) \leq V^{\it Lya}(t,\mathbf{x}) $$holds for all \mathbf{x} \in \mathcal{M}\setminus\mathcal{D} and all t\in[T',T'']. If \mathcal{D} = \emptyset we have \psi(0) = V^{\it Lya}(t,\boldsymbol{0})=0 for all t\in[T',T'']. If \mathcal{D} \neq \emptyset we have, with \begin{gather*} V^{\it Lya}_{\partial\mathcal{M},\min}:= \min_{\mathbf{x} \in \partial\mathcal{M} \atop t\in[T',T'']} V^{\it Lya}(t,\mathbf{x}), \\ V^{\it Lya}_{\partial\mathcal{D},\max}:= \max_{\mathbf{x} \in \partial\mathcal{D} \atop t\in[T',T'']} V^{\it Lya}(t,\mathbf{x}), \end{gather*} that$$ V^{\it Lya}_{\partial\mathcal{D},\max} \leq V^{\it Lya}_{\partial\mathcal{M},\min}-\delta. $$Further, with \boldsymbol{\phi} as the solution to the Switched System \ref{POLYSYS} that we used in the construction of the linear programming problem, the inequality $$\label{GLxx} -\gamma(\|\boldsymbol{\phi}_\varsigma(t,t',\boldsymbol{\xi})\|) \geq \limsup_{h\to 0+}\frac{V^{\it Lya}(t+h,\boldsymbol{\phi}_\varsigma(t+h,t', \boldsymbol{\xi})) - V^{\it Lya}(t,\boldsymbol{\phi}_\varsigma(t,t',\boldsymbol{\xi}))}{h}$$ hold true for all \varsigma\in\mathcal{S}_\mathcal{P} and all (t,\boldsymbol{\phi}_\varsigma(t,t',\boldsymbol{\xi})) in the interior of [T',T'']\times(\mathcal{M}\setminus\mathcal{D}). \end{theorem} We now come to the important question: \begin{quote} Which information on the stability behavior of the Switched System \ref{POLYSYS} can we extract from the Lyapunov-like function V^{\it Lya} defined in Section \ref{SecFun}? \end{quote} Before we answer this question we discuss the implications secured by a continuously differentiable Lyapunov function on the stability behavior of a non-switched system to get an idea what we can expect. To do this consider the system$$ \dot{\mathbf{x}} = \mathbf{f}(t,\mathbf{x}), $$where \mathbf{f} \in [\mathcal{C}^1(\mathbb{R}_{\geq 0}\times\mathcal{V})]^n and \mathcal{V} is a bounded domain in \mathbb{R}^n containing the origin, and assume that there is a function W \in \mathcal{C}^1(\mathbb{R}_{\geq 0}\times\mathcal{V}) and functions a, b, c\in \mathcal{K}, such that$$ a(\|\boldsymbol{\xi}\|) \leq W(t,\boldsymbol{\xi}) \leq b(\|\boldsymbol{\xi}\|) for all \boldsymbol{\xi}\in\mathcal{V} and all t\geq 0 and \begin{align*} \frac{d}{dt}W(t,\boldsymbol{\phi}(t,t',\boldsymbol{\xi})) &= [\nabla_\mathbf{x} W](t,\boldsymbol{\phi}(t,t',\boldsymbol{\xi})) \cdot \mathbf{f}(t,\boldsymbol{\phi}(t,t',\boldsymbol{\xi})) + \frac{\partial W}{\partial t}(t,\boldsymbol{\phi}(t,t',\boldsymbol{\xi})) \\ &\leq -c(\|\boldsymbol{\phi}(t,t',\boldsymbol{\xi})\|) \end{align*} for all (t,\boldsymbol{\phi}(t,t',\boldsymbol{\xi})) \in \mathbb{R}_{\geq 0} \times \mathcal{V}, where \boldsymbol{\phi} is the solution to the differential equation \dot{\mathbf{x}} =\mathbf{f}(t,\mathbf{x}). For our analysis we let (t',\boldsymbol{\xi}) \in \mathbb{R}_{\geq 0}\times \mathcal{V} be arbitrary but constant and set y(t) := W(t,\boldsymbol{\phi}(t,t',\boldsymbol{\xi})). Then y(t') = W(t',\boldsymbol{\xi}) and y satisfies the differential inequality \dot y(t) \leq -c(b^{-1}(y(t))) $$for all t such that \boldsymbol{\phi}(t,t',\boldsymbol{\xi}) \in \mathcal{V}. Now, assume that there are constants b^*>0 and c^*>0, such that b(\|\mathbf{x}\|) \leq b^*\|\mathbf{x}\| and c^*\|\boldsymbol{\xi}\| \leq c(\|\mathbf{x}\|) for all \mathbf{x} \in \mathcal{V}. In this simple case it is quite simple to derive the inequality$$ y(t) \leq y(t')\exp\Big(-\frac{c^*}{b^*}(t-t')\Big), $$which is valid for all t\geq t' if$$ W(t',\boldsymbol{\xi}) < \inf_{s \geq t',\; \mathbf{y} \in\partial\mathcal{V}} W(s,\mathbf{y}). $$We are going to show that a very similar analysis can be done for a switched system and the corresponding Lyapunov-like function V^{\it Lya} if the arbitrary norm \|\cdot\| used in Definition \ref{LP} of the linear programming problem is a p-norm \|\cdot\|_p, 1\leq p \leq +\infty, but first we prove a technical lemma that will be used in the proof of the theorem. \begin{lemma} \label{DIFFABS4} Let [a,b[ be an interval in \mathbb{R}, -\infty < a < b \leq +\infty, and let y,z:[a,b[\, \to \mathbb{R} be functions such that y(a) \leq z(a), y is continuous, and z is differentiable. Assume that there is a function s:\mathbb{R} \to \mathbb{R} that satisfies the local Lipschitz condition, for every compact \mathcal{C}\subset\mathbb{R} there is a constant L_\mathcal{C} such that$$ |y(\alpha)-y(\beta)| \leq L_\mathcal{C}|\alpha-\beta|,\quad \text{for all }\alpha,\beta\in\mathcal{C}, $$and assume further that$$ D^+y(t) \leq -s(y(t)) \quad \text{and}\quad \dot z(t) = -s(z(t)) $$for all t\in [a,b[\,. Then y(t) \leq z(t) for all t\in [a,b[\,. \end{lemma} \begin{proof} Assume that the proposition of the lemma does not hold. Then there is a t_0 \in [a,b[ such that y(t) \leq z(t) for all t\in[a,t_0] and an \epsilon >0 such that y(t) > z(t) for all t\in\,]t_0,t_0+\epsilon]. Let L>0 be a local Lipschitz constant for s on the interval [y(t_0),y(t_0+\epsilon)]. Then, by Lemma \ref{LSLIM},$$ D^+(y-z)(t) = D^+y(t) - \dot z(t) \leq -s(y(t)) + s(z(t)) \leq L(y(t) - z(t)) for every t\in[t_0,t_0+\epsilon]. But then, with w(t) := y(t) - z(t) for all t\in[a,b[\,, we have \begin{align*} &\limsup_{h \to 0+} \frac{w(t+h)e^{-L(t+h)} - w(t)e^{-Lt}}{h}\\ &\leq e^{-Lt}\limsup_{h \to 0+}\frac{w(t+h)(e^{-Lh} -1)}{h} + e^{-Lt}\limsup_{h \to 0+}\frac{w(t+h)-w(t)}{h} \\ &=-Le^{-Lt}w(t)+ e^{-Lt}D^+w(t) \\ &\leq -Le^{-Lt}w(t)+Le^{-Lt}w(t) = 0, \end{align*} for all t\in[t_0,t_0+\epsilon], which implies, by Corollary \ref{TEMP51}, that the function t\mapsto e^{-Lt}w(t) is monotonically decreasing on the same interval. Because w(t_0)=0 this is contradictory to y(t) > z(t) for all t\in\,]t_0,t_0+\epsilon] and therefore the proposition of the lemma must hold true. \end{proof} We come to the promised theorem, where the implications of the function V^{\it Lya} on the stability behavior of the Switched System \ref{POLYSYS} are specified. Here with k-norm we mean the norm \|\mathbf{x}\|_k := \left(\sum_{i=1}^n|x_i|^{k}\right)^{1/k} if 1\leq k < +\infty and \|\mathbf{x}\|_\infty:=\max_{i=1,2,\dots,n} |x_i|. Unfortunately, these norms are usually called p-norms, which is inappropriate in this context because the alphabet p is used to index the functions \mathbf{f}_p, p\in\mathcal{P}. \begin{theorem}[Implications of the Lyapunov function V^{\it Lya}] \label{IMPLYA} \quad\\ Make the same assumptions and definitions as in Theorem \ref{PARAML} and assume additionally that the norm \|\cdot\| in the linear programming problem {\bf LP}(\{\mathbf{f}_p : p\in\mathcal{P}\},\mathcal{N},\mathbf{PS},\mathbf{t}, \mathcal{D},\|\cdot\|) is a k-norm, 1\leq k \leq +\infty. Define the set \mathcal{T} through \mathcal{T} := \{\boldsymbol{0}\} if \mathcal{D} = \emptyset and \mathcal{T} := \mathcal{D}\cup\big\{\mathbf{x} \in \mathcal{M}\setminus\mathcal{D} : \max_{t\in[T',T'']}V^{\it Lya}(t,\mathbf{x}) \leq V^{\it Lya}_{\partial\mathcal{D},\max} \big\}, \quad \text{if\mathcal{D} \neq \emptyset$}, $$and the set \mathcal{A} through$$ \mathcal{A} := \big\{\mathbf{x} \in \mathcal{M}\setminus\mathcal{D}: \max_{t\in[T',T'']}V^{\it Lya}(t,\mathbf{x}) < V^{\it Lya}_{\partial\mathcal{M},\min} \big\}. $$Set q := k\cdot(k-1)^{-1} if 1 < k < +\infty, q:= 1 if k = +\infty, and q:=+\infty if k=1, and define the constant$$ E_q := \|\sum_{i=1}^n\mathbf{e}_i\|_q. $$Then the following propositions hold true: \begin{itemize} \item[(i)] If \boldsymbol{\phi}_\varsigma(t,t',\boldsymbol{\xi}) \in \mathcal{T} for some particular \varsigma \in\mathcal{S}_\mathcal{P}, T'' \geq t \geq T', t'\geq 0, and \boldsymbol{\xi} \in\mathcal{U}, then \boldsymbol{\phi}_\varsigma(s,t',\boldsymbol{\xi}) \in \mathcal{T} for all s\in [t,T'']. \item[(ii)] If \boldsymbol{\phi}_\varsigma(t,t',\boldsymbol{\xi}) \in \mathcal{M}\setminus\mathcal{D} for some particular \varsigma \in\mathcal{S}_\mathcal{P}, T'' \geq t \geq T', t'\geq 0, and \boldsymbol{\xi} \in\mathcal{U}, then the inequality $$\label{EXPFALLOFF} V^{\it Lya}(s,\boldsymbol{\phi}_\varsigma(s,t',\boldsymbol{\xi})) \leq V^{\it Lya}(t,\boldsymbol{\phi}_\varsigma(t,t',\boldsymbol{\xi})) \exp\Big(-\frac{\Upsilon}{\varepsilon E_q }(s-t)\Big)$$ holds for all s such that \boldsymbol{\phi}_\varsigma(s',t',\boldsymbol{\xi}) \in \mathcal{M}\setminus\mathcal{D} for all t \leq s' \leq s \leq T''. \item[(iii)] If \boldsymbol{\phi}_\varsigma(t,t',\boldsymbol{\xi}) \in \mathcal{A} for some particular \varsigma \in\mathcal{S}_\mathcal{P}, T'' \geq t \geq T', t'\geq 0, and \boldsymbol{\xi} \in\mathcal{U}, then the solution \boldsymbol{\phi}_\varsigma either fulfills inequality (\ref{EXPFALLOFF}) for all t \leq s \leq T'', or there is a T^* \in\, ]t,T''], such that the solution \boldsymbol{\phi}_\varsigma fulfills inequality (\ref{EXPFALLOFF}) for all t \leq s \leq T^*, \boldsymbol{\phi}_\varsigma(T^*,t',\boldsymbol{\xi}) \in \partial\mathcal{D}, and \boldsymbol{\phi}_\varsigma(s,t',\boldsymbol{\xi}) \in\mathcal{T} for all T^*\leq s \leq T''. \end{itemize} \end{theorem} \begin{proof} Proposition (i) is trivial if \mathcal{D} = \emptyset. To prove proposition (i) when \mathcal{D} \neq \emptyset define for every \kappa> 0 the set$$ \mathcal{T}_\kappa := \{\mathbf{x} \in \mathbb{R}^n : \|\mathbf{x} - \mathbf{y}\|_2 < \kappa \text{ for some$\mathbf{y} \in \mathcal{T}}\}. $$Because V^{\it Lya}_{\partial\mathcal{D},\max} \leq V^{\it Lya}_{\partial\mathcal{M},\min}-\delta by Theorem \ref{PARAML}, it follows that \mathcal{T}_\kappa \subset \mathcal{M} for all small enough \kappa>0. For every such small \kappa>0 notice, that inequality (\ref{GLxx}) and Corollary \ref{TEMP51} together imply, that if \boldsymbol{\phi}_\varsigma(t,t',\boldsymbol{\xi}) \in \mathcal{T}_\kappa for some particular \varsigma \in\mathcal{S}_\mathcal{P}, T'' \geq t \geq T', t'\geq 0, and \boldsymbol{\xi} \in\mathcal{U}, then \boldsymbol{\phi}_\varsigma(s,t',\boldsymbol{\xi}) \in \mathcal{T}_\kappa for all s\in [t,T'']. Then the proposition (i) follows, because if \boldsymbol{\phi}_\varsigma(t,t',\boldsymbol{\xi}) \in \mathcal{T}, then \boldsymbol{\phi}_\varsigma(t,t',\boldsymbol{\xi}) \in \bigcap_{\kappa >0} \mathcal{T}_\kappa, and therefore \boldsymbol{\phi}_\varsigma(s,t',\boldsymbol{\xi}) \in \bigcap_{\kappa >0} \mathcal{T}_\kappa = \mathcal{T} for all s\in [t,T'']. To prove proposition (ii) first note that the linear constraints (LC2) and (LC3) imply that V^{\it Lya}(t,\mathbf{x}) \leq \Upsilon \|\mathbf{x}\|_1 for all t\in[T',T''] and all \mathbf{x} \in \mathcal{M}. To see this just notice that at least for one i\in\{1,2,\dots,n\} we must have either$$ x_i \geq {\rm PS}_i(d^+_i)\quad \text{or}\quad x_i \leq {\rm PS}_i(d^-_i) $$because \mathbf{x} \notin \mathcal{D}. Then either$$ V^{\it Lya}(t,x_i\mathbf{e}_i) \leq \Upsilon \cdot {\rm PS}_i(d^+_i) + \Upsilon\cdot |x_1 - {\rm PS}_i(d^+_i)| = \Upsilon|x_i| $$or$$ V^{\it Lya}(t,x_i\mathbf{e}_i) \leq -\Upsilon \cdot {\rm PS}_i(d^-_i) + \Upsilon\cdot |x_1 - {\rm PS}_i(d^-_i)| = \Upsilon|x_i|, $$so$$ V^{\it Lya}(t,x_i \mathbf{e}_i) \leq \Upsilon |x_i|, $$which in turn implies, for any j\in\{1,2,\dots,n\}, j\neq i, that$$ V^{\it Lya}(t,x_i \mathbf{e}_i + x_j\mathbf{e}_j) \leq V^{\it Lya}(t,x_i \mathbf{e}_i) + \Upsilon|x_j| \leq \Upsilon (|x_i| + |x_j|) $$and by mathematical induction V^{\it Lya}(t,\mathbf{x}) \leq \Upsilon \|\mathbf{x}\|_1. Then, by H\"older's inequality,$$ V^{\it Lya}(t,\mathbf{x}) \leq \Upsilon \|\mathbf{x}\|_1 = \Upsilon \Big(\sum_{i=1}^n \mathbf{e}_i\Big)\cdot\Big( \sum_{i=1}^n |x_i|\mathbf{e}_i \Big) \leq \Upsilon E_q \|\mathbf{x}\|_k, so by the linear constraints (LC1) and inequality (\ref{GLxx}), we have for every \varsigma \in\mathcal{S}_\mathcal{P}, T'' \geq t \geq T', t'\geq 0, and \boldsymbol{\xi} \in\mathcal{U}, such that \boldsymbol{\phi}_\varsigma(t,t',\boldsymbol{\xi}) \in \mathcal{M}\setminus\mathcal{D}, that \begin{align*} &-\frac{\varepsilon}{\Upsilon E_q} V^{\it Lya} (t,\boldsymbol{\phi}_\varsigma(t,t',\boldsymbol{\xi})) \\ &\geq -\varepsilon\|\boldsymbol{\phi}_\varsigma(t,t',\boldsymbol{\xi})\|_k \\ &\geq -\gamma(\|\boldsymbol{\phi}_\varsigma(t,t',\boldsymbol{\xi})\|_k) \\ & \geq \limsup_{h\to 0+}\frac{V^{\it Lya}(t+h,\boldsymbol{\phi}_\varsigma(t+h,t',\boldsymbol{\xi})) - V^{\it Lya}(t,\boldsymbol{\phi}_\varsigma(t,t',\boldsymbol{\xi}))}{h}. \end{align*} The differential equation \dot y(s) = -\Upsilon E_q / \varepsilon\cdot y(s) has solution y(s) = \exp[-\Upsilon E_q (s-t') / \varepsilon ]\,y(t'). Hence, by Lemma \ref{DIFFABS4}, V(s,\boldsymbol{\phi}_\varsigma(s,t',\boldsymbol{\xi})) \leq V(t,\boldsymbol{\phi}_\varsigma(t,t',\boldsymbol{\xi})) \exp\Big(-\frac{\Upsilon}{\varepsilon E_q }(s-t)\Big) $$and proposition (ii) holds. Proposition (iii) is a direct consequence of the propositions (i) and (ii) and the definition of the set \mathcal{A}. It merely states that if it is impossible for a solution to exit the set \mathcal{M}\setminus\mathcal{D} at the boundary \partial\mathcal{M}, then it either exits at the boundary \partial\mathcal{D} or it does not exit at all. \end{proof} \subsection{The autonomous case} \label{AUTOCASE} As was discussed after Definition \ref{LP}, one is tempted to try to parameterize a time-invariant Lyapunov function for the Switched System \ref{POLYSYS} if it is autonomous. The reason for this is that we proved in Theorem \ref{CONVLYA} that if it is autonomous, then there exists a time-invariant Lyapunov function. In the next definition we present a linear programming problem that does exactly this. It is a generalization of the linear programming problem presented in \cite{Marinosson:02a}, \cite{Marinosson:02b}, \cite{Hafstein:04}, and \cite{Hafstein:04b} to serve the Switched System \ref{POLYSYS} in the particular case that it is autonomous. \begin{definition} \label{LPA} \rm (Linear programming problem {\bf LP}(\{\mathbf{f}_p : p\in\mathcal{P}\},\mathcal{N},\mathbf{PS},\mathcal{D},\|\cdot\|))\quad Consider the Switched System \ref{POLYSYS}, where the set \mathcal{P} has a finite number of elements and the functions \mathbf{f}_p:\mathcal{U}\to\mathbb{R}^n, p\in\mathcal{P} are time-invariant. Let \mathbf{PS}:\mathbb{R}^n\to\mathbb{R}^n be a piecewise scaling function and \mathcal{N}\subset\mathcal{U} be such that the interior of the set$$ \mathcal{M} := \bigcup_{\mathbf{z}\in\mathbb{Z}^n,\; \mathbf{PS}(\mathbf{z} + [0,1]^n) \subset \mathcal{N}} \mathbf{PS}(\mathbf{z}+[0,1]^n) $$is a connected set that contains the origin. Let \|\cdot\| be an arbitrary norm on \mathbb{R}^n and let$$ \mathcal{D}:=\mathbf{PS}(\,]d^-_1,d^+_1[\,\times\,]d^-_2,d^+_2\,[\times\, \dots \, \times\, ]d^-_n,d^+_n[\,) $$be a set, of which the closure is contained in the interior of \mathcal{M}, and either \mathcal{D}=\emptyset or d^-_i and d^+_i are integers such that d^-_i \leq -1 and 1\leq d^+_i for all i=1,2,\dots,n.$$ \fbox{\parbox{95mm}{\noindent We assume that the components of the\mathbf{f}_p$,$p\in\mathcal{P}$, have bounded second-order partial derivatives on$\mathcal{M}\setminus\mathcal{D}.}} $$The linear programming problem {\bf LP}(\{\mathbf{f}_p : p\in\mathcal{P}\},\mathcal{N},\mathbf{PS},\mathcal{D},\|\cdot\|) is now constructed in the following way: \begin{enumerate} \item[(i)] Define the sets \begin{gather*} \mathcal{G}_a := \{\mathbf{x} \in \mathbb{R}^n : \mathbf{x} \in \mathbf{PS}(\mathbb{Z}^n)\cap \big{(}\mathcal{M}\setminus\mathcal{D}\big{)}\},\\ \mathcal{X}^{\|\cdot\|}:= \{\|\mathbf{x}\| : \ \mathbf{x} \in \mathbf{PS}(\mathbb{Z}^n)\cap \mathcal{M}\}. \end{gather*} \item[(ii)] Define for every \sigma \in \operatorname{Perm}[\{1,2,\dots,n\}] and every i=1,\dots,n+1 the vector \begin{equation*} \mathbf{x}^{\sigma}_i := \sum_{j=i}^n\mathbf{e}_{\sigma(j)}. \end{equation*} \item[(iii)] Define the set \mathcal{Z}_a through:$$ \mathcal{Z}_a := \big{\{} (\mathbf{z},\mathcal{J}) \in \mathbb{Z}^n_{\geq 0} \times \mathfrak{P}(\{1,2,\dots,n\}) : \mathbf{PS}(\mathbf{R}^\mathcal{J}(\mathbf{z}+[0,1]^n)) \subset \mathcal{M}\setminus\mathcal{D} \big{\}}. $$\item[iv)] For every (\mathbf{z},\mathcal{J}) \in \mathcal{Z}_a, every \sigma \in \operatorname{Perm}[\{1,2,\dots,n\}], and every i=1,2,\dots,n+1 we set$$ \mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i} := \mathbf{PS}(\mathbf{R}^\mathcal{J}(\mathbf{z}+\mathbf{x}_i^\sigma)). $$\item[(v)] Define the set $\mathcal{Y}_a := \big\{ \{\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,k}, \mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,k+1}\} : \sigma \in \operatorname{Perm}[\{1,2,\dots,n\}], (\mathbf{z},\mathcal{J}) \in \mathcal{Z}_a,\; k\in\{1,2,\dots,n\} \big\}.$ \item[(vi)] For every p\in\mathcal{P}, every (\mathbf{z},\mathcal{J}) \in\mathcal{Z}_a, and every r,s=1,2,\dots,n let B^{(\mathbf{z},\mathcal{J})}_{p,rs} be a real-valued constant, such that \begin{equation*} B^{(\mathbf{z},\mathcal{J})}_{p,rs} \geq \max_{i=1,2,\dots,n}\sup_{\mathbf{x} \in \mathbf{PS}(\mathbf{R}^\mathcal{J}(\mathbf{z}+[0,1]^n))}\left|\pdiff{^2 f_{p,i}}{x_r \partial x_s}(\mathbf{x})\right|. \end{equation*} \item[(vii)] For every (\mathbf{z},\mathcal{J}) \in\mathcal{Z}_a, every i,k=1,2,\dots,n, and every\\ \sigma \in \operatorname{Perm}[\{1,2,\dots,n\}], define$$ A^{(\mathbf{z},\mathcal{J})}_{\sigma,k,i} := \left|\mathbf{e}_k\cdot(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}-\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,n+1})\right|. $$\item[(viii)] Define the constant$$ x_{\min,\partial\mathcal{M}} := \min\{\|\mathbf{x}\| : \mathbf{x} \in \mathbf{PS}(\mathbb{Z}^n) \cap \partial \mathcal{M}\}, where \partial\mathcal{M} is the boundary of the set \mathcal{M}. \item[(ix)] For every p\in\mathcal{P}, every (\mathbf{z},\mathcal{J}) \in \mathcal{Z}_a, every \sigma \in \operatorname{Perm}[\{1,2,\dots,n\}], and every i=1,2,\dots,n+1 set $$\label{ERRFORMA} E^{(\mathbf{z},\mathcal{J})}_{p,\sigma,i} := \frac{1}{2}\sum_{r,s=1}^n B^{(\mathbf{z},\mathcal{J})}_{p,rs} A^{(\mathbf{z},\mathcal{J})}_{\sigma,r,i}(A^{(\mathbf{z},\mathcal{J})}_{\sigma,s,i}+A^{(\mathbf{z},\mathcal{J})}_{\sigma,s,1}).$$ \item[(ix)] Let \varepsilon > 0 and \delta >0 be arbitrary constants. \end{enumerate} The variables of the linear programming problem are: \begin{align*} &\Upsilon_a,\\ &\Psi_a[y], \quad \text{for all y\in \mathcal{X}^{\|\cdot\|}},\\ &\Gamma_a[y], \quad \text{for all y\in \mathcal{X}^{\|\cdot\|}},\\ &V_a[\mathbf{x}], \quad \text{for all \mathbf{x}\in \mathcal{G}_a},\\ &C_a[\{\mathbf{x},\mathbf{y}\}], \quad \text{for all \{\mathbf{x},\mathbf{y}\}\in \mathcal{Y}_a}. \end{align*} The linear constraints of the linear programming problem are: \begin{enumerate} \item[{\bf (LC1a)}] Let y_0,y_1,\dots,y_K be the elements of \mathcal{X}^{\|\cdot\|} in an increasing order. Then \begin{align*} &\Psi_a[y_0] = \Gamma_a[y_0] = 0 ,\\ &\varepsilon y_1 \leq \Psi_a[y_1] ,\\ &\varepsilon y_1 \leq \Gamma_a[y_1], \end{align*} and for every i=1,2,\dots,K-1: \begin{gather*} \frac{\Psi_a[y_i]-\Psi_a[y_{i-1}]}{y_i-y_{i-1}} \leq \frac{\Psi_a[y_{i+1}]-\Psi_a[y_i]}{y_{i+1}-y_i}, \\ \frac{\Gamma_a[y_i]-\Gamma_a[y_{i-1}]}{y_i-y_{i-1}} \leq \frac{\Gamma_a[y_{i+1}]-\Gamma_a[y_i]}{y_{i+1}-y_i}. \end{gather*} \item[{\bf (LC2a)}] For every \mathbf{x}\in \mathcal{G}_a: \begin{equation*} \Psi_a[\|\mathbf{x}\|] \leq V_a[\mathbf{x}]. \end{equation*} If \mathcal{D}=\emptyset, then: \begin{equation*} V_a[\boldsymbol{0}] =0. \end{equation*} If \mathcal{D} \neq \emptyset, then, for every \mathbf{x}\in \mathbf{PS}(\mathbb{Z}^n)\cap\partial\mathcal{D}: V_a[\mathbf{x}] \leq \Psi_a[x_{\min,\partial\mathcal{M}}]-\delta. $$Further, if \mathcal{D} \neq \emptyset, then for every i=1,2,\dots,n:$$ V_a[{\rm PS}_i(d_i^-)\mathbf{e}_i] \leq -\Upsilon_a \cdot{\rm PS}_i(d_i^-)\quad \text{and}\quad V_a[{\rm PS}_i(d_i^+)\mathbf{e}_i] \leq \Upsilon_a \cdot{\rm PS}_i(d_i^+). \item[{\bf (LC3a)}] For every \{\mathbf{x},\mathbf{y}\} \in \mathcal{Y}_a: \begin{equation*} -C_a[\{\mathbf{x},\mathbf{y}\}]\cdot \|\mathbf{x} - \mathbf{y}\|_{\infty} \leq V_a[\mathbf{x}]-V_a[\mathbf{y}] \leq C_a[\{\mathbf{x},\mathbf{y}\}]\cdot \|\mathbf{x} - \mathbf{y}\|_{\infty} \leq \Upsilon_a\cdot \|\mathbf{x} - \mathbf{y}\|_{\infty}. \end{equation*} \item[{\bf (LC4a)}] For every p\in\mathcal{P}, every (\mathbf{z},\mathcal{J}) \in \mathcal{Z}_a, every \sigma \in \operatorname{Perm}[\{1,2,\dots,n\}], and every i=1,2,\dots,n+1: \begin{align*} &-\Gamma_a\big[\|\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}\|\big] \\ &\geq \sum_{j=1}^n\Big(\frac{V_a[\mathbf{y}^{(\mathbf{z}, \mathcal{J})}_{\sigma,j}]- V_a[\mathbf{y}^{(\mathbf{z}, \mathcal{J})}_{\sigma,j+1}]} {\mathbf{e}_{\sigma(j)}\cdot(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}- \mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1})}f_{p,\sigma(j)}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i} + E^{(\mathbf{z},\mathcal{J})}_{p,\sigma,i} C_a[\{\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}, \mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1}\}]\Big). \end{align*} \end{enumerate} As the objective of the linear programming problem is not needed to parameterize a \operatorname{CPWA} Lyapunov function we do not define it here. \end{definition} Obviously, the two first comments after Definition \ref{LP} apply equally to the linear programming problem from this definition. Further, if the functions \mathbf{f}_p, p\in\mathcal{P}, in Definition \ref{LPA} are linear, then obviously we can set B^{(\mathbf{z},\mathcal{J})}_{p,rs}:=0 for all p\in\mathcal{P}, all (\mathbf{z},\mathcal{J}) \in \mathcal{Z}_a, and all r,s=1,2,\dots,n, and then the error terms'' E^{(\mathbf{z},\mathcal{J})}_{p,\sigma,i} are all identically zero. Linear problems are thus the most easy to solve with the linear programming problem because we can drop the variables C[\{\mathbf{x},\mathbf{y}\}] and the constraints (LC3) out of the linear programming problem altogether. If the linear programming problem {\bf LP}(\{\mathbf{f}_p : p\in\mathcal{P}\},\mathcal{N},\mathbf{PS},\mathcal{D},\|\cdot\|) from Definition \ref{LPA} possesses a feasible solution, then we can use this solution to parameterize a time-invariant \operatorname{CPWA} Lyapunov function for the autonomous Switched System \ref{POLYSYS} used in the construction of the linear programming problem. The definition of the parameterized \operatorname{CPWA} Lyapunov function in the autonomous case is in essence identical to the definition in the nonautonomous case. \begin{definition} \label{AUTODEF10} Assume that \begin{align*} &\Upsilon_a,\\ &\Psi_a[y], \quad \text{for all y\in \mathcal{X}^{\|\cdot\|}},\\ &\Gamma_a[y], \quad \text{for all y\in \mathcal{X}^{\|\cdot\|}},\\ &V_a[\mathbf{x}], \quad \text{for all \mathbf{x}\in \mathcal{G}_a},\\ &C_a[\{\mathbf{x},\mathbf{y}\}], \quad \text{for all \{\mathbf{x},\mathbf{y}\}\in \mathcal{Y}_a}. \end{align*} is a feasible solution to {\bf LP}(\{\mathbf{f}_p : p\in\mathcal{P}\},\mathcal{N},\mathbf{PS},\mathcal{D},\|\cdot\|) from Definition \ref{LPA}. Then we define the function V_a^{\it Lya} trough V_a^{\it Lya} \in \operatorname{CPWA}[\mathbf{PS},\mathbf{PS}^{-1}\big(\mathcal{M}\setminus\mathcal{D}\big)] and V_a^{\it Lya}(\mathbf{x}) := V_a[\mathbf{x}]\quad \text{for all\mathbf{x} \in \mathcal{G}_a$.} $$Further, we define the function \psi_a from the numerical values of the variables \Psi_a[y] and \gamma_a from the numerical values of the variables \Gamma_a[y], just as the functions \psi and \gamma were defined in Section \ref{SecFun} from the numerical values of the variables \Psi[y] and \Gamma[y] respectively. \end{definition} That V_a^{\it Lya} in Definition \ref{AUTODEF10} is a Lyapunov function for the autonomous Switched System \ref{POLYSYS}, that is equivalent to a time-invariant Lyapunov function parameterized by the linear programming problem {\bf LP}(\{\mathbf{f}_p : p\in\mathcal{P}\},\mathcal{N},\mathbf{PS},\mathbf{t},\mathcal{D},\|\cdot\|) from Definition \ref{LP}, is proved in the next theorem. \begin{theorem} \label{SIMPLLP} Consider the Switched System \ref{POLYSYS} where the set \mathcal{P} is finite. Let T' and T'' be constants such that 0\leq T'< T'' and let \mathbf{PS}:\mathbb{R}^n\to\mathbb{R}^n be a piecewise scaling function and \mathcal{N}\subset\mathcal{U} be such that the interior of the set$$ \mathcal{M} := \bigcup_{\mathbf{z}\in\mathbb{Z}^n ,\; \mathbf{PS}(\mathbf{z} + [0,1]^n) \subset \mathcal{N}} \mathbf{PS}(\mathbf{z}+[0,1]^n) $$is a connected set that contains the origin. Let \|\cdot\| be an arbitrary norm on \mathbb{R}^n and let$$ \mathcal{D}:=\mathbf{PS}(\,]d^-_1,d^+_1[\,\times\,]d^-_2,d^+_2\,[\times\, \dots \, \times\, ]d^-_n,d^+_n[\,) $$be a set, of which the closure is contained in the interior of \mathcal{M}, and either \mathcal{D}=\emptyset or d^-_i and d^+_i are integers such that d^-_i\leq -1 and 1\leq d^+_i for all i=1,2,\dots,n. Finally, let \mathbf{t}:= (t_0,t_1,\dots,t_M) \in \mathbb{R}^{M+1}, M\in\mathbb{N}_{>0} be a vector such that T'=:t_00 such that [-a,a]^n\subset \mathcal{U} and W\in\mathcal{C}^2(\mathbb{R}_{\geq 0} \times ([-a,a]^n\setminus\{\boldsymbol{0}\})) is a Lyapunov function for the switched system. By Theorem \ref{CONVLYA} this is, for example, the case if the origin is a uniformly asymptotically stable equilibrium of the Switched System \ref{POLYSYS}, [-a,a]^n is a subset of its region of attraction, and the functions \mathbf{f}_p all satisfy the Lipschitz condition: for every p\in\mathcal{P} there exists a constant L_p such that$$ \|\mathbf{f}_p(t,\mathbf{x}) - \mathbf{f}_p(s,\mathbf{y})\| \leq L_p(|s-t| - \|\mathbf{x}-\mathbf{y}\|),\quad \text{for all$s,t\in\mathbb{R}_{\geq0}$and all$\mathbf{x},\mathbf{y}\in [-a,a]^n.} $$By Definition \ref{DEFLYAFUNC} there exist, for an arbitrary norm \|\cdot\| on \mathbb{R}^n, class \mathcal{K} functions \alpha, \beta, and \omega, such that$$ \alpha(\|\mathbf{x}\|) \leq W(t,\mathbf{x}) \leq \beta(\|\mathbf{x}\|) $$and $$\label{OMEGAING} [\nabla_\mathbf{x} W](t,\mathbf{x})\cdot \mathbf{f}_p(t,\mathbf{x}) + \pdiff{W}{t}(t,\mathbf{x}) \leq -\omega(\|\mathbf{x}\|)$$ for all (t,\mathbf{x}) \in \mathbb{R}_{>0} \times\,(]-a,a[^n\setminus\{\boldsymbol{0}\}) and all p\in\mathcal{P}. Further, by Lemma \ref{CONVLEMMA}, we can assume without loss of generality that \alpha and \omega are convex functions. Now, let 0\leq T' < T'' < +\infty be arbitrary and let \mathcal{D}'\subset[-a,a]^n be an arbitrary neighborhood of the origin. Especially, the set \mathcal{D}'\neq \emptyset can be taken as small as one wishes. We are going to prove that we can parameterize a \operatorname{CPWA} Lyapunov function on the set [T',T'']\times \big{(}[-a,a]^n \setminus \mathcal{D}'\big{)}. We will start by assigning values to the constants and the variables of the linear programming problem {\bf LP}(\{\mathbf{f}_p : p\in\mathcal{P}\},]-a,a[^n,\mathbf{PS},\mathbf{t},\mathcal{D},\|\cdot\|) in Definition \ref{LP}. This includes that we define the piecewise scaling function \mathbf{PS}, the vector \mathbf{t}, and the set \mathcal{D}\subset \mathcal{D}'. Thereafter, we will prove that the linear constraints of the linear programming problem are all fulfilled by these values. \subsection{The assignments} First, we determine a constant B that is an upper bound on all second-order partial derivatives of the components of the functions \mathbf{f}_{p}, p\in\mathcal{P}. That is, with \tilde{\mathbf{x}} = (\tilde{x}_0,\tilde{x}_1,\dots,\tilde{x}_n) := (t,\mathbf{x}) and$$ \tilde{\mathbf{f}}_p(\tilde{\mathbf{x}}) = (\tilde{f}_{p,0}(\tilde{\mathbf{x}}),\tilde{f}_{p,1}(\tilde{\mathbf{x}}),\dots,\tilde{f}_{p,n}(\tilde{\mathbf{x}})) \\ = (1,f_{p,1}(t,\mathbf{x}),f_{p,2}(t,\mathbf{x}),\dots,f_{p,n}(t,\mathbf{x})), $$we need a constant B<+\infty such that$$ B \geq \max_{p\in\mathcal{P} ,\; i,r,s=0,1,\dots,n ,\; \tilde{\mathbf{x}} \in [T',T'']\times [-a,a]^n} \Big|\frac{\partial^2\tilde{f}_{p,i}}{\partial \tilde{x}_r\partial \tilde{x}_s}(\tilde{\mathbf{x}})\Big|. $$We must, at least in principle, be able to assign a numerical value to the constant B. This is in contrast to the rest of the constants and variables, where the mere knowledge of the existence of the appropriate values suffices. However, because B is an arbitrary upper bound (no assumptions are needed about its quality) on the second-order partial derivatives of the components of the functions \mathbf{f}_p on the compact set [T',T''] \times [-a,a]^n, this should not cause any difficulties if the algebraic form of the components is known. It might sound strange that the mere existence of the appropriate values to be assigned to the other variables suffices in a constructive theorem. However, as we will prove later on, if they exist then the simplex algorithm, for example, will successfully determine valid values for them. With$$ x^*_{\rm min} := \min_{\|\mathbf{x}\|_\infty = a} \|\mathbf{x}\| $$we set$$ \delta := \frac{\alpha(x^*_{\rm min})}{2} $$and let m^* be a strictly positive integer, such that $$\label{BETADELTA} [-\frac{a}{2^{m^*}},\frac{a}{2^{m^*}}]^n \subset \{\mathbf{x} \in \mathbb{R}^n : \beta(\|\mathbf{x}\|) \leq \delta\}\cap \mathcal{D}'$$ and set$$ \mathcal{D} :=\, ]-\frac{a}{2^{m^*}},\frac{a}{2^{m^*}}[^n. $$Note that we do not know the numerical values of the constants \delta and m^* because \alpha and \beta are unknown. However, their mere existence allows us to properly define \delta and m^*. We will keep on introducing constants in this way. Their existence is secured in the sense that {\it there exists a constant with the following property.} Set \begin{gather*} x^* := 2^{-m^*}x^*_{\rm min},\quad \omega^* := \frac{1}{2}\omega(x^*),\\ A^* := \sup_{p\in\mathcal{P} ,\, \tilde{\mathbf{x}} \in [T',T'']\times [-a,a]^n} \|\tilde{\mathbf{f}}_p(\tilde{\mathbf{x}})\|_2. \end{gather*} We define \widetilde{W}(\tilde{\mathbf{x}}):= W(t,\mathbf{x}), where \tilde{\mathbf{x}} := (t,\mathbf{x}), and assign \begin{gather*} C := \max_{r = 0,1,\dots,n \atop \tilde{\mathbf{x}} \in [T',T'']\times ([-a,a]^n\setminus\mathcal{D})} \Big|\pdiff{\widetilde{W}}{\tilde{x}_r}(\tilde{\mathbf{x}})\Big|, \\ B^* := (n+1)^\frac{3}{2}\cdot \max_{r,s=0,1,\dots,n \atop \tilde{\mathbf{x}} \in [T',T'']\times ([-a,a]^n\setminus\mathcal{D})} \Big|\frac{\partial^2 \widetilde{W}}{\partial \tilde{x}_r\partial \tilde{x}_s}(\tilde{\mathbf{x}})\Big|, \\ C^* := (n+1)^3 C B. \end{gather*} We set a^* := \max\{T''-T',a\} and let m \geq m^* be an integer, such that$$ \frac{a^*}{2^m} \leq \frac{\sqrt{(A^*B^*)^2+4x^*\omega^* C^*}-A^*B^*}{2C^*}, $$and set d := 2^{m - m^*}. We define the piecewise scaling function \mathbf{PS}:\mathbb{R}^n \to \mathbb{R}^n through$$ \mathbf{PS}(j_1,j_2,\dots,j_n) := a 2^{-m}(j_1,j_2,\dots,j_n) $$for all (j_1,j_2,\dots,j_n)\in \mathbb{Z}^n and the vector \mathbf{t} := (t_0,t_1,\dots,t_{2^m}), where$$ t_j := T' + 2^{-m} j(T''-T') for all j=0,1,\dots,2^m. We assign the following values to the variables and the remaining constants of the linear programming problem {\bf LP}(\{\mathbf{f}_p : p\in\mathcal{P}\},\mathcal{N},\mathbf{PS},\mathbf{t},\mathcal{D},\|\cdot\|): \begin{align*} &B^{(\mathbf{z},\mathcal{J})}_{p,rs} := B, \quad \text{for all p\in\mathcal{P}, all (\mathbf{z},\mathcal{J}) \in\mathcal{Z}, and all r,s=0,1,\dots,n,}\\ &\Psi[y] := \alpha(y), \quad \text{for all y\in \mathcal{X}^{\|\cdot\|}},\\ &\Gamma[y] := \omega^*y, \quad \text{for all y\in \mathcal{X}^{\|\cdot\|}},\\ &V[\tilde{\mathbf{x}}] := \widetilde{W}(\tilde{\mathbf{x}}) \quad \text{for all \tilde{\mathbf{x}}\in \mathcal{G}},\\ &C[\{\tilde{\mathbf{x}},\tilde{\mathbf{y}}\}] := C, \quad \text{for all \{\tilde{\mathbf{x}},\tilde{\mathbf{y}}\} \in \mathcal{Y}},\\ &\Upsilon := \max\left\{C,\ a^{-1}2^{m^*}\cdot \max_{i=1,2,\dots,n} \beta(a2^{-m^*}\|\mathbf{e}_i\|)\right\},\\ &\varepsilon := \min\{\omega^*,\alpha(y_1)/y_1\},\quad \text{where}\quad y_1:=\min\{y : y\in\mathcal{X}^{\|\cdot\|}\ \text{and}\ y\neq 0 \} . \end{align*} We now show that the linear constraints (LC1), (LC2), (LC3), and (LC4) of the linear programming problem {\bf LP}(\{\mathbf{f}_p : p\in\mathcal{P}\},\mathcal{N},\mathbf{PS},\mathbf{t},\mathcal{D},\|\cdot\|) are satisfied by these values. \subsection{The constraints (LC1) are fulfilled} Let y_0,y_1,\dots,y_K be the elements of \mathcal{X}^{\|\cdot\|} in an increasing order. We have to show that \Psi[y_0] = \Gamma[y_0] = 0, \varepsilon y_1 \leq \Psi[y_1], \varepsilon y_1 \leq \Gamma[y_1], and that for every i=1,2,\dots,K-1: \begin{gather*} \frac{\Psi[y_i]-\Psi[y_{i-1}]}{y_i-y_{i-1}} \leq \frac{\Psi[y_{i+1}]-\Psi[y_i]}{y_{i+1}-y_i}, \\ \frac{\Gamma[y_i]-\Gamma[y_{i-1}]}{y_i-y_{i-1}} \leq \frac{\Gamma[y_{i+1}]-\Gamma[y_i]}{y_{i+1}-y_i}. \end{gather*} \begin{proof} Clearly \Psi[y_0] = \Gamma[y_0] = 0 because y_0 = 0 and \varepsilon y_1 \leq \omega^* y_1 = \Gamma[y_1] \quad \text{and} \quad \varepsilon y_1 \leq \frac{\alpha(y_1)}{y_1} y_1 = \Psi[y_1]. $$Because \alpha is convex we have for all i = 1,2,\dots,K-1 that$$ \frac{y_i - y_{i-1}}{y_{i+1}-y_{i-1}}\ \alpha(y_{i+1}) + \frac{y_{i+1} - y_{i}}{y_{i+1}-y_{i-1}}\ \alpha(y_{i-1}) \geq \alpha(y_i), $$that is$$ \frac{\alpha(y_i)-\alpha(y_{i-1})}{y_i-y_{i-1}} = \frac{\Psi[y_i]-\Psi[y_{i-1}]}{y_i-y_{i-1}} \leq \frac{\Psi[y_{i+1}]-\Psi[y_i]}{y_{i+1}-y_i} = \frac{\alpha(y_{i+1})-\alpha(y_i)}{y_{i+1}-y_i}. $$Finally, we clearly have for every i = 1,2,\dots,K-1 that$$ \frac{\omega^*}{2} = \frac{\Gamma[y_i]-\Gamma[y_{i-1}]}{y_i-y_{i-1}} \leq \frac{\Gamma[y_{i+1}]-\Gamma[y_i]}{y_{i+1}-y_i} = \frac{\omega^*}{2}. $$\end{proof} \subsection{The constraints (LC2) are fulfilled} We have to show that for every \tilde{\mathbf{x}}\in \mathcal{G} we have \begin{equation*} \Psi[\|\tilde{\mathbf{x}}\|_*] \leq V[\tilde{\mathbf{x}}], \end{equation*} that for every \tilde{\mathbf{x}}=(\tilde{x}_0,\tilde{x}_1,\dots,\tilde{x}_n), such that (\tilde{x}_1,\tilde{x}_2,\dots,\tilde{x}_n) \in \mathbf{PS}(\mathbb{Z}^n)\cap\partial\mathcal{D} we have$$ V[\tilde{\mathbf{x}}] \leq \Psi[x_{\min,\partial\mathcal{M}}]-\delta, $$and that for every i=1,2,\dots,n and every j=0,1,\dots,2^m we have$$ V[{\rm PS}_0(j) \mathbf{e}_0 + {\rm PS}_i(d_i^-)\mathbf{e}_i] \leq -\Upsilon {\rm PS}(d_i^-) $$and$$ V[{\rm PS}_0(j) \mathbf{e}_0 + {\rm PS}_i(d_i^-)\mathbf{e}_i] \leq \Upsilon {\rm PS}_i(d_i^+). $$\begin{proof} Clearly,$$ \Psi[\|\tilde{\mathbf{x}}\|_*] = \alpha(\|\tilde{\mathbf{x}}\|_*) \leq \widetilde{W}(\tilde{\mathbf{x}}) = V[\tilde{\mathbf{x}}] $$for all \tilde{\mathbf{x}} \in \mathcal{G}. For every \tilde{\mathbf{x}}=(\tilde{x}_0,\tilde{x}_1,\dots,\tilde{x}_n), such that (\tilde{x}_1,\tilde{x}_2,\dots,\tilde{x}_n) \in \mathbf{PS}(\mathbb{Z}^n)\cap\partial\mathcal{D}, we have by (\ref{BETADELTA}) that$$ V[\tilde{\mathbf{x}}] = \widetilde{W}(\tilde{\mathbf{x}}) \leq \beta(\|\tilde{\mathbf{x}}\|_*) \leq \delta = \alpha(x^*_{\rm min}) -\delta \leq \alpha(x_{\min,\partial\mathcal{M}}) -\delta = \Psi[x_{\min,\partial \mathcal{M}}]-\delta. Finally, note that d_i^+ = -d_i^- = d = 2^{m-m^*} for all i=1,2,\dots,n, which implies that for every i=1,2,\dots,n and j=0,1,\dots,2^m we have \begin{align*} V[{\rm PS}_0(j)\mathbf{e}_0 + {\rm PS}(d_i^+)\mathbf{e}_i] &= V[{\rm PS}_0(j)\mathbf{e}_0 + a2^{-m^*}\mathbf{e}_i]\\ &= W(t_j,a2^{-m^*}\mathbf{e}_i) \\ &\leq \beta(a2^{-m^*}\|\mathbf{e}_i\|)\\ & \leq \Upsilon a2^{-m^*} \\ &=\Upsilon \cdot {\rm PS}_i(d^+_i) \end{align*} and \begin{align*} V[{\rm PS}_0(j)\mathbf{e}_0 + {\rm PS}(d_i^-)\mathbf{e}_i] &= V[{\rm PS}_0(j)\mathbf{e}_0 -a2^{-m^*}\mathbf{e}_i]\\ &= W(t_j,-a2^{-m^*}\mathbf{e}_i) \\ &\leq \beta(a2^{-m^*}\|\mathbf{e}_i\|)\\ & \leq \Upsilon a2^{-m^*} \\ &=-\Upsilon \cdot {\rm PS}(d^-_i). \end{align*} \end{proof} \subsection{The constraints (LC3) are fulfilled} We have to show that \{\tilde{\mathbf{x}},\tilde{\mathbf{y}}\} \in \mathcal{Y}\\ implies the inequalities: \begin{equation*} -C[\{\tilde{\mathbf{x}},\tilde{\mathbf{y}}\}]\cdot \|\tilde{\mathbf{x}} - \tilde{\mathbf{y}}\|_{\infty} \leq V[\tilde{\mathbf{x}}]-V[\tilde{\mathbf{y}}] \leq C[\{\tilde{\mathbf{x}},\tilde{\mathbf{y}}\}]\cdot \|\tilde{\mathbf{x}} - \tilde{\mathbf{y}}\|_{\infty} \leq \Upsilon\cdot \|\tilde{\mathbf{x}} - \tilde{\mathbf{y}}\|_{\infty}. \end{equation*} \begin{proof} Let \{\tilde{\mathbf{x}},\tilde{\mathbf{y}}\} \in \mathcal{Y}. Then there is an i\in \{0,1,\dots,n\} such that \tilde{\mathbf{x}} - \tilde{\mathbf{y}} = \pm \mathbf{e}_i\|\tilde{\mathbf{x}}-\tilde{\mathbf{y}}\|_\infty . By the Mean-value theorem there is a \vartheta\in\,]0,1[ such that \Big|\frac{V[\tilde{\mathbf{x}}] - V[\tilde{\mathbf{y}}]}{\|\tilde{\mathbf{x}} -\tilde{\mathbf{y}}\|_\infty}\Big| = \Big|\frac{\widetilde{W}(\tilde{\mathbf{x}}) - \widetilde{W}(\tilde{\mathbf{y}})}{\|\tilde{\mathbf{x}}-\tilde{\mathbf{y}}\|_\infty}\Big| = \Big|\frac{\partial \widetilde{W}}{\partial \tilde{x}_i}(\tilde{\mathbf{y}} + \vartheta (\tilde{\mathbf{x}}-\tilde{\mathbf{y}}))\Big|. $$Hence, by the definition of the constants C and \Upsilon,$$ \Big|\frac{V[\tilde{\mathbf{x}}] - V[\tilde{\mathbf{y}}]}{\|\tilde{\mathbf{x}} -\tilde{\mathbf{y}}\|_\infty}\Big| \leq C \leq \Upsilon, which implies that the constraints (LC3) are fulfilled. \end{proof} \subsection{The constraints (LC4) are fulfilled} We have to show that for arbitrary p\in\mathcal{P}, (\mathbf{z},\mathcal{J})\in \mathcal{Z}, \sigma \in \operatorname{Perm}[\{0,1,\dots,n\}], and i\in\{0,1,\dots,n+1\} we have \label{MI-1} \begin{aligned} &-\Gamma\big[\|\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}\|_*\big] \\ &\geq \sum_{j=0}^n \Big( \frac{V[\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}]- V[\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1}]} {\mathbf{e}_{\sigma(j)}\cdot(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}- \mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1})}\tilde{f}_{p,\sigma(j)}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}) + E^{(\mathbf{z},\mathcal{J})}_{p,\sigma,i} C[\{\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j},\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1}\}] \Big). \end{aligned} \begin{proof} With the values we have assigned to the variables and the constants of the linear programming problem we have for every p\in\mathcal{P}, every (\mathbf{z},\mathcal{J})\in \mathcal{Z}, every \sigma \in \operatorname{Perm}[\{0,1,\dots,n\}], every i,j=0,1,\dots,n, and with h := a^*2^{-m}, that \begin{gather*} A^{(\mathbf{z},\mathcal{J})}_{\sigma,i,j} \leq h, \\ E^{(\mathbf{z},\mathcal{J})}_{p,\sigma,i}:=\frac{1}{2}\sum_{r,s=0}^n B^{(\mathbf{z},\mathcal{J})}_{p,rs} A^{(\mathbf{z},\mathcal{J})}_{\sigma,r,i}(A^{(\mathbf{z},\mathcal{J})}_{\sigma,s,i} +A^{(\mathbf{z},\mathcal{J})}_{\sigma,s,1}) \leq (n+1)^2Bh^2, \\ \sum_{j=0}^nC[\{\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j},\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1}\}] \leq (n+1)C. \end{gather*} Hence, inequality (\ref{MI-1}) follows if we can prove that \label{MI-1X} \begin{aligned} -\Gamma\big[\|\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}\|_*\big] &= -\omega^*\|\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}\|_*\\ & \geq \sum_{j=0}^n \frac{\widetilde{W}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}) - \widetilde{W}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1})} {\mathbf{e}_{\sigma(j)}\cdot(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}- \mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1})}{\tilde f}_{p,\sigma(j)} (\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}) + C^*h^2. \end{aligned} Now, by the Cauchy-Schwarz inequality and inequality (\ref{OMEGAING}), \begin{align*} &\sum_{j=0}^n \frac{ \widetilde{W}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}) - \widetilde{W}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1})} { \mathbf{e}_{\sigma(j)}\cdot(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}- \mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1}) } \tilde{f}_{p,\sigma(j)}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i})\\ &=\sum_{j=0}^n \Big( \frac{\widetilde{W}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}) - \widetilde{W}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1})} {\mathbf{e}_{\sigma(j)}\cdot(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}- \mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1})}- \pdiff{\widetilde{W}}{\tilde{x}_{\sigma(j)}} (\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i})\Big) \tilde{f}_{p,\sigma(j)}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}) \\ &\quad +\nabla_{\tilde{\mathbf{x}}} \widetilde{W}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}) \cdot \tilde{\mathbf{f}}_p(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i})\\ &\leq \Big\|\sum_{j=0}^n \Big( \frac{\widetilde{W}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}) - \widetilde{W}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1})} {\mathbf{e}_{\sigma(j)}\cdot(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}- \mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1})} -\pdiff{\widetilde{W}}{\tilde{x}_{\sigma(j)}}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}) \Big)\mathbf{e}_j \Big\|_2 \|\tilde{\mathbf{f}}_p(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i})\|_2\\ &\quad- \omega(\|\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}\|_*)\\ \end{align*} By the Mean-value theorem there is an \mathbf{y} on the line-segment between the vectors \mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j} and \mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1}, such that \frac{\widetilde{W}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j})- \widetilde{W} (\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1})}{\mathbf{e}_{\sigma(j)}\cdot(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j} -\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1})} = \pdiff{\widetilde{W}}{\tilde{x}_{\sigma(j)}} (\mathbf{y}) $$and an \mathbf{y}^* on the line-segment between the vectors \mathbf{y} and \mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i} such that$$ \pdiff{\widetilde{W}}{\tilde{x}_{\sigma(j)}}(\mathbf{y}) - \pdiff{\widetilde{W}}{\tilde{x}_{\sigma(j)}}(\mathbf{y}^{(\mathbf{z}, \mathcal{J})}_{\sigma,i}) =\big[\nabla_{\tilde{\mathbf{x}}}\pdiff{\widetilde{W}}{\tilde{x}_{\sigma(j)}} \big](\mathbf{y}^*) \cdot(\mathbf{y} - \mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}). $$Because \mathbf{y} and \mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i} are both elements of the simplex \mathbf{PS}(\mathbf{R}^\mathcal{J}(\mathbf{z}+S_\sigma)), we have$$ \|\mathbf{y} - \mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}\|_2 \leq h\sqrt{n+1} $$and because$$ \big\|\big[\nabla_{\tilde{\mathbf{x}}}\pdiff{\widetilde{W}}{\tilde{x}_{\sigma(j)}} \big](\mathbf{y}^*)\big\|_2 \leq \sqrt{n+1}\cdot \max_{r,s=0,1,\dots,n \atop \tilde{\mathbf{x}} \in [T',T'']\times ([-a,a]^n\setminus\mathcal{D})} \big|\frac{\partial^2 \widetilde{W}}{\partial \tilde{x}_r\partial \tilde{x}_s}(\tilde{\mathbf{x}})\big|, $$we obtain$$ \Big\|\sum_{j=0}^n \Big( \frac{\widetilde{W}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j})- \widetilde{W}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1})} {\mathbf{e}_{\sigma(j)}\cdot(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}- \mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1})}- \pdiff{\widetilde{W}}{\tilde{x}_{\sigma(j)}} (\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i})\Big)\mathbf{e}_j \Big\|_2 \leq h B^*. $$Finally, by the definition of A^*, \|\tilde{\mathbf{f}}_p(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i})\|_2 \leq A^*. Putting the pieces together delivers the inequality$$ \sum_{j=0}^n \frac{ \widetilde{W}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}) - \widetilde{W}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1})} { \mathbf{e}_{\sigma(j)}\cdot(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}- \mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1}) } \tilde{f}_{p,\sigma(j)}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}) \leq hB^* A^* - \omega(\|\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}\|_*). $$From this inequality and because \omega(x) \geq 2\omega^*x for all x \geq x^* and because of the fact that \|\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}\|_*\geq x^*, inequality (\ref{MI-1X}) holds if$$ -\omega^*\|\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}\|_* \geq hA^*B^* - 2\omega^*\|\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}\|_* + h^2 C^*. $$This last inequality follows from$$ h := \frac{a^*}{2^m} \leq \frac{\sqrt{(A^*B^*)^2+4x^*\omega^* C^*}-A^*B^*}{2C^*}, $$which implies$$ 0 \geq hA^*B^* - \omega^*x^* + h^2 C^* \geq hA^*B^* - \omega^* \|\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}\|_* + h^2 C^*. $$Because p\in\mathcal{P}, (\mathbf{z},\mathcal{J})\in \mathcal{Z}, \sigma \in \operatorname{Perm}[\{0,1,\dots,n\}], and i\in\{0,1,\dots,n+1\} were arbitrary, the proof is complete \end{proof} In the last proof we took care of that the second-order polynomial$$ P(z) := z^2 C^* + z A^*B^* - \omega^*x^* $$has two distinct real-valued roots, one smaller than zero and one larger then zero. Further, because h:= a^*2^{-m}>0 is not larger than the positive root, we have P(h) \leq 0, which is exactly what we need in the proof so that everything adds up. \subsection{Summary of the results} In this Section \ref{SECCCT}, we have delivered a proof of the following theorem: \begin{theorem}[Constructive converse theorem for arbitrary switched systems] \label{HAUPTSATZ} \quad \\ Consider the Switched System \ref{POLYSYS} where \mathcal{P} is a finite set, let a>0 be a real-valued constant such that [-a,a]^n\subset\mathcal{U}, and assume that at least one of the following two assumptions holds: \begin{itemize} \item[(i)] There exists a Lyapunov function W \in\mathcal{C}^2(\mathbb{R}_{\geq 0} \times ([-a,a]^n\setminus\{\boldsymbol{0}\})) for the Switched System \ref{POLYSYS}. \item[(ii)] The origin is a uniformly asymptotically stable equilibrium point of the Switched System \ref{POLYSYS}, the set [-a,a]^n is contained in its region of attraction, and the functions \mathbf{f}_p satisfy the Lipschitz condition: for every p\in\mathcal{P} there exists a constant L_P such that$$ \|\mathbf{f}_p(s,\mathbf{x}) - \mathbf{f}_p(t,\mathbf{y})\| \leq L_p(|s-t|+ \|\mathbf{x}-\mathbf{y}\|) $$for all s,t\in \mathbb{R}_{\geq 0} and all \mathbf{x},\mathbf{y}\in [-a,a]^n. \end{itemize} Then, for every constants 0\leq T' < T'' < +\infty and every neighborhood \mathcal{N}\subset[-a,a]^n of the origin, no matter how small, it is possible to parameterize a Lyapunov function V^{\it Lya} of class \operatorname{CPWA},$$ V^{\it Lya} : [T',T'']\times\big{(}[-a,a]^n\setminus\mathcal{N}\big{)} \to \mathbb{R}, $$for the Switched System \ref{POLYSYS} by using the linear programming problem defined in Definition \ref{LP}. More concretely: Let m be a positive integer and define the piecewise scaling function \mathbf{PS}:\mathbb{R}^n \to \mathbb{R}^n, the set \mathcal{D}, and the vector \mathbf{t} of the linear programming problem through$$ \mathbf{PS}(j_1,j_2,\dots,j_n) := a2^{-m} (j_1,j_2,\dots,j_n),  \mathcal{D} :=\, ]-2^k\frac{a}{2^m},2^k\frac{a}{2^m}[^n \subset \mathcal{N}, $$for some integer 1\leq k < m, and$$ \mathbf{t} := (t_0,t_1,\dots,t_M),\quad \text{where}\quad t_i:= T'+j2^{-m}(T''-T')\quad \text{for allj=0,1,\dots,2^m$.} $$Then the linear programming problem {\bf LP}(\{\mathbf{f}_p : p\in\mathcal{P}\},[-a,a]^n,\mathbf{PS},\mathbf{t},\mathcal{D},\|\cdot\|) in Definition \ref{LP} possesses a feasible solution, whenever m is large enough. \end{theorem} \begin{proof} Note that by Theorem \ref{CONVLYA} assumption (ii) implies assumption (i). But then, by the arguments already delivered in this Section \ref{SECCCT}, the propositions of the theorem follow. \end{proof} Note that we have, in this Section \ref{SECCCT}, actually proved substantially more than stated in Theorem \ref{HAUPTSATZ}. Namely, we did derive formulae for the values of the parameters that are needed to initialize the linear programming problem in Definition \ref{LP}. These formulae do depend on the unknown Lyapunov function W, so we cannot extract the numerical values. However, these formulae are concrete enough to derive the promised algorithm for generating a \operatorname{CPWA} Lyapunov function. This will be done in Section \ref{SECALG}. \subsection{The autonomous case} The circumstances are close to identical when the Switched System \ref{POLYSYS} is autonomous. \begin{theorem}[Converse theorem for autonomous switched systems] \label{HAUPTSATZ2} \quad\\ Consider the Switched System \ref{POLYSYS} where \mathcal{P} is a finite set and assume that it is autonomous. Let a>0 be a real-valued constant such that [-a,a]^n\subset\mathcal{U}, and assume that at least one of the following two assumptions holds: \begin{itemize} \item[(i)] There exists a Lyapunov function W \in\mathcal{C}^2([-a,a]^n\setminus\{\boldsymbol{0}\}) for the Switched System \ref{POLYSYS}. \item[(ii)] The origin is an asymptotically stable equilibrium point of the Switched System \ref{POLYSYS}, the set [-a,a]^n is contained in its region of attraction, and the functions \mathbf{f}_p satisfy the Lipschitz condition: for every p\in\mathcal{P} there exists a constant L_P such that$$ \|\mathbf{f}_p(\mathbf{x}) - \mathbf{f}_p(\mathbf{y})\| \leq L_p\|\mathbf{x}-\mathbf{y}\|,\quad \text{for all$\mathbf{x},\mathbf{y}\in [-a,a]^n$.} $$\end{itemize} Then, for every neighborhood \mathcal{N}\subset[-a,a]^n of the origin, no matter how small, it is possible to parameterize a time-invariant Lyapunov function V^{\it Lya} of class \operatorname{CPWA},$$ V^{\it Lya} : [-a,a]^n\setminus\mathcal{N} \to \mathbb{R}, $$for the Switched System \ref{POLYSYS} by using the linear programming problem from Definition \ref{LPA}. More concretely: Let m be a positive integer and define the piecewise scaling function \mathbf{PS}:\mathbb{R}^n \to \mathbb{R}^n and the set \mathcal{D} of the linear programming problem through$$ \mathbf{PS}(j_1,j_2,\dots,j_n) := a2^{-m} (j_1,j_2,\dots,j_n) $$and$$ \mathcal{D} :=\, ]-2^k\frac{a}{2^m},2^k\frac{a}{2^m}[^n \subset \mathcal{N}, $$for some integer 1\leq k < m. Then, the linear programming problem {\bf LP}(\{\mathbf{f}_p : p\in\mathcal{P}\},[-a,a]^n,\mathbf{PS},\mathcal{D},\|\cdot\|) in Definition \ref{LPA} possesses a feasible solution, whenever m is large enough. \end{theorem} \begin{proof} The proof is essentially a slimmed down version of the proof of Theorem \ref{HAUPTSATZ}, so we will not go very thoroughly into details. First, note that by Theorem \ref{CONVLYA} assumption (ii) implies assumption (i), so in both cases there are functions \alpha,\beta,\gamma\in\mathcal{K} and a function W \in \mathcal{C}^2([-a,a]^n\setminus\{\boldsymbol{0}\}) \to \mathbb{R}, such that \begin{gather*} \alpha(\|\mathbf{x}\|) \leq W(\mathbf{x}) \leq \beta(\|\mathbf{x}\|),\\ \nabla W(\mathbf{x})\cdot \mathbf{f}_p(\mathbf{x}) \leq -\omega(\|\mathbf{x}\|) \end{gather*} for all \mathbf{x}\in\,]-a,a[\,^n\setminus\{\boldsymbol{0}\} and all p\in\mathcal{P}. Further, by Lemma \ref{CONVLEMMA}, we can assume without loss of generality that \alpha and \omega are convex functions. With$$ x^*_{\rm min} := \min_{\|\mathbf{x}\|_\infty = a} \|\mathbf{x}\| $$we set$$ \delta := \frac{\alpha(x^*_{\rm min})}{2} $$and let m^* be a strictly positive integer, such that$$ [-\frac{a}{2^{m^*}},\frac{a}{2^{m^*}}]^n \subset \{\mathbf{x} \in \mathbb{R}^n : \beta(\|\mathbf{x}\|) \leq \delta\}\cap \mathcal{N} $$and set$$ \mathcal{D}:= ]-\frac{a}{2^{m^*}},\frac{a}{2^{m^*}}[^n $$Set \begin{gather*} x^* := \min_{\|\mathbf{x}\|_\infty = a2^{-m^*}} \|\mathbf{x}\|,\quad \omega^* := \frac{1}{2}\omega(x^*),\\ C := \max_{i=1,2,\dots,n \atop \mathbf{x}\in[-a,a]^n\setminus\mathcal{D}} \Big|\pdiff{W}{x_i}(\mathbf{x})\Big|, \end{gather*} and determine a constant B such that $B \geq \max_{p\in\mathcal{P} \atop {i,r,s=1,2,\dots,n \atop \mathbf{x}\in[-a,a]^n}}\Big|\frac{\partial^2f_{p,i}}{\partial x_r\partial x_s}(\mathbf{x})\Big|.$ Assign \begin{gather*} A^* := \sup_{p\in\mathcal{P} \atop \mathbf{x} \in [-a,a]^n }\|\mathbf{f}_p(\mathbf{x})\|_2,\\ B^* := n^\frac{3}{2} \cdot \max_{r,s=1,2,\dots,n \atop \mathbf{x}\in[-a,a]^n\setminus\mathcal{D}}\left|\frac{\partial^2 W}{\partial x_r\partial x_s}(\mathbf{x})\right|,\\ C^* := n^3BC, \end{gather*} and let m \geq m^* be an integer such that$$ \frac{a}{2^m} \leq \frac{\sqrt{(A^*B^*)^2+4x^*\omega^*C^*}-A^*B^*}{2C^*} $$and set d := 2^{m - m^*}. We define the piecewise scaling function \mathbf{PS}:\mathbb{R}^n \to \mathbb{R}^n through$$ \mathbf{PS}(j_1,j_2,\dots,j_n) := a 2^{-m}(j_1,j_2,\dots,j_n)\quad \text{for all$(j_1,j_2,\dots,j_n)\in \mathbb{Z}^n.} We assign the following values to the variables and the remaining constants of the linear programming problem {\bf LP}(\{\mathbf{f}_p : p\in\mathcal{P}\},[-a,a]^n,\mathbf{PS},\mathcal{D},\|\cdot\|): \begin{align*} &B^{(\mathbf{z},\mathcal{J})}_{rs} := B, \quad \text{for all (\mathbf{z},\mathcal{J}) \in\mathcal{Z}_a and all r,s=1,2,\dots,n,}\\ &\Psi_a[y] := \alpha(y), \quad \text{for all y\in \mathcal{X}^{\|\cdot\|}},\\ &\Gamma_a[y] := \omega^*x, \quad \text{for all y\in \mathcal{X}^{\|\cdot\|}},\\ &V_a[\mathbf{x}] := W(\mathbf{x}) \quad \text{for all \mathbf{x}\in \mathcal{G}_a},\\ &C_a[\{\mathbf{x},\mathbf{y}\}] := C, \quad \text{for all \{\mathbf{x},\mathbf{y}\}\in \mathcal{Y}_a}, \\ &\varepsilon := \min\{\omega^*,\alpha(y_1)/y_1\},\quad \text{where}\quad y_1:=\min\{y : y\in\mathcal{X}^{\|\cdot\|}\ \text{and}\ y\neq 0 \} . \end{align*} That the linear constraints (LC1a), (LC2a), and (LC3a) are all satisfied follows very similarly to how the linear constraints (LC1), (LC2), and (LC3) follow in the nonautonomous case, so we only show that the constraints (LC4a) are fulfilled. To do this let (\mathbf{z},\mathcal{J})\in \mathcal{Z}_a, \sigma \in \operatorname{Perm}[\{1,2,\dots,n\}], and i\in\{1,2,\dots,n+1\} be arbitrary, but fixed throughout the rest of the proof. We have to show that \label{MI} \begin{aligned} &-\Gamma_a[\|\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}\|] \\ &\geq \sum_{j=1}^n \frac{V_a[\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}]- V_a[\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1}]} {\mathbf{e}_{\sigma(j)}\cdot(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}- \mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1})}f_{p,\sigma(j)}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}) + E^{(\mathbf{z},\mathcal{J})}_{p,\sigma,i} \sum_{j=1}^nC_a[\{\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j},\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1}\}]. \end{aligned} With the values we have assigned to the variables and the constants of the linear programming problem, inequality (\ref{MI}) holds if -\omega^*\|\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}\| \geq \sum_{j=1}^n \frac{ W[\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}] - W[\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1}]} { \mathbf{e}_{\sigma(j)}\cdot(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}- \mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1}) } f_{\sigma(j)}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}) + h^2 C^* with h := a2^{-m}. Now, by the Mean-value theorem and because \omega(x) \geq 2\omega^*x for all x \geq x^*, \begin{align*} &\sum_{j=1}^n \frac{ W[\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}] - W[\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1}]} { \mathbf{e}_{\sigma(j)}\cdot(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}- \mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1}) } f_{\sigma(j)}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}) + h^2 C^* \\ & =\sum_{j=1}^n \Big( \frac{W[\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}] - W[\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1}]} {\mathbf{e}_{\sigma(j)}\cdot(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}- \mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1})}- \pdiff{W}{\xi_{\sigma(j)}} (\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i})\Big) f_{\sigma(j)}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i})\\ &\quad +\nabla W(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}) \cdot \mathbf{f}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}) + h^2 C^*\\ &\leq \Big\|\sum_{j=1}^n \Big( \frac{W[\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}] - W[\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1}]} {\mathbf{e}_{\sigma(j)}\cdot(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}- \mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1})}- \pdiff{W}{\xi_{\sigma(j)}} (\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i})\Big)\mathbf{e}_j \Big\|_2 \|f_{\sigma(j)}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i})\|_2 \\ &\quad - \omega(\|\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}\|) + h^2 C^*\\ &\leq B^*hA^*- 2\omega^*\|\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}\| + h^2 C^*. \end{align*} Hence, if -\omega^*\|\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}\| \geq hA^*B^*- 2\omega^*\| \mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}\| + h^2 C^*, $$inequality (\ref{MI}) follows. But, this last inequality follows from$$ h := \frac{a}{2^m} \leq \frac{\sqrt{(A^*B^*)^2+4x^*\omega^*C^*}-A^*B^*}{2C^*}, $$which implies$$ 0\geq hA^*B^* - \omega^*x^* + h^2 C^* \geq hA^*B^* - \omega^*\|\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}\| + h^2 C^*. $$\end{proof} In Section \ref{SECALG} we will use Theorem \ref{HAUPTSATZ} to derive an algorithm for parameterizing a \operatorname{CPWA} Lyapunov function for the Switched System \ref{POLYSYS} and, if the Switched System \ref{POLYSYS} is autonomous, we will use Theorem \ref{HAUPTSATZ2} to derive an algorithm for parameterizing a time-invariant \operatorname{CPWA} Lyapunov function for the system. \section{An Algorithm for Constructing Lyapunov Functions} \label{SECALG} In this section use the results from Theorem \ref{HAUPTSATZ} and Theorem \ref{HAUPTSATZ2} to prove that the systematic scan of the initiating parameters of the linear programming problem from Definition \ref{LP} in Procedure \ref{ALGO} is an algorithm for constructing Lyapunov functions for the Switched System \ref{POLYSYS}, whenever one exists, and that Procedure \ref{ALGO2} is an algorithm for constructing time-invariant Lyapunov functions for the Switched System \ref{POLYSYS} if it is autonomous, again, whenever one exists. However, we first give a short discussion on {\it algorithms}, because we intend to prove that our procedure to generate Lyapunov functions is concordant with the concept of an algorithm, whenever the system in question possesses a Lyapunov function. Donald Knuth writes in his classic work {\it The Art of Computer Programming} on algorithms \cite{KNUTH}: \begin{quote} The modern meaning for algorithm is quite similar to that of {\it recipe}, {\it process}, {\it method}, {\it technique}, {\it procedure}, {\it routine}, {\it rigmarole}, except that the word algorithm'' connotes something just a little different. Besides merely being a finite set of rules that gives a sequence of operations for solving a specific type of problem, an algorithm has five important features: \begin{itemize} \item[(1)] {\it Finiteness.} An algorithm must always terminate in a finite number of steps. [\dots] \item[(2)] {\it Definiteness.} Each step of an algorithm must be precisely defined; the actions to be carried out must be be rigorously and unambiguously specified for each case. [\dots] \item[(3)] {\it Input.} An algorithm has zero or more {\it inputs}: quantities that are given to it initially before the algorithm begins, or dynamically as the algorithm runs. These inputs are taken from specified sets of objects. [\dots] \item[(4)] {\it Output.} An algorithm has one or more {\it outputs}: quantities that have a specified relation to the inputs. [\dots] \item[(5)] {\it Effectiveness.} An algorithm is also generally expected to be {\it effective}, in the sense that its operations must all be sufficiently basic that they can in principle be done exactly and in a finite length of time by someone using pencil and paper. \end{itemize} \end{quote} The construction scheme for a Lyapunov function we are going to derive here does comply to all of these features whenever the equilibrium at the origin is an uniformly asymptotically stable equilibrium of the Switched System \ref{POLYSYS}, and is therefore an algorithm for constructing Lyapunov functions for arbitrary switched systems possessing a uniformly asymptotically stable equilibrium. \subsection{The algorithm in the nonautonomous case} We begin by defining a procedure to construct Lyapunov functions and then we prove that it is an algorithm for constructing Lyapunov functions for arbitrary switched systems possessing a uniformly asymptotically stable equilibrium. \begin{procedure} \label{ALGO} Consider the Switched System \ref{POLYSYS} where \mathcal{P} is a finite set, let a>0 be a constant such that [-a,a]^n \subset \mathcal{U}, and let \mathcal{N}\subset\mathcal{U} be an arbitrary neighborhood of the origin. Further, let T' and T'' be arbitrary real-valued constants such that 0 \leq T' < T'' and let \|\cdot\| be an arbitrary norm on \mathbb{R}^n.$$ \fbox{\parbox{95mm}{\noindent We assume that the components of the\mathbf{f}_p$,$p\in\mathcal{P}$, have bounded second-order partial derivatives on$[T',T'']\times[-a,a]^n$.}} $$First, we have to determine a constant B such that$$ B \geq \max_{{p\in\mathcal{P} ,\; i,r,s=0,1,\dots,n \atop \tilde{\mathbf{x}} \in [T',T'']\times[-a,a]^n}} \Big|\frac{\partial^2\tilde{f}_{p,i}}{\partial \tilde{x}_r\partial \tilde{x}_s}(\tilde{\mathbf{x}})\Big|. $$The process has two integer variables that have to be initialized, namely m and N. They should be initialized as follows: Set N := 0 and assign the smallest possible positive integer to m such that$$ ]-a2^{-m},a2^{-m}[\,^n \subset \mathcal{N}. $$The process consists of the following steps: \begin{enumerate} \item[(i)] Define the piecewise scaling function \mathbf{PS}:\mathbb{R}^n \to \mathbb{R}^n and the vector \mathbf{t}:=(t_0,t_1,\dots,t_{2^m}), through$$ \mathbf{PS}(j_1,j_2,\dots,j_n) := a2^{-m}(j_1,j_2,\dots,j_n),\quad \text{for all$(j_1,j_2,\dots,j_n) \in \mathbb{Z}^n$} $$and$$ t_i := T'+i\frac{T''-T'}{2^m}, \quad \text{for$i=0,1,\dots,2^m$.} $$\item[(ii)] For every N^* = 0,1,\dots,N we do the following:\\ Generate the linear programming problem$$ {\bf LP}(\{\mathbf{f}_p : p\in\mathcal{P}\},[-a,a]^n,\mathbf{PS},\mathbf{t},]-a2^{N^*-m},a2^{N^*-m}[^n,\|\cdot\|) $$as defined in Definition \ref{LP} and check whether it possesses a feasible solution or not. If one of the linear programming problems possesses a feasible solution, then go to step (iii). If none of them possesses a feasible solution, then assign m := m + 1 and N := N+1 and go back to step i). \item[(iii)] Use the feasible solution to parameterize a \operatorname{CPWA} Lyapunov function for the Switched System \ref{POLYSYS} as described in Section \ref{SecFun}. \end{enumerate} \end{procedure} After all the preparation we have done, the proof that Procedure \ref{ALGO} is an algorithm for constructing Lyapunov functions for arbitrary switched systems possessing a uniformly asymptotically stable equilibrium is remarkably short. \begin{theorem}[Procedure \ref{ALGO} is an algorithm] \label{PISA} Consider the Switched System \ref{POLYSYS} where \mathcal{P} is a finite set and let a>0 be a constant such that [-a,a]^n \subset \mathcal{U}.$$ \fbox{\parbox{95mm}{\noindent We assume that the components of the$\mathbf{f}_p$,$p\in\mathcal{P}$, have bounded second-order partial derivatives on$[T',T'']\times[-a,a]^n$for\\ every$0\leq T'0$be a constant such that$[-a,a]^n \subset \mathcal{U}$, and let$\mathcal{N}\subset\mathcal{U}$be an arbitrary neighborhood of the origin. Further, assume that the system is autonomous and let$\|\cdot\|$be an arbitrary norm on$\mathbb{R}^n$. $$\fbox{\parbox{95mm}{\noindent We assume that the components of the \mathbf{f}_p, p\in\mathcal{P}, have bounded second-order partial derivatives on [-a,a]^n.}}$$ First, we have to determine a constant$B$such that $$B \geq \max_{p\in\mathcal{P} \atop {i,r,s=1,2,\dots,n \atop \mathbf{x} \in [-a,a]^n}} \Big|\frac{\partial^2 f_{p,i}}{\partial x_r\partial x_s}(\mathbf{x})\Big|.$$ The procedure has two integer variables that have to be initialized, namely$m$and$N$. They should be initialized as follows: Set$N := 0$and assign the smallest possible positive integer to$m$such that $$]-a2^{-m},a2^{-m}[\,^n \subset \mathcal{N}.$$ The procedure consists of the following steps: \begin{enumerate} \item[(i)] Define the piecewise scaling function$\mathbf{PS}:\mathbb{R}^n \to \mathbb{R}^n$through $$\mathbf{PS}(j_1,j_2,\dots,j_n) := a2^{-m}(j_1,j_2,\dots,j_n),\quad \text{for all (j_1,j_2,\dots,j_n) \in \mathbb{Z}^n}.$$ \item[(ii)] For every$N^* = 0,1,\dots,N$we do the following:\\ Generate the linear programming problem $${\bf LP}(\{\mathbf{f}_p : p\in\mathcal{P}\},[-a,a]^n,\mathbf{PS},]-a2^{N^*-m},a2^{N^*-m}[^n,\|\cdot\|)$$ as defined in Definition \ref{LPA} and check whether it possesses a feasible solution or not. If one of the linear programming problems possesses a feasible solution, then go to step (iii). If none of them possesses a feasible solution, then assign$m := m + 1$and$N := N+1$and go back to step i). \item[(iii)] Use the feasible solution to parameterize a$\operatorname{CPWA}$Lyapunov function for the Switched System \ref{POLYSYS} as described in Definition \ref{AUTODEF10}. \end{enumerate} \end{procedure} The proof that Procedure \ref{ALGO2} is an algorithm for constructing time-invariant Lyapunov functions for arbitrary switched systems possessing an asymptotically stable equilibrium is essentially identical to the proof of Theorem \ref{PISA}, where the nonautonomous case is treated. \begin{theorem}[Procedure \ref{ALGO2} is an algorithm] \label{PISA2} Consider the Switched System \ref{POLYSYS} where$\mathcal{P}$is a finite set and assume that it is autonomous. Let$a>0$be a constant such that$[-a,a]^n \subset \mathcal{U}$. $$\fbox{\parbox{95mm}{\noindent We assume that the components of the \mathbf{f}_p, p\in\mathcal{P}, have bounded\\ second-order partial derivatives on [-a,a]^n.}}$$ Assume further, that at least one of the following two assumptions holds: \begin{itemize} \item[(i)] There exists a time-invariant Lyapunov function$W \in\mathcal{C}^2([-a,a]^n\setminus\{\boldsymbol{0}\})$for the Switched System \ref{POLYSYS}. \item[(ii)] The origin is an asymptotically stable equilibrium point of the Switched System \ref{POLYSYS}, the set$[-a,a]^n$is contained in its region of attraction, and the functions$\mathbf{f}_p$satisfy the Lipschitz condition: for every$p\in\mathcal{P}$there exists a constant$L_P$such that $$\|\mathbf{f}_p(\mathbf{x}) - \mathbf{f}_p(\mathbf{y})\| \leq L_p\|\mathbf{x}-\mathbf{y}\|,\quad \text{for all \mathbf{x},\mathbf{y}\in [-a,a]^n.}$$ \end{itemize} Then, for every neighborhood$\mathcal{N}\subset[-a,a]^n$of the origin, no matter how small, the Procedure \ref{ALGO2} delivers, in a finite number of steps, a time-invariant Lyapunov function$V^{\it Lya}$of class$\operatorname{CPWA}$, $$V^{\it Lya} : [-a,a]^n\setminus\mathcal{N} \to \mathbb{R},$$ for the autonomous Switched System \ref{POLYSYS}. \end{theorem} \begin{proof} Almost identical to the proof of Theorem \ref{PISA}. With the same notation as in Section \ref{SECCCT}, the linear programming problem $${\bf LP}(\{\mathbf{f}_p : p\in\mathcal{P}\},[-a,a]^n,\mathbf{PS},]-a2^{N^*-m},a2^{N^*-m}[^n,\|\cdot\|)$$ possesses a feasible solution, when$m$is so large that $$\frac{a}{2^m} \leq \frac{\sqrt{(A^*B^*)^2+4x^*\omega^* C^*}-A^*B^*}{2C^*}$$ and$N^*$,$0\leq N^* \leq N$, is such that $$[-\frac{a2^{N^*}}{2^m},\frac{a2^{N^*}}{2^m}]^n \subset \{\mathbf{x} \in \mathbb{R}^n : \beta(\|\mathbf{x}\|) \leq \delta \} \cap \mathcal{N}.$$ \end{proof} Because we have already proved in Theorem \ref{TDMOL} and Theorem \ref{CONVLYA} that the autonomous Switched System \ref{POLYSYS} possesses a time-invariant Lyapunov function, if and only if an equilibrium of the system is asymptotically stable, Theorem \ref{PISA2} implies the statement: \begin{quote} It is always possible, in a finite number of steps, to construct a time-invariant Lyapunov function for the autonomous Switched System \ref{POLYSYS} with the methods presented in this monograph, whenever one exists. \end{quote} \section{Examples of Lyapunov functions generated by linear programming} \label{SECEXA} In this section we give some examples of the construction of Lyapunov functions by the linear programming problem {\bf LP}$(\{\mathbf{f}_p : p\in\mathcal{P}\},\mathcal{N},\mathbf{PS},\mathbf{t},\mathcal{D},\|\cdot\|)$from Definition \ref{LP} and the linear programming problem {\bf LP}$(\{\mathbf{f}_p : p\in\mathcal{P}\},\mathcal{N},\mathbf{PS},\mathcal{D},\|\cdot\|)$from Definition \ref{LPA}. In all the examples we will use the infinity norm, that is$\|\cdot\| := \|\cdot\|_\infty$, in the linear programming problems. Further, we will use piecewise scaling functions$\mathbf{PS}$, whose components${\rm PS}_i$are all odd functions, that is (recall that the$i$-th component${\rm PS}_i$of$\mathbf{PS}$does only depend on the$i$-th variable$x_i$of the argument$\mathbf{x}$)${\rm PS}_i(x_i) = -{\rm PS}_i(-x_i)$. Because are only be interested in the values of a piecewise scaling functions on compact subsets$[-m,m]^n \subset\mathbb{R}^n$,$m\in \mathbb{N}_{>0}$, this implies that we can define such a function by specifying$n$vectors$\textbf{ps}_i := ({\rm ps}_{i,1},{\rm ps}_{i,2},\dots,{\rm ps}_{i,m})$,$i=1,2,\dots,n$. If we say that the piecewise scaling function$\mathbf{PS}$is defined through the ordered vector tuple$(\textbf{ps}_1,\textbf{ps}_2,\dots,\textbf{ps}_n)$, we mean that$\mathbf{PS}(\boldsymbol{0}) := \boldsymbol{0}$and that for every$i=1,2,\dots,n$and every$j=1,2,\dots,m$, we have $${\rm PS_i}(j) := {\rm ps}_{i,j}\quad \text{and}\quad {\rm PS_i}(-j) := -{\rm ps}_{i,j}.$$ If we say that the piecewise scaling function$\mathbf{PS}$is defined through the vector$\textbf{ps}$, we mean that it is defined trough the vector tuple$(\textbf{ps}_1,\textbf{ps}_2,\dots,\textbf{ps}_n)$, where$\textbf{ps}_i := \textbf{ps}$for all$i=1,2,\dots,n$. The linear programming problems were all solved by use of the GNU Linear programming kit (GLPK), version 4.8, developed by Andrew Makhorin. It is a free software that is available for download on the internet. The parameterized Lyapunov functions were drawn with gnuplot, version 3.7, developed by Thomas Williams and Colin Kelley. Just as GLPK, gnuplot is a free software that is available for download on the internet. The author is indebted to these developers. \subsection{An autonomous system} \label{SSECEXA1} As a fist example of the use of the linear programming problem from Definition \ref{LPA} and Procedure \ref{ALGO2} we consider the continuous system $$\dot{\mathbf{x}} = \mathbf{f}(\mathbf{x}),\quad \text{where}\quad \label{EXB1} \mathbf{f}(x,y) := \begin{pmatrix} x^3(y-1) \\ -\frac{x^4}{(1+x^2)^2} -\frac{y}{1+y^2} \\ \end{pmatrix}.$$ \begin{figure}[ht] \begin{center} \includegraphics[width=0.9\textwidth]{fig1} \end{center} \caption{A Lyapunov function for the system (\ref{EXB1}) generated by Algorithm \ref{ALGO2}.} \label{exfig1} \end{figure} This system is taken from Example 65 in Section 5.3 in \cite{vidyasagar}. The Jacobian of$\mathbf{f}$at the origin has the eigenvalues$0$and$-1$. Hence, the origin is not an exponentially stable equilibrium point (see, for example, Theorem 4.4 in \cite{NS} or Theorem 15 in Section 5.5 in \cite{vidyasagar}). We initialize Procedure \ref{ALGO2} with $$a := \frac{8}{15}\quad \ \text{and}\quad \ \mathcal{N} :=\, ]-\frac{2}{15},\frac{2}{15}[\,^2.$$ \begin{figure}[ht] \begin{center} \includegraphics[width=0.9\textwidth]{fig2} \end{center} \caption{A Lyapunov function for the system (\ref{EXB1}), parameterized with the linear programming problem from Definition \ref{LPA}, with a larger domain than the Lyapunov function on Figure \ref{exfig1}.} \label{exfig2} \end{figure} Further, with $$\label{XzYz} x_{(\mathbf{z},\mathcal{J})} := \big|\mathbf{e}_1\cdot \mathbf{PS}(\mathbf{R}^\mathcal{J}(\mathbf{z} + \mathbf{e}_1))\big| \quad \text{and}\quad y_{(\mathbf{z},\mathcal{J})} := \big|\mathbf{e}_2\cdot\mathbf{PS}(\mathbf{R}^\mathcal{J}(\mathbf{z} + \mathbf{e}_2))\big|,$$ we set (note that for the constants$B^{(\mathbf{z},\mathcal{J})}_{p,rs}$the index$pis redundant because the system is non-switched) \begin{align*} B^{(\mathbf{z},\mathcal{J})}_{11} &:= 6 x_{(\mathbf{z},\mathcal{J})} (1 + y_{(\mathbf{z},\mathcal{J})} ),\\ B^{(\mathbf{z},\mathcal{J})}_{12} &:= 3 x_{(\mathbf{z},\mathcal{J})}^2,\\ B^{(\mathbf{z},\mathcal{J})}_{22} &:= \begin{cases} \frac{6 y_{(\mathbf{z},\mathcal{J})} }{(1 + y_{(\mathbf{z},\mathcal{J})}^2)^2} - \frac{8 y_{(\mathbf{z},\mathcal{J})} ^3}{(1 + y_{(\mathbf{z},\mathcal{J})} ^2)^3}, &\text{ify_{(\mathbf{z},\mathcal{J})} \leq \sqrt{2} -1,}\\ 1.46, &\text{else,}\end{cases} \end{align*} for all(\mathbf{z},\mathcal{J}) \in \mathcal{Z}$in the linear programming problems. This is more effective than using one constant$B$larger than all$B^{(\mathbf{z},\mathcal{J})}_{p,rs}$for all$(\mathbf{z},\mathcal{J}) \in \mathcal{Z}$and all$r,s=1,2,\dots,n$, as done to shorten the proof of Theorem \ref{HAUPTSATZ}. Procedure \ref{ALGO} succeeds in finding a feasible solution to the linear programming problem with$m=4$and$D=2$. The corresponding Lyapunov function of class$\operatorname{CPWA}$is drawn in Figure \ref{exfig1}. We used this Lyapunov function as a starting point to parameterize a$\operatorname{CPWA}$Lyapunov function with a larger domain and succeeded with$\mathcal{N} := [-1,1]^2$,$\mathcal{D} :=\,]-0.133,0.133[^2$, and$\mathbf{PS}$defined through the vector $$\textbf{ps}:= (0.033, 0.067, 0.1, 0.133, 0.18, 0.25, 0.3, 0.38, 0.45, 0.55, 0.7, 0.85, 0.93, 1)$$ as described at the beginning of Section \ref{SECEXA}. It is drawn on Figure \ref{exfig2}. Note that the domain of the Lyapunov function on Figure \ref{exfig1}, where we used the Procedure \ref{ALGO2} to scan the parameters of the linear programming problem from Definition \ref{LPA}, is much smaller than that of the Lyapunov function on Figure \ref{exfig2}, where we used another trial-and-error procedure to scan the parameters. This is typical! The power of Procedure \ref{ALGO2} and Theorem \ref{PISA2} is that they tell us that a systematic scan will lead to a success if there exists a Lyapunov function for the system. However, as Procedure \ref{ALGO2} will not try to increase the distance between the points in the grid$\mathcal{G}$of the linear programming problem far away from the equilibrium, it is not particularly well suited to parameterize Lyapunov functions with large domains. To actually parameterize Lyapunov functions a trial-and-error procedure that first tries to parameterize a Lyapunov function in a small neighborhood of the equilibrium, and if it succeeds it tries to extend the grid with larger grid-steps farther away from the equilibrium, is more suited. \begin{figure}[ht] \begin{center} \includegraphics[width=0.7\textwidth]{fig3} \end{center} \caption{The sets$\mathcal{D}$,$\mathcal{T}$, and$\mathcal{A}$from Lemma \ref{IMPLYA} for the Lyapunov function on Figure \ref{exfig2} for the system (\ref{EXB1}).} \label{exfig3} \end{figure} In Figure \ref{exfig3} the sets$\mathcal{D}$,$\mathcal{T}$, and$\mathcal{A}$from Lemma \ref{IMPLYA2} are drawn for this particular Lyapunov function. The innermost square is the boundary of$\mathcal{D}$, the outmost figure is the boundary of the set$\mathcal{A}$, and in between the boundary of$\mathcal{T}$is plotted. Every solution to the system (\ref{EXB1}) with an initial value$\boldsymbol{\xi}$in$\mathcal{A}$will reach the square$[-0.133,0.133]^2$in a finite time$t'$and will stay in the set$\mathcal{T}$for all$t\geq t'$. \subsection{An arbitrary switched autonomous system} \label{SSECEXA2} Consider the autonomous systems \begin{gather} \dot{\mathbf{x}} = \mathbf{f}_1(\mathbf{x}),\quad \text{where}\quad \mathbf{f}_1(x,y) := \begin{pmatrix} -y \\ x -y(1-x^2+0.1x^4) \end{pmatrix}, \label{EXB2} \\ \dot{\mathbf{x}} = \mathbf{f}_2(\mathbf{x}),\quad \text{where}\quad \mathbf{f}_2(x,y) := \begin{pmatrix} -y+x(x^2+y^2-1) \\ x+y(x^2+y^2-1) \end{pmatrix} \label{EXB3}, \intertext{and} \dot{\mathbf{x}} = \mathbf{f}_3(\mathbf{x}), \label{EXB4}\\ \text{where}\ \ \mathbf{f}_3(x,y) := \begin{pmatrix} -1.5 y \\ \frac{x}{1.5} + y\left( \left(\frac{x}{1.5}\right)^2 + y^2 -1\right) \end{pmatrix}. \nonumber \end{gather} The systems (\ref{EXB2}) and (\ref{EXB3}) are taken from Exercise 1.16 in \cite{NS} and from page 194 in \cite{NSASAC} respectively. \begin{figure}[ht] \begin{center} \includegraphics[width=0.9\textwidth]{fig4} %EX1diss.jpg \end{center} \caption{A Lyapunov function for the system (\ref{EXB2}) generated by the linear programming problem from Definition \ref{LPA}.} \label{EX1FIG} \end{figure} First, we used the linear programming problem from Definition \ref{LPA} to parameterize a Lyapunov function for each of the systems (\ref{EXB2}), (\ref{EXB3}), and (\ref{EXB4}) individually. We define$x_{(\mathbf{z},\mathcal{J})}$and$y_{(\mathbf{z},\mathcal{J})}as in formula (\ref{XzYz}) and for the system (\ref{EXB2}) we set \begin{align*} B^{(\mathbf{z},\mathcal{J})}_{1,11} &:= 2y_{(\mathbf{z},\mathcal{J})} + 1.2 y_{(\mathbf{z},\mathcal{J})} x_{(\mathbf{z},\mathcal{J})}^2,\\ B^{(\mathbf{z},\mathcal{J})}_{1,12} &:= 2 x_{(\mathbf{z},\mathcal{J})} + 0.4 x_{(\mathbf{z},\mathcal{J})}^3,\\ B^{(\mathbf{z},\mathcal{J})}_{1,22} &:= 0, \end{align*} for the system (\ref{EXB3}) we set \begin{align*} B^{(\mathbf{z},\mathcal{J})}_{2,11} &:= \max\{6 x_{(\mathbf{z},\mathcal{J})}, 2 y_{(\mathbf{z},\mathcal{J})} \},\\ B^{(\mathbf{z},\mathcal{J})}_{2,12} &:= \max\{2 x_{(\mathbf{z},\mathcal{J})}, 2 y_{(\mathbf{z},\mathcal{J})} \},\\ B^{(\mathbf{z},\mathcal{J})}_{2,22} &:= \max\{2 x_{(\mathbf{z},\mathcal{J})}, 6 y_{(\mathbf{z},\mathcal{J})} \}, \end{align*} and for the system (\ref{EXB4}) we set \begin{align*} B^{(\mathbf{z},\mathcal{J})}_{3,11} &:= \frac{8}{9} y_{(\mathbf{z},\mathcal{J})},\\ B^{(\mathbf{z},\mathcal{J})}_{3,12} &:= \frac{8}{9} x_{(\mathbf{z},\mathcal{J})},\\ B^{(\mathbf{z},\mathcal{J})}_{3,22} &:= 6 y_{(\mathbf{z},\mathcal{J})}. \end{align*} \begin{figure}[ht] \begin{center} \includegraphics[width=0.9\textwidth]{fig5} %EX4diss.jpg \end{center} \caption{A Lyapunov function for the system (\ref{EXB3}) generated by the linear programming problem from Definition \ref{LPA}. } \label{EX4FIG} \end{figure} We parameterized a\operatorname{CPWA}$Lyapunov function for the system (\ref{EXB2}) by use of the linear programming problem from Definition \ref{LPA} with$\mathcal{N} := [-1.337,1.337]^2$,$\mathcal{D} := \emptyset$, and$\mathbf{PS}$defined trough the vector $$\textbf{ps} := (0.0906, 0.316, 0.569, 0.695, 0.909, 1.016, 1.163, 1.236, 1.337)$$ as described at the beginning of Section \ref{SECEXA}. The Lyapunov function is depicted on Figure \ref{EX1FIG}. We parameterized a$\operatorname{CPWA}$Lyapunov function for the system (\ref{EXB3}) by use of the linear programming problem from Definition \ref{LPA} with$\mathcal{N} := [-0.818,0.818]^2$,$\mathcal{D} := \emptyset$, and$\mathbf{PS}$defined trough the vector $$\textbf{ps} := (0.188, 0.394, 0.497, 0.639, 0.8, 0.745, 0.794, 0.806, 0.818)$$ as described at the beginning of Section \ref{SECEXA}. The Lyapunov function is depicted on Figure \ref{EX4FIG}. We parameterized a$\operatorname{CPWA}$Lyapunov function for the system (\ref{EXB4}) by use of the linear programming problem from Definition \ref{LPA} with$\mathcal{N} := [-0.506,0.506]^2$,$\mathcal{D} :=\,]-0.01,0.01[^2$, and$\mathbf{PS}$defined trough the vector $$\textbf{ps} := (0.01, 0.0325, 0.0831, 0.197, 0.432, 0.461, 0.506)$$ as described at the beginning of Section \ref{SECEXA}. The Lyapunov function is depicted on Figure \ref{EX5FIG}. \begin{figure}[ht] \begin{center} \includegraphics[width=0.9\textwidth]{fig6} %EX5diss.jpg \end{center} \caption{A Lyapunov function for the system (\ref{EXB4}) generated by the linear programming problem from Definition \ref{LPA}.} \label{EX5FIG} \end{figure} \begin{figure}[hb] \begin{center} \includegraphics[width=0.9\textwidth]{fig7} %S1S4S5.jpg \end{center} \caption{A Lyapunov function for the arbitrary switched system (\ref{EXB145}) generated by the linear programming problem from Definition \ref{LPA}.} \label{EX145FIG} \end{figure} Finally, we parameterized a$\operatorname{CPWA}$Lyapunov function for the switched system $$\label{EXB145} \dot{\mathbf{x}} = \mathbf{f}_p(\mathbf{x}),\quad p\in\{1,2,3\},$$ where the functions$\mathbf{f}_1$,$\mathbf{f}_2$, and$\mathbf{f}_3$are, of course, the functions from (\ref{EXB2}), (\ref{EXB3}), and (\ref{EXB4}), by use of the linear programming problem from Definition \ref{LPA} with$\mathcal{N} := [-0.612,0.612]^2$,$\mathcal{D} :=\,]-0.01,0.01[^2$, and$\mathbf{PS}$defined trough the vector $$\textbf{ps} := (0.01,0.0325,0.0831,0.197,0.354,0.432,0.535,0.586,0.612)$$ as described at the beginning of Section \ref{SECEXA}. The Lyapunov function is depicted on Figure \ref{EX145FIG}. Note, that this Lyapunov function is a Lyapunov function for all of the systems (\ref{EXB2}), (\ref{EXB3}), and (\ref{EXB4}) individually. \begin{figure}[ht] \begin{center} \includegraphics[width=.55\textwidth]{fig8} % S1S4S5-domain.jpg \end{center} \caption{The region of attraction secured by the Lyapunov function on Figure \ref{EX145FIG} for the switched system. All solution that start in the larger set are asymptotically attracted to the smaller set at the origin.} \label{EX145FIGDOM} \end{figure} The equilibrium's region of attraction, secured by this Lyapunov function, and the set$\mathcal{D}$are drawn on Figure \ref{EX145FIGDOM}. Every solution to the system (\ref{EXB145}) that starts in the larger set will reach the smaller set in a finite time. \subsection{A variable structure system} \label{SSECEXA3} Consider the linear systems $\dot{\mathbf{x}} = A_1 \mathbf{x},\quad \text{where}\quad \label{EXB5} A_1 :=\begin{pmatrix} 0.1 & -1 \\ 2 & 0.1 \\ \end{pmatrix}$ and $\dot{\mathbf{x}} = A_2\mathbf{x},\quad \text{where}\quad \label{EXB5-2} A_2 := \begin{pmatrix} 0.1 & -2 \\ 1 & 0.1 \\ \end{pmatrix}.$ These systems are taken from \cite{Liberzon:99}. It is easy to verify that the matrices$A_1$and$A_2$both have the eigenvalues$\lambda_\pm = 0.1 \pm i\sqrt{2}$. Therefore, by elementary linear stability theory, the systems (\ref{EXB5}) and (\ref{EXB5-2}) are both unstable. \begin{figure}[ht] \begin{center} \includegraphics[width=0.45\textwidth]{fig9a} \quad % xch9A1.jpg \quad \includegraphics[width=0.45 \textwidth]{fig9b} % xch9A2.jpg \end{center} \caption{Trajectory of the system$\dot{\mathbf{x}} = A_1\mathbf{x}$(left) and of$\dot{\mathbf{x}} = A_2\mathbf{x}$(right) starting at$(1,0)$.} \label{ch9A1FIG}% \label{ch9A2FIG} \end{figure} \begin{figure}[hb] \begin{center} \includegraphics[width=0.85\textwidth]{fig10} % Dual2.jpg \end{center} \caption{A Lyapunov function for the variable structure system (\ref{ch9sw}) generated by an altered version of the linear programming problem from Definition \ref{LPA}.} \label{ch9swFIG} \end{figure} On Figure \ref{ch9A1FIG} %and Figure \ref{ch9A2FIG} the trajectories of the systems (\ref{EXB5}) and (\ref{EXB5-2}) with the initial value$(1,0)$are depicted. That the norm of the solutions is growing with$t$in the long run is clear. However, it is equally clear, that the solution to (\ref{EXB5}) is decreasing on the sets $$Q_2 := \{(x_1,x_2) : x_1 \leq 0 \text{ and } x_2 > 0\}\quad \text{and}\quad Q_4 := \{(x_1,x_2) : x_1 \geq 0 \text{ and } x_2 >0\}$$ and that the solution to (\ref{EXB5-2}) is decreasing on the sets $$Q_1 := \{(x_1,x_2) : x_1 > 0 \ \text{and}\ x_2 \geq0\}\quad \text{and}\quad Q_3 := \{(x_1,x_2) : x_1 < 0 \ \text{and}\ x_2 \leq0\}.$$ Now, consider the switched system $$\label{ch9sw} \dot{\mathbf{x}} = A_p\mathbf{x},\quad p\in\{1,2\},$$ where the matrices$A_1$and$A_2$are the same as in (\ref{EXB5}) and (\ref{EXB5-2}). Obviously, this system is not stable under arbitrary switching, but, if we only consider solution trajectories$(t,\boldsymbol{\xi}) \mapsto \boldsymbol{\phi}_\sigma(t,\boldsymbol{\xi}), such that \begin{gather} \boldsymbol{\phi}_\sigma(t,\boldsymbol{\xi}) \in Q_2 \cup Q_4,\quad \text{implies}\quad \sigma(t) = 1,\ \text{and} \label{VSSC1}\\ \boldsymbol{\phi}_\sigma(t,\boldsymbol{\xi}) \in Q_1 \cup Q_3, \quad \text{implies}\quad \sigma(t) = 2 \label{VSSC2}, \end{gather} then we would expect all trajectories under consideration to be asymptotically attracted to the equilibrium. The switched system (\ref{ch9sw}), together with the constraints (\ref{VSSC1}) and (\ref{VSSC2}), is said to be a {\it variable structure system}. The reason is quite obvious, the structure of the right-hand side of the system (\ref{ch9sw}) depends on the current position in the state-space. It is a simple task to modify the linear programming problem from Definition \ref{LPA} to parameterize a Lyapunov function for the variable structure system. Usually, one would include the constraint (LC4a), that is, \begin{align*} &-\Gamma\big[\|\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}\|\big] \\ &\geq \sum_{j=1}^n \Big( \frac{V[\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}] - V[\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1}]} {\mathbf{e}_{\sigma(j)}\cdot(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j}- \mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1})}\tilde{f}_{p,\sigma(j)}(\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,i}) + E^{(\mathbf{z},\mathcal{J})}_{p,\sigma,i} C[\{\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j},\mathbf{y}^{(\mathbf{z},\mathcal{J})}_{\sigma,j+1}\}] \Big). \end{align*} for everyp\in\mathcal{P}$, every$(\mathbf{z},\mathcal{J})\in\mathcal{Z}$, every$\sigma \in \operatorname{Perm}[\{1,2,\dots,n\}]$, and every$i=1,2,\dots,n+1$. In the modified linear programming problem however, we exclude the constraints for some values of$p$,$(\mathbf{z},\mathcal{J})$,$\sigma$, and$i$. It goes as follows: \begin{itemize} \item[(i)] Whenever$p =2$and either$\mathcal{J} = \{1\}$or$\mathcal{J}=\{2\}$, we do not include the constraint (LC4a), for these particular values of$p$,$(\mathbf{z},\mathcal{J})$,$\sigma$, and$i$, in the linear programming problem. \item[(ii)] Whenever$p=1$and either$\mathcal{J} = \emptyset$or$\mathcal{J}=\{1,2\}$, we do not include the constraints (LC4a), for these particular values of$p$,$(\mathbf{z},\mathcal{J})$,$\sigma$, and$i$, in the linear programming problem. \end{itemize} We parameterized a Lyapunov function for the variable structure system by use of this modified linear programming problem with$\mathcal{N} := [-1.152,1.152]^2$,$\mathcal{D} :=\,]-0.01,0.01[^2$, and$\mathbf{PS}defined trough the vector \begin{align*} \textbf{ps} := (&0.00333, 0.00667, 0.01, 0.0133, 0.0166, 0.0242, \\ &0.0410, 0.0790, 0.157, 0.319, 0.652, 1.152) \end{align*} as described at the beginning of Section \ref{SECEXA}. The Lyapunov functionV^{\it Lya}$is depicted on Figure \ref{ch9swFIG}. Now, one might wonder, what information we can extract from this function$V^{\it Lya}$, which is parameterized by our modified linear programming problem. Denote by$\gamma_a$the function that is constructed from the variables$\Gamma_a[y_i]$as in Definition \ref{AUTODEF10}. Then it is easy to see that for every $$\mathbf{x} \in \big{(}\mathcal{N}\setminus\mathcal{D}\big{)}\cap \big{(}\mathcal{Q}_1 \cup \mathcal{Q}_3\big{)}$$ we have $$\limsup_{h \to 0+}\frac{V^{\it Lya}(\mathbf{x} + h A_2\mathbf{x}) - V^{\it Lya}(\mathbf{x})}{h} \leq -\gamma_a(\|\mathbf{x}\|_\infty)$$ and for every$\mathbf{x} \in \big{(}\mathcal{N}\setminus\mathcal{D}\big{)}\cap \big{(}\mathcal{Q}_2 \cup \mathcal{Q}_4\big{)}$, we have $$\limsup_{h \to 0+}\frac{V^{\it Lya}(\mathbf{x} + h A_1\mathbf{x}) - V^{\it Lya}(\mathbf{x})}{h} \leq -\gamma_a(\|\mathbf{x}\|_\infty).$$ But this includes all trajectories of the system (\ref{ch9sw}) that comply with the constraints (\ref{VSSC1}) and (\ref{VSSC2}) so $$\limsup_{h \to 0+}\frac{V^{\it Lya}(\boldsymbol{\phi}_\sigma(t+h,\boldsymbol{\xi})) - V^{\it Lya}(\boldsymbol{\phi}_\sigma(t,\boldsymbol{\xi}))}{h} \leq -\gamma_a(\|\boldsymbol{\phi}_\sigma(t,\boldsymbol{\xi})\|_\infty)$$ for all$\boldsymbol{\phi}_\sigma(t,\boldsymbol{\xi})$in the interior of$\mathcal{N}\setminus\mathcal{D}$and all trajectories under consideration and therefore$V^{\it Lya}$is a Lyapunov function for the variable structure system. The equilibrium's region of attraction, secured by this Lyapunov function, is drawn on Figure \ref{ch9swFIGDOM}. \begin{figure}[ht] \begin{center} \includegraphics[width=0.5\textwidth]{fig11} % Dual2-domain.jpg \end{center} \caption{The region of attraction secured by the Lyapunov function in Figure \ref{ch9swFIG} for the variable structure system. All solution that start in the larger set are asymptotically attracted to the smaller set at the origin.} \label{ch9swFIGDOM} \end{figure} \subsection{A variable structure system with sliding modes} \label{SSECEXA4} Define the matrix$A$and the vector$\mathbf{p}$through $$A:= \begin{pmatrix} 0 & 1 \\ 1 & 0 \\ \end{pmatrix} \quad \text{and}\quad \mathbf{p} := \begin{pmatrix} 1 \\ 1 \\ \end{pmatrix}$$ and consider the systems \begin{gather} \label{EXB6} \dot{\mathbf{x}} = \mathbf{f}_1(\mathbf{x}),\quad \text{where}\quad \mathbf{f}_1(\mathbf{x}) := A\mathbf{x},\\ \label{EXB7} \dot{\mathbf{x}} = \mathbf{f}_2(\mathbf{x}),\quad \text{where}\quad \mathbf{f}_2(\mathbf{x}) := -\mathbf{p},\\ \label{EXB8} \dot{\mathbf{x}} = \mathbf{f}_3(\mathbf{x}),\quad \text{where}\quad \mathbf{f}_3(\mathbf{x}) := \mathbf{p}. \end{gather} The eigenvalues of the matrix$A$in (\ref{EXB6}) are$\lambda_\pm = \pm 1$and the equilibrium at the origin is therefore a saddle point of the system and is not stable. The systems (\ref{EXB7}) and (\ref{EXB8}) do not even possess an equilibrium. \begin{figure}[ht] \begin{center} \includegraphics[width=0.9\textwidth]{fig12} % SLBig.jpg \end{center} \caption{A Lyapunov function for the variable structure system (\ref{CH10SYS}) generated by an altered version of the linear programming problem from Definition \ref{LPA}.} \label{SLBigFIG} \end{figure} Let the sets$Q_1$,$Q_2$,$Q_3$, and$Q_4$be defined as in the last example and consider the variable structure system where we use the system (\ref{EXB6}) in$Q_2$and$Q_4$, the system (\ref{EXB7}) in$Q_1$, and the system (\ref{EXB8}) in$Q_3$. A look the direction field of the system (\ref{EXB6}) suggests that this variable structure system might be stable, but the problem is that the system does not possess a properly defined solution compatible with our solution concept in Definition \ref{DEFPOLYSYS}. The reason is that a trajectory, for example leaving$Q_4$to$Q_1$, is sent straight back by the dynamics in$Q_1$to$Q_4$, where it will, of course, be sent straight back to$Q_1$. This phenomena is often called {\it chattering} and the sets$\{\mathbf{x} \in \mathbb{R}^2 : x_1=0\}$and$\{\mathbf{x}\in\mathbb{R}^2 : x_2 = 0\}$are called the {\it sliding modes} of the dynamics. A solution concept for such variable structure systems has been developed by Filippov and others, see, for example \cite{Filippov64}, \cite{Filippov80}, and \cite{schevitz94}, or, for a brief review, \cite{wu98}. \begin{figure}[ht] \begin{center} \includegraphics[width=0.45\textwidth]{fig13} % DomSL.jpg \end{center} \caption{The region of attraction secured by the Lyapunov function in Figure \ref{SLBigFIG} for the variable structure system. All solution that start in the larger set are asymptotically attracted to the smaller set at the origin.} \label{DomSLFIG} \end{figure} \begin{figure}[ht] \begin{center} \includegraphics[width=0.9\textwidth]{fig14} % SLsmall.jpg \end{center} \caption{A Lyapunov function for the variable structure system (\ref{CH10SYS}) generated by an altered version of the linear programming problem from Definition \ref{LPA}.} \label{SLsmallFIG} \end{figure} Even though Filippov's solution trajectories are supposed to be close to the true trajectories if the switching is fast, we will use a more simple and more robust technique here to prove the stability of the system. Our approach is very simple, set$h := 0.005$and define the sets \begin{gather*} \mathcal{S}_{1,2} := \{\mathbf{x} \in \mathbb{R}^n : |x_1| < h\quad \text{and}\quad x_2 >0 \} \\ \mathcal{S}_{2,3} := \{\mathbf{x} \in \mathbb{R}^n : \,x_1 < 0\quad \text{and}\quad |x_2| 0 \quad \text{and}\quad |x_2| 0$, then every switching between the systems $\dot{\mathbf{x}} =A\mathbf{x}$ and $\dot{\mathbf{x}} = -\mathbf{p}$ is allowed as long as $t\mapsto \boldsymbol{\phi}_\sigma(t,\boldsymbol{\xi})$ stays in $\mathcal{S}_{1,2}$. However, if, for example, $\boldsymbol{\phi}_\sigma(t'',\boldsymbol{\xi}) \in \mathcal{Q}_1\setminus \big{(} \overline{\mathcal{S}_{1,2}} \cup \overline{\mathcal{S}_{4,1}}\big{)}$ for some $t'' > t'$, then we must use the dynamics $\dot{\mathbf{x}} = -\mathbf{p}$ until $t \mapsto \boldsymbol{\phi}_\sigma(t,\mathbf{x})$ leaves $\mathcal{Q}_1\setminus \big{(} \overline{\mathcal{S}_{1,2}} \cup \overline{\mathcal{S}_{4,1}}\big{)}$. By V1, V2, and V3 we have for every trajectory $t\mapsto\boldsymbol{\phi}_\sigma(t,\boldsymbol{\xi})$ under consideration that $$\limsup_{h \to 0+} \frac{V^{\it Lya}(\boldsymbol{\phi}_\sigma(t+h,\boldsymbol{\xi})) - V^{\it Lya}(\boldsymbol{\phi}_\sigma(t,\boldsymbol{\xi}))}{h} \leq -\gamma_a(\|\boldsymbol{\phi}_\sigma(t,\boldsymbol{\xi})\|_\infty),$$ so the function $V^{\it Lya}$ is a Lyapunov function for this system. The parameterized Lyapunov function $V^{\it Lya}$ for the system (\ref{CH10SYS}) is depicted on Figure \ref{SLBigFIG} and its region of attraction on Figure \ref{DomSLFIG}. Because it is difficult to recognize the structure of the Lyapunov function close to the origin, a Lyapunov function for the same system, but with a much smaller domain, is depicted on Figure \ref{SLsmallFIG}. \subsection{A one-dimensional nonautonomous switched system} \label{SSECEXA5} Consider the one-dimensional systems $$\label{ODAXXX1} \dot x = f_1(t,x),\quad \text{where}\quad f_1(t,x) := -\frac{x}{1+t}$$ and $$\label{ODAXXX2} \dot x = f_2(t,x),\quad \text{where}\quad f_2(t,x) := -\frac{tx}{1+t}.$$ \begin{figure}[ht] \begin{center} \includegraphics[width=.9\textwidth]{fig15} % ODAXXX1.jpg \end{center} \caption{A Lyapunov function for the nonautonomous system (\ref{ODAXXX1}) generated by the linear programming problem from Definition \ref{LP}.} \label{ODAXXXFIG1} \end{figure} The system (\ref{ODAXXX1}) has the closed-form solution $$\phi(t,t_0,\xi) = \xi \frac{1+t_0}{1+t}$$ and the system (\ref{ODAXXX2}) has the closed-form solution $$\phi(t,t_0,\xi) = \xi e^{-(t-t_0)}\frac{1+t}{1+t_0}.$$ The origin in the state-space is therefore, for every fixed $t_0$, an asymptotically stable equilibrium point of the system (\ref{ODAXXX1}) and, because $$|\xi| e^{-(t-t_0)}\frac{1+t}{1+t_0} \leq 2 |\xi| e^{-\frac{t-t_0}{2}},$$ a uniformly exponentially stable equilibrium point of the system (\ref{ODAXXX2}). However, as can easily be verified, it is not a uniformly asymptotically stable equilibrium point of the system (\ref{ODAXXX1}). This implies that the system (\ref{ODAXXX1}) cannot possess a Lyapunov function that is defined for all $t\geq 0$. Note however, that this does not imply that we cannot parameterize a Lyapunov-like function on a compact time interval for the system (\ref{ODAXXX1}). \begin{figure}[ht] \begin{center} \includegraphics[width=.9\textwidth]{fig16} % ODAXXX2.jpg \end{center} \caption{A Lyapunov function for the nonautonomous system (\ref{ODAXXX2}) generated by the linear programming problem from Definition \ref{LP}.} \label{ODAXXXFIG2} \end{figure} We set $$t_{(\mathbf{z},\mathcal{J})} := \mathbf{e}_0\cdot \widetilde{\mathbf{PS}}(\mathbf{z})\quad \text{and}\quad x_{(\mathbf{z},\mathcal{J})} := |\mathbf{e}_1\cdot \widetilde{\mathbf{PS}}(\mathbf{R}^\mathcal{J}(\mathbf{z} + \mathbf{e}_1))|$$ and define the constants $B_{p,rs}^{(\mathbf{z},\mathcal{J})}$ from the linear programming problem from Definition \ref{LP} by \begin{align*} B_{p,00}^{(\mathbf{z},\mathcal{J})} &:= \frac{2 x_{(\mathbf{z},\mathcal{J})} }{(1+t_{(\mathbf{z},\mathcal{J})})^3},\\ B_{p,01}^{(\mathbf{z},\mathcal{J})} &:= \frac{1}{(1+t_{(\mathbf{z},\mathcal{J})})^2},\\ B_{p,11}^{(\mathbf{z},\mathcal{J})} &:= 0 \end{align*} for $p \in \{1,2\}$. We parameterized a $\operatorname{CPWA}$ Lyapunov function for the system (\ref{ODAXXX1}), the system (\ref{ODAXXX2}), and the switched system $$\label{ODAXXX3} \dot x = f_p(t,x),\quad \ p\in\{1,2\}$$ by use of the linear programming problem from Definition \ref{LP} with $\mathcal{N} := ]-1.1,1.1[$, $\mathcal{D} := ]-0.11,0.11[$, $\mathbf{PS}$ defined through the vector $${\bf ps} := (0.11, 0.22, 0.33, 0.44, 0.55, 0.66, 0.77, 0.88, 0.99, 1.1)$$ as described at the beginning of Section \ref{SECEXA}, and the vector \begin{align*} \mathbf{t} := (&0, 0.194, 0.444, 0.75, 1.111, 1.528, 2, 2.528, 3.111, 3.75, 4.444, 5.194, 6, \\ &6.861, 7.778, 8.75, 9.778, 10.861, 12, 13.194, 14.444, 15.75, 17.111, 18.528, 20). \end{align*} The Lyapunov function for the system (\ref{ODAXXX1}) is depicted on Figure \ref{ODAXXXFIG1}, the Lyapunov function for the system (\ref{ODAXXX2}) on Figure \ref{ODAXXXFIG1}, and the Lyapunov function for the arbitrary switched system (\ref{ODAXXX3}) on Figure \ref{ODAXXX3}. \begin{figure}[ht] \begin{center} \includegraphics[width=0.9\textwidth]{fig17} % ODAXXX3.jpg \end{center} \caption{A Lyapunov function for the switched non\-au\-ton\-o\-mous system (\ref{ODAXXX3}) generated by the linear programming problem from Definition \ref{LP}.} \label{ODAXXXFIG3} \end{figure} \subsection{A two-dimensional nonautonomous switched system} \label{SSECEXA6} \quad\\ Consider the two-dimensional systems $$\label{NNS1} \dot{\mathbf{x}} = \mathbf{f}_1(t,\mathbf{x}),\quad \text{where}\quad \mathbf{f}_1(t,x,y) := \begin{pmatrix} -2x + y\cos(t) \\ x\cos(t) -2y \end{pmatrix}$$ and $$\label{NNS2} \dot{\mathbf{x}} = \mathbf{f}_2(t,\mathbf{x}),\quad \text{where}\quad \mathbf{f}_2(t,x,y) := \begin{pmatrix} -2x + y\sin(t) \\ x\sin(t) -2y \end{pmatrix}.$$ \begin{figure}[ht] \begin{center} \includegraphics[width=0.9\textwidth]{fig18} % NNS1-5.jpg \end{center} \caption{The function $(x,y) \mapsto V(2,x,y)$, where $V(t,x,y)$ is the parameterized Lyapunov function for the nonautonomous system (\ref{NNS1}).} \label{NNS1-5FIG} \end{figure} \begin{figure}[ht] \begin{center} \includegraphics[width=0.9\textwidth]{fig19} % NNS2-5.jpg \end{center} \caption{The function $(x,y) \mapsto V(2,x,y)$, where $V(t,x,y)$ is the parameterized Lyapunov function for the nonautonomous system (\ref{NNS2}).} \label{NNS2-5FIG} \end{figure} We set $$x_{(\mathbf{z},\mathcal{J})} := |\mathbf{e}_1\cdot \widetilde{\mathbf{PS}}(\mathbf{R}^\mathcal{J}(\mathbf{z} + \mathbf{e}_1))| \quad \text{and}\quad y_{(\mathbf{z},\mathcal{J})} := |\mathbf{e}_2\cdot \widetilde{\mathbf{PS}}(\mathbf{R}^\mathcal{J}(\mathbf{z} + \mathbf{e}_2))|$$ and assign values to the constants $B_{p,rs}^{(\mathbf{z},\mathcal{J})}$ from the linear programming problem in Definition \ref{LP} as follows: \begin{align*} B_{p,00}^{(\mathbf{z},\mathcal{J})} &:= \max\{x_{(\mathbf{z},\mathcal{J})} , y_{(\mathbf{z},\mathcal{J})}\} ,\\ B_{p,11}^{(\mathbf{z},\mathcal{J})} &:= 0,\\ B_{p,22}^{(\mathbf{z},\mathcal{J})} &:= 0,\\ B_{p,01}^{(\mathbf{z},\mathcal{J})} &:= 1,\\ B_{p,02}^{(\mathbf{z},\mathcal{J})} &:= 1,\\ B_{p,12}^{(\mathbf{z},\mathcal{J})} &:= 0 \end{align*} for $p \in \{1,2\}$. We parameterized a Lyapunov function for the system (\ref{NNS1}), the system (\ref{NNS2}), and the switched system $$\label{NNS3} \dot{\mathbf{x}} = \mathbf{f}_p(t,\mathbf{x}),\quad \ p\in\{1,2\}$$ by use of the linear programming problem from Definition \ref{LP} with\\ $\mathcal{N} :=\,]-0.55,0.55[^2$, $\mathcal{D} :=\,]-0.11,0.11[^2$, $\mathbf{PS}$ defined through the vector $${\bf ps} := (0.11, 0.22, 0.33, 0.44, 0.55)$$ as described at the beginning of Section \ref{SECEXA}, and the vector $$\mathbf{t} := (0, 0.3125, 0.75, 1.3125, 2).$$ Because the Lyapunov functions are functions from $\mathbb{R}\times\mathbb{R}^2$ into $\mathbb{R}$ it is hardly possible to draw them in any sensible way on a two-dimensional sheet. Therefore, we only draw them exemplary for the fixed time-value $t:=2$. On figures \ref{NNS1-5FIG}, \ref{NNS2-5FIG}, and \ref{NNS3-5FIG}, the state-space dependency of the parameterized Lyapunov functions for the systems (\ref{NNS1}), (\ref{NNS2}), and (\ref{NNS3}) respectively are depicted. \begin{figure}[ht] \begin{center} \includegraphics[width=0.9\textwidth]{fig20} 5 NNS3-5.jpg \end{center} \caption{The function $(x,y) \mapsto V(2,x,y)$, where $V(t,x,y)$ is the parameterized Lyapunov function for the nonautonomous switched system (\ref{NNS3}).} \label{NNS3-5FIG} \end{figure} \section{Conclusion} \label{SECFW} In this monograph we developed an algorithm for constructing Lyapunov functions for nonlinear, nonautonomous, arbitrary switched continuous systems possessing a uniformly asymptotically stable equilibrium. The necessary stability theory of switched systems, including a converse Lyapunov theorem for arbitrary switched nonlinear, nonautonomous systems possessing a uniformly asymptotically stable equilibrium (Theorem \ref{CONVLYA}), was developed in the sections \ref{SECPRE}, \ref{SECLDM}, and \ref{SECCTS}. In the sections \ref{SECCLF}, \ref{SECCCT}, and \ref{SECALG} we presented a linear programming problem in Definition \ref{LP} that can be constructed from a finite set of nonlinear and nonautonomous differential equations $\dot{\mathbf{x}} = \mathbf{f}_p(t,\mathbf{x})$, $p\in\mathcal{P}$, where the components of the $\mathbf{f}_p$ are $\mathcal{C}^2$, and we proved that every feasible solution to the linear programming problem can be used to parameterize a common Lyapunov function for the systems. Further, we proved that if the origin in the state-space is a uniformly asymptotically stable equilibrium of the switched system $\dot{\mathbf{x}} = \mathbf{f}_\sigma(t,\mathbf{x})$, $\sigma:\mathbb{R}_{\geq0}\to\mathcal{P}$, then Procedure \ref{ALGO}, which uses the linear programming problem from Definition \ref{LP}, is an algorithm for constructing a Lyapunov function for the switched system. Finally, in Section \ref{SECEXA}, we gave several examples of Lyapunov functions that we generated by use of the linear programming problem. Especially, we generated Lyapunov functions for variable structure systems with sliding modes. It is the belief of the author that this work is a considerable advance in the Lyapunov stability theory of dynamical systems and he hopes to have convinced the reader that the numerical construction of Lyapunov functions, even for arbitrary switched, nonlinear, nonautonomous, continuous systems, is not only a theoretical possibility, but is capable of being developed to a standard tool in system analysis software in the near future. Thus, the new algorithm presented in this monograph should give system engineers a considerable advantage in comparison to the traditional approach of linearization and pure local analysis. \section*{List of Symbols} \begin{tabular}{ll} $\mathbb{R}$ & {the set of real numbers}\\ $\mathbb{R}_{\geq 0}$ & {the real-numbers larger than or equal to zero }\\ $\mathbb{R}_{> 0}$ & {the real-numbers larger than zero }\\ $\mathbb{Z}$ & {the integers }\\ $\mathbb{Z}_{\geq 0}$ & {the integers larger than or equal to zero }\\ $\mathbb{Z}_{> 0}$ & {the integers larger than zero }\\ $\mathcal{A}^n$ & {set of $n$-tuples of elements belonging to a set $\mathcal{A}$ }\\ $\mathbb{R}^n$ & {the $n$-dimensional Euclidean space, $n\in\mathbb{N}_{>0}$}\\ $\overline{\mathcal{A}}$ & {the topological closure of a set $\mathcal{A}\subset\mathbb{R}^n$ }\\ $\overline{\mathbb{R}}$ & {$\overline{\mathbb{R}}:= \mathbb{R}\cup\{-\infty\}\cup\{+\infty\}$ }\\ $\partial\mathcal{A}$ & {the boundary of a set $\mathcal{A}$}\\ $\operatorname{dom}(f)$ & {the domain of a function $f$}\\ $f(\mathcal{U})$ & {the image of a set $\mathcal{U}$ under a mapping $f$ }\\ $f^{-1}(\mathcal{U})$ & {the preimage of a set $\mathcal{U}$ with respect to a mapping $f$}\\ $\mathcal{C}(\mathcal{U})$ & {continuous real-valued functions with domain $\mathcal{U}$}\\ $\mathcal{C}^k(\mathcal{U})$ & {$k$-times continuously differentiable real-valued functions}\\ &with domain $\mathcal{U}$ \\ $[\mathcal{C}^k(\mathcal{U})]^n$ & {vector fields $\mathbf{f}=(f_1,f_2,\dots,f_n)$ of which $f_i\in \mathcal{C}^k(\mathcal{U})$ }\\ & for $i=1,2,\dots,n$ \\ $\mathcal{K}$ & {strictly monotonically increasing functions on $[0,+\infty[$}\\ & vanishing at the origin\\ $\mathcal{L}$ & {strictly monotonically decreasing functions on $[0,+\infty[$,}\\ & approaching zero at infinity\\ $\mathfrak{P}(\mathcal{A})$ & {the power set of a set $\mathcal{A}$ }\\ $\operatorname{Perm}[\mathcal{A}]$ & {the permutation group of $\mathcal{A}$, i.e., the set of all bijective functions }\\ & $\mathcal{A} \to \mathcal{A}$\\ $\operatorname{con} \mathcal{A}$ & {the convex hull of a set $\mathcal{A}$ }\\ $\operatorname{graph}(f)$ & {the graph of a function $f$ }\\ $\mathbf{e}_i$ & {the $i$-th unit vector }\\ $\mathbf{x} \cdot \mathbf{y}$ & {the inner product of vectors $\mathbf{x}$ and $\mathbf{y}$ }\\ $\|\mathbf{x}\|_p$ & {$p$-norm of a vector $\mathbf{x}$, $\|\mathbf{x}\|_p := \big(\sum_i |x_i|^p\big)^\frac{1}{p}$ if $1\leq p<+\infty$}\\ & and $\|\mathbf{x}\|_\infty :=\max_i|x_i|$ \\ $f'$ & {the derivative of a function $f$ }\\ $\dot{\mathbf{x}}$ & {the time-derivative of a vector-valued function $\mathbf{x}$ }\\ $\nabla f$ & {the gradient of a scalar field $f:\mathbb{R}^n\to \mathbb{R}$ }\\ $\nabla \mathbf{f}$ & {the Jacobian of a vector field $\mathbf{f}:\mathbb{R}^m\to \mathbb{R}^n$ }\\ $\chi_{_\mathcal{A}}$ & {the characteristic function of a set $\mathcal{A}$ }\\ $\delta_{ij}$ & {the delta Kronecker, equal to $1$ if $i=j$ and equal to $0$ if $i\neq j$ }\\ $[a,b]$ & {$[a,b]:= \{x\in\mathbb{R} | a\leq x \leq b\}$ }\\ $]a,b]$ & {$]a,b]:= \{x\in\mathbb{R} | a< x \leq b\}$ }\\ $\operatorname{supp}(f)$ & {$\operatorname{supp}(f):= \overline{ \{\mathbf{x}\in\mathbb{R}^n : f(\mathbf{x}) \neq 0\} }$ }\\ $\mathbf{R}^\mathcal{J}$ & {reflection function with respect to the set $\mathcal{J} \subset\{1,2,\dots,n\}$, }\\ & see Definition \ref{Refdef}\\ $\mathbf{PS}$ & {piecewise scaling function, see page \pageref{PSdef} }\\ $\mathfrak{S}[\mathbf{PS},\mathcal{N}]$ & {a simplicial partition of a the set $\mathbf{PS}(\mathcal{N})\subset\mathbb{R}^n$,}\\ & see Definition \ref{NN10000}\\ $f_{p,i}$ & {the $i$-th component of the vector field $\mathbf{f}_p$ }\\ $\mathcal{B}_{\|\cdot\|,R}$ & {$\mathcal{B}_{\|\cdot\|,R}:= \{\mathbf{x}\in\mathbb{R}^n: \|\mathbf{x}\| < R\}$ }\\ $\mathcal{S}_\mathcal{P}$ & {the set of all switching signals $\mathbb{R}_{\geq0}\to\mathcal{P}$, see Definition \ref{DEFSWITCHINGSIGNAL} }\\ $\dot{\mathbf{x}}=\mathbf{f}_\sigma(t,\mathbf{x})$ & {arbitrary switched system, see Switched System \ref{POLYSYS}} \end{tabular} \begin{thebibliography}{99} \bibitem{SS} R.~Adams. \newblock {\em {Sobolev Spaces}}. \newblock Acad. Press, New York, 1975. \bibitem{liberzon01} A.~Agrachev and D.~Liberzon. \newblock {Stability of switched systems: a Lie-algebraic condition}. \newblock {\em Siam Journal on Control and Optimization}, {\bf 40}:253--269, 2001. \bibitem{bhatiaszegoe} N.~Bhatia and G.~Szeg\"o. \newblock {\em {Stability Theory of Dynamical Systems}}. \newblock Springer, 1970. \bibitem{brasdsca} R.~Brayton and C.~Tong. \newblock {Stability of dynamical systems: a constructive approach}. \newblock {\em IEEE Transactions on Circuits and Systems}, {\bf 26}:224--234, 1979. \bibitem{bracsasds} R.~Brayton and C.~Tong. \newblock {Constructive stability and asymptotic stability of dynamical systems}. \newblock {\em IEEE Transactions on Circuits and Systems}, {\bf 27}:1121--1130, 1980. \bibitem{camilli:02} F.~Camilli, L.~Gr\"une, and F.~Wirth. \newblock {A generalization of Zubov's method to perturbed systems}. \newblock {\em SIAM Journal of Control and Optimization}, {\bf 40(2)}:496--515, 2002. \bibitem{daya99} W.~Dayawansa and C.~Martin. \newblock {A Converse Lyapunov Theorem for a Class of Dynamical Systems which Undergo Switching}. \newblock {\em IEEE Transactions on Automatic Control}, {\bf 44}:751--760, 1999. \bibitem{DINI} U.~Dini. \newblock {\em Fondamenti per la teoria delle funzioni di variabili reali}. \newblock Pisa, 1878, (in Italian). \bibitem{Filippov64} A.~Filippov. \newblock Differential equations with discontinuous right-hand side. \newblock {\em American Mathematical Society Translations}, {\bf 42}:191--231, 1964. \bibitem{Filippov80} A.~Filippov. \newblock Differential equations with second member discontinuous on intersecting surfaces. \newblock {\em Differential Equations}, {\bf 415}:1292--1299, 1980. \bibitem{Giesl:04} P.~Giesl. \newblock {\em {Construction of global Lyapunov functions using radial basis functions}}. \newblock Habilitation Thesis: TU M\"unchen, Germany, 2004. \bibitem{Giesl:07} P.~Giesl. \newblock {\em {Construction of Global Lyapunov Functions Using Radial Basis Functions}}. \newblock Springer, 2007. \bibitem{Giesl:07a} P.~Giesl. \newblock {Construction of global Lyapunov functions using radial basis functions with a single operator}. \newblock {\em Discrete and Continuous Dynamical Systems - Series B}, {\bf 7}:101--124, 2007. \bibitem{Giesl:07b} P.~Giesl. \newblock {On the determination of the basin of attraction of a periodic orbit in two-dimensional systems}. \newblock {\em Discrete and Continuous Dynamical Systems - Series B}, {\bf 7}:101--124, 2007. \bibitem{gurvits95} I.~Gurvits. \newblock {Stability of discrete linear inclusion}. \newblock {\em Linear Algebra and its Applications}, {\bf 231}:47--85, 1995. \bibitem{Hafstein:04b} S.~Hafstein. \newblock {A constructive converse Lyapunov theorem on asymptotically stable differential equations' stability}. \newblock {\em University Duisburg-Essen, Schriftenreihe des Instituts f\"ur Mathematik, SM-DU-576}, 2004. \bibitem{Hafstein:04} S.~Hafstein. \newblock {A constructive converse Lyapunov theorem on exponential stability}. \newblock {\em Discrete and Continuous Dynamical Systems - Series A}, {\bf 10-3}:657--678, 2004. \bibitem{Hafstein:05} S.~Hafstein. \newblock {A constructive converse Lyapunov theorem on asymptotic stability for nonlinear autonomous ordinary differential equations.} \newblock {\em Dynamical Systems{\rm:}\, An International Journal}, {\bf 20}:281--299, 2005. \bibitem{hahn} W.~Hahn. \newblock {\em {Stability of Motion}}. \newblock Springer, New York, 1967. \bibitem{hirsch04} M.~Hirsch, S.~Smale, and R.~Devaney. \newblock {\em Differential Equations, Dynamical Systems \& An Introduction to Chaos}. \newblock Elsevier, 2004. \bibitem{johansen} T.~Johansen. \newblock {Computation of Lyapunov Functions for Smooth Nonlinear Systems using Convex Optimization}. \newblock {\em Automatica}, {\bf 36}:1617--1626, 2000. \bibitem{PLCS} M.~Johansson. \newblock {\em {Piecewise Linear Control Systems}}. \newblock Ph.D. Thesis: Lund Institute of Technology, Lund, Sweden, 1999. \bibitem{copqlfhs} M.~Johansson and A.~Rantzer. \newblock {Computation of piecewise quadratic Lyapunov functions for hybrid systems}. \newblock In {\em Proceedings of the European Control Conference ECC'97}, 1997. \bibitem{cpqlf} M.~Johansson and A.~Rantzer. \newblock {On the computation of piecewise quadratic Lyapunov functions}. \newblock In {\em Proceedings of the 36th IEEE Conference on Decision and Control}, 1997. \bibitem{HLCPLPTA} P.~Julian. \newblock {\em {A High Level Canonical Piecewise Linear Representation: Theory and Applications}}. \newblock Ph.D. Thesis: Universidad Nacional del Sur, Bahia Blanca, Argentina, 1999. \bibitem{hlcplrsp} P.~Julian, A.~Desages, and O.~Agamennoni. \newblock {High-level canonical piecewise linear representation using a simplicial partition}. \newblock {\em IEEE Transactions on Circuits and Systems}, {\bf 46}:463--480, 1999. \bibitem{julppllflp} P.~Julian, J.~Guivant, and A.~Desages. \newblock {A parametrization of piecewise linear Lyapunov function via linear programming}. \newblock {\em International Journal of Control}, {\bf 72}:702--715, 1999. \bibitem{NS} H.~Khalil. \newblock {\em {Nonlinear Systems}}. \newblock Macmillan, New York, 1992. \bibitem{KNUTH} D.~Knuth. \newblock {\em The Art of Computer Programming, Volume 1, Fundamental Algorithms}. \newblock Addison Wesley, 3. edition, 1997. \bibitem{Kout02} X.~Koutsoukos and P.~Antsaklis. \newblock {Design of Stabilizing Switching Control Laws for Discrete and Continuous-Time Linear Systems Using Piecewise-Linear Lyapunov Functions}. \newblock {\em International Journal of Control}, {\bf 75}:932--945, 2002. \bibitem{Liberzon:03} D.~Liberzon. \newblock {\em {Switching in Systems and Control}}. \newblock Birkhaeuser, 2003. \bibitem{liberzon99} D.~Liberzon, J.~Hespanha, and A.~Morse. \newblock {Stability of switched systems: a Lie-algebraic condition}. \newblock {\em Systems and Control Letters}, {\bf 37}:117--122, 1999. \bibitem{Liberzon:99} D.~Liberzon and A.~Morse. \newblock Basic problems in stability and design of switched systems. \newblock {\em IEEE Control Systems Magazine}, {\bf 19-5}:59--70, 1999. \bibitem{liberzon04c} D.~Liberzon and R.~Tempo. \newblock {Common Lyapunov Functions and Gradient Algorithms}. \newblock {\em IEEE Transactions on Automatic Control}, {\bf 49}:990--994, 2004. \bibitem{lin96} Y.~Lin, E.~Sontag, and Y.~Wang. \newblock {A Smooth Converse Lyapunov Theorem for Robust Stability}. \newblock {\em SIAM Journal on Control and Optimization}, {\bf 34}:124--160, 1996. \bibitem{lya1} A.~Lyapunov. \newblock {The general problem of the stability of motion}. \newblock {\em International Journal of Control}, {\bf 55}:531--773, 1992, (English translation of the original Russian text). \bibitem{Malkin} I.~Malkin. \newblock {On a question of reversability of Liapunov's theorem on asymptotic stability}. \newblock In J.~Aggarwal and M.~Vidyasagar, editors, {\em Nonlinear Systems: Stability Analysis}, pages 161--170. Stroudsburg:Dowden, Hutchinson \& Ross, 1977. \bibitem{mancilla00} J.~Mancilla-Aguilar and R.~Garcia. \newblock {A converse Lyapunov theorem for nonlinear switched systems}. \newblock {\em System \& Control Letters}, {\bf 41}:67--71, 2000. \bibitem{Marinosson:02b} S.~Marinosson. \newblock {Lyapunov function construction for ordinary differential equations with linear programming}. \newblock {\em Dynamical Systems: An International Journal}, {\bf 17}:137--150, 2002. \bibitem{Marinosson:02a} S.~Marinosson. \newblock {\em Stability Analysis of Nonlinear Systems with Linear Programming A Lyapunov Functions Based Approach}. \newblock Ph.D. Thesis: Gerhard-Mercator-University, Duisburg, Germany, 2002. \bibitem{Daya96} C.~Martin and W.~Dayawansa. \newblock {On the existence of a Lyapunov function for a family of switching systems}. \newblock In {\em Proceedings of the 35th IEEE Conference on Decision and Control}, 1996. \bibitem{massera} J.~Massera. \newblock {On Liapunoff's conditions of stability}. \newblock {\em Annals of Mathematics}, {\bf 50}:705--721, 1949. \bibitem{massera56} J.~Massera. \newblock {Contributions to stability theory}. \newblock {\em Annals of Mathematics}, {\bf 64-1}:182--206, 1956. \bibitem{miccglfisiraps} A.~Michel, B.~Nam, and V.~Vittal. \newblock {Computer generated Lyapunov functions for interconnected systems: Improved Results with Applications to Power Systems}. \newblock {\em IEEE Transactions on Circuits and Systems}, {\bf CAS-31}:189--198, 1984. \bibitem{micsacdsscm} A.~Michel, N.~Sarabudla, and R.~Miller. \newblock {Stability analysis of complex dynamical systems: some computational methods}. \newblock {\em Circuits and Systems Signal Processing}, {\bf 1}:171--202, 1982. \bibitem{mori97} Y.~Mori, T.~Mori, and Y.~Kuroe. \newblock {A Solution to the Common Lyapunov Function Problem for Continuous-Time System}. \newblock In {\em Proceedings of the 36th IEEE Conference on Decision and Control}, 1997. \bibitem{nadzieja90} T.~Nadzieja. \newblock {Construction of a smooth Lyapunov function for an asymptotically stable set}. \newblock {\em Czechoslovak Mathematical Journal}, {\bf 40} (115):195--199, 1990. \bibitem{nare94} K.~Narendra and J.~Balakrishnan. \newblock {A common Lyapunov functions for stable LTI systems with commuting A-matrices}. \newblock {\em IEEE Transactions on Automatic Control}, {\bf 39}:2469--2471, 1994. \bibitem{ohtcglfcns} Y.~Ohta, H.~Imanishi, L.~Gong, and H.~Haneda. \newblock {Computer generated Lyapunov functions for a class of non-linear systems}. \newblock {\em IEEE Transactions on Circuits and Systems}, {\bf 40}:343--354, 1993. \bibitem{papac} A.~Papachristodoulou and S.~Prajna. \newblock {The Construction of Lyapunov Functions using the Sum of Squares Decomposition}. \newblock In {\em Proceedings of the 41st IEEE Conference on Decision and Control}, pages 3482--3487, 2002. \bibitem{parrilo} P.~Parrilo. \newblock {\em Structured Semidefinite Programs and Semialgebraic Geometry Methods in Robustness and Optimization}. \newblock Ph.D. Thesis: Caltech, Pasadena, USA, 2000. \bibitem{LFCBLP} A.~Polanski. \newblock {Lyapunov function construction by linear programming}. \newblock {\em IEEE Transactions on Automatic Control}, {\bf 42}:1013--1016, 1997. \bibitem{Roos97} C.~Roos, T.~Terlaky, and J.~Vial. \newblock {\em Theory and Algorithms for Linear Optimization}. \newblock Wiley, 1997. \bibitem{NSASAC} S.~Sastry. \newblock {\em {Nonlinear Systems: Analysis, Stability, and Control}}. \newblock Springer, New York, 1999. \bibitem{schevitz94} D.~Schevitz and B.~Paden. \newblock {Lyapunov stability theory on non-smooth systems}. \newblock {\em IEEE Transactions on Automatic Control}, {\bf 39}:1910--1914, 1994. \bibitem{TLIP} A.~Schrijver. \newblock {\em Theory of Linear and Integer Programming}. \newblock Wiley, 1983. \bibitem{shim98} H.~Shim, D.~Noh, and Seo J. \newblock {Common Lyapunov function for exponentially stable nonlinear systems}. \newblock In {\em 4th SIAM Conference on Control \& its Applications}, pages 1218--1223, 1998. \bibitem{ANC} J.-J. Slotline and W.~Li. \newblock {\em {Applied Nonlinear Control}}. \newblock Prentice-Hall, New Jersey, 1991. \bibitem{vanden93} L.~Vandenberghe and S.~Boyd. \newblock {A polynominal-time algorithm for determing quadratic Lyapunov functions for nonlinear systems}. \newblock In {\em Proceedings of the European Conference on Circuit Theory and Design}, pages 1065--1068, 1993. \bibitem{vidyasagar} M.~Vidyasagar. \newblock {\em {Nonlinear System Analysis}}. \newblock Prentice Hall, Englewood Cliffs, 2. edition, 1993. \bibitem{liberzon05} L.~Vu and D.~Liberzon. \newblock {Common Lyapunov functions for families of commuting nonlinear systems}. \newblock {\em System \& Control Letters}, {\bf 54}:405--416, 2005. \bibitem{AI} W.~Walter. \newblock {\em {Analysis I}}. \newblock Springer, Berlin, 2. edition, 1990. \bibitem{waltereng} W.~Walter. \newblock {\em Ordinary Differential Equations}. \newblock Springer, New York, 1. edition, 1991. \bibitem{EIDTDD} W.~Walter. \newblock {\em {Einf\"uhrung in die Theorie der Distributionen}}. \newblock BI-Wiss.-Verl., Mannheim, 3. edition, 1994, (in German). \bibitem{willems70} J.~Willems. \newblock {\em Stability Theory of Dynamical Systems}. \newblock Nelson, 1970. \bibitem{wilson69} F.~Wilson. \newblock {Smoothing derivatives of functions and applications}. \newblock {\em Translations of the American Mathematical Society}, {\bf 139}:413--428, 1969. \bibitem{wu98} Q.~Wu, S.~Onyshko, N.~Sepehri, and A.~Thornton-Trump. \newblock {On construction of smooth Lyapunov functions for non-smooth systems}. \newblock {\em International Journal of Control}, {\bf 69-3}:443--457, 1998. \bibitem{zubov:64} V.~Zubov. \newblock {\em {Methods of A.\,M.\,Lyapunov and their Application}}. \newblock P.\,Noordhoff, Groningen, Netherlands, 1964. \end{thebibliography} \end{document}