If $c < 0$, then $a \leq - \frac{b}{c}$ if and only if $-b \leq ca$.
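A one-step justification: multiplying both sides by the negative number $c$ reverses the inequality,
$$a \leq -\frac{b}{c} \iff ca \geq c \cdot \left(-\frac{b}{c}\right) = -b \iff -b \leq ca.$$
|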
theory Exercises2
imports Main
begin
(* Exercise 2.1 *)
value "1 + (2::nat)"
value "1 + (2::int)"
value "1 - (2::nat)"
value "1 - (2::int)"
(* Exercise 2.2 *)
fun add :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
"add 0 n = n" |
"add (Suc m) n = Suc (add m n)"
lemma add_assoc[simp]: "add (add x y) z = add x (add y z)"
apply (induction x)
apply (auto)
done
lemma add_r_0[simp]: "add x 0 = x"
apply (induction x)
apply (auto)
done
lemma add_succ_1[simp]: "add x (Suc y) = Suc (add x y)"
apply (induction x)
apply (auto)
done
lemma add_commut[simp]: "add x y = add y x"
apply (induction x)
apply (auto)
done
(* Exercise 2.3 *)
fun count :: "'a \<Rightarrow> 'a list \<Rightarrow> nat" where
"count _ [] = 0" |
"count x (y # ys) = (if (x = y) then 1 else 0) + (count x ys)"
theorem count_lt_length[simp]: "count x xs \<le> length xs"
apply (induction xs)
apply (auto)
done
(* Exercise 2.4 *)
fun snoc :: "'a list \<Rightarrow> 'a \<Rightarrow> 'a list" where
"snoc [] x = [x]"|
"snoc (y#ys) x = y # (snoc ys x)"
fun reverse :: "'a list \<Rightarrow> 'a list" where
"reverse [] = []"|
"reverse (x#xs) = snoc (reverse xs) x"
theorem rev_snoc[simp]: "reverse (snoc xs x) = (x#(reverse xs))"
apply (induction xs)
apply (auto)
done
theorem rev_rev_id[simp] : "reverse (reverse x) = x"
apply (induction x)
apply (auto)
done
(* Exercise 2.5 *)
fun sum_upto :: "nat \<Rightarrow> nat" where
"sum_upto 0 = 0"|
"sum_upto (Suc n) = (Suc n) + (sum_upto n)"
theorem sum_upto_sol[simp]: "sum_upto n = n * (n + 1) div 2"
apply (induction n)
apply (auto)
done
(* Exercise 2.6 *)
datatype 'a tree = Tip | Node "'a tree" 'a "'a tree"
fun contents:: "'a tree \<Rightarrow> 'a list" where
"contents Tip = Nil"|
"contents (Node l a r) = [a] @ (contents l) @ (contents r)"
fun sum_tree:: "nat tree \<Rightarrow> nat" where
"sum_tree Tip = 0"|
"sum_tree (Node l a r) = a + (sum_tree l) + (sum_tree r)"
theorem sum_tree_thm[simp]: "sum_tree t = sum_list (contents t)"
apply (induction t)
apply (auto)
done
(* Exercise 2.7 *)
datatype 'a tree2 = Leaf 'a | Node2 "'a tree2" 'a "'a tree2"
fun mirror2 :: "'a tree2 \<Rightarrow> 'a tree2" where
"mirror2 (Leaf x) = Leaf x"|
"mirror2 (Node2 l a r) = (Node2 (mirror2 r) a (mirror2 l))"
fun pre_order :: "'a tree2 \<Rightarrow> 'a list" where
"pre_order (Leaf x) = [x]" |
"pre_order (Node2 l a r) = a # (pre_order l @ pre_order r)"
fun post_order :: "'a tree2 \<Rightarrow> 'a list" where
"post_order (Leaf x) = [x]" |
"post_order (Node2 l a r) = post_order l @ post_order r @ [a]"
theorem rev_pre_is_pos[simp]: "pre_order (mirror2 t) = rev (post_order t)"
apply (induction t rule: post_order.induct)
apply (auto)
done
(* Exercise 2.8 *)
fun intersperse:: "'a \<Rightarrow> 'a list \<Rightarrow> 'a list" where
"intersperse a [] = []"|
"intersperse a [x] = [x]"|
"intersperse a (x1 # x2 # xs) = x1 # a # x2 # (intersperse a xs)"
theorem intersperse_map[simp]:
"map f (intersperse a xs) = intersperse (f a) (map f xs)"
apply (induction xs rule: intersperse.induct)
apply (auto)
done
(* Exercise 2.9 *)
fun itadd:: "nat \<Rightarrow> nat \<Rightarrow> nat" where
"itadd 0 n = n"|
"itadd (Suc m) n = itadd m (Suc n)"
lemma "itadd m n = add m n"
apply (induction m arbitrary : n)
apply (auto)
done
(* Exercise 2.10 *)
datatype tree0 = Nil0 | Node0 tree0 tree0
fun nodes::"tree0 \<Rightarrow> nat" where
"nodes Nil0 = 1"|
"nodes (Node0 l r) = 1 + (nodes l) + (nodes r)"
fun explode :: "nat \<Rightarrow> tree0 \<Rightarrow> tree0" where
"explode 0 t = t"|
"explode (Suc n) t = explode n (Node0 t t)"
definition explode_size :: "nat \<Rightarrow> tree0 \<Rightarrow> nat" where
"explode_size n t = (2^n) * (1 + (nodes t)) - 1"
lemma [simp] : "nodes (explode n (Node0 t t)) = 1 + 2 * nodes (explode n t)"
apply (induction n arbitrary : t)
apply (auto)
done
lemma [simp] : "Suc (2 * 2 ^ n + nodes t * (2 * 2 ^ n) - Suc (Suc 0)) = 2 * 2 ^ n + nodes t * (2 * 2 ^ n) - Suc 0"
apply (induction n)
apply (auto)
done
lemma "(nodes (explode n t)) = explode_size n t"
apply (induction n)
apply (simp_all add : explode_size_def)
apply (simp add : algebra_simps)
done
(* Exercise 2.11 *)
datatype exp = Var | Const int | Add exp exp | Mult exp exp
value "(1 * (2::int))"
fun eval :: "exp \<Rightarrow> int \<Rightarrow> int" where
"eval Var x = x" |
"eval (Const x) _ = x" |
"eval (Add a b) x = (eval a x) + (eval b x)" |
"eval (Mult a b) x = (eval a x) * (eval b x)"
fun evalp :: "int list \<Rightarrow> int \<Rightarrow> int" where
"evalp [] _ = 0"|
"evalp (c#cs) x = c * (x^(length (c#cs))) + (evalp cs x)"
fun degree :: "exp \<Rightarrow> int" where
"degree (Mult Var Var) = 2"|
"degree (Mult Var x) = 1 + (degree x)"|
"degree (Mult x Var) = 1 + (degree x)"|
"degree x = 0"
fun factor :: "exp \<Rightarrow> int" where
"factor (Mult x y) = (factor x) * (factor y)"|
"factor (Const x) = x"
fun coeffs :: "exp \<Rightarrow> int list" where
"coeffs (Const x) = [x]"|
"coeffs (Add a b) = (factor a) # (coeffs b)"
theorem "evalp (coeffs e) x = eval e x"
apply (induction e arbitrary : x rule : coeffs.induct)
apply (simp_all)
end |
module Main
import Data.Strings
%default total
%foreign "C:add,libcalc"
add : Int -> Int -> Int
%foreign "C:sub,libcalc"
sub : Int -> Int -> Int
%foreign "C:mul,libcalc"
mul : Int -> Int -> Int
%foreign "C:div,libcalc"
div : Int -> Int -> Int
%foreign "C:op_with_message,libcalc"
prim_opWithMessage : String -> (Int -> Int -> Int) -> Int -> Int -> PrimIO Int
opWithMessage : HasIO io => String -> (Int -> Int -> Int) -> Int -> Int -> io Int
opWithMessage s f x y = primIO $ prim_opWithMessage s f x y
readNum : HasIO io => io (Maybe Int)
readNum = do putStr "Enter number: "
             ns <- getLine
             case all isDigit (unpack ns) of
                  False => pure Nothing
                  True => pure (Just (cast ns))

main : IO ()
main = do Just x <- readNum
            | Nothing => putStrLn "first number is invalid"
          Just y <- readNum
            | Nothing => putStrLn "second number is invalid"
          printLn (add x y)
          printLn (sub x y)
          printLn (mul x y)
          printLn (Main.div x y)
          opWithMessage "sum" add x y
          opWithMessage "diff" sub x y
          opWithMessage "prod" mul x y
          opWithMessage "quot" Main.div x y
          pure ()
|
According to Church teaching, respect for human life requires respect for one's own body, precluding unhealthy behavior, the abuse of food, alcohol, medicines, illegal drugs, tattoos and piercings. The Church also warns against the opposite behavior of "excessive preoccupation with the health and welfare of the body that 'idolizes' physical perfection, fitness, and success at sports."
|
[STATEMENT]
lemma unsynlr_sup: "unsynlr (sup x y) = unsynlr x \<union> unsynlr y"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. unsynlr (sup x y) = unsynlr x \<union> unsynlr y
[PROOF STEP]
unfolding sup_synlr_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. unsynlr (mksynlr (unsynlr x \<union> unsynlr y)) = unsynlr x \<union> unsynlr y
[PROOF STEP]
by (simp add: admS_def synlr_def) |
A Cheap Options Trade To Protect Your Portfolio Through August. High Volatility, Options, Tariffs. Low volatility can be a difficult time to trade options, especially when options trading volume is also low. However, this summer may be different than what we're used to.
Most discount options brokers charge a flat fee, plus a per-contract fee for options trades. The flat fee for most discount options brokers is between $4.95 and $12.99 per trade, with per-contract charges of between $0.50 and $0.95 per contract. Some discount options brokers offer a flat rate on all trades.
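As a worked example with hypothetical numbers inside the quoted ranges, a 10-contract trade at a $4.95 flat fee and $0.65 per contract would cost:
```python
flat_fee = 4.95       # hypothetical, within the quoted $4.95-$12.99 range
per_contract = 0.65   # hypothetical, within the quoted $0.50-$0.95 range
contracts = 10

total = flat_fee + per_contract * contracts
print(f"${total:.2f}")  # $11.45
```
|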
function [x,P]=onePointCartInit(zCart,SRCart,higherDerivStdDev,matType)
%%ONEPOINTCARTINIT This function implements single-point initialization for
% target states that consist of components of position,
% velocity, acceleration, etc. This function initializes
% tracks from a single measurement by setting the position
% components to the Cartesian measurement value with its
% associated covariance and then setting the diagonal elements
% of the rest of the components based on fixed standard
% deviations. For example, in [1] it is suggested to make the
% standard deviation for velocity vMax/3 and in Chapter 3.2.2
% of [2], vMax/2. Similar ad-hoc values could be used for
% higher moments. This function does not use Doppler/range
% rate.
%
%INPUTS: zCart A zDimXnumMeas set of Cartesian measurements for which
% single-point differencing should be used to start tracks.
% SRCart If matType is omitted or is 0, then this is a
% zDimXzDimXnumMeas set of lower-triangular square root
% covariance matrices associated with the measurements in
% zCart. If all of the matrices are the same, then a single
% zDimXzDim matrix can be passed. If matType=1, then this is a
% set of covariance matrices.
% higherDerivStdDev A numMomentsX1 or 1XnumMoments vector containing the
% standard deviations to use for each of the moments
% (position, velocity, etc) that cannot be estimated from the
% data. As mentioned in [1] and in Chapter 3.2.2
% of [2], for velocity, this might be vMax/sqrt(2) or
% vMax/sqrt(3).
% matType An optional input specifying whether SRCart is a set of
% lower-triangular square roots of the covariance matrix, or
% whether it is the set of covariance matrices. Possible
% values are:
% 0 (The default if omitted or an empty matrix is passed)
% SRCart holds lower-triangular square root covariance
% matrices.
% 1 SRCart holds covariance matrices.
%
%OUTPUTS: x The xDimXnumMeas set of target state estimates. All
% non-position components are zero. xDim=zDim*(numMoments+1). The
% components are arranged position, velocity, acceleration, etc.
% For example, [x;y;z;xDot;yDot;zDot].
% P The xDimXxDimXnumMeas set of initial target state covariance
% matrices associated with x.
%
%One-point differencing is discussed in [1] and Chapter 3.2.2 of [2].
%
%REFERENCES:
%[1] M. Mallick and B. La Scala, "Comparison of single-point and two-point
% difference track initiation algorithms using position measurements".
% Acta Automatica Sinica, vol.34, no. 3, pp 258-265, Mar. 2008.
%[2] Y. Bar-Shalom, P. K. Willett, and X. Tian, Tracking and Data Fusion.
% Storrs, CT: YBS Publishing, 2011.
%
%November 2016 David F. Crouse, Naval Research Laboratory, Washington D.C.
%(UNCLASSIFIED) DISTRIBUTION STATEMENT A. Approved for public release.
if(nargin<4||isempty(matType))
matType=0;
end
zDim=size(zCart,1);
numMeas=size(zCart,2);
if(size(SRCart,3)==1)
SRCart=repmat(SRCart,1,1,numMeas);
end
numMoments=length(higherDerivStdDev);
xDim=zDim*(numMoments+1);
x=zeros(xDim,numMeas);
P=zeros(xDim,xDim,numMeas);
x(1:zDim,:)=zCart;
switch(matType)
case 0
for curMeas=1:numMeas
P(1:zDim,1:zDim,curMeas)=SRCart(:,:,curMeas)*SRCart(:,:,curMeas)';
end
case 1
for curMeas=1:numMeas
P(1:zDim,1:zDim,curMeas)=SRCart(:,:,curMeas);
end
otherwise
error('Unknown matrix type specified.')
end
sel=(zDim+1):xDim;
P(sel,sel,:)=repmat(kron(diag(higherDerivStdDev(:).^2),eye(zDim,zDim)),1,1,numMeas);
end
%LICENSE:
%
%The source code is in the public domain and not licensed or under
%copyright. The information and software may be used freely by the public.
%As required by 17 U.S.C. 403, third parties producing copyrighted works
%consisting predominantly of the material produced by U.S. government
%agencies must provide notice with such work(s) identifying the U.S.
%Government material incorporated and stating that such material is not
%subject to copyright protection.
%
%Derived works shall not identify themselves in a manner that implies an
%endorsement by or an affiliation with the Naval Research Laboratory.
%
%RECIPIENT BEARS ALL RISK RELATING TO QUALITY AND PERFORMANCE OF THE
%SOFTWARE AND ANY RELATED MATERIALS, AND AGREES TO INDEMNIFY THE NAVAL
%RESEARCH LABORATORY FOR ALL THIRD-PARTY CLAIMS RESULTING FROM THE ACTIONS
%OF RECIPIENT IN THE USE OF THE SOFTWARE.
|
# Week 2: Linear Algebra II
# Tutorial 1
# [insert your name]
**Important reminders**: Before starting, click "File -> Save a copy in Drive". Produce a pdf for submission by "File -> Print" and then choose "Save to PDF".
To complete this tutorial, you should have watched Videos 2.1 through 2.6.
**Credits:**
The videos you watched for this week were from 3Blue1Brown. Some elements of this problem set are from or inspired by https://openedx.seas.gwu.edu/courses/course-v1:GW+EngComp4+2019/about. In particular, we are using their `plot_linear_transformation` and `plot_linear_transformations` functions, and the demonstration of the additional transformation of a matrix inverse (end of Exercise 2)
```python
# Imports
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import scipy.linalg
# Plotting parameters
matplotlib.rcParams.update({'font.size': 22})
```
```python
# @title Plotting functions
import numpy
from numpy.linalg import inv, eig
from math import ceil
from matplotlib import pyplot, ticker, get_backend, rc
from mpl_toolkits.mplot3d import Axes3D
from itertools import cycle
_int_backends = ['GTK3Agg', 'GTK3Cairo', 'MacOSX', 'nbAgg',
'Qt4Agg', 'Qt4Cairo', 'Qt5Agg', 'Qt5Cairo',
'TkAgg', 'TkCairo', 'WebAgg', 'WX', 'WXAgg', 'WXCairo']
_backend = get_backend() # get current backend name
# shrink figsize and fontsize when using %matplotlib notebook
if _backend in _int_backends:
    fontsize = 4
    fig_scale = 0.75
else:
    fontsize = 5
    fig_scale = 1
grey = '#808080'
gold = '#cab18c' # x-axis grid
lightblue = '#0096d6' # y-axis grid
green = '#008367' # x-axis basis vector
red = '#E31937' # y-axis basis vector
darkblue = '#004065'
pink, yellow, orange, purple, brown = '#ef7b9d', '#fbd349', '#ffa500', '#a35cff', '#731d1d'
quiver_params = {'angles': 'xy',
'scale_units': 'xy',
'scale': 1,
'width': 0.012}
grid_params = {'linewidth': 0.5,
'alpha': 0.8}
def set_rc(func):
    def wrapper(*args, **kwargs):
        rc('font', family='serif', size=fontsize)
        rc('figure', dpi=200)
        rc('axes', axisbelow=True, titlesize=5)
        rc('lines', linewidth=1)
        func(*args, **kwargs)
    return wrapper
@set_rc
def plot_vector(vectors, tails=None):
    ''' Draw 2d vectors based on the values of the vectors and the position of their tails.
    Parameters
    ----------
    vectors : list.
        List of 2-element array-like structures, each represents a 2d vector.
    tails : list, optional.
        List of 2-element array-like structures, each represents the coordinates of the tail
        of the corresponding vector in vectors. If None (default), all tails are set at the
        origin (0,0). If len(tails) is 1, all tails are set at the same position. Otherwise,
        vectors and tails must have the same length.
    Examples
    --------
    >>> v = [(1, 3), (3, 3), (4, 6)]
    >>> plot_vector(v)     # draw 3 vectors with their tails at origin
    >>> t = [numpy.array((2, 2))]
    >>> plot_vector(v, t)  # draw 3 vectors with their tails at (2,2)
    >>> t = [[3, 2], [-1, -2], [3, 5]]
    >>> plot_vector(v, t)  # draw 3 vectors with 3 different tails
    '''
    vectors = numpy.array(vectors)
    assert vectors.shape[1] == 2, "Each vector should have 2 elements."
    if tails is not None:
        tails = numpy.array(tails)
        assert tails.shape[1] == 2, "Each tail should have 2 elements."
    else:
        tails = numpy.zeros_like(vectors)
    # tile vectors or tails array if needed
    nvectors = vectors.shape[0]
    ntails = tails.shape[0]
    if nvectors == 1 and ntails > 1:
        vectors = numpy.tile(vectors, (ntails, 1))
    elif ntails == 1 and nvectors > 1:
        tails = numpy.tile(tails, (nvectors, 1))
    else:
        assert tails.shape == vectors.shape, "vectors and tail must have a same shape"
    # calculate xlimit & ylimit
    heads = tails + vectors
    limit = numpy.max(numpy.abs(numpy.hstack((tails, heads))))
    limit = numpy.ceil(limit * 1.2)  # add some margins
    figsize = numpy.array([2, 2]) * fig_scale
    figure, axis = pyplot.subplots(figsize=figsize)
    axis.quiver(tails[:, 0], tails[:, 1], vectors[:, 0], vectors[:, 1], color=darkblue,
                angles='xy', scale_units='xy', scale=1)
    axis.set_xlim([-limit, limit])
    axis.set_ylim([-limit, limit])
    axis.set_aspect('equal')
    # if xticks and yticks of grid do not match, choose the finer one
    xticks = axis.get_xticks()
    yticks = axis.get_yticks()
    dx = xticks[1] - xticks[0]
    dy = yticks[1] - yticks[0]
    base = max(int(min(dx, dy)), 1)  # grid interval is always an integer
    loc = ticker.MultipleLocator(base=base)
    axis.xaxis.set_major_locator(loc)
    axis.yaxis.set_major_locator(loc)
    axis.grid(True, **grid_params)
    # show x-y axis in the center, hide frames
    axis.spines['left'].set_position('center')
    axis.spines['bottom'].set_position('center')
    axis.spines['right'].set_color('none')
    axis.spines['top'].set_color('none')
@set_rc
def plot_transformation_helper(axis, matrix, *vectors, unit_vector=True, unit_circle=False, title=None):
    """ A helper function to plot the linear transformation defined by a 2x2 matrix.
    Parameters
    ----------
    axis : class matplotlib.axes.Axes.
        The axes to plot on.
    matrix : class numpy.ndarray.
        The 2x2 matrix to visualize.
    *vectors : class numpy.ndarray.
        The vector(s) to plot along with the linear transformation. Each array denotes a vector's
        coordinates before the transformation and must have a shape of (2,). Accept any number of vectors.
    unit_vector : bool, optional.
        Whether to plot unit vectors of the standard basis, default to True.
    unit_circle: bool, optional.
        Whether to plot unit circle, default to False.
    title: str, optional.
        Title of the plot.
    """
    assert matrix.shape == (2, 2), "the input matrix must have a shape of (2,2)"
    grid_range = 20
    x = numpy.arange(-grid_range, grid_range + 1)
    X_, Y_ = numpy.meshgrid(x, x)
    I = matrix[:, 0]
    J = matrix[:, 1]
    X = I[0] * X_ + J[0] * Y_
    Y = I[1] * X_ + J[1] * Y_
    origin = numpy.zeros(1)
    # draw grid lines
    for i in range(x.size):
        axis.plot(X[i, :], Y[i, :], c=gold, **grid_params)
        axis.plot(X[:, i], Y[:, i], c=lightblue, **grid_params)
    # draw (transformed) unit vectors
    if unit_vector:
        axis.quiver(origin, origin, [I[0]], [I[1]], color=green, **quiver_params)
        axis.quiver(origin, origin, [J[0]], [J[1]], color=red, **quiver_params)
    # draw optional vectors
    color_cycle = cycle([pink, darkblue, orange, purple, brown])
    if vectors:
        for vector in vectors:
            color = next(color_cycle)
            vector_ = matrix @ vector.reshape(-1, 1)
            axis.quiver(origin, origin, [vector_[0]], [vector_[1]], color=color, **quiver_params)
    # draw optional unit circle
    if unit_circle:
        alpha = numpy.linspace(0, 2 * numpy.pi, 41)
        circle = numpy.vstack((numpy.cos(alpha), numpy.sin(alpha)))
        circle_trans = matrix @ circle
        axis.plot(circle_trans[0], circle_trans[1], color=red, lw=0.8)
    # hide frames, set xlimit & ylimit, set title
    limit = 4
    axis.spines['left'].set_position('center')
    axis.spines['bottom'].set_position('center')
    axis.spines['left'].set_linewidth(0.3)
    axis.spines['bottom'].set_linewidth(0.3)
    axis.spines['right'].set_color('none')
    axis.spines['top'].set_color('none')
    axis.set_xlim([-limit, limit])
    axis.set_ylim([-limit, limit])
    if title is not None:
        axis.set_title(title)
@set_rc
def plot_linear_transformation(matrix, *vectors, unit_vector=True, unit_circle=False):
    """ Plot the linear transformation defined by a 2x2 matrix using the helper
    function plot_transformation_helper(). It will create 2 subplots to visualize some
    vectors before and after the transformation.
    Parameters
    ----------
    matrix : class numpy.ndarray.
        The 2x2 matrix to visualize.
    *vectors : class numpy.ndarray.
        The vector(s) to plot along with the linear transformation. Each array denotes a vector's
        coordinates before the transformation and must have a shape of (2,). Accept any number of vectors.
    unit_vector : bool, optional.
        Whether to plot unit vectors of the standard basis, default to True.
    unit_circle: bool, optional.
        Whether to plot unit circle, default to False.
    """
    figsize = numpy.array([4, 2]) * fig_scale
    figure, (axis1, axis2) = pyplot.subplots(1, 2, figsize=figsize)
    plot_transformation_helper(axis1, numpy.identity(2), *vectors, unit_vector=unit_vector, unit_circle=unit_circle, title='Before transformation')
    plot_transformation_helper(axis2, matrix, *vectors, unit_vector=unit_vector, unit_circle=unit_circle, title='After transformation')
@set_rc
def plot_linear_transformations(*matrices, unit_vector=True, unit_circle=False):
    """ Plot the linear transformation defined by a sequence of n 2x2 matrices using the helper
    function plot_transformation_helper(). It will create n+1 subplots to visualize some
    vectors before and after each transformation.
    Parameters
    ----------
    *matrices : class numpy.ndarray.
        The 2x2 matrices to visualize. Accept any number of matrices.
    unit_vector : bool, optional.
        Whether to plot unit vectors of the standard basis, default to True.
    unit_circle: bool, optional.
        Whether to plot unit circle, default to False.
    """
    nplots = len(matrices) + 1
    nx = 2
    ny = ceil(nplots / nx)
    figsize = numpy.array([2 * nx, 2 * ny]) * fig_scale
    figure, axes = pyplot.subplots(nx, ny, figsize=figsize)
    for i in range(nplots):  # fig_idx
        if i == 0:
            matrix_trans = numpy.identity(2)
            title = 'Before transformation'
        else:
            matrix_trans = matrices[i - 1] @ matrix_trans
            if i == 1:
                title = 'After {} transformation'.format(i)
            else:
                title = 'After {} transformations'.format(i)
        plot_transformation_helper(axes[i // nx, i % nx], matrix_trans, unit_vector=unit_vector, unit_circle=unit_circle, title=title)
    # hide axes of the extra subplot (only when nplots is an odd number)
    if nx * ny > nplots:
        axes[-1, -1].axis('off')
```
# Key concept review & coding tips
## Linear transformations and matrices
* A matrix is basically a table of numbers.
* We can represent matrices with numpy arrays, which we create as a list of rows: \begin{bmatrix}
4 & 1 & 2\\
3 & 2 & 0\\
\end{bmatrix} would be `np.array([[4, 1, 2], [3, 2, 0]])`
* Linear transformations take in an input vector and output a transformed vector. Under a linear transformation, all grid lines remain parallel and evenly spaced, and the origin remains fixed (the transformation must preserve vector addition and scalar multiplication).
* Matrices represent linear transformations: each column corresponds to where the corresponding standard basis vector ends up after the transformation
* We can think of the matrix vector multiplication $A\bar{x}=\bar{b}$ as a linear transformation where $A$ acts on $\bar{x}$ to produce $\bar{b}$. An alternate view is to think of it as solving a system of linear equations.
* `np.linalg.solve` solves matrix-vector equations like the one above (a runnable sketch follows the example below)
* As an example, solving $A\bar{x}=\bar{b}$ is equivalent to solving the system of linear equations:
$$ \begin{align} 2x_1 + 3x_2 &= 6 \\ x_1 + 4x_2 &= 1 \end{align}$$
$$\text{if } A = \begin{bmatrix}
2 & 3 \\
1 & 4 \\
\end{bmatrix}, \bar{b} = \begin{bmatrix}
6 \\
1 \\
\end{bmatrix}$$
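A quick numerical sketch of the example above (solving the same system):
```python
A = np.array([[2, 3],
              [1, 4]])
b = np.array([6, 1])

x = np.linalg.solve(A, b)  # solve A x = b for x
print(x)  # [ 4.2 -0.8]
```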
## Matrix multiplication
* We can envision matrix multiplication as the composition of transformations: if $C = AB$, then applying $C$ is the same as applying $B$ first and then $A$. Element $c_{ij}$ (the element of $C$ in the $i$th row and $j$th column) equals the dot product of the $i$th row of $A$ and the $j$th column of $B$.
* There are several ways to do matrix multiplication in Python: we can use `np.dot(A, B)`, `np.matmul(A, B)`, or the dedicated operator `@`, as in `A @ B` (see the sketch below)
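A minimal sketch showing that the three spellings agree (the matrices here are made up):
```python
A = np.array([[1, 2], [3, 4]])
B = np.array([[0, 1], [1, 0]])

print(np.dot(A, B))     # all three print [[2 1], [4 3]]
print(np.matmul(A, B))  # same result
print(A @ B)            # same result
```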
## Determinants
* The determinant of a matrix (det A) is a scalar value that describes how the corresponding linear transformation scales areas (volumes in higher dimensions). It is negative if the linear transformation reverses the orientation of the space.
* `np.linalg.det(A)` computes the determinant (see the sketch below)
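A short sketch: a 90-degree rotation preserves orientation (determinant $1$), while a reflection reverses it (determinant $-1$); both preserve area, so $|\det|=1$:
```python
rotation = np.array([[0, -1],
                     [1, 0]])    # rotate by 90 degrees
reflection = np.array([[-1, 0],
                       [0, 1]])  # reflect across the x2 axis

print(np.linalg.det(rotation))    # 1.0
print(np.linalg.det(reflection))  # -1.0
```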
## Inverse matrices, column space, and null space
* We can sometimes take the inverse of a matrix so that $A^{-1}A = I$ where $I$ is the identity matrix (all zeros except for ones on the diagonal).
* We can use `np.linalg.inv(A)` to compute $A^{-1}$ when it exists
* `np.eye(d)` gives us the identity matrix of dimension d
* The column space of a matrix is the span of the columns of the matrix. This is equivalent to the range of the linear transformation where, in informal language, the range is everywhere that can be "gotten to" by the transformation. In other words, the range is the set of all vectors that the linear transformation maps to.
* The rank of a matrix is the dimension of the column space.
* `np.linalg.matrix_rank(A)` computes the rank
* The null space of a matrix is the set of all vectors that land on the origin after the resulting transformation. In other words, it is the set of all solutions of $A\bar{x} = \bar{0}$.
* You can use `scipy.linalg.null_space` to find a basis for the null space of a matrix (see the sketch after this list).
* If the matrix A is $m$ x $n$, the null space must be a subspace of $R^n$ and the column space must be a subspace of $R^m$.
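A small sketch tying rank and null space together, using a made-up singular matrix whose second column is twice its first:
```python
import scipy.linalg

M = np.array([[1, 2],
              [2, 4]])

print(np.linalg.matrix_rank(M))    # 1: the column space is a line in R^2
print(scipy.linalg.null_space(M))  # one basis vector, since dim Nul M = 2 - 1 = 1
```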
# Exercise 1: Computation corner
For each computation below, please calculate it 1) by-hand and 2) using code. Check that the answers match! For by-hand calculation, please show some work when possible. For example, for matrix multiplication, write out the computation of each element in the resulting matrix so it looks something like this:
$$A = \begin{bmatrix}
5*2+4*1 & 3*5+1*2 \\
0*1+1*2 & 3*2+4*5 \\
\end{bmatrix} $$
Note that these are completely made up numbers for demonstration purposes - the above numbers don't make sense for a matrix multiplication.
## A) Matrix multiplication
Please compute C = AB where $$A = \begin{bmatrix}
5 & 3 \\
0 & 2 \\
\end{bmatrix}, B = \begin{bmatrix}
1 & 5 \\
4 & 3 \\
\end{bmatrix} $$
**Your math answer**
$$C = \begin{bmatrix}
1 * 5 + 4 * 3 & 5*5 + 3 * 3 \\
1 * 0 + 4 * 2 & 5*0 + 3 * 2 \\
\end{bmatrix}$$
```python
# Your code answer
A = np.array([[5, 3],
[0, 2]])
B = np.array([[1, 5],
[4, 3]])
np.matmul(A,B)
```
## B) Matrix multiplication
Please compute Z = XY where $$X = \begin{bmatrix}
3 & 2 & 1 \\
1 & 2 & 7 \\
\end{bmatrix}, Y = \begin{bmatrix}
0 & 1 \\
2 & 4 \\
5 & 1 \\
\end{bmatrix} $$
Before computing, figure out what the dimensions of Z will be (no need to explicitly answer this)
**Your math answer**
Z has the number of rows of X and the number of columns of Y, so it is 2 x 2.
$$Z = \begin{bmatrix}
3 * 0 + 2 * 2 + 1 * 5 & 3 * 1 + 2 * 4 + 1 * 1 \\
1 * 0 + 2 * 2 + 7 * 5 & 1 * 1 + 2 * 4 + 7 * 1\\
\end{bmatrix}$$
```python
A = np.array([[3, 2, 1],
[1, 2, 7]])
B = np.array([[0, 1],
[2, 4],
[5,1]])
np.matmul(A,B)
```
## C) (Optional) Transpose
**Please come back to this problem if you complete the rest of the tutorial during class time.**
The **transpose** of a matrix flips a matrix over its diagonal, changing the rows to columns and the columns to rows. We denote the transpose of matrix X with $X^T$. In numpy, we can get the transpose of an array X with `X.T`.
First, write out the transpose of X from part B yourself and then produce it using code.
**Your math answer**
$$X^T = \begin{bmatrix}
3 & 1 \\
2 & 2 \\
1 & 7 \\
\end{bmatrix}$$
```python
# Your code answer
A.T  # A holds X from part B
```
```python
np.matmul(np.matmul(A,B) , np.array([[0, -1], [1,0]]))
```
You could not compute $X^TY$ - why not?
**Your text answer**
$X^T$ is 3 x 2 and $Y$ is 3 x 2, so the inner dimensions (2 and 3) don't match and the product is undefined.
# Exercise 2: Thinking about transformations
In the video *Linear transformations and matrices*, you learned that a matrix corresponding to a rotation by 90 degrees is $$X = \begin{bmatrix}
0 & -1 \\
1 & 0 \\
\end{bmatrix}$$ You also saw that one matrix for which the transformation is horizontal shear is $$X = \begin{bmatrix}
1 & 1 \\
0 & 1 \\
\end{bmatrix}$$
In this exercise, we will think about some other types of transformations. We will use `plot_linear_transformation(X)` to see the grid before and after the transformation corresponding to matrix $X$.
**Remember to think about where your basis vectors should end up! Then your matrix consists of the transformed basis vectors. Drawing out what you want to happen can help**
## A) Reflection across x2 axis
Come up with a matrix $A$ for which the corresponding linear transformation is reflection through the $x_2$ axis (flipping across the $x_2$ axis). For example, $\bar{x} = \begin{bmatrix}
1 \\
5 \\
\end{bmatrix}$ should become $\bar{b} = \begin{bmatrix}
-1 \\
5 \\
\end{bmatrix}$ when multiplied with $A$.
```python
A = np.array([[-1, 0],
              [0, 1]])
plot_linear_transformation(A)
```
Would you expect the determinant of A to be positive or negative? Why? Would you expect the absolute value of the determinant to be greater than 1, less than 1, or equal to 1? Why?
**Your text answer**
## B) Projection onto x1
Come up with a matrix $A$ for which the corresponding linear transformation is projecting onto the $x_1$ axis. For example, $\bar{x} = \begin{bmatrix}
1 \\
5 \\
\end{bmatrix}$ should become $\bar{b} = \begin{bmatrix}
1 \\
0 \\
\end{bmatrix}$ when multiplied with $A$.
```python
A = ...
plot_linear_transformation(A)
```
Would you expect the determinant of A to be positive or negative? Why? Would you expect the absolute value of the determinant to be greater than 1, less than 1, or equal to 1? Why?
**Your text answer**
## C) (Optional) Figuring out the transformation from a matrix
**Please come back to this problem if you complete the rest of the tutorial during class time.**
$$A = \begin{bmatrix}
3 & 1 \\
0 & 3 \\
\end{bmatrix}$$
Try to answer the below questions without looking at the plot of the transformation, but feel free to do so if you get stuck
i) This matrix is a composition of two basic transformations, where possible basic transformations are reflection, contraction, expansion, horizontal shear, vertical shear, and projection. What are the two basic transformations it is a composition of? (Hint: does this matrix look at all like either of the two in the description below Exercise 2?)
ii) Would you expect the determinant of A to be positive or negative? Why? Would you expect the absolute value of the determinant to be greater than 1, less than 1, or equal to 1? Why?
iii) Rewrite A as a matrix multiplication of two matrices where each matrix corresponds to one of the basic transformations.
**Your text answer**
```python
A = np.array([[3, 1], [0, 3]])
#plot_linear_transformation(A)
```
## Extra info: Matrix inverse transformation
We know that the inverse of a matrix essentially "undoes" the transformation of the matrix. Let's see this in action. We will plot the transformation of A then the additional transformation of $A^{-1}$ - the resulting plot should look like the original.
```python
A = np.array([[1,2], [2,1]])
A_inv = np.linalg.inv(A)
plot_linear_transformations(A, A_inv)
```
# Exercise 3: Encoding model matrices
Let's say we have a population of 3 visual neurons that respond to 3-pixel images. Each neural response is a weighted sum of the pixel values: we used this type of model in Week 1 Part 1 Exercise 2. We will now allow the pixels to have negative values.
We will look at two different populations of 3 neurons with different weights from the pixels: population f and population g. Below, we have the system of linear equations that dictates the neuron models for each population. $x_1$, $x_2$, and $x_3$ correspond to the pixel values. $r_{f1}$, $r_{f2}$, and $r_{f3}$ correspond to the responses of neurons 1, 2, and 3 in population f. $r_{g1}$, $r_{g2}$, and $r_{g3}$ correspond to the responses of neurons 1, 2, and 3 in population g.
Population f:
$$\begin{align}
x_1 + 3x_2 + 4x_3 &= r_{f1} \\
2x_1 + x_2 + 4x_3 &= r_{f2} \\
x_1 + 5x_2 + 6x_3 &= r_{f3} \\
\end{align}$$
Population g:
$$\begin{align}
x_2 + x_3 &= r_{g1} \\
6x_1 + 10x_2 &= r_{g2} \\
3x_1 + 6x_2 + x_3 &= r_{g3} \\
\end{align}$$
## A) Rewriting linear systems of equations to matrix equation
We want to rewrite the above system of equations for each population in the matrix equation $F\bar{x} = \bar{r}_f$ where $\bar{x}$ is the image and $\bar{r}_f$ is the vector of neural responses in population f. What is F?
We will do the same for population g: $G\bar{x} = \bar{r}_g$ where $\bar{r}_g$ is the vector of neural responses in population g. What is G?
**Your math answer**
F is the matrix of coefficients on the left-hand sides:
$$F = \begin{bmatrix}
1 & 3 & 4 \\
2 & 1 & 4 \\
1 & 5 & 6
\end{bmatrix}$$
Likewise, G collects the coefficients for population g:
$$G = \begin{bmatrix}
0 & 1 & 1 \\
6 & 10 & 0 \\
3 & 6 & 1
\end{bmatrix}$$
We started with the linear system of equations view but, as always, we can think about this matrix equation in terms of a linear transformation. In particular, matrices F and G transform vectors from a "pixel basis", where each element of a vector represents one pixel, to a "neural basis", where each element represents the response of one neuron.
## B) Solving a matrix equation
We will now try to solve the matrix equation to find $\bar{x}$ for a given $\bar{r}_f$. What does this correspond to in the neuroscience setting (aka what is $\bar{x}$ here)?
**Your text answer**
As stated above, $\bar{x}$ is the image, so solving for $\bar{x}$ amounts to decoding (reconstructing) the pixel image from the observed neural responses.
Find $\bar{x}$ if $$\bar{r}_f = \begin{bmatrix}
1 \\
1 \\
2 \\
\end{bmatrix}$$
We will use two different coding methods: you will first use `np.linalg.inv`, and then `np.linalg.solve`.
```python
np.linalg.solve?
```
```python
# Define F
F = np.array([[1, 3, 4],
[2, 1, 4],
[1, 5, 6]])
# Define r_f
r_f = np.array([1,1,2])
# Find x using np.linalg.inv
x_using_inv = np.linalg.inv(F) @ r_f  # x = F^{-1} r_f
# Find x using np.linalg.solve
x_using_solve = np.linalg.solve(F, r_f)
# Check each method resulted in the same x
if np.all(np.isclose(x_using_inv, x_using_solve)):
    print('Solutions match')
else:
    print('PROBLEM: Solutions do not match!')
```
Solutions match
```python
x_using_solve
```
array([-2. , -1. , 1.5])
```python
x_using_inv
```
array([-2. , -1. , 1.5])
## C) Solving another matrix equation
Try to repeat the steps in B for population g where
$$\bar{r}_g = \begin{bmatrix}
1 \\
1 \\
2 \\
\end{bmatrix}$$
What problem do you run into?
```python
# Define G
G = ...
# Define r_g
r_g = ...
# Find x using np.linalg.inv
x_using_inv = ...
# Find x using np.linalg.solve
x_using_solve = ...
# Check each method resulted in the same x
if np.all(np.isclose(x_using_inv, x_using_solve)):
    print('Solutions match')
else:
    print('PROBLEM: Solutions do not match!')
```
## D) Calculate the rank of F/G
First think: from the video *Inverse Matrices, column space, and null space*, we know that if an n x n matrix is invertible, the matrix is not "squishing" space: all of $R^n$ can be reached via the transformation. Based on this, what do you expect the ranks of F and G to be, given parts B/C? (no need to explicitly answer, just discuss)
Now compute the rank of each below and see if you were right.
```python
rank_F = ...
rank_G = ...
print('The rank of F is '+str(rank_F))
print('The rank of G is '+str(rank_G))
```
## E) Linearly independent or dependent columns
Are the columns of F linearly dependent or independent? How do you know? How about the columns of G? How do you know? (Hint: use the words rank and column space in your answer)
**Your text answer**
## F) Finding the null space
Use `scipy.linalg.null_space` to find the basis of the null spaces for F and G.
```python
F_null_space = ...
G_null_space = ...
```
From the above computation, what is the dimension of the null space for F? For G? What does the null space correspond to in our neuroscience setting?
**Your text answer**
## G) Describing the populations of neurons
So what does all this matrix examination tell us about the neural processing in populations f and g? Obviously this is a toy system but let's think about it.
i) What is the dimensionality of the population of neural responses in population f? How about in g?
ii) If we wanted to decode images from the corresponding neural responses, would we always be able to completely recover the image when looking at population f? How about population g? What does this tell us about the information loss of the neural processing?
iii) If the columns of a matrix are linearly dependent, then the rows also are. What does this mean for the neural weights in population g?
**Your text answer**
## Extra info: Invertible matrix theorem
You may have noticed that F and G have a lot of differences in terms of invertibility, the rank, the dimension of the null space, the linear dependence of columns, etc. There is a theorem that sums up a lot of these concepts: the **invertible matrix theorem**. This theorem essentially sorts square matrices into two types - invertible and not-invertible - and notes a whole bunch of qualities of each type.
Take a look at the below theorem. If you have time and really want to consolidate your knowledge, think through for each statement why they're either all true or all false. A lot of the theorem stems from what you already know as it is based on definitions or basic concepts. You do not need to memorize this theorem but it is helpful to remember that these two types of matrices exist.
The below is an informal, incomplete encapsulation of the invertible matrix theorem (aka I'm not including every single statement):
### **Invertible matrix theorem**
Let $A$ be a square n x n matrix. The following statements are either all true or all false for this matrix:
a) $A$ is an invertible matrix
b) The equation $A\bar{x} = \bar{0}$ has only the trivial solution $\bar{x} = \bar{0}$.
c) The columns of $A$ form a linearly independent set.
d) The equation $A\bar{x} = \bar{b}$ has at least one solution for each $\bar{b}$ in $R^n$.
e) The columns of A span $R^n$
f) $A^T$ is an invertible matrix
g) The columns of A form a basis of $R^n$
h) Col A (the column space of A) = $R^n$
i) rank A = n
j) Nul A (the null space of A) = {$\bar{0}$}
k) dim Nul A (the dimension of the null space) = 0
l) The determinant of A is not 0
|
In Chestnut Park there is a roundhouse that serves as a mini community center for the neighborhood. Sadly, it is often the target of vandalism.
|
cc ------------ dpmjet3.4 - authors: S.Roesler, R.Engel, J.Ranft -------
cc -------- phojet1.12-40 - authors: S.Roesler, R.Engel, J.Ranft -------
cc - oct'13 -------
cc ----------- pythia-6.4 - authors: Torbjorn Sjostrand, Lund'10 -------
cc ---------------------------------------------------------------------
cc converted for use with FLUKA -------
cc - oct'13 -------
C...PYTBBC
C...Calculates the three-body decay of gluinos into
C...charginos and third generation fermions.
SUBROUTINE PYTBBC(I,NN,XMGLU,GAM)
C...Double precision and integer declarations.
IMPLICIT DOUBLE PRECISION(A-H, O-Z)
IMPLICIT INTEGER(I-N)
INTEGER PYCOMP
C...Parameter statement to help give large particle numbers.
PARAMETER (KSUSY1=1000000,KSUSY2=2000000,KTECHN=3000000,
&KEXCIT=4000000,KDIMEN=5000000)
C...Commonblocks.
include 'inc/pydat1'
include 'inc/pydat2'
include 'inc/pymssm'
include 'inc/pyssmt'
C...Local variables.
EXTERNAL PYSIMP,PYLAMF
DOUBLE PRECISION PYSIMP,PYLAMF
INTEGER I,NN,LIN
DOUBLE PRECISION XMG,XMG2,XMB,XMB2,XMR,XMR2
DOUBLE PRECISION XMT,XMT2,XMST(4),XMSB(4)
DOUBLE PRECISION ULR(2),VLR(2),XMQ2,XMQ4,AM,W,SBAR,SMIN,SMAX
DOUBLE PRECISION SUMME(0:100),A(4,8)
DOUBLE PRECISION COS2A,SIN2A,COS2C,SIN2C
DOUBLE PRECISION GRS,XMQ3,XMGBTR,XMGTBR,ANT1,ANT2,ANB1,ANB2
DOUBLE PRECISION XMGLU,GAM
DOUBLE PRECISION XX1(2),XX2(2),AAA(2),BBB(2),CCC(2),
&DDD(2),EEE(2),FFF(2)
SAVE XX1,XX2,AAA,BBB,CCC,DDD,EEE,FFF
DOUBLE PRECISION ALPHAW,ALPHAS
DOUBLE PRECISION AMC(2)
SAVE AMC
DOUBLE PRECISION AMBOT,AMSB(2),SINC,COSC
DOUBLE PRECISION AMTOP,AMST(2),SINA,COSA
SAVE AMSB,AMST
LOGICAL IFIRST
SAVE IFIRST
DATA IFIRST/.TRUE./
TANB=RMSS(5)
SINB=TANB/SQRT(1D0+TANB**2)
COSB=SINB/TANB
C unvar XW=PARU(102)
AMW=PMAS(24,1)
COSC=SFMIX(5,1)
SINC=SFMIX(5,3)
COSA=SFMIX(6,1)
SINA=SFMIX(6,3)
AMBOT=PYMRUN(5,XMGLU**2)
AMTOP=PYMRUN(6,XMGLU**2)
W2=SQRT(2D0)
AMW=PMAS(24,1)
FAKT1=AMBOT/W2/AMW/COSB
FAKT2=AMTOP/W2/AMW/SINB
IF(IFIRST) THEN
AMC(1)=SMW(1)
AMC(2)=SMW(2)
DO 100 JJ=1,2
CCC(JJ)=FAKT1*UMIX(JJ,2)*SINC-UMIX(JJ,1)*COSC
EEE(JJ)=FAKT2*VMIX(JJ,2)*COSC
DDD(JJ)=FAKT1*UMIX(JJ,2)*COSC+UMIX(JJ,1)*SINC
FFF(JJ)=FAKT2*VMIX(JJ,2)*SINC
XX1(JJ)=FAKT2*VMIX(JJ,2)*SINA-VMIX(JJ,1)*COSA
AAA(JJ)=FAKT1*UMIX(JJ,2)*COSA
XX2(JJ)=FAKT2*VMIX(JJ,2)*COSA+VMIX(JJ,1)*SINA
BBB(JJ)=FAKT1*UMIX(JJ,2)*SINA
100 CONTINUE
AMST(1)=PMAS(PYCOMP(KSUSY1+6),1)
AMST(2)=PMAS(PYCOMP(KSUSY2+6),1)
AMSB(1)=PMAS(PYCOMP(KSUSY1+5),1)
AMSB(2)=PMAS(PYCOMP(KSUSY2+5),1)
IFIRST=.FALSE.
ENDIF
ULR(1)=XX1(I)*XX1(I)+AAA(I)*AAA(I)
ULR(2)=XX2(I)*XX2(I)+BBB(I)*BBB(I)
VLR(1)=CCC(I)*CCC(I)+EEE(I)*EEE(I)
VLR(2)=DDD(I)*DDD(I)+FFF(I)*FFF(I)
COS2A=COSA**2-SINA**2
SIN2A=SINA*COSA*2D0
COS2C=COSC**2-SINC**2
SIN2C=SINC*COSC*2D0
XMG=XMGLU
XMT=PMAS(6,1)
XMB=PMAS(5,1)
XMR=AMC(I)
XMG2=XMG*XMG
ALPHAW=PYALEM(XMG2)
ALPHAS=PYALPS(XMG2)
XMT2=XMT*XMT
XMB2=XMB*XMB
XMR2=XMR*XMR
XMQ2=XMG2+XMT2+XMB2+XMR2
XMQ4=XMG*XMT*XMB*XMR
XMQ3=XMG2*XMR2+XMT2*XMB2
XMGBTR=(XMG2+XMB2)*(XMT2+XMR2)
XMGTBR=(XMG2+XMT2)*(XMB2+XMR2)
XMST(1)=AMST(1)*AMST(1)
XMST(2)=AMST(1)*AMST(1)
XMST(3)=AMST(2)*AMST(2)
XMST(4)=AMST(2)*AMST(2)
XMSB(1)=AMSB(1)*AMSB(1)
XMSB(2)=AMSB(2)*AMSB(2)
XMSB(3)=AMSB(1)*AMSB(1)
XMSB(4)=AMSB(2)*AMSB(2)
A(1,1)=-COSA*SINC*CCC(I)*AAA(I)-SINA*COSC*EEE(I)*XX1(I)
A(1,2)=XMG*XMB*(COSA*COSC*CCC(I)*AAA(I)+SINA*SINC*EEE(I)*XX1(I))
A(1,3)=-XMG*XMR*(COSA*COSC*CCC(I)*XX1(I)+SINA*SINC*EEE(I)*AAA(I))
A(1,4)=XMB*XMR*(COSA*SINC*CCC(I)*XX1(I)+SINA*COSC*EEE(I)*AAA(I))
A(1,5)=XMG*XMT*(COSA*COSC*EEE(I)*XX1(I)+SINA*SINC*CCC(I)*AAA(I))
A(1,6)=-XMT*XMB*(COSA*SINC*EEE(I)*XX1(I)+SINA*COSC*CCC(I)*AAA(I))
A(1,7)=XMT*XMR*(COSA*SINC*EEE(I)*AAA(I)+SINA*COSC*CCC(I)*XX1(I))
A(1,8)=-XMQ4*(COSA*COSC*EEE(I)*AAA(I)+SINA*SINC*CCC(I)*XX1(I))
A(2,1)=-COSA*COSC*DDD(I)*AAA(I)-SINA*SINC*FFF(I)*XX1(I)
A(2,2)=-XMG*XMB*(COSA*SINC*DDD(I)*AAA(I)+SINA*COSC*FFF(I)*XX1(I))
A(2,3)=XMG*XMR*(COSA*SINC*DDD(I)*XX1(I)+SINA*COSC*FFF(I)*AAA(I))
A(2,4)=XMB*XMR*(COSA*COSC*DDD(I)*XX1(I)+SINA*SINC*FFF(I)*AAA(I))
A(2,5)=XMG*XMT*(COSA*SINC*FFF(I)*XX1(I)+SINA*COSC*DDD(I)*AAA(I))
A(2,6)=XMT*XMB*(COSA*COSC*FFF(I)*XX1(I)+SINA*SINC*DDD(I)*AAA(I))
A(2,7)=-XMT*XMR*(COSA*COSC*FFF(I)*AAA(I)+SINA*SINC*DDD(I)*XX1(I))
A(2,8)=-XMQ4*(COSA*SINC*FFF(I)*AAA(I)+SINA*COSC*DDD(I)*XX1(I))
A(3,1)=-COSA*COSC*EEE(I)*XX2(I)-SINA*SINC*CCC(I)*BBB(I)
A(3,2)=XMG*XMB*(COSA*SINC*EEE(I)*XX2(I)+SINA*COSC*CCC(I)*BBB(I))
A(3,3)=XMG*XMR*(COSA*SINC*EEE(I)*BBB(I)+SINA*COSC*CCC(I)*XX2(I))
A(3,4)=-XMB*XMR*(COSA*COSC*EEE(I)*BBB(I)+SINA*SINC*CCC(I)*XX2(I))
A(3,5)=-XMG*XMT*(COSA*SINC*CCC(I)*BBB(I)+SINA*COSC*EEE(I)*XX2(I))
A(3,6)=XMT*XMB*(COSA*COSC*CCC(I)*BBB(I)+SINA*SINC*EEE(I)*XX2(I))
A(3,7)=XMT*XMR*(COSA*COSC*CCC(I)*XX2(I)+SINA*SINC*EEE(I)*BBB(I))
A(3,8)=-XMQ4*(COSA*SINC*CCC(I)*XX2(I)+SINA*COSC*EEE(I)*BBB(I))
A(4,1)=-COSA*SINC*FFF(I)*XX2(I)-SINA*COSC*DDD(I)*BBB(I)
A(4,2)=-XMG*XMB*(COSA*COSC*FFF(I)*XX2(I)+SINA*SINC*DDD(I)*BBB(I))
A(4,3)=-XMG*XMR*(COSA*COSC*FFF(I)*BBB(I)+SINA*SINC*DDD(I)*XX2(I))
A(4,4)=-XMB*XMR*(COSA*SINC*FFF(I)*BBB(I)+SINA*COSC*DDD(I)*XX2(I))
A(4,5)=-XMG*XMT*(COSA*COSC*DDD(I)*BBB(I)+SINA*SINC*FFF(I)*XX2(I))
A(4,6)=-XMT*XMB*(COSA*SINC*DDD(I)*BBB(I)+SINA*COSC*FFF(I)*XX2(I))
A(4,7)=-XMT*XMR*(COSA*SINC*DDD(I)*XX2(I)+SINA*COSC*FFF(I)*BBB(I))
A(4,8)=-XMQ4*(COSA*COSC*DDD(I)*XX2(I)+SINA*SINC*FFF(I)*BBB(I))
SMAX=(XMG-ABS(XMR))**2
SMIN=(XMB+XMT)**2+0.1D0
DO 120 LIN=0,NN-1
SBAR=SMIN+DBLE(LIN)*(SMAX-SMIN)/DBLE(NN)
AM=(XMG2-XMR2)*(XMT2-XMB2)/2D0/SBAR
GRS=SBAR-XMQ2
W=PYLAMF(SBAR,XMB2,XMT2)*PYLAMF(SBAR,XMG2,XMR2)
W=SQRT(W)/2D0/SBAR
ANT1=LOG(ABS((GRS/2D0+AM+XMST(1)-W)/(GRS/2D0+AM+XMST(1)+W)))
ANT2=LOG(ABS((GRS/2D0+AM+XMST(3)-W)/(GRS/2D0+AM+XMST(3)+W)))
ANB1=LOG(ABS((GRS/2D0-AM+XMSB(1)-W)/(GRS/2D0-AM+XMSB(1)+W)))
ANB2=LOG(ABS((GRS/2D0-AM+XMSB(2)-W)/(GRS/2D0-AM+XMSB(2)+W)))
SUMME(LIN)=-ULR(1)*W+(ULR(1)*(XMQ2/2D0-XMST(1)-XMG*XMT*SIN2A)
& +2D0*XX1(I)*AAA(I)*XMR*XMB)*ANT1
& +(ULR(1)/2D0*(XMST(1)*(XMQ2-XMST(1))-XMGTBR
& -2D0*XMG*XMT*SIN2A*(XMST(1)-XMB2-XMR2))
& +2D0*XX1(I)*AAA(I)*XMR*XMB*(XMST(1)-XMG2-XMT2)
& +4D0*SIN2A*XX1(I)*AAA(I)*XMQ4)
& *(1D0/(GRS/2D0+AM+XMST(1)-W)-1D0/(GRS/2D0+AM+XMST(1)+W))
SUMME(LIN)=SUMME(LIN)-ULR(2)*W
& +(ULR(2)*(XMQ2/2D0-XMST(3)+XMG*XMT*SIN2A)
& -2D0*XX2(I)*BBB(I)*XMR*XMB)*ANT2
& +(ULR(2)/2D0*(XMST(3)*(XMQ2-XMST(3))-XMGTBR
& +2D0*XMG*XMT*SIN2A*(XMST(3)-XMB2-XMR2))
& -2D0*XX2(I)*BBB(I)*XMR*XMB*(XMST(3)-XMG2-XMT2)
& +4D0*SIN2A*XX2(I)*BBB(I)*XMQ4)
& *(1D0/(GRS/2D0+AM+XMST(3)-W)-1D0/(GRS/2D0+AM+XMST(3)+W))
SUMME(LIN)=SUMME(LIN)-VLR(1)*W
& +(VLR(1)*(XMQ2/2D0-XMSB(1)-XMG*XMB*SIN2C)
& +2D0*CCC(I)*EEE(I)*XMR*XMT)*ANB1
& +(VLR(1)/2D0*(XMSB(1)*(XMQ2-XMSB(1))-XMGBTR
& -2D0*XMG*XMB*SIN2C*(XMSB(1)-XMT2-XMR2))
& +2D0*CCC(I)*EEE(I)*XMR*XMT*(XMSB(1)-XMG2-XMB2)
& +4D0*SIN2C*CCC(I)*EEE(I)*XMQ4)
& *(1D0/(GRS/2D0-AM+XMSB(1)-W)-1D0/(GRS/2D0-AM+XMSB(1)+W))
SUMME(LIN)=SUMME(LIN)-VLR(2)*W
& +(VLR(2)*(XMQ2/2D0-XMSB(2)+XMG*XMB*SIN2C)
& -2D0*DDD(I)*FFF(I)*XMR*XMT)*ANB2
& +(VLR(2)/2D0*(XMSB(2)*(XMQ2-XMSB(2))-XMGBTR
& +2D0*XMG*XMB*SIN2C*(XMSB(2)-XMT2-XMR2))
& -2D0*DDD(I)*FFF(I)*XMR*XMT*(XMSB(2)-XMG2-XMB2)
& +4D0*SIN2C*DDD(I)*FFF(I)*XMQ4)
& *(1D0/(GRS/2D0-AM+XMSB(2)-W)-1D0/(GRS/2D0-AM+XMSB(2)+W))
SUMME(LIN)=SUMME(LIN)+2D0*XMG*XMT*COS2A/(XMST(3)-XMST(1))
& *((AAA(I)*BBB(I)-XX1(I)*XX2(I))
& *((XMST(3)-XMB2-XMR2)*ANT2-(XMST(1)-XMB2-XMR2)*ANT1)
& +2D0*(AAA(I)*XX2(I)-XX1(I)*BBB(I))*XMB*XMR*(ANT2-ANT1))
SUMME(LIN)=SUMME(LIN)+2D0*XMG*XMB*COS2C/(XMSB(2)-XMSB(1))
& *((EEE(I)*FFF(I)-CCC(I)*DDD(I))
& *((XMSB(2)-XMT2-XMR2)*ANB2-(XMSB(1)-XMT2-XMR2)*ANB1)
& +2D0*(EEE(I)*DDD(I)-CCC(I)*FFF(I))*XMT*XMR*(ANB2-ANB1))
DO 110 J=1,4
SUMME(LIN)=SUMME(LIN)-2D0*A(J,1)*W
& +((-A(J,1)*(XMSB(J)*(GRS+XMSB(J))+XMQ3)
& +A(J,2)*(XMSB(J)-XMT2-XMR2)+A(J,3)*(SBAR-XMB2-XMT2)
& +A(J,4)*(XMSB(J)+SBAR-XMB2-XMR2)
& -A(J,5)*(XMSB(J)+SBAR-XMG2-XMT2)+A(J,6)*(XMG2+XMR2-SBAR)
& -A(J,7)*(XMSB(J)-XMG2-XMB2)+2D0*A(J,8))
& *LOG(ABS((GRS/2D0+XMSB(J)-AM-W)/(GRS/2D0+XMSB(J)-AM+W)))
& -(A(J,1)*(XMST(J)*(GRS+XMST(J))+XMQ3)
& +A(J,2)*(XMST(J)+SBAR-XMG2-XMB2)-A(J,3)*(SBAR-XMB2-XMT2)
& +A(J,4)*(XMST(J)-XMG2-XMT2)-A(J,5)*(XMST(J)-XMR2-XMB2)
& -A(J,6)*(XMG2+XMR2-SBAR)
& -A(J,7)*(XMST(J)+SBAR-XMT2-XMR2)-2D0*A(J,8))
& *LOG(ABS((GRS/2D0+XMST(J)+AM-W)/(GRS/2D0+XMST(J)+AM+W))))
& /(GRS+XMSB(J)+XMST(J))
110 CONTINUE
120 CONTINUE
SUMME(NN)=0D0
GAM= ALPHAW * ALPHAS * PYSIMP(SUMME,SMIN,SMAX,NN)
&/ (16D0 * PARU(1) * PARU(102) * XMGLU**3)
RETURN
END
|
{-# OPTIONS --cubical --safe #-}
module Cubical.Data.Int.Base where
open import Cubical.Core.Everything
open import Cubical.Data.Nat
data Int : Type₀ where
  pos    : (n : ℕ) → Int
  negsuc : (n : ℕ) → Int

sucInt : Int → Int
sucInt (pos n)          = pos (suc n)
sucInt (negsuc zero)    = pos zero
sucInt (negsuc (suc n)) = negsuc n

predInt : Int → Int
predInt (pos zero)    = negsuc zero
predInt (pos (suc n)) = pos n
predInt (negsuc n)    = negsuc (suc n)
|
using WeeklyMealPlanner
using Test
@testset "WeeklyMealPlanner.jl" begin
# Write your own tests here.
end
|
import numpy as np
from lcc.utils.data_analysis import to_PAA, to_ekvi_PAA, compute_bins, fix_missing
def test_to_PAA():
    for _ in range(100):
        x = np.random.random_sample(np.random.randint(30, 700))
        bins = np.random.randint(5, 30)
        assert len(to_PAA(x, bins)[0]) == bins


def test_to_ekvi_PAA1():
    n = 100
    x = np.linspace(0, 1, n)
    y = np.random.random_sample(n)
    bins = np.random.randint(5, 30)
    x_ekv1, y_ekv1 = to_ekvi_PAA(x, y, bins)
    x_ekv2, y_ekv2 = to_ekvi_PAA(x, y, len(x))
    x_ekv3, y_ekv3 = to_ekvi_PAA(x, y, 3 * len(x))
    assert len(x_ekv1) == bins
    assert len(x_ekv2) == len(x)
    assert len(x_ekv3) == len(x)
    # `np.nan in arr` is unreliable (NaN != NaN), so test with np.isnan instead
    assert not np.isnan(y_ekv1).any()
    assert not np.isnan(y_ekv2).any()
    assert not np.isnan(y_ekv3).any()
    assert (y_ekv3 == y).all()
    assert (y_ekv2 == y).all()
    for _ in range(100):
        n = np.random.randint(30, 700)
        x = np.random.random_sample(n)
        x1 = np.linspace(0, 1, n)
        y = np.random.random_sample(n)
        bins = np.random.randint(5, 30)
        x_ekv1, y_ekv1 = to_ekvi_PAA(x, y, bins)
        x_ekv2, y_ekv2 = to_ekvi_PAA(x, y, len(x))
        x_ekv3, y_ekv3 = to_ekvi_PAA(x, y, 3 * len(x))
        x_ekv4, y_ekv4 = to_ekvi_PAA(x1, y, len(x1))
        assert len(x_ekv1) == bins
        assert len(x_ekv2) == len(x)
        assert len(x_ekv3) == len(x)
        assert not np.isnan(y_ekv1).any()
        assert not np.isnan(y_ekv2).any()
        assert not np.isnan(y_ekv3).any()
        assert (y_ekv4 == y).all()
        thr = 0.1
        assert abs(y_ekv1.mean() - y.mean()) / (y_ekv1.mean() + y.mean()) < thr
        assert abs(y_ekv2.mean() - y.mean()) / (y_ekv2.mean() + y.mean()) < thr
        assert abs(y_ekv3.mean() - y.mean()) / (y_ekv3.mean() + y.mean()) < thr


def test_compute_bins():
    x1 = [1, 2, 3, 8, 9, 10]
    x2 = [1, 2, 3, 4, 5, 6]
    assert compute_bins(x2, days_per_bin=1.9, set_min=2) == 3
    assert compute_bins(x1, days_per_bin=3, set_min=2) == 3


def test_fix_missing():
    x = np.linspace(0, 10, 20)
    y = x.copy()
    x[:5] = np.linspace(-15, -10, 5)
    xx, yy = to_ekvi_PAA(x, y)
    yy[0] = np.nan
    yy[-1] = np.nan
    res1 = fix_missing(xx, yy, replace_at_borders=False)
    assert len(res1[0]) == len(res1[1])
    assert len(res1[0]) == len(xx) - 2
    assert not np.isnan(res1[0]).any()
    assert not np.isnan(res1[1]).any()
    res2 = fix_missing(xx, yy, replace_at_borders=True)
    assert len(res2[0]) == len(res2[1])
    assert len(res2[0]) == len(xx)
    assert not np.isnan(res2[0]).any()
    assert not np.isnan(res2[1]).any()
    assert res2[1][0] == res2[1][1]
    assert res2[1][-2] == res2[1][-1]
    # assert res1[0] == res2[0]
|
module HNSW
using LinearAlgebra
using Reexport
@reexport using Distances
export HierarchicalNSW
export add_to_graph!, set_ef!
export knn_search
include("neighborset.jl")
include("visited_lists.jl")
include("layered_graphs.jl")
include("interface.jl")
include("algorithm.jl")
end # module
|
%% Template for a preprint Letter or Article for submission
%% to the journal Nature.
%% Written by Peter Czoschke, 26 February 2004
%%
\documentclass{nature}
%% make sure you have the nature.cls and naturemag.bst files where
%% LaTeX can find them
\usepackage{graphicx} % Including figure files
\usepackage{amsmath} % Advanced maths commands
\usepackage{amssymb} % Extra maths symbols
%\usepackage{natbib}
\usepackage{hyperref}
\usepackage{url}
\usepackage{microtype}
\usepackage{rotating}
\usepackage{booktabs}
\usepackage{threeparttable}
\usepackage{tabularx}
\title{How and Why to run a Hack Week}
%% Notice placement of commas and superscripts and use of &
%% in the author list
\author{Daniela Huppenkothen$^1$, Anthony Arendt$^2$, David W. Hogg$^1$, Karthik Ram$^3$, Jake VanderPlas$^2$, and Ariel Rokem$^2$}
\makeatletter
\let\saved@includegraphics\includegraphics
\AtBeginDocument{\let\includegraphics\saved@includegraphics}
\renewenvironment*{figure}{\@float{figure}}{\end@float}
\makeatother
\begin{document}
\maketitle
\begin{affiliations}
\item Center for Data Science, New York University
\item The University of Washington eScience Institute
\item The Berkeley Institute for Data Science, UC Berkeley
\end{affiliations}
\input{sections/abstract}
\input{sections/intro}
\input{sections/what}
\input{sections/why}
\input{sections/participants}
\input{sections/themes}
\input{sections/design}
\input{sections/results}
\input{sections/conclusions}
%Spelling must be British English (Oxford English Dictionary)
%In addition, a cover letter needs to be written with the
%following:
%\begin{enumerate}
% \item A 100 word or less summary indicating on scientific grounds
%why the paper should be considered for a wide-ranging journal like
%\textsl{Nature} instead of a more narrowly focussed journal.
% \item A 100 word or less summary aimed at a non-scientific audience,
%written at the level of a national newspaper. It may be used for
%\textsl{Nature}'s press release or other general publicity.
% \item The cover letter should state clearly what is included as the
%submission, including number of figures, supporting manuscripts
%and any Supplementary Information (specifying number of items and
%format).
% \item The cover letter should also state the number of
%words of text in the paper; the number of figures and parts of
%figures (for example, 4 figures, comprising 16 separate panels in
%total); a rough estimate of the desired final size of figures in
%terms of number of pages; and a full current postal address,
%telephone and fax numbers, and current e-mail address.
%\end{enumerate}
%See \textsl{Nature}'s website
%(\texttt{http://www.nature.com/nature/submit/gta/index.html}) for
%complete submission guidelines.
%\begin{methods}
%Put methods in here. If you are going to subsection it, use
%\verb|\subsection| commands. Methods section should be less than
%800 words and if it is less than 200 words, it can be incorporated
%into the main text.
%\end{methods}
%% Put the bibliography here, most people will use BiBTeX in
%% which case the environment below should be replaced with
%% the \bibliography{} command.
% \begin{thebibliography}{1}
% \bibitem{dummy} Articles are restricted to 50 references, Letters
% to 30.
% \bibitem{dummyb} No compound references -- only one source per
% reference.
% \end{thebibliography}
\section{References}
\bibliographystyle{naturemag}
\bibliography{paper}
%% Here is the endmatter stuff: Supplementary Info, etc.
%% Use \item's to separate, default label is "Acknowledgements"
\begin{addendum}
\item{The authors would like to thank Laura Nor\'{e}n (NYU) for help on ethics and IRB, Stuart Geiger for helping to formulate the questionnaires that served as the basis for the results presented here, Brittany Fiore-Gartland and Jason Yeatman for comments on the manuscript, and Tal Yarkoni for advice regarding automated selection procedures. This work was partially supported by the Moore-Sloan Data Science Environments at UC Berkeley, New York University, and the University of Washington. Neuro hack week is supported through a grant from the National Institute for Mental Health (1R25MH112480). Daniela Huppenkothen is partially supported by the James Arthur Postdoctoral Fellowship at NYU.}
\item[Competing Interests]{The authors declare that they have no competing financial interests.}
\item[Correspondence]{Correspondence and requests for materials should be addressed to D.Huppenkothen.~(email: [email protected]).}
\end{addendum}
%%
%% TABLES
%%
%% If there are any tables, put them here.
%%
%\begin{table}
%\centering
%\caption{This is a table with scientific results.}
%\medskip
%\begin{tabular}{ccccc}
%\hline
%1 & 2 & 3 & 4 & 5\\
%\hline
%aaa & bbb & ccc & ddd & eee\\
%aaaa & bbbb & cccc & dddd & eeee\\
%aaaaa & bbbbb & ccccc & ddddd & eeeee\\
%aaaaaa & bbbbbb & cccccc & dddddd & eeeeee\\
%1.000 & 2.000 & 3.000 & 4.000 & 5.000\\
%\hline
%\end{tabular}
%\end{table}
\end{document}
|
(*
Author: Norbert Schirmer
Maintainer: Norbert Schirmer, norbert.schirmer at web de
License: LGPL
*)
(* Title: StateSpace.thy
Author: Norbert Schirmer, TU Muenchen
Copyright (C) 2004-2008 Norbert Schirmer
Some rights reserved, TU Muenchen
This library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation; either version 2.1 of the
License, or (at your option) any later version.
This library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
USA
*)
section {* State Space Template *}
theory StateSpace imports Main
begin
record 'g state = "globals"::'g
definition
upd_globals:: "('g \<Rightarrow> 'g) \<Rightarrow> ('g,'z) state_scheme \<Rightarrow> ('g,'z) state_scheme"
where
"upd_globals upd s = s\<lparr>globals := upd (globals s)\<rparr>"
record ('g, 'n, 'val) stateSP = "'g state" +
locals :: "'n \<Rightarrow> 'val"
lemma upd_globals_conv: "upd_globals f = (\<lambda>s. s\<lparr>globals := f (globals s)\<rparr>)"
by (rule ext) (simp add: upd_globals_def)
end |
/* interpolation/interp_poly.c
*
* Copyright (C) 2001 DAN, HO-JIN
* Copyright (C) 2013 Patrick Alken
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/* Modified for standalone use in polynomial directory, B.Gough 2001 */
#include <config.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_poly.h>
int
gsl_poly_dd_init (double dd[], const double xa[], const double ya[],
size_t size)
{
size_t i, j;
/* Newton's divided differences */
dd[0] = ya[0];
for (j = size - 1; j >= 1; j--)
{
dd[j] = (ya[j] - ya[j - 1]) / (xa[j] - xa[j - 1]);
}
for (i = 2; i < size; i++)
{
for (j = size - 1; j >= i; j--)
{
dd[j] = (dd[j] - dd[j - 1]) / (xa[j] - xa[j - i]);
}
}
return GSL_SUCCESS;
}
int
gsl_poly_dd_taylor (double c[], double xp,
const double dd[], const double xa[], size_t size,
double w[])
{
size_t i, j;
for (i = 0; i < size; i++)
{
c[i] = 0.0;
w[i] = 0.0;
}
w[size - 1] = 1.0;
c[0] = dd[0];
for (i = size - 1; i-- > 0;)
{
w[i] = -w[i + 1] * (xa[size - 2 - i] - xp);
for (j = i + 1; j < size - 1; j++)
{
w[j] = w[j] - w[j + 1] * (xa[size - 2 - i] - xp);
}
for (j = i; j < size; j++)
{
c[j - i] += w[j] * dd[size - i - 1];
}
}
return GSL_SUCCESS;
}
/*
gsl_poly_dd_hermite_init()
Compute divided difference representation of data
for Hermite polynomial interpolation
Inputs: dd - (output) array of size 2*size containing
divided differences, dd[k] = f[z_0,z_1,...,z_k]
za - (output) array of size 2*size containing
z values
xa - x data
ya - y data
dya - dy/dx data
size - size of xa,ya,dya arrays
Return: success
*/
int
gsl_poly_dd_hermite_init (double dd[], double za[], const double xa[], const double ya[],
const double dya[], const size_t size)
{
const size_t N = 2 * size;
size_t i, j;
/* Hermite divided differences */
dd[0] = ya[0];
/* compute: dd[j] = f[z_{j-1},z_j] for j \in [1,N-1] */
for (j = 0; j < size; ++j)
{
za[2*j] = xa[j];
za[2*j + 1] = xa[j];
if (j != 0)
{
dd[2*j] = (ya[j] - ya[j - 1]) / (xa[j] - xa[j - 1]);
dd[2*j - 1] = dya[j - 1];
}
}
dd[N - 1] = dya[size - 1];
for (i = 2; i < N; i++)
{
for (j = N - 1; j >= i; j--)
{
dd[j] = (dd[j] - dd[j - 1]) / (za[j] - za[j - i]);
}
}
return GSL_SUCCESS;
} /* gsl_poly_dd_hermite_init() */
|
C Copyright(C) 1999-2020 National Technology & Engineering Solutions
C of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with
C NTESS, the U.S. Government retains certain rights in this software.
C
C See packages/seacas/LICENSE for details
C=======================================================================
INTEGER FUNCTION INTADD (LENLST, INTLST)
C=======================================================================
C --*** INTADD *** (ETCLIB) Add all integers in list
C -- Written by Amy Gilkey - revised 11/10/87
C --
C --INTADD returns the sum of all the integers in a list.
C --
C --Parameters:
C -- LENLST - IN - the number of integers in the list
C -- INTLST - IN - the list of integers to be added
INTEGER LENLST
INTEGER INTLST(*)
INTADD = 0
DO 100 I = 1, LENLST
INTADD = INTADD + INTLST(I)
100 CONTINUE
RETURN
END
|
If $S$ is open, then $f$ is continuous on $S$ if and only if $f$ is continuous on each component of $S$. |
# Marginal likelihood for Bayesian linear regression
Author: [Zeel B Patel](https://patel-zeel.github.io/), [Nipun Batra](https://nipunbatra.github.io/)
Bayesian linear regression is defined as below,
\begin{align}
\mathbf{y} &= X\boldsymbol{\theta} + \epsilon\\
\epsilon &\sim \mathcal{N}(0, \sigma_n^2)\\
\theta &\sim \mathcal{N}(\mathbf{m}_0, S_0)
\end{align}
For a Gaussian random variable $\mathbf{z} \sim \mathcal{N}(\boldsymbol{\mu}, \Sigma)$, $A\mathbf{z} + \mathbf{b}$ is also a Gaussian random variable.
\begin{align}
\mathbf{y} = X\mathbf{\theta} + \boldsymbol{\epsilon} &\sim \mathcal{N}(\boldsymbol{\mu}', \Sigma')\\
\boldsymbol{\mu}' &= \mathbb{E}_{\theta, \epsilon}(X\mathbf{\theta}+\boldsymbol{\epsilon})\\
&= X\mathbb{E}(\mathbf{\theta}) + \mathbb{E}(\mathbf{\epsilon})\\
&= X\mathbf{m}_0\\
\\
\Sigma' &= V(X\mathbf{\theta}+\boldsymbol{\epsilon})\\
&= XV(\mathbf{\theta})X^T+V(\boldsymbol{\epsilon})\\
&= XS_0X^T + \sigma_n^2I
\end{align}
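Since everything below leans on this mean and covariance, here is a quick Monte Carlo sanity check (a minimal sketch; the sizes, seed, and variable names are arbitrary choices for illustration):

```python
import numpy as np

# Sample theta ~ N(m0, S0) and eps ~ N(0, sigma_n^2 I), form y = X theta + eps,
# then compare the empirical moments of y with X m0 and X S0 X^T + sigma_n^2 I.
rng = np.random.default_rng(0)
N, D, sigma_n, n_samples = 4, 2, 0.3, 200_000
X = rng.standard_normal((N, D))
m0 = rng.standard_normal(D)
L = rng.standard_normal((D, D))
S0 = L @ L.T + np.eye(D)  # any positive definite prior covariance
thetas = rng.multivariate_normal(m0, S0, size=n_samples)  # (n_samples, D)
eps = sigma_n * rng.standard_normal((n_samples, N))
ys = thetas @ X.T + eps  # (n_samples, N)
print(np.abs(ys.mean(axis=0) - X @ m0).max())  # small, up to Monte Carlo error
print(np.abs(np.cov(ys.T) - (X @ S0 @ X.T + sigma_n**2 * np.eye(N))).max())
```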
Marginal likelihood is $p(\mathbf{y})$ so,
\begin{align}
p(\mathbf{y}) &= \frac{1}{(2\pi)^{\frac{N}{2}}|\Sigma'|^{\frac{1}{2}}}\exp\left[-\frac{1}{2}(\mathbf{y}-\boldsymbol{\mu}')^T\Sigma'^{-1}(\mathbf{y}-\boldsymbol{\mu}')\right]\\
&= \frac{1}{(2\pi)^{\frac{N}{2}}|XS_0X^T + \sigma_n^2I|^{\frac{1}{2}}}\exp\left[-\frac{1}{2}(\mathbf{y}-X\mathbf{m}_0)^T(XS_0X^T + \sigma_n^2I)^{-1}(\mathbf{y}-X\mathbf{m}_0)\right]
\end{align}
## Multiplication of two Gaussians (work in progress)
We need Gaussian pdfs over the same variable to evaluate their product. Let us convert $\mathbf{y}$ into $\boldsymbol{\theta}$.
\begin{align}
\mathbf{y} &= X\theta + \boldsymbol{\epsilon}\\
\theta &= (X^TX)^{-1}X^T(\mathbf{y} - \boldsymbol{\epsilon})\\
\text{Deriving mean and covariance of }\theta\\
E(\theta) &= (X^TX)^{-1}X^T\mathbf{y}\\
V(\theta) &= \sigma_n^2\left[(X^TX)^{-1}X^T\right]\left[(X^TX)^{-1}X^T\right]^T\\
&= \sigma_n^2(X^TX)^{-1}X^TX(X^TX)^{-1}\\
&= \sigma_n^2(X^TX)^{-1}
\end{align}
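These two moments are easy to confirm numerically (again a hedged sketch; `X`, `y`, and the seed are arbitrary):

```python
import numpy as np

# Empirical check of E(theta) = (X^T X)^{-1} X^T y and V(theta) = sigma_n^2 (X^T X)^{-1}
rng = np.random.default_rng(1)
N, D, sigma_n, n_samples = 6, 2, 0.5, 200_000
X = rng.standard_normal((N, D))
y = rng.standard_normal(N)
P = np.linalg.inv(X.T @ X) @ X.T  # maps (y - eps) to theta
eps = sigma_n * rng.standard_normal((n_samples, N))
thetas = (y - eps) @ P.T  # one theta per sampled eps
print(np.abs(thetas.mean(axis=0) - P @ y).max())  # ~ 0
print(np.abs(np.cov(thetas.T) - sigma_n**2 * np.linalg.inv(X.T @ X)).max())  # ~ 0
```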
Now, we have both $p(\mathbf{y}|\boldsymbol{\theta})$ and $p(\boldsymbol{\theta})$ expressed in terms of $\boldsymbol{\theta}$. We can apply the rules from Section 6.5.2 of the MML book, writing our results in its terminology.
\begin{align}
\mathcal{N}(x|a, A) &== \mathcal{N}(\theta|(X^TX)^{-1}X^T\mathbf{y}, \sigma_n^2(X^TX)^{-1})\\
\mathcal{N}(x|b, B) &== \mathcal{N}(\theta|\mathbf{m}_0, S_0)
\end{align}
we know that,
$$
c\mathcal{N}(\theta|\mathbf{c}, C) = \mathcal{N}(x|a, A)\mathcal{N}(x|b, B)\\
\mathcal{N}(\theta|\mathbf{c}, C) = \frac{\mathcal{N}(x|a, A)\mathcal{N}(x|b, B)}{c}
$$
In the Bayesian setting,
\begin{align}
Prior &\sim \mathcal{N}(x|b, B) == \mathcal{N}(\theta|\mathbf{m}_0, S_0)\\
Likelihood &\sim \mathcal{N}(x|a, A) == \mathcal{N}(\theta|(X^TX)^{-1}X^T\mathbf{y}, \sigma_n^2(X^TX)^{-1})\\
Posterior &\sim \mathcal{N}(\theta|\mathbf{c}, C) == \mathcal{N}(\theta|\mathbf{m}_n, S_n)\\
\text{last but not the least}\\
Marginal\;likelihood &\sim c == \mathcal{N}(\mathbf{y}|\boldsymbol{\mu}, \Sigma)
\end{align}
Let us evaluate the posterior,
\begin{align}
Posterior &\sim \mathcal{N}(\theta|\mathbf{c}, C)\\
S_n = C &= (A^{-1} + B^{-1})^{-1}\\
&= \left(\frac{X^TX}{\sigma_n^2} + S_0^{-1}\right)^{-1}\\
\mathbf{m_n} = \mathbf{c} &= C(A^{-1}a + B^{-1}b)\\
&= S_n\left(\frac{X^TX}{\sigma_n^2}(X^TX)^{-1}X^T\mathbf{y} + S_0^{-1}\mathbf{m}_0\right)\\
&= S_n\left(\frac{X^T\mathbf{y}}{\sigma_n^2} + S_0^{-1}\mathbf{m}_0\right)
\end{align}
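In code, $S_n$ and $\mathbf{m}_n$ are two lines; the sketch below (assumed shapes and a made-up dataset, independent of the cells further down) also checks that $\mathbf{m}_n$ solves the normal equations $S_n^{-1}\mathbf{m}_n = \frac{X^T\mathbf{y}}{\sigma_n^2} + S_0^{-1}\mathbf{m}_0$:

```python
import numpy as np

# Posterior covariance and mean from the formulas above
rng = np.random.default_rng(2)
N, D, sigma_n = 8, 3, 0.4
X = rng.standard_normal((N, D))
y = rng.standard_normal((N, 1))
m0 = rng.standard_normal((D, 1))
L = rng.standard_normal((D, D))
S0 = L @ L.T + np.eye(D)  # positive definite prior covariance
S0_inv = np.linalg.inv(S0)
Sn = np.linalg.inv(X.T @ X / sigma_n**2 + S0_inv)
mn = Sn @ (X.T @ y / sigma_n**2 + S0_inv @ m0)
# m_n must satisfy Sn^{-1} m_n = X^T y / sigma_n^2 + S0^{-1} m0
print(np.allclose(np.linalg.inv(Sn) @ mn, X.T @ y / sigma_n**2 + S0_inv @ m0))  # True
```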
Now, we evaluate the marginal likelihood,
\begin{align}
c &= \mathcal{N}(\mathbf{y}|\boldsymbol{\mu}, \Sigma)\\
&= (2\pi)^{-\frac{D}{2}}|A+B|^{-\frac{1}{2}}\exp\left(-\frac{1}{2}(a-b)^T(A+B)^{-1}(a-b)\right)\\
&= (2\pi)^{-\frac{D}{2}}|\sigma_n^2(X^TX)^{-1}+S_0|^{-\frac{1}{2}}\exp\left(-\frac{1}{2}((X^TX)^{-1}X^T\mathbf{y}-\mathbf{m}_0)^T(\sigma_n^2(X^TX)^{-1}+S_0)^{-1}((X^TX)^{-1}X^T\mathbf{y}-\mathbf{m}_0)\right)
\end{align}
Another well-known formulation of marginal likelihood is the following,
$$
p(\mathbf{y}) \sim \mathcal{N}(X\mathbf{m}_0, XS_0X^T + \sigma_n^2I)
$$
Let us verify, empirically, that both are the same:
```python
import numpy as np
import scipy.stats
np.random.seed(0)
def ML1(X, y, m0, S0, sigma_n):
    N = len(y)
    return scipy.stats.multivariate_normal.pdf(y.ravel(), (X@m0).squeeze(), X@[email protected] + np.eye(N)*sigma_n**2)

def ML2(X, y, m0, S0, sigma_n):
    D = len(m0)
    a = np.linalg.inv(X.T@X)@X.T@y
    b = m0
    A = np.linalg.inv(X.T@X)*sigma_n**2
    B = S0
    return scipy.stats.multivariate_normal.pdf(a.ravel(), b.ravel(), A+B)

def ML3(X, y, m0, S0, sigma_n):
    N = len(y)
    Sn = np.linalg.inv((X.T@X)/(sigma_n**2) + np.linalg.inv(S0))
    Mn = Sn@((X.T@y)/(sigma_n**2) + np.linalg.inv(S0)@m0)
    # Log marginal likelihood, including the prior-mean term
    # -0.5 * m0^T S0^{-1} m0, which vanishes only when m0 = 0.
    LML = -0.5*N*np.log(2*np.pi) - 0.5*N*np.log(sigma_n**2) \
          - 0.5*np.log(np.linalg.det(S0)/np.linalg.det(Sn)) \
          - 0.5*(y.T@y)/sigma_n**2 - 0.5*([email protected](S0)@m0) \
          + 0.5*([email protected](Sn)@Mn)
    return np.exp(LML)
X = np.random.rand(10,2)
m0 = np.random.rand(2,1)
s0 = np.random.rand(2,2)
S0 = [email protected]
sigma_n = 10
y = np.random.rand(10,1)
ML1(X, y, m0, S0, sigma_n), ML2(X, y, m0, S0, sigma_n), ML3(X, y, m0, S0, sigma_n)
```
(9.577110083272389e-15, 0.0034284478634232078, array([[2.08309892e-14]]))
### Products of Gaussian PDFs (Work under progress)
The product of two Gaussian pdfs over the same variable $\mathbf{x}$, $\mathcal{N}(\boldsymbol{\mu}_0, \Sigma_0)$ and $\mathcal{N}(\boldsymbol{\mu}_1, \Sigma_1)$, is an unnormalized Gaussian.
\begin{align}
f(\mathbf{x}) &= \frac{1}{(2\pi)^{\frac{N}{2}}|\Sigma_0|^{\frac{1}{2}}}\exp\left[-\frac{1}{2}(\mathbf{x}-\boldsymbol{\mu}_0)^T\Sigma_0^{-1}(\mathbf{x}-\boldsymbol{\mu}_0)\right]\\
g(\mathbf{x}) &= \frac{1}{(2\pi)^{\frac{N}{2}}|\Sigma_1|^{\frac{1}{2}}}\exp\left[-\frac{1}{2}(\mathbf{x}-\boldsymbol{\mu}_1)^T\Sigma_1^{-1}(\mathbf{x}-\boldsymbol{\mu}_1)\right]\\
\int h(\mathbf{x})\,d\mathbf{x} = \frac{1}{c}\int f(\mathbf{x})g(\mathbf{x})\,d\mathbf{x} &= 1
\end{align}
We need to figure out the value of $c$ to solve the integration.
\begin{align}
h(x) &= \frac{1}{(2\pi)^{\frac{N}{2}}|\Sigma|^{\frac{1}{2}}}\exp\left[-\frac{1}{2}(\mathbf{x}-\boldsymbol{\mu})^T\Sigma^{-1}(\mathbf{x}-\boldsymbol{\mu})\right] = \frac{1}{(2\pi)^{\frac{N}{2}}|\Sigma|^{\frac{1}{2}}}\exp\left[-\frac{1}{2}\left(\mathbf{x}^T\Sigma^{-1}\mathbf{x} - 2\boldsymbol{\mu}^T\Sigma^{-1}\mathbf{x} + \boldsymbol{\mu}^T\Sigma^{-1}\boldsymbol{\mu}\right)\right]\\
f(x)g(x) &= \frac{1}{(2\pi)^{\frac{N}{2}}|\Sigma_0|^{\frac{1}{2}}(2\pi)^{\frac{N}{2}}|\Sigma_1|^{\frac{1}{2}}}\exp\left[
-\frac{1}{2}(\mathbf{x}-\boldsymbol{\mu}_0)^T\Sigma_0^{-1}(\mathbf{x}-\boldsymbol{\mu}_0)
-\frac{1}{2}(\mathbf{x}-\boldsymbol{\mu}_1)^T\Sigma_1^{-1}(\mathbf{x}-\boldsymbol{\mu}_1)\right]\\
&= \frac{1}{(2\pi)^{\frac{N}{2}}|\Sigma_0|^{\frac{1}{2}}(2\pi)^{\frac{N}{2}}|\Sigma_1|^{\frac{1}{2}}}\exp\left[
-\frac{1}{2}\left(\mathbf{x}^T(\Sigma_0^{-1}+\Sigma_1^{-1})\mathbf{x} - 2\left(\boldsymbol{\mu}_0^T\Sigma_0^{-1} + \boldsymbol{\mu}_1^T\Sigma_1^{-1}\right)\mathbf{x} + \boldsymbol{\mu}_0^T\Sigma_0^{-1}\boldsymbol{\mu}_0 + \boldsymbol{\mu}_1^T\Sigma_1^{-1}\boldsymbol{\mu}_1\right)
\right]\\
\end{align}
We can compare the exponent terms directly, matching the quadratic and linear pieces in $\mathbf{x}$. Doing so gives the following results:
\begin{align}
\Sigma^{-1} &= \Sigma_0^{-1} + \Sigma_1^{-1}\\
\Sigma &= \left(\Sigma_0^{-1} + \Sigma_1^{-1}\right)^{-1}\\
\\
\boldsymbol{\mu}^T\Sigma^{-1}\mathbf{x} &= \boldsymbol{\mu_0}^T\Sigma_0^{-1}\mathbf{x} + \boldsymbol{\mu_1}^T\Sigma_1^{-1}\mathbf{x}\\
\left(\boldsymbol{\mu}^T\Sigma^{-1}\right)\mathbf{x} &= \left(\boldsymbol{\mu_0}^T\Sigma_0^{-1} + \boldsymbol{\mu_1}^T\Sigma_1^{-1}\right)\mathbf{x}\\
\boldsymbol{\mu}^T\Sigma^{-1} &= \boldsymbol{\mu_0}^T\Sigma_0^{-1} + \boldsymbol{\mu_1}^T\Sigma_1^{-1}\\
\text{Applying transpose on both sides,}\\
\Sigma^{-1}\boldsymbol{\mu} &= \Sigma_0^{-1}\boldsymbol{\mu}_0 + \Sigma_1^{-1}\boldsymbol{\mu}_1\\
\boldsymbol{\mu} &= \Sigma\left(\Sigma_0^{-1}\boldsymbol{\mu}_0 + \Sigma_1^{-1}\boldsymbol{\mu}_1\right)
\end{align}
Now, solving for the normalizing constant $c$ by matching the normalization prefactors (the residual constant terms in the exponent also fold into $c$; they produce the exponential factor in the general formula below),
\begin{align}
\frac{c}{(2\pi)^{\frac{N}{2}}|\Sigma|^{\frac{1}{2}}} &= \frac{1}{(2\pi)^{\frac{N}{2}}|\Sigma_0|^{\frac{1}{2}}(2\pi)^{\frac{N}{2}}|\Sigma_1|^{\frac{1}{2}}}\\
c &= \frac{|\Sigma|^{\frac{1}{2}}}{(2\pi)^{\frac{N}{2}}|\Sigma_0|^{\frac{1}{2}}|\Sigma_1|^{\frac{1}{2}}}
\end{align}
If we have two Gaussians $\mathcal{N}(\mathbf{a}, A)$ and $\mathcal{N}(\mathbf{b}, B)$ for the same random variable $\mathbf{x}$, the marginal likelihood (the normalizing constant of their product) can be given as,
$$
c = (2\pi)^{-N/2}|A+B|^{-1/2}\exp\left[-\frac{1}{2}(\mathbf{a} - \mathbf{b})^T(A+B)^{-1}(\mathbf{a} - \mathbf{b})\right]
$$
Here, we have two Gaussians $\mathcal{N}(\mathbf{m}_0, S_0)$ and $\mathcal{N}((X^TX)^{-1}X^T\mathbf{y}, \sigma_n^2(X^TX)^{-1})$ for the same random variable $\boldsymbol{\theta}$, so the marginal likelihood can be given as,
$$
c = (2\pi)^{-D/2}\left|\sigma_n^2(X^TX)^{-1}+S_0\right|^{-1/2}\exp\left[-\frac{1}{2}\left((X^TX)^{-1}X^T\mathbf{y}-\mathbf{m}_0\right)^T\left(\sigma_n^2(X^TX)^{-1}+S_0\right)^{-1}\left((X^TX)^{-1}X^T\mathbf{y}-\mathbf{m}_0\right)\right]
$$
```python
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats
```
```python
np.random.seed(0)
N = 10
D = 5
sigma_n = 0.1 # noise
sigma = 1 # variance in parameters
m0 = np.random.rand(D)
S0 = np.eye(D)*sigma**2
x = np.random.rand(N,D)
theta = np.random.rand(D,1)
y = x@theta + np.random.multivariate_normal(np.zeros(N), np.eye(N)*sigma_n**2, size=1).T
plt.scatter(x[:,0], x[:,1], c=y)
x.shape, theta.shape, y.shape
```
```python
a = np.linalg.inv(x.T@x)@x.T@y
b = m0.reshape(-1,1)
A = np.linalg.inv(x.T@x)/(sigma_n**2)
B = S0
A_inv = np.linalg.inv(A)
B_inv = np.linalg.inv(B)
c_cov = np.linalg.inv(A_inv + B_inv)
c_mean = c_cov@(A_inv@a + B_inv@b)
a.shape, A.shape, b.shape, B.shape, c_mean.shape, c_cov.shape
```
((5, 1), (5, 5), (5, 1), (5, 5), (5, 1), (5, 5))
```python
c_denom = 1/(((2*np.pi)**(D/2))*(np.linalg.det(c_cov)**0.5))
b_denom = 1/(((2*np.pi)**(D/2))*(np.linalg.det(B)**0.5))
a_denom = 1/(((2*np.pi)**(D/2))*(np.linalg.det(A)**0.5))
a_denom, b_denom, c_denom, 1/c_denom
```
(1.5040129154541655e-07,
0.010105326013811642,
0.0110028525380197,
90.88552232655665)
```python
normalizer_c = (1/(((2*np.pi)**(D/2))*(np.linalg.det(A+B)**0.5)))*np.exp(-0.5*((a-b)[email protected](A+B)@(a-b)))
norm_c_a_given_b = scipy.stats.multivariate_normal.pdf(a.squeeze(), b.squeeze(), A+B)
norm_c_b_given_a = scipy.stats.multivariate_normal.pdf(b.squeeze(), a.squeeze(), A+B)
normalizer_c, norm_c_a_given_b, norm_c_b_given_a, 1/normalizer_c
```
(array([[1.35765194e-07]]),
1.357651942204283e-07,
1.357651942204283e-07,
array([[7365658.08152844]]))
```python
a_pdf = scipy.stats.multivariate_normal.pdf(theta.squeeze(), a.squeeze(), A)
b_pdf = scipy.stats.multivariate_normal.pdf(theta.squeeze(), b.squeeze(), B)
c_pdf = scipy.stats.multivariate_normal.pdf(theta.squeeze(), c_mean.squeeze(), c_cov)
a_pdf, b_pdf, c_pdf, np.allclose(a_pdf*b_pdf, normalizer_c*c_pdf)
```
(1.5039199356435742e-07, 0.008635160418150373, 0.00956547808509135, True)
```python
K = x@[email protected] + np.eye(N)*sigma_n**2
marginal_Likelihood_closed_form = scipy.stats.multivariate_normal.pdf(y.squeeze(), (x@m0).squeeze(), K)
marginal_Likelihood_closed_form, 1/normalizer_c
```
(1.8288404157840938, array([[7365658.08152844]]))
```python
from sklearn.model_selection import KFold
from sklearn.linear_model import LinearRegression
splitter = KFold(n_splits=5)  # n_splits cannot exceed the number of samples (N = 10 here)
for train_ind, test_ind in splitter.split(x):  # KFold is iterated via its .split() method
    train_x, train_y = x[train_ind], y[train_ind]
    test_x, test_y = x[test_ind], y[test_ind]
    model = LinearRegression()
    model.fit(train_x, train_y)
```
## What is the relationship between `marginal_Likelihood_closed_form` and the calculations done in the multiplication of two Gaussians?
|
/* multiset/gsl_multiset.h
* based on combination/gsl_combination.h by Szymon Jaroszewicz
* based on permutation/gsl_permutation.h by Brian Gough
*
* Copyright (C) 2009 Rhys Ulerich
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __GSL_MULTISET_H__
#define __GSL_MULTISET_H__
#if !defined( GSL_FUN )
# if !defined( GSL_DLL )
# define GSL_FUN extern
# elif defined( BUILD_GSL_DLL )
# define GSL_FUN extern __declspec(dllexport)
# else
# define GSL_FUN extern __declspec(dllimport)
# endif
#endif
#include <stdlib.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_types.h>
#include <gsl/gsl_inline.h>
#include <gsl/gsl_check_range.h>
#undef __BEGIN_DECLS
#undef __END_DECLS
#ifdef __cplusplus
# define __BEGIN_DECLS extern "C" {
# define __END_DECLS }
#else
# define __BEGIN_DECLS /* empty */
# define __END_DECLS /* empty */
#endif
__BEGIN_DECLS
struct gsl_multiset_struct
{
size_t n;
size_t k;
size_t *data;
};
typedef struct gsl_multiset_struct gsl_multiset;
GSL_FUN gsl_multiset *gsl_multiset_alloc (const size_t n, const size_t k);
GSL_FUN gsl_multiset *gsl_multiset_calloc (const size_t n, const size_t k);
GSL_FUN void gsl_multiset_init_first (gsl_multiset * c);
GSL_FUN void gsl_multiset_init_last (gsl_multiset * c);
GSL_FUN void gsl_multiset_free (gsl_multiset * c);
GSL_FUN int gsl_multiset_memcpy (gsl_multiset * dest, const gsl_multiset * src);
GSL_FUN int gsl_multiset_fread (FILE * stream, gsl_multiset * c);
GSL_FUN int gsl_multiset_fwrite (FILE * stream, const gsl_multiset * c);
GSL_FUN int gsl_multiset_fscanf (FILE * stream, gsl_multiset * c);
GSL_FUN int gsl_multiset_fprintf (FILE * stream, const gsl_multiset * c, const char *format);
GSL_FUN size_t gsl_multiset_n (const gsl_multiset * c);
GSL_FUN size_t gsl_multiset_k (const gsl_multiset * c);
GSL_FUN size_t * gsl_multiset_data (const gsl_multiset * c);
GSL_FUN int gsl_multiset_valid (gsl_multiset * c);
GSL_FUN int gsl_multiset_next (gsl_multiset * c);
GSL_FUN int gsl_multiset_prev (gsl_multiset * c);
GSL_FUN INLINE_DECL size_t gsl_multiset_get (const gsl_multiset * c, const size_t i);
#ifdef HAVE_INLINE
INLINE_FUN
size_t
gsl_multiset_get (const gsl_multiset * c, const size_t i)
{
#if GSL_RANGE_CHECK
if (GSL_RANGE_COND(i >= c->k)) /* size_t is unsigned, can't be negative */
{
GSL_ERROR_VAL ("index out of range", GSL_EINVAL, 0);
}
#endif
return c->data[i];
}
#endif /* HAVE_INLINE */
__END_DECLS
#endif /* __GSL_MULTISET_H__ */
|
\ProvidesFile{sharedSetup.tex}[v1.0.0]
\documentclass[letterpaper,titlepage,twoside,canadian]{book}
% Using the British option gives dates in the form d-m-y
% The Canadian option gives dates in the form m-d-y
\usepackage{times}
\usepackage{amsmath}
\usepackage[useregional=text]{datetime2}
\usepackage{enumerate}
\usepackage{ifthen}
\usepackage{alltt}
\usepackage{calc}
\usepackage{shortvrb}
\usepackage{varioref}
\usepackage{graphicx}
\usepackage{color}
\usepackage{makeidx}
\usepackage{xspace}
\usepackage{fancyhdr}
\usepackage[section]{tocbibind}
\usepackage{epstopdf}
\usepackage{moreverb}
% Control the code, depending on whether a hyper-linked PDF is being generated:
\newboolean{generatingHyperPDF}
\setboolean{generatingHyperPDF}{true}
\newboolean{heveaActive}
%HEVEA\setboolean{heveaActive}{true}
%BEGIN LATEX
\setboolean{heveaActive}{false}
%END LATEX
% If the package 'hyperref' is disabled by commenting out the following lines,
% be sure to set the boolean 'generatingHyperPDF' to false.
\ifthenelse{\boolean{generatingHyperPDF}}%
{\ifthenelse{\boolean{heveaActive}}%
{\usepackage{hyperref}}
{\usepackage[colorlinks=true,
linkcolor=webgreen,
filecolor=webbrown,
citecolor=webgreen,
urlcolor=webblue,
pdftitle={\docTitle},
pdfauthor={\docAuthor},
pdfkeywords={\docKeywords},
pdfsubject={\docSubject},
bookmarks,
raiselinks=true,
plainpages=false,
bookmarksopen=true,
pdfstartview=Fit,
pdfpagemode=UseOutlines]{hyperref}}}
{\newcommand{\hyperpage}[1]{#1}}
\usepackage{mysects}
% Adjust the paper edges:
\setlength{\parindent}{0em}
\setlength{\textwidth}{\paperwidth-144pt}% 2"
\setlength{\marginparsep}{0pt}
\setlength{\marginparwidth}{0pt}
\setlength{\evensidemargin}{-18pt}% 0.25"
\setlength{\oddsidemargin}{-18pt}% 0.25"
% Some colours for the web:
\definecolor{webgreen}{rgb}{0,0.5,0}
\definecolor{webbrown}{rgb}{0.6,0,0}
\definecolor{webblue}{rgb}{0,0,0.5}
% Set the float behaviour:
\setcounter{bottomnumber}{2}
\setcounter{totalnumber}{4}
\renewcommand{\thefigure}{\arabic{figure}} % This fixes a 'glitch' with Hevea!
% Suppress the normal numbering of sections, et cetera:
\setcounter{secnumdepth}{-3}
\setcounter{tocdepth}{2}
% A couple of useful commands to handle italic-to-normal transitions:
\newcommand{\textitcorr}[1]{\textit{#1}\/}
\newcommand{\emphcorr}[1]{\emph{#1}\/}
% Some commands to make examples match actual output more closely:
\ifthenelse{\boolean{heveaActive}}%
{\newcommand{\pseudotab}{\textbf{ $\vdash$} }}
{\newcommand{\pseudotab}{\thinspace\boldmath{$\vdash$}\thinspace}}
\ifthenelse{\boolean{heveaActive}}%
{\newcommand{\tabSymbol}{\texttt{\textbf{$\vdash$}}}}
{\newcommand{\tabSymbol}{\texttt{\boldmath{$\vdash$}}}}
\ifthenelse{\boolean{heveaActive}}%
{\newcommand{\pseudotwotabs}{\textbf{ $\vdash$ $\vdash$ }}}
{\newcommand{\pseudotwotabs}{\thinspace\boldmath{$\vdash$}\thinspace%
\boldmath{$\vdash$}\thinspace}}
\ifthenelse{\boolean{heveaActive}}%
{\newcommand{\pseudothreetabs}{\textbf{ $\vdash$ $\vdash$ $\vdash$ }}}
{\newcommand{\pseudothreetabs}{\thinspace\boldmath{$\vdash$}\thinspace%
\boldmath{$\vdash$}\thinspace\boldmath{$\vdash$}\thinspace}}
\ifthenelse{\boolean{heveaActive}}%
{\newcommand{\fUS}{\textunderscore}}
{\newcommand{\fUS}{\begin{Large}\textbf{\textunderscore}\end{Large}}}
\newcommand{\clientName}{\fUS{}client\fUS}
\newcommand{\dollarService}{\textdollar{}ervice}
\newcommand{\serviceName}{\fUS{}service\fUS}
\newcommand{\inputOutput}{Input~/~Output}
\ifthenelse{\boolean{heveaActive}}%
{\newcommand{\openSq}{\textbf{[}}}
{\newcommand{\openSq}{\boldmath{$\lbrack$}}}
\ifthenelse{\boolean{heveaActive}}%
{\newcommand{\closeSq}{\textbf{]}}}
{\newcommand{\closeSq}{\boldmath{$\rbrack$}}}
\ifthenelse{\boolean{heveaActive}}%
{\newcommand{\sqPair}{\textbf{[ ]}}}
{\newcommand{\sqPair}{\boldmath{$\lbrack\ \rbrack$}}}
\newcommand{\TBD}{\textbf{\large{TBD}}}
\newlength{\uL}
\ifthenelse{\boolean{heveaActive}}%
{\newcommand{\argItem}[2]{\textbf{#1} #2}}
{\newcommand{\argItem}[2]{\makebox[4em][l]{\textbf{#1}} #2}}
\ifthenelse{\boolean{heveaActive}}%
{\newcommand{\cmdItem}[2]{\textbf{#1} #2}}
{\newcommand{\cmdItem}[2]{\makebox[1em][l]{\textbf{#1}} \parbox{40em}{#2}}}
\ifthenelse{\boolean{heveaActive}}%
{\newcommand{\optItem}[4]{\textbf{-#1} \textit{#2} | \textbf{--#3} \textit{#2} #4}}
{\newcommand{\optItem}[4]{\makebox[2em][l]{\textbf{--#1} \textit{#2}} %
\textbar{} \makebox[5.5em][l]{\textbf{{-}{-}#3} \textit{#2}} \parbox{35em}{#4}}}
\ifthenelse{\boolean{heveaActive}}%
{\newcommand{\longDash}{-}}
{\newcommand{\longDash}{--}}
\newcommand{\bigRightArrow}{$\Rightarrow$}
\ifthenelse{\boolean{heveaActive}}%
{\newcommand{\exSp}{\ }}
{\newcommand{\exSp}{}}
\ifthenelse{\boolean{heveaActive}}%
{\newcommand{\condPage}{\\}}
{\newcommand{\condPage}{\newpage}}
\newcommand*{\compLang}[1]{\emphcorr{#1}}
% Use different graphics file formats for HTML versus PDF
\ifthenelse{\boolean{heveaActive}}%
{\newcommand*{\objImage}[1]{\imgsrc{#1.png}}}
{\newcommand*{\objImage}[1]{\includegraphics{#1.eps}}}
\ifthenelse{\boolean{heveaActive}}%
{\newcommand*{\objScaledImage}[2]{\imgsrc{#2.png}}}
{\newcommand*{\objScaledImage}[2]{\includegraphics[scale=#1]{#2.eps}}}
\ifthenelse{\boolean{heveaActive}}%
{\newcommand*{\objImageWithWidth}[2]{\imgsrc[width=#1]{#2.png}}}
{\newcommand*{\objImageWithWidth}[2]{\includegraphics[width=#1]{#2.eps}}}
\ifthenelse{\boolean{heveaActive}}%
{\newcommand{\mpmLogo}{\objImageWithWidth{150}{mpm_images/m+m_tight_logo}}}
{\newcommand{\mpmLogo}{\objImageWithWidth{5em}{mpm_images/m+m_tight_logo}}}
\newcommand*{\objPicture}[1]{\begin{center}\objImage{#1}\end{center}}
\newcommand*{\objScaledPicture}[2]{\begin{center}\objScaledImage{#2}{#1}\end{center}}
\newcommand*{\objDiagram}[3]{\begin{figure}[!ht]\centering\objImage{#1}%
\caption{#3\ }\label{diagram:#2}\end{figure}}
\newcommand{\objScaledDiagram}[4]{\begin{figure}[!ht]\centering%
\objScaledImage{#4}{#1}\caption{#3\ }\label{diagram:#2}\end{figure}}
\newcommand*{\objDoubleScaledDiagram}[5]{\begin{figure}[!ht]\centering%
\objScaledImage{#5}{#1}\hspace{3em}\objScaledImage{#5}{#2}\caption{#4\ }%
\label{diagram:#3}\end{figure}}
\newcommand*{\objDiagramRef}[1]{\ref{diagram:#1}}
\newcommand{\stdAuthor}{H~Plus~Technologies~Ltd. and Simon~Fraser~University\\
Vancouver, British~Columbia, Canada}
% First argument: index category
% Second argument: index subcategory
% Third argument: alternate index subcategory
% Fourth argument: index name
% Fifth argument: index suffix
\ifthenelse{\boolean{heveaActive}}%
{\newcommand{\multiindex}[5]{}}
{\newcommand{\multiindex}[5]{%
\ifthenelse{\equal{#2}{\default}}%
{\index{#1!#4#5}}%
{\index{#1!#2!#4#5}}}}
% First argument: label category
% Second argument: label subcategory
% Third argument: alternate label subcategory
% Fourth argument: label name
\ifthenelse{\boolean{heveaActive}}%
{\newcommand{\multilabel}[4]{}}
{\newcommand{\multilabel}[4]{%
\ifthenelse{\equal{#3}{\default}}%
{\label{#1:#4}}%
{\label{#1:#3:#4}}}}
% First argument: reference category
% Second argument: reference subcategory
% Third argument: alternate reference subcategory
% Fourth argument: reference name
\ifthenelse{\boolean{heveaActive}}%
{\newcommand{\multiref}[4]{}}
{\newcommand{\multiref}[4]{%
\ifthenelse{\equal{#3}{\default}}%
{\ref{#1:#4}}%
{\ref{#1:#3:#4}}}}
% First argument: hyperlink/index category
% Second argument: hyperlink/index name
% Third argument: alternate hyperlink/index name
\ifthenelse{\boolean{generatingHyperPDF}}%
{\newcommand{\genTag}[3]{\hyperlink{hyper.#1.#2}{\textitcorr{#3}}}}
{\newcommand{\genTag}[3]{\textitcorr{#3}}}
% First argument: hyperlink/index category
% Second argument: hyperlink/index name
\ifthenelse{\boolean{generatingHyperPDF}}%
{\newcommand{\genTarget}[2]{\hypertarget{hyper.#1.#2}{}}}
{\newcommand{\genTarget}[2]{}}
% The net effect is as follows:
% generatingHyperPDF
% 'D' \textitcorr{\color{webgreen}#3} \hypertarget{hyper.#2.#3}{}\label{#2:#3} {}
% 'E' {} {} {}
% 'M' {} \hypertarget{hyper.#2.#3}{}\label{#2:#3} {}
% 'P' {} \hypertarget{hyper.#2.#3}{}\label{#2:#3} \index{#2!#3}
% 'R' \hyperlink{hyper.#2.#3}{\textitcorr{#3}} \index{#2!#3} \ref{#2:#3}
% 'S' \textitcorr{#3} \index{#2!#3} {}
% 'X' \hyperlink{hyper.#2.#3}{\textitcorr{#3}} {} {}
% not generatingHyperPDF
% 'D' \textitcorr{\color{webgreen}#3} \index{#2!#3|(textbf}\label{#2:#3} {}
% 'E' {} \index{#2!#3|)textbf} {}
% 'M' {} \index{#2!#3|(textbf}\label{#2:#3} {}
% 'P' {} \label{#2:#3} \index{#2!#3}
% 'R' \textitcorr{#3} \index{#2!#3} \ref{#2:#3}
% 'S' \textitcorr{#3} \index{#2!#3} {}
% 'X' \textitcorr{#3} {} {}
% D = Define the object (emphasize the index, create a label);
% E = End of the object definition (close the index, no text);
% M = Define the object (no visible text);
% P = Define the object (name shown as plain text, create a label and an
%     index entry);
% R = Refer to the object in the index (the default);
% S = Reference to a standard object; and
% X = Don't add a reference for the object to the index (any letter except D or
% R could be used, X is preferred for mnemonic value)
% First argument: hyperlink/index category
% Second argument: hyperlink/index subcategory
% Third argument: alternate hyperlink/index subcategory
% Fourth argument: hyperlink/index name
% Fifth argument: alternate hyperlink/index name
\newcommand{\entityNameA}[5]{%
\genTarget{#1}{#4}%
\ifthenelse{\equal{#5}{\default}}%
{\textitcorr{\color{webgreen}#4}\multiindex{#1}{#2}{#3}{#4}{|textbf}}%
{\textitcorr{\color{webgreen}#5}\multiindex{#1}{#2}{#3}{#5}{|textbf}}%
\multilabel{#1}{#2}{#3}{#4}}
% First argument: hyperlink/index category
% Second argument: hyperlink/index subcategory
% Third argument: alternate hyperlink/index subcategory
% Fourth argument: hyperlink/index name
% Fifth argument: alternate hyperlink/index name
\newcommand{\entityNameD}[5]{%
\genTarget{#1}{#4}%
\ifthenelse{\equal{#5}{\default}}%
{\textitcorr{\color{webgreen}#4}\multiindex{#1}{#2}{#3}{#4}{|(textbf}}%
{\textitcorr{\color{webgreen}#5}\multiindex{#1}{#2}{#3}{#5}{|(textbf}}%
\multilabel{#1}{#2}{#3}{#4}}
% First argument: hyperlink/index category
% Second argument: hyperlink/index subcategory
% Third argument: alternate hyperlink/index subcategory
% Fourth argument: hyperlink/index name
% Fifth argument: alternate hyperlink/index name
\newcommand{\entityNameE}[5]{%
\ifthenelse{\equal{#1}{#4}}%
{}% if first and fourth argument match
{\ifthenelse{\equal{#5}{\default}}%
{\multiindex{#1}{#2}{#3}{#4}{|)textbf}}%
{\multiindex{#1}{#2}{#3}{#5}{|)textbf}}}}
% First argument: hyperlink/index category
% Second argument: hyperlink/index subcategory
% Third argument: alternate hyperlink/index subcategory
% Fourth argument: hyperlink/index name
% Fifth argument: alternate hyperlink/index name
\newcommand{\entityNameM}[5]{%
\genTarget{#1}{#4}%
\ifthenelse{\equal{#5}{\default}}%
{\multiindex{#1}{#2}{#3}{#4}{|(textbf}}%
{\multiindex{#1}{#2}{#3}{#5}{|(textbf}}%
\multilabel{#1}{#2}{#3}{#4}}
% First argument: hyperlink/index category
% Second argument: hyperlink/index subcategory
% Third argument: alternate hyperlink/index subcategory
% Fourth argument: hyperlink/index name
% Fifth argument: alternate hyperlink/index name
\newcommand{\entityNameP}[5]{%
\genTarget{#1}{#4}%
\ifthenelse{\equal{#5}{\default}}%
{#4\multiindex{#1}{#2}{#3}{#4}{|(textbf}}%
{#5\multiindex{#1}{#2}{#3}{#5}{|(textbf}}%
\multilabel{#1}{#2}{#3}{#4}}
% First argument: hyperlink/index category
% Second argument: hyperlink/index subcategory
% Third argument: alternate hyperlink/index subcategory
% Fourth argument: hyperlink/index name
% Fifth argument: alternate hyperlink/index name
\newcommand{\entityNameR}[5]{%
\ifthenelse{\equal{#5}{\default}}%
{\genTag{#1}{#4}{#4}%
\ifthenelse{\equal{#1}{#4}}%
{}% if first and fourth argument match
{\multiindex{#1}{#2}{#3}{#4}{}}}%
{\genTag{#1}{#4}{#5}%
\ifthenelse{\equal{#1}{#4}}%
{}% if first and fourth argument match
{\multiindex{#1}{#2}{#3}{#5}{}}}%
\multiref{#1}{#2}{#3}{#4}}
% First argument: hyperlink/index category
% Second argument: hyperlink/index subcategory
% Third argument: alternate hyperlink/index subcategory
% Fourth argument: hyperlink/index name
% Fifth argument: alternate hyperlink/index name
\newcommand{\entityNameS}[5]{%
\ifthenelse{\equal{#5}{\default}}%
{\textitcorr{#4}}%
{\textitcorr{#5}}%
\ifthenelse{\equal{#1}{#4}}%
{}% if first and fourth argument match
{\multiindex{#1}{#2}{#3}{#4}{}}}
% First argument: hyperlink/index category
% Second argument: hyperlink/index subcategory
% Third argument: alternate hyperlink/index subcategory
% Fourth argument: hyperlink/index name
% Fifth argument: alternate hyperlink/index name
\newcommand{\entityNameX}[5]{%
\ifthenelse{\equal{#5}{\default}}%
{\genTag{#1}{#4}{#4}}%
{\genTag{#1}{#4}{#5}}}%
% First argument: hyperlink/index category
% Second argument: hyperlink/index name
\ifthenelse{\boolean{generatingHyperPDF}}%
{\newcommand{\doVpage}[2]{}}%
{\newcommand{\doVpage}[2]{ \vpageref[(][(]{#1:#2}}}
% Use \entityReference, rather than \entityName, for the first mention of an object within
% another object, so that page ranges will be present.
\newcommand{\entityReference}[2]{\entityNameR{#1}{\default}{\default}{#2}{\default}%
\doVpage{#1}{#2}}% command if generatingHyperPDF
\ifthenelse{\boolean{generatingHyperPDF}}%
{\newcommand{\companyReference}[2]{\href{#1}{#2}}}%
{\newcommand{\companyReference}[2]{#2}}%
% First argument [optional]: alternate name
% Second argument: entity name
\newcommand{\clientNameA}[2][\default]{\entityNameA{Clients}{\default}{\default}{#2}{#1}}% shortcut
\newcommand{\clientNameD}[2][\default]{\entityNameD{Clients}{\default}{\default}{#2}{#1}}% shortcut
\newcommand{\clientNameE}[2][\default]{\entityNameE{Clients}{\default}{\default}{#2}{#1}}% shortcut
\newcommand{\clientNameM}[2][\default]{\entityNameM{Clients}{\default}{\default}{#2}{#1}}% shortcut
\newcommand{\clientNameP}[2][\default]{\entityNameP{Clients}{\default}{\default}{#2}{#1}}% shortcut
\newcommand{\clientNameR}[2][\default]{\entityNameR{Clients}{\default}{\default}{#2}{#1}}% shortcut
\newcommand{\clientNameS}[2][\default]{\entityNameS{Clients}{\default}{\default}{#2}{#1}}% shortcut
\newcommand{\clientNameX}[2][\default]{\entityNameX{Clients}{\default}{\default}{#2}{#1}}% shortcut
% First argument [optional]: alternate name
% Second argument: entity name
\newcommand{\serviceNameA}[2][\default]{\entityNameA{Services}{\default}{\default}{#2}{#1}}% shortcut
\newcommand{\serviceNameD}[2][\default]{\entityNameD{Services}{\default}{\default}{#2}{#1}}% shortcut
\newcommand{\serviceNameE}[2][\default]{\entityNameE{Services}{\default}{\default}{#2}{#1}}% shortcut
\newcommand{\serviceNameM}[2][\default]{\entityNameM{Services}{\default}{\default}{#2}{#1}}% shortcut
\newcommand{\serviceNameP}[2][\default]{\entityNameP{Services}{\default}{\default}{#2}{#1}}% shortcut
\newcommand{\serviceNameR}[2][\default]{\entityNameR{Services}{\default}{\default}{#2}{#1}}% shortcut
\newcommand{\serviceNameS}[2][\default]{\entityNameS{Services}{\default}{\default}{#2}{#1}}% shortcut
\newcommand{\serviceNameX}[2][\default]{\entityNameX{Services}{\default}{\default}{#2}{#1}}% shortcut
% First argument [optional]: alternate name
% Second argument: entity name
\newcommand{\utilityNameA}[2][\default]{\entityNameA{Utilities}{\default}{\default}{#2}{#1}}% shortcut
\newcommand{\utilityNameD}[2][\default]{\entityNameD{Utilities}{\default}{\default}{#2}{#1}}% shortcut
\newcommand{\utilityNameE}[2][\default]{\entityNameE{Utilities}{\default}{\default}{#2}{#1}}% shortcut
\newcommand{\utilityNameM}[2][\default]{\entityNameM{Utilities}{\default}{\default}{#2}{#1}}% shortcut
\newcommand{\utilityNameP}[2][\default]{\entityNameP{Utilities}{\default}{\default}{#2}{#1}}% shortcut
\newcommand{\utilityNameR}[2][\default]{\entityNameR{Utilities}{\default}{\default}{#2}{#1}}% shortcut
\newcommand{\utilityNameS}[2][\default]{\entityNameS{Utilities}{\default}{\default}{#2}{#1}}% shortcut
\newcommand{\utilityNameX}[2][\default]{\entityNameX{Utilities}{\default}{\default}{#2}{#1}}% shortcut
% First argument [optional]: alternate name
% Second argument: Subcategory
% Third argument: entity name
\newcommand{\examplesNameA}[3][\default]{\entityNameA{Examples}{#2}{#2}{#3}{#1}}% shortcut
\newcommand{\examplesNameD}[3][\default]{\entityNameD{Examples}{#2}{#2}{#3}{#1}}% shortcut
\newcommand{\examplesNameE}[3][\default]{\entityNameE{Examples}{#2}{#2}{#3}{#1}}% shortcut
\newcommand{\examplesNameM}[3][\default]{\entityNameM{Examples}{#2}{#2}{#3}{#1}}% shortcut
\newcommand{\examplesNameP}[3][\default]{\entityNameP{Examples}{#2}{#2}{#3}{#1}}% shortcut
\newcommand{\examplesNameR}[3][\default]{\entityNameR{Examples}{#2}{#2}{#3}{#1}}% shortcut
\newcommand{\examplesNameS}[3][\default]{\entityNameS{Examples}{#2}{#2}{#3}{#1}}% shortcut
\newcommand{\examplesNameX}[3][\default]{\entityNameX{Examples}{#2}{#2}{#3}{#1}}% shortcut
% First argument [optional]: alternate name
% Second argument: Subcategory
% Third argument: Alternate subcategory name
% Fourth argument: entity name
\newcommand{\requestsNameA}[4][\default]{\entityNameA{Requests}{#2}{#3}{#4}{#1}}% shortcut
\newcommand{\requestsNameD}[4][\default]{\entityNameD{Requests}{#2}{#3}{#4}{#1}}% shortcut
\newcommand{\requestsNameE}[4][\default]{\entityNameE{Requests}{#2}{#3}{#4}{#1}}% shortcut
\newcommand{\requestsNameM}[4][\default]{\entityNameM{Requests}{#2}{#3}{#4}{#1}}% shortcut
\newcommand{\requestsNameP}[4][\default]{\entityNameP{Requests}{#2}{#3}{#4}{#1}}% shortcut
\newcommand{\requestsNameR}[4][\default]{\entityNameR{Requests}{#2}{#3}{#4}{#1}}% shortcut
\newcommand{\requestsNameS}[4][\default]{\entityNameS{Requests}{#2}{#3}{#4}{#1}}% shortcut
\newcommand{\requestsNameX}[4][\default]{\entityNameX{Requests}{#2}{#3}{#4}{#1}}% shortcut
% First argument [optional]: alternate name
% Second argument: Subcategory
% Third argument: entity name
\newcommand{\exemplarsNameA}[2][\default]{\entityNameA{Exemplars}{\default}{\default}{#2}{#1}}% shortcut
\newcommand{\exemplarsNameD}[2][\default]{\entityNameD{Exemplars}{\default}{\default}{#2}{#1}}% shortcut
\newcommand{\exemplarsNameE}[2][\default]{\entityNameE{Exemplars}{\default}{\default}{#2}{#1}}% shortcut
\newcommand{\exemplarsNameM}[2][\default]{\entityNameM{Exemplars}{\default}{\default}{#2}{#1}}% shortcut
\newcommand{\exemplarsNameP}[2][\default]{\entityNameP{Exemplars}{\default}{\default}{#2}{#1}}% shortcut
\newcommand{\exemplarsNameR}[2][\default]{\entityNameR{Exemplars}{\default}{\default}{#2}{#1}}% shortcut
\newcommand{\exemplarsNameS}[2][\default]{\entityNameS{Exemplars}{\default}{\default}{#2}{#1}}% shortcut
\newcommand{\exemplarsNameX}[2][\default]{\entityNameX{Exemplars}{\default}{\default}{#2}{#1}}% shortcut
% First argument [optional]: alternate name
% Second argument: Subcategory
% Third argument: entity name
\newcommand{\classNameA}[3][\default]{\entityNameA{Classes}{#2}{#2}{#3}{#1}}% shortcut
\newcommand{\classNameD}[3][\default]{\entityNameD{Classes}{#2}{#2}{#3}{#1}}% shortcut
\newcommand{\classNameE}[3][\default]{\entityNameE{Classes}{#2}{#2}{#3}{#1}}% shortcut
\newcommand{\classNameM}[3][\default]{\entityNameM{Classes}{#2}{#2}{#3}{#1}}% shortcut
\newcommand{\classNameP}[3][\default]{\entityNameP{Classes}{#2}{#2}{#3}{#1}}% shortcut
\newcommand{\classNameR}[3][\default]{\entityNameR{Classes}{#2}{#2}{#3}{#1}}% shortcut
\newcommand{\classNameS}[3][\default]{\entityNameS{Classes}{#2}{#2}{#3}{#1}}% shortcut
\newcommand{\classNameX}[3][\default]{\entityNameX{Classes}{#2}{#2}{#3}{#1}}% shortcut
\newcommand*{\insertpart}[2]{\clearpage\renewcommand{\mymark}{#1}#2}
% First argument [optional]: alternate hyperlink name
% Second argument: section title
% Third argument: hyperlink section
% Fourth argument: simplified version of title
% Fifth argument: prefix to display before title
\ifthenelse{\boolean{heveaActive}}%
{\newcommand*{\sectionStart}[5][\default]{\section{#5\texorpdfstring{#2}{#4}}%
\renewcommand{\mymark}{#4}}}
{\ifthenelse{\boolean{generatingHyperPDF}}%
{\newcommand*{\sectionStart}[5][\default]{\clearpage\section{#5%
\texorpdfstring{#2}{#4}}%
\renewcommand{\mymark}{#4}%
\ifthenelse{\equal{#1}{\default}}%
{\hypertarget{hyper.#3.#4}{}}%
{\hypertarget{hyper.#3.#1}{}}}}%
{\newcommand*{\sectionStart}[5][\default]{\clearpage\section{#5#2}%
\renewcommand{\mymark}{#4}}}}
\newcommand*{\sectionEnd}[1]{#1} % just a notational convenience
% First argument: hyperlink section
% Second argument: hyperlink name
% Third argument: simplified version of title
\ifthenelse{\boolean{generatingHyperPDF}}%
{\newcommand{\sectionRef}[3]{\hyperlink{hyper.#1.#2}{\textitcorr{#3}}}}%
{\newcommand{\sectionRef}[3]{}}
% First argument: [optional] alternate hypertarget name
% Second argument: subsection title
% Third argument: hypertarget section
% Fourth argument: simplified version of title
\ifthenelse{\boolean{heveaActive}}%
{\newcommand*{\subsectionStart}[4][\default]{\subsection{\texorpdfstring{#2}{#4}}}}
{\ifthenelse{\boolean{generatingHyperPDF}}%
{\newcommand*{\subsectionStart}[4][\default]{\subsection{\texorpdfstring{#2}{#4}}%
\ifthenelse{\equal{#1}{\default}}%
{\hypertarget{hyper.#3.#4}{}}%
{\hypertarget{hyper.#3.#1}{}}}}%
{\newcommand*{\subsectionStart}[4][\default]{\subsection{#2}}}}
\newcommand*{\subsectionEnd}[1]{#1} % just a notational convenience
% First argument: [optional] alternate hypertarget name
% Second argument: subsubsection title
% Third argument: hypertarget section
% Fourth argument: simplified version of title
\ifthenelse{\boolean{heveaActive}}%
{\newcommand*{\subsubsectionStart}[4][\default]{\subsubsection{%
\texorpdfstring{#2}{#4}}}}
{\ifthenelse{\boolean{generatingHyperPDF}}%
{\newcommand*{\subsubsectionStart}[4][\default]{\subsubsection{%
\texorpdfstring{#2}{#4}}%
\ifthenelse{\equal{#1}{\default}}%
{\hypertarget{hyper.#3.#4}{}}%
{\hypertarget{hyper.#3.#1}{}}}}%
{\newcommand*{\subsubsectionStart}[4][\default]{\subsubsection{#2}\index{#2}}}}
\newcommand*{\subsubsectionEnd}[1]{#1} % just a notational convenience
% First argument: [optional] alternate section name
% Second argument: section title
\newcommand*{\primaryStart}[2][\default]{%
\sectionStart[#1]{#2}{Primary}{#2}{}}
\newcommand*{\primaryEnd}[1]{#1} % just a notational convenience
% First argument: section name
% Second argument: link title
\newcommand{\primaryRef}[2]{\sectionRef{Primary}{#1}{#2}}
% First argument: [optional] alternate subsection name
% Second argument: subsection title
\newcommand*{\secondaryStart}[2][\default]{%
\subsectionStart[#1]{#2}{Secondary}{#2}}
\newcommand*{\secondaryEnd}[1]{#1} % just a notational convenience
% First argument: subsection name
% Second argument: link title
\newcommand{\secondaryRef}[2]{\sectionRef{Secondary}{#1}{#2}}
% First argument: [optional] alternate subsubsection name
% Second argument: subsubsection title
\newcommand*{\tertiaryStart}[2][\default]{%
\subsubsectionStart[#1]{#2}{Tertiary}{#2}}
\newcommand*{\tertiaryEnd}[1]{#1} % just a notational convenience
% First argument: subsubsection name
% Second argument: link title
\newcommand{\tertiaryRef}[2]{\sectionRef{Tertiary}{#1}{#2}}
% First argument: [optional] alternate appendix name
% Second argument: appendix title
\newcommand*{\appendixStart}[2][\default]{%
\sectionStart[#1]{#2}{Appendix}{#2}{\appendixname{}:~}}
\newcommand*{\appendixEnd}[1]{#1} % just a notational convenience
% First argument: appendix name
% Second argument: link title
\newcommand{\appendixRef}[2]{\sectionRef{Appendix}{#1}{#2}}
\ifthenelse{\boolean{heveaActive}}%
{\newcommand{\maybeSpace}{}}
{\newcommand{\maybeSpace}{\hspace\fill}}
\newenvironment{histList}
{\begin{list}
{}
{\setlength{\labelwidth}{108pt}% 1.5"
\setlength{\leftmargin}{\labelwidth+\labelsep}
\setlength{\rightmargin}{36pt}% 0.5"
\setlength{\parsep}{0ex}
\renewcommand{\makelabel}[1]{\textbf{##1}\maybeSpace}}}
{\end{list}}
\newcommand*{\histListBegin}{\begin{histList}}
\newcommand*{\histListEnd}{\end{histList}}
\newcommand{\histListItem}[1]{\item[#1]}
\newcommand*{\outputBegin}{\begin{quote}\begin{ttfamily}\begin{small}}
\newcommand*{\outputEnd}{\end{small}\end{ttfamily}\end{quote}}
\newcommand*{\codeBegin}{\begin{ttfamily}\begin{small}}
\newcommand*{\codeEnd}{\end{small}\end{ttfamily}}
\newcommand*{\asCode}[1]{\codeBegin{}#1\codeEnd}
\newcommand*{\asBoldCode}[1]{\textbf{\asCode{#1}}}
\newcommand*{\asEmphCode}[1]{\textit{\asCode{#1}}}
\newenvironment{tightItems}%
{\begin{itemize}%
\setlength{\leftmargin}{0pt}%
\setlength{\itemsep}{0pt}%
\setlength{\parsep}{0pt}%
\setlength{\parskip}{0pt}}%
{\end{itemize}}
\newcommand{\insertFullClientParameters}{The application has five optional parameters:
\begin{itemize}
\item\optItem{h}{}{help}{display the list of optional parameters and arguments and leave}
\item\exSp\optItem{i}{}{info}{display the type of the executable and a description of the
executable and leave \longDash{} note that this option is primarily for use by the
\emph{\MMMU} application}
\item\exSp\optItem{j}{}{json}{generate \json\longDash{}formatted output}
\item\exSp\optItem{t}{}{tabs}{generate output in a tab\longDash{}delimited format}
\item\exSp\optItem{v}{}{vers}{display the version and copyright information and leave}
\end{itemize}
If neither \json{} format nor tab\longDash{}delimited format are specified, the output is
written as simple text.}
\newcommand{\insertShortClientParameters}{The application has three optional parameters:
\begin{itemize}
\item\optItem{h}{}{help}{display the list of optional parameters and arguments and leave}
\item\exSp\optItem{i}{}{info}{display the type of the executable and a description of the
executable and leave \longDash{} note that this option is primarily for use by the
\emph{\MMMU} application}
\item\exSp\optItem{v}{}{vers}{display the version and copyright information and leave}
\end{itemize}}
\newcommand{\insertFullUtilityParameters}{The application has five optional parameters:
\begin{itemize}
\item\optItem{h}{}{help}{display the list of optional parameters and arguments and leave}
\item\exSp\optItem{i}{}{info}{display the type of the executable and a description of the
executable and leave \longDash{} note that this option is primarily for use by the
\emph{\MMMU} application}
\item\exSp\optItem{j}{}{json}{generate \json\longDash{}formatted output}
\item\exSp\optItem{t}{}{tabs}{generate output in a tab\longDash{}delimited format}
\item\exSp\optItem{v}{}{vers}{display the version and copyright information and leave}
\end{itemize}
If neither \json{} format nor tab\longDash{}delimited format are specified, the output is
written as simple text.}
\newcommand{\insertShortUtilityParameters}{The application has three optional parameters:
\begin{itemize}
\item\optItem{h}{}{help}{display the list of optional parameters and arguments and leave}
\item\exSp\optItem{i}{}{info}{display the type of the executable and a description of the
executable and leave \longDash{} note that this option is primarily for use by the
\emph{\MMMU} application}
\item\exSp\optItem{v}{}{vers}{display the version and copyright information and leave}
\end{itemize}}
\newcommand{\insertAppParameters}{The application has eleven optional parameters:
\begin{itemize}
\item\optItem{a}{}{args}{display the argument descriptions for the executable and leave
\longDash{} note that this option is primarily for use by the \emph{\MMMU} application}
\item\exSp\optItem{c}{}{channel}{display the endpoint name after applying all other
options and leave}
\item\exSp\optItem{e}{v}{endpoint}{specifies an alternative endpoint name `\textit{v}' to
be used}
\item\exSp\optItem{g}{}{go}{indicates that the service is to be started immediately}
\item\exSp\optItem{h}{}{help}{display the list of optional parameters and arguments and
leave}
\item\exSp\optItem{i}{}{info}{display the type of the executable, the valid options and
a description of the executable and leave \longDash{} note that this option is primarily
for use by the \emph{\MMMU} application}
\item\exSp\optItem{m}{n}{mod}{specifies the number of bytes `\textit{n}' of the IPv4
address to be used to modify the tag}
\item\exSp\optItem{p}{v}{port}{specifies the port number `\textit{v}' to be used, if a
non\longDash{}default port is desired}
\item\exSp\optItem{r}{}{report}{report the service metrics when the application exits}
\item\exSp\optItem{t}{v}{tag}{specifies the tag `\textit{v}' to be used as part of the
service name}
\item\exSp\optItem{v}{}{vers}{display the version and copyright information and leave}
\end{itemize}}
\newcommand{\insertAutoAppParameters}{The application has ten optional parameters:
\begin{itemize}
\item\optItem{a}{}{args}{display the argument descriptions for the executable and leave
\longDash{} note that this option is primarily for use by the \emph{\MMMU} application}
\item\exSp\optItem{c}{}{channel}{display the endpoint name after applying the `tag' and
`endpoint' options, if present, and leave}
\item\optItem{e}{v}{endpoint}{specifies an alternative endpoint name `\textit{v}' to be
used}
\item\exSp\optItem{h}{}{help}{display the list of optional parameters and arguments and
leave}
\item\exSp\optItem{i}{}{info}{display the type of the executable, the valid options and
a description of the executable and leave \longDash{} note that this option is primarily
for use by the \emph{\MMMU} application}
\item\exSp\optItem{m}{n}{mod}{specifies the number of bytes `\textit{n}' of the IPv4
address to be used to modify the tag}
\item\exSp\optItem{p}{v}{port}{specifies the port number `\textit{v}' to be used, if a
non\longDash{}default port is desired}
\item\exSp\optItem{r}{}{report}{report the service metrics when the application exits}
\item\exSp\optItem{t}{v}{tag}{specifies the tag `\textit{v}' to be used as part of the
service name}
\item\exSp\optItem{v}{}{vers}{display the version and copyright information and leave}
\end{itemize}}
\newcommand{\insertTagDescription}[1]{The tag is added to the standard name of the
#1 service, so that more than one copy of the service can execute \longDash{} an \mplusm{}
installation can support multiple copies of the #1 service, but the \emph{\MMMU}
application cannot display them without a distinguishing `tag'.
If the tag is not specified, the standard name of the service will be used.}
\newcommand{\insertTagAndEndpointDescription}[4]{For details on the usage of the `endpoint', `mod' and
`tag' options, see the \emph{\WTE} appendix in the \emph{\MMMU} manual.
Once the options are set, an image similar to the following will appear in the \emph{\MMMU} window when
the service has successfully started:
\objScaledDiagram{#1}{#2}{#3}{#4}}
\newcommand{\insertFilterServiceComment}{As well as the service name, the input and output
stream names are modified if a tag is specified and the default endpoint is being used.}
\newcommand{\insertInputServiceComment}{As well as the service name, the output stream
name is modified if a tag is specified and the default endpoint is being used.}
\newcommand{\insertOutputServiceComment}{As well as the service name, the input stream
name is modified if a tag is specified and the default endpoint is being used.}
\newcommand{\insertStandardAdapterCommands}{If the application is running from a terminal
and has not been automatically started via the `\asCode{go}' option, the following
commands are available:
\begin{itemize}
\item\cmdItem{?}{display this list}
\item\exSp\cmdItem{b}{start (begin) the input and output streams}
\item\exSp\cmdItem{c}{configure the adapter}
\item\exSp\cmdItem{e}{stop (end) the input and output stream}
\item\exSp\cmdItem{q}{quit the application}
\item\exSp\cmdItem{r}{restart the input and output streams}
\end{itemize}}
\newcommand{\insertStandardServiceCommands}{If the application is running from a terminal
and has not been automatically started via the `\asCode{go}' option, the following
commands are available:
\begin{itemize}
\item\cmdItem{?}{display this list}
\item\exSp\cmdItem{b}{start (begin) the input and output streams}
\item\exSp\cmdItem{c}{configure the service}
\item\exSp\cmdItem{e}{stop (end) the input and output stream}
\item\exSp\cmdItem{q}{quit the application}
\item\exSp\cmdItem{r}{restart the input and output streams}
\end{itemize}}
% Common layout stuff:
% Set up the page layout:
\pagestyle{fancyplain}
\newcommand{\mymark}{}
\lhead[]{\fancyplain{}{\textsc{\mymark}}}
\chead[]{}
\rhead[\fancyplain{}{\textsc{\mymark}}]{}
\lfoot[Page \thepage]{\today}
\rfoot[\today]{Page \thepage}
\ifthenelse{\boolean{heveaActive}}%
{}
{\renewcommand{\headrulewidth}{0.5bp}}
\pagenumbering{roman}
\date{\MMV: \today}
\author{\stdAuthor}
\makeindex
|
If $S$ is a cone and $x \in S$, then $cx \in S$ for all $c \geq 0$. |
# Differential rotation with `starry`
In this notebook, we'll explain how to express the radial velocity field of a star at an arbitrary orientation on the sky in terms of spherical harmonics. This allows us to use `starry` to model the Rossiter-McLaughlin waveform **analytically**!
First, let's import some stuff. We'll use `sympy` for the derivations.
```python
import matplotlib.pyplot as pl
%matplotlib notebook
import numpy as np
from IPython.display import display, Math
import sympy
from sympy import *
from sympy.functions.special.tensor_functions import KroneckerDelta
print("Using sympy version", sympy.__version__)
# Initialize the session
init_session(quiet=True)
# Define our symbols
x, y, z, mu, nu, l, m, j, k, p, q, n, A, B, C, alpha, omeq = symbols('x y z mu nu l m j k p q n A B C alpha \omega_{eq}')
```
Using sympy version 1.3
## The radial velocity field of a star
The polynomial describing the radial component of the velocity field on the projected disk of a star is given by ([Short et al. 2018](https://arxiv.org/abs/1810.09565))
$f(\mathbf{x}, \mathbf{y}, \mathbf{z}) = \omega_{eq}(A\mathbf{x} + B\mathbf{y})(1 - \alpha(-B\mathbf{x} + A\mathbf{y} + C\mathbf{z})^2)$
where
$A = \sin(i)\cos(\lambda)$
$B = \sin(i)\sin(\lambda)$
$C = \cos(i)$
and $i$ and $\lambda$ are the stellar inclination and obliquity, respectively. The constant $\alpha$ is the shear due to differential rotation. We are assuming a simple linear shear according to the following equation:
$\omega = \omega_{eq}(1 - \alpha \sin^2\theta)$,
where $\omega$ is the angular rotational velocity at a point on the surface, $\omega_{eq}$ is the angular velocity at the equator, and $\theta$ is the latitude.
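Before expanding $f$ symbolically, it can help to look at the field itself. The snippet below is a purely numerical sketch (plain `numpy`/`matplotlib`; the values of $i$, $\lambda$, $\alpha$, and $\omega_{eq}$ are arbitrary assumptions for illustration):

```python
import numpy as np
import matplotlib.pyplot as pl

# Evaluate f(x, y, z) on the visible disk for assumed parameter values
inc, lam, alpha_v, omeq_v = np.radians(75.0), np.radians(30.0), 0.3, 1.0
A_v = np.sin(inc) * np.cos(lam)
B_v = np.sin(inc) * np.sin(lam)
C_v = np.cos(inc)
xg, yg = np.meshgrid(np.linspace(-1, 1, 201), np.linspace(-1, 1, 201))
zg = np.sqrt(np.clip(1 - xg ** 2 - yg ** 2, 0, None))
fg = omeq_v * (A_v * xg + B_v * yg) * (1 - alpha_v * (-B_v * xg + A_v * yg + C_v * zg) ** 2)
fg[xg ** 2 + yg ** 2 > 1] = np.nan  # mask points off the disk
pl.imshow(fg, origin="lower", extent=(-1, 1, -1, 1), cmap="RdBu_r")
pl.colorbar(label="radial velocity (arbitrary units)");
```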
## Let's expand this function
If we expand all the products in $f$, we can write it as a dot product of a polynomial coefficient vector and the polynomial basis defined in Luger et al. (2018):
```python
def poly_basis(n, x, y):
    """Return the n^th term in the polynomial basis."""
    l = Rational(floor(sqrt(n)))
    m = Rational(n - l * l - l)
    mu = Rational(l - m)
    nu = Rational(l + m)
    if (nu % 2 == 0):
        i = Rational(mu, 2)
        j = Rational(nu, 2)
        k = Rational(0)
    else:
        i = Rational(mu - 1, 2)
        j = Rational(nu - 1, 2)
        k = Rational(1)
    return x ** i * y ** j * sqrt(1 - x ** 2 - y ** 2) ** k
# Compute the polynomial basis
basis = Matrix([poly_basis(n, x, y) for n in range(16)]).T
```
Here's what the polynomial basis looks like (recall that $z = \sqrt{1 - x^2 - y^2}$):
```python
basis
```
And here's the function we wish to express:
```python
f = omeq * (A * x + B * y) * (1 - alpha * (-B * x + A * y + C * sqrt(1 - x ** 2 - y ** 2)) ** 2)
f
```
We can use `sympy` to figure out the (exact) representation of `f` in the polynomial basis:
```python
def Coefficient(expression, term):
    """Return the coefficient multiplying `term` in `expression`."""
    # Get the coefficient
    coeff = expression.coeff(term)
    # Set any non-constants in this coefficient to zero. If the coefficient
    # is not a constant, this is not the term we are interested in!
    coeff = coeff.subs(sqrt(1 - x ** 2 - y ** 2), 0).subs(x, 0).subs(y, 0)
    return coeff
vec = Matrix([Coefficient(expand(f), term) for term in basis])
vec
```
We can check that dotting this vector with the polynomial basis yields the original function $f$:
```python
simplify(factor(basis.dot(vec)) - f) == 0
```
True
## Now let's represent it in terms of spherical harmonics
Now that we have the vector of polynomial coefficients `vec`, we want to apply a change-of-basis transformation to figure out their (exact) representation in terms of spherical harmonics.
As in Luger et al. (2018), let's compute the change of basis matrix from polynomials to spherical harmonic coefficients. This is the inverse of the $A_1$ matrix introduced in Luger et al. (2018). Note that it includes the normalization of $\frac{2}{\sqrt{\pi}}$ used internally by `starry`.
```python
def SA(l, m):
    """A spherical harmonic normalization constant."""
    return sqrt((2 - KroneckerDelta(m, 0)) * (2 * l + 1) * factorial(l - m) / (4 * pi * factorial(l + m)))

def SB(l, m, j, k):
    """Another spherical harmonic normalization constant."""
    try:
        ratio = factorial(Rational(l + m + k - 1, 2)) / factorial(Rational(-l + m + k - 1, 2))
    except ValueError:
        ratio = 0
    res = 2 ** l * Rational(factorial(m), (factorial(j) * factorial(k) * factorial(m - j) * factorial(l - m - k))) * ratio
    return simplify(res)

def SC(p, q, k):
    """Return the binomial theorem coefficient `C`."""
    res = factorial(Rational(k, 2)) / (factorial(Rational(q, 2)) * factorial(Rational(k - p, 2)) * factorial(Rational(p - q, 2)))
    return simplify(res)

def Y(l, m, x, y):
    """Return the spherical harmonic of degree `l` and order `m`."""
    res = 0
    z = sqrt(1 - x ** 2 - y ** 2)
    if (m >= 0):
        for j in range(0, m + 1, 2):
            for k in range(0, l - m + 1, 2):
                for p in range(0, k + 1, 2):
                    for q in range(0, p + 1, 2):
                        res += (-1) ** ((j + p) // 2) * SA(l, m) * SB(l, m, j, k) * SC(p, q, k) * x ** (m - j + p - q) * y ** (j + q)
            for k in range(1, l - m + 1, 2):
                for p in range(0, k, 2):
                    for q in range(0, p + 1, 2):
                        res += (-1) ** ((j + p) // 2) * SA(l, m) * SB(l, m, j, k) * SC(p, q, k - 1) * x ** (m - j + p - q) * y ** (j + q) * z
    else:
        for j in range(1, abs(m) + 1, 2):
            for k in range(0, l - abs(m) + 1, 2):
                for p in range(0, k + 1, 2):
                    for q in range(0, p + 1, 2):
                        res += (-1) ** ((j + p - 1) // 2) * SA(l, abs(m)) * SB(l, abs(m), j, k) * SC(p, q, k) * x ** (abs(m) - j + p - q) * y ** (j + q)
            for k in range(1, l - abs(m) + 1, 2):
                for p in range(0, k, 2):
                    for q in range(0, p + 1, 2):
                        res += (-1) ** ((j + p - 1) // 2) * SA(l, abs(m)) * SB(l, abs(m), j, k) * SC(p, q, k - 1) * x ** (abs(m) - j + p - q) * y ** (j + q) * z
    return res

def p_Y(l, m, lmax):
    """Return the polynomial basis representation of the spherical harmonic `Y_{lm}`."""
    ylm = Y(l, m, x, y)
    res = [ylm.subs(sqrt(1 - x ** 2 - y ** 2), 0).subs(x, 0).subs(y, 0)]
    for n in range(1, (lmax + 1) ** 2):
        res.append(Coefficient(ylm, poly_basis(n, x, y)))
    return res

def A1(lmax, norm = 2 / sqrt(pi)):
    """Return the change of basis matrix A1. The columns of this matrix are given by `p_Y`."""
    res = zeros((lmax + 1) ** 2, (lmax + 1) ** 2)
    n = 0
    for l in range(lmax + 1):
        for m in range(-l, l + 1):
            res[n] = p_Y(l, m, lmax)
            n += 1
    return res * norm
```
We can now evaluate the change of basis matrix from spherical harmonic coefficients to polynomials, $A_1$, for $l_\mathrm{max} = 3$. We then take the inverse to go from polynomial coefficients to $Y_{lm}$ coefficients:
```python
M = Matrix(A1(3)).inv()
M
```
## The end result
We can finally compute the spherical harmonic coefficients of the function $f$:
```python
ycoeffs = simplify(M * vec)
ycoeffs
```
Note that this is the **exact** spherical harmonic representation of the function `f`.
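As a consistency check (this cell reuses `A1`, `basis`, `f`, and `ycoeffs` from the cells above), we can change basis back to polynomials and confirm that we recover $f$ exactly:

```python
# Ylm coefficients -> polynomial coefficients -> compare with f
poly_again = Matrix(A1(3)) * ycoeffs
print(simplify(factor(basis.dot(poly_again)) - f) == 0)  # True
```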
## Important note about current `starry` implementation
A few comments are in order regarding how exactly this is implemented in `starry`. This all happens behind the scenes, but it's useful to know if you're poking around in the code.
- When setting the spherical harmonic coefficients in `starry`, it is necessary to normalize the vector above by dividing it by $\pi$. This is because in `starry`, fluxes are normalized so that the integral of $Y_{0,0}$ over the disk is **unity** (instead of $\pi$).
- When limb darkening is present, `starry` requires the $Y_{0,0}$ coefficient of the map to be non-zero. But the spherical harmonic representation of the brightness-weighted velocity has $Y_{0,0} = 0$ (i.e., the star has zero net radial velocity). Overcoming this requires a bit of a *hack*. We set $Y_{0,0} = 1$ so we can limb-darken the map and compute the RM amplitude, but that means we have effectively computed $\int{(Iv + I)dS}$. We must therefore *subtract* $\int{IdS}$ to get the actual integrated brightness-weighted velocity. The RM effect amplitude is thus
$\ \ \ \ \ \ \ \ \ \ \Delta v = \frac{\int{(Iv + I)dS} - \int{IdS}}{\int{IdS}}$
We therefore compute the `starry` flux using two different `Map` instances: one to compute the $Iv + I$ term, and a uniform, limb-darkened map to compute the $I$ term.
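To make that bookkeeping concrete, here is a hedged brute-force sketch of the same subtraction trick (a plain `numpy` grid integration, *not* the actual `starry` implementation; the linear limb-darkening law and all parameter values are assumptions):

```python
import numpy as np

# Brightness-weighted velocity via int(Iv + I)dS - int(I)dS = int(Iv)dS
inc, lam, alpha_v, omeq_v, u1 = np.radians(80.0), np.radians(20.0), 0.2, 1.0, 0.4
A_v = np.sin(inc) * np.cos(lam)
B_v = np.sin(inc) * np.sin(lam)
C_v = np.cos(inc)
xg, yg = np.meshgrid(np.linspace(-1, 1, 1001), np.linspace(-1, 1, 1001))
on_disk = xg ** 2 + yg ** 2 < 1
zg = np.sqrt(np.clip(1 - xg ** 2 - yg ** 2, 0, None))
Igrid = np.where(on_disk, 1 - u1 * (1 - zg), 0.0)  # linear limb darkening, mu = z
v = omeq_v * (A_v * xg + B_v * yg) * (1 - alpha_v * (-B_v * xg + A_v * yg + C_v * zg) ** 2)
num = np.sum(Igrid * (v + 1)) - np.sum(Igrid)  # the two "fluxes", then the subtraction
print(num / np.sum(Igrid))  # Delta v; ~ 0 for an unocculted star
```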
## Interactive visualization
Below you can interactively see how different velocity profiles affect the Rossiter-McLaughlin waveform. We coded up a simple Jupyter widget to visualize the RM effect with `starry`. Try to find the parameters that best fit the measured RV data for the hot Jupiter host HD 189733!
```python
from viz import visualize
visualize();
```
## One possible solution
The following values should get you a pretty good fit. They are close to the means of the posterior distributions when we do a full MCMC fit of the dataset, but they are probably not the "true" values.
- $v_\mathrm{eq}$: 5.00
- $\lambda$: -0.50
- $i$: 80.0
- $\alpha$: 0.65
- $u_1$: 1.50
- $u_2$: -0.54
- $b$: -0.66
- $r/R_\star$: 0.158
|
{-# OPTIONS --cubical #-}
module n2o.N2O where
open import proto.Base
open import proto.Core
open import proto.IO
open import n2o.Network.WebSocket
open import n2o.Network.Socket
open import n2o.Network.Core
open import n2o.Network.Internal
-- open import Infinity.Proto
postulate
  terminationCheck : IO ⊤
{-# FOREIGN GHC
import Control.Concurrent (threadDelay)
terminationCheck :: IO ()
terminationCheck = do
putStrLn "sapere aude"
threadDelay 1000000
terminationCheck #-}
{-# COMPILE GHC terminationCheck = terminationCheck #-}
data Example : Set where
Greet : Example
-- index Init = do
-- updateText "system" "What is your name?"
-- wire record { id_ = "send" , postback = Just Greet, source = "name" ∷ [] }
-- index (Message Greet) = do
-- Just name ← get "name"
-- updateText "system" ("Hello, " <> jsEscape name)
index : Event Example → IO ⊤
index ev = putStrLn "Unknown event" -- TODO : monoids
about : Event Example → IO ⊤
about ev = putStrLn "Unknown event"
-- route : Context N2OProto Example β Context N2OProto Example
-- route c with (unpack (Request.reqPath (Context.cxRequest c)))
-- ... | "/ws/samples/static/index.html" = about
-- ... | "/ws/samples/static/about.html" = about
-- ... | _ = about
-- router : Context N2OProto Example → Context N2OProto Example
-- router c = record c { handler = mkHandler route }
-- protocols :
-- cx : Cx Example
-- cx = mkCx h r m p a d
-- where h = ?
-- r = ?
-- m = route ∷ []
-- p = []
-- a = ?
-- d = ?
main : IO ⊤
main = do
  -- sock ← socket AF_INET Stream (+ 0)
hPutStrLn stdout "asd"
-- protoRun 0 protos
terminationCheck
putStrLn "[*] Done"
-- main : IO ⊤
-- main = getLine >>= λ s → putStrLn s
|
{-# OPTIONS --safe #-}
open import Generics.Prelude hiding (lookup; pi; curry)
open import Generics.Telescope
open import Generics.Desc
open import Generics.All
open import Generics.HasDesc
import Generics.Helpers as Helpers
module Generics.Constructions.Fold
  {P I ℓ} {A : Indexed P I ℓ} (H : HasDesc {P} {I} {ℓ} A)
  {p c} {X : Pred′ I λ i → Set c}
where
open HasDesc H
private
variable
V : ExTele P
    i : ⟦ I ⟧tel p
    v : ⟦ V ⟧tel p
  X′ : ⟦ I ⟧tel p → Set c
  X′ i = unpred′ I _ X i
------------------------
-- Types of the algebra
levelAlg : ConDesc P V I → Level
levelAlg (var _) = c
levelAlg (π {ℓ} _ _ C) = ℓ ⊔ levelAlg C
levelAlg (A ⊗ B) = levelAlg A ⊔ levelAlg B
AlgIndArg : (C : ConDesc P V I) → ⟦ V ⟧tel p → Set (levelAlg C)
AlgIndArg (var f) v = X′ (f (p , v))
AlgIndArg (π ia S C) v = Π< ia > (S (p , v)) λ s → AlgIndArg C (v , s)
AlgIndArg (A ⊗ B) v = AlgIndArg A v × AlgIndArg B v
AlgCon : (C : ConDesc P V I) → ⟦ V ⟧tel p → Set (levelAlg C)
AlgCon (var f) v = X′ (f (p , v))
AlgCon (π ia S C) v = Π< ia > (S (p , v)) λ s → AlgCon C (v , s)
AlgCon (A ⊗ B) v = AlgIndArg A v → AlgCon B v
Algebra : ∀ k → Set (levelAlg (lookupCon D k))
Algebra k = AlgCon (lookupCon D k) tt
----------------
-- Generic fold
module _ (algs : Els Algebra) where
  fold-wf : (x : A′ (p , i)) → Acc x → X′ i
  foldData-wf : (x : ⟦ D ⟧Data A′ (p , i)) → AllDataω Acc D x → X′ i
foldData-wf {i} (k , x) = foldCon (lookupCon D k) (algs k) x
where
foldIndArg : (C : ConDesc P V I)
                 (x : ⟦ C ⟧IndArg A′ (p , v))
               → AllIndArgω Acc C x
               → AlgIndArg C v
foldIndArg (var f) x a = fold-wf x a
      foldIndArg (π ia S C) x a = fun< ia > λ s → foldIndArg C (app< ia > x s) (a s)
      foldIndArg (A ⊗ B) (xa , xb) (aa , ab)
= foldIndArg A xa aa
, foldIndArg B xb ab
foldCon : (C : ConDesc P V I)
(alg : AlgCon C v)
                (x : ⟦ C ⟧Con A′ (p , v , i))
              → AllConω Acc C x
              → X′ i
foldCon (var _) alg refl _ = alg
      foldCon (π ia S C) alg (s , x) a = foldCon C (app< ia > alg s) x a
      foldCon (A ⊗ B) alg (xa , xb) (aa , ab)
= foldCon B (alg (foldIndArg A xa aa)) xb ab
fold-wf x (acc a) = foldData-wf (split x) a
  fold : A′ (p , i) → X′ i
fold x = fold-wf x (wf x)
deriveFold : Arrows Algebra (Pred′ I λ i → A′ (p , i) → X′ i)
deriveFold = curryₙ λ m → pred′ I _ λ i → fold m
|
[STATEMENT]
lemma stream_times [simp]: "stream (t * t') = stream t * stream t'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. stream (t * t') = stream t * stream t'
[PROOF STEP]
by(simp add: times_stream_def times_tree_def) |
State Before: α : Type u_1
β : Type u_2
f : α → Option β
p : β → Bool
l : List α
⊢ filter p (filterMap f l) = filterMap (fun x => Option.filter p (f x)) l State After: α : Type u_1
β : Type u_2
f : α → Option β
p : β → Bool
l : List α
⊢ filterMap (fun x => Option.bind (f x) (Option.guard fun x => p x = true)) l =
filterMap (fun x => Option.filter p (f x)) l Tactic: rw [← filterMap_eq_filter, filterMap_filterMap] State Before: α : Type u_1
β : Type u_2
f : α → Option β
p : β → Bool
l : List α
⊢ filterMap (fun x => Option.bind (f x) (Option.guard fun x => p x = true)) l =
filterMap (fun x => Option.filter p (f x)) l State After: case e_f
α : Type u_1
β : Type u_2
f : α → Option β
p : β → Bool
l : List α
⊢ (fun x => Option.bind (f x) (Option.guard fun x => p x = true)) = fun x => Option.filter p (f x) Tactic: congr State Before: case e_f
α : Type u_1
β : Type u_2
f : α → Option β
p : β → Bool
l : List α
⊢ (fun x => Option.bind (f x) (Option.guard fun x => p x = true)) = fun x => Option.filter p (f x) State After: case e_f.h
α : Type u_1
β : Type u_2
f : α → Option β
p : β → Bool
l : List α
x : α
⊢ Option.bind (f x) (Option.guard fun x => p x = true) = Option.filter p (f x) Tactic: funext x State Before: case e_f.h
α : Type u_1
β : Type u_2
f : α → Option β
p : β → Bool
l : List α
x : α
⊢ Option.bind (f x) (Option.guard fun x => p x = true) = Option.filter p (f x) State After: no goals Tactic: cases f x <;> simp [Option.filter, Option.guard] |
// Copyright (C) 2013-2016 Internet Systems Consortium, Inc. ("ISC")
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
#include <config.h>
#include <asiolink/asio_wrapper.h>
#include <dhcp_ddns/dhcp_ddns_log.h>
#include <dhcp_ddns/ncr_io.h>
#include <boost/algorithm/string/predicate.hpp>
namespace isc {
namespace dhcp_ddns {
NameChangeProtocol stringToNcrProtocol(const std::string& protocol_str) {
if (boost::iequals(protocol_str, "UDP")) {
return (NCR_UDP);
}
if (boost::iequals(protocol_str, "TCP")) {
return (NCR_TCP);
}
isc_throw(BadValue,
"Invalid NameChangeRequest protocol: " << protocol_str);
}
std::string ncrProtocolToString(NameChangeProtocol protocol) {
switch (protocol) {
case NCR_UDP:
return ("UDP");
case NCR_TCP:
return ("TCP");
default:
break;
}
std::ostringstream stream;
stream << "UNKNOWN(" << protocol << ")";
return (stream.str());
}
//************************** NameChangeListener ***************************
NameChangeListener::NameChangeListener(RequestReceiveHandler&
recv_handler)
: listening_(false), io_pending_(false), recv_handler_(recv_handler) {
}
void
NameChangeListener::startListening(isc::asiolink::IOService& io_service) {
if (amListening()) {
// This amounts to a programmatic error.
isc_throw(NcrListenerError, "NameChangeListener is already listening");
}
// Call implementation dependent open.
try {
open(io_service);
} catch (const isc::Exception& ex) {
stopListening();
isc_throw(NcrListenerOpenError, "Open failed: " << ex.what());
}
// Set our status to listening.
setListening(true);
// Start the first asynchronous receive.
try {
receiveNext();
} catch (const isc::Exception& ex) {
stopListening();
isc_throw(NcrListenerReceiveError, "doReceive failed: " << ex.what());
}
}
void
NameChangeListener::receiveNext() {
io_pending_ = true;
doReceive();
}
void
NameChangeListener::stopListening() {
try {
// Call implementation dependent close.
close();
} catch (const isc::Exception &ex) {
// Swallow exceptions. If we have some sort of error we'll log
// it but we won't propagate the throw.
LOG_ERROR(dhcp_ddns_logger, DHCP_DDNS_NCR_LISTEN_CLOSE_ERROR)
.arg(ex.what());
}
// Set it false, no matter what. This allows us to at least try to
// re-open via startListening().
setListening(false);
}
void
NameChangeListener::invokeRecvHandler(const Result result,
NameChangeRequestPtr& ncr) {
// Call the registered application layer handler.
// Surround the invocation with a try-catch. The invoked handler is
// not supposed to throw, but in the event it does we will at least
// report it.
try {
io_pending_ = false;
recv_handler_(result, ncr);
} catch (const std::exception& ex) {
LOG_ERROR(dhcp_ddns_logger, DHCP_DDNS_UNCAUGHT_NCR_RECV_HANDLER_ERROR)
.arg(ex.what());
}
// Start the next IO layer asynchronous receive.
// In the event the handler above intervened and decided to stop listening
// we need to check that first.
if (amListening()) {
try {
receiveNext();
} catch (const isc::Exception& ex) {
            // It is possible, though unlikely, for doReceive to fail without
            // scheduling the read. While unlikely, it does mean the callback
// will not get called with a failure. A throw here would surface
// at the IOService::run (or run variant) invocation. So we will
// close the window by invoking the application handler with
// a failed result, and let the application layer sort it out.
LOG_ERROR(dhcp_ddns_logger, DHCP_DDNS_NCR_RECV_NEXT_ERROR)
.arg(ex.what());
// Call the registered application layer handler.
// Surround the invocation with a try-catch. The invoked handler is
// not supposed to throw, but in the event it does we will at least
// report it.
NameChangeRequestPtr empty;
try {
io_pending_ = false;
recv_handler_(ERROR, empty);
} catch (const std::exception& ex) {
LOG_ERROR(dhcp_ddns_logger,
DHCP_DDNS_UNCAUGHT_NCR_RECV_HANDLER_ERROR)
.arg(ex.what());
}
}
}
}
//************************* NameChangeSender ******************************
NameChangeSender::NameChangeSender(RequestSendHandler& send_handler,
size_t send_queue_max)
: sending_(false), send_handler_(send_handler),
send_queue_max_(send_queue_max), io_service_(NULL) {
// Queue size must be big enough to hold at least 1 entry.
setQueueMaxSize(send_queue_max);
}
void
NameChangeSender::startSending(isc::asiolink::IOService& io_service) {
if (amSending()) {
// This amounts to a programmatic error.
isc_throw(NcrSenderError, "NameChangeSender is already sending");
}
// Clear send marker.
ncr_to_send_.reset();
// Call implementation dependent open.
try {
// Remember io service we're given.
io_service_ = &io_service;
open(io_service);
} catch (const isc::Exception& ex) {
stopSending();
isc_throw(NcrSenderOpenError, "Open failed: " << ex.what());
}
// Set our status to sending.
setSending(true);
// If there's any queued already.. we'll start sending.
sendNext();
}
void
NameChangeSender::stopSending() {
    // Set the send indicator to false, no matter what. This allows us to at
    // least try to re-open via startSending(). Also, setting it false now
    // allows us to break the sendNext() chain in invokeSendHandler.
setSending(false);
// If there is an outstanding IO to complete, attempt to process it.
if (ioReady() && io_service_ != NULL) {
try {
runReadyIO();
} catch (const std::exception& ex) {
// Swallow exceptions. If we have some sort of error we'll log
// it but we won't propagate the throw.
LOG_ERROR(dhcp_ddns_logger,
DHCP_DDNS_NCR_FLUSH_IO_ERROR).arg(ex.what());
}
}
try {
// Call implementation dependent close.
close();
} catch (const isc::Exception &ex) {
// Swallow exceptions. If we have some sort of error we'll log
// it but we won't propagate the throw.
LOG_ERROR(dhcp_ddns_logger,
DHCP_DDNS_NCR_SEND_CLOSE_ERROR).arg(ex.what());
}
io_service_ = NULL;
}
void
NameChangeSender::sendRequest(NameChangeRequestPtr& ncr) {
if (!amSending()) {
isc_throw(NcrSenderError, "sender is not ready to send");
}
if (!ncr) {
isc_throw(NcrSenderError, "request to send is empty");
}
if (send_queue_.size() >= send_queue_max_) {
isc_throw(NcrSenderQueueFull,
"send queue has reached maximum capacity: "
<< send_queue_max_ );
}
// Put it on the queue.
send_queue_.push_back(ncr);
// Call sendNext to schedule the next one to go.
sendNext();
}
void
NameChangeSender::sendNext() {
if (ncr_to_send_) {
        // @todo Not sure if there is any risk of getting stuck here but
        // an interval timer to defend would be good.
        // In reality, the derivation should ensure they time out themselves.
return;
}
// If queue isn't empty, then get one from the front. Note we leave
// it on the front of the queue until we successfully send it.
if (!send_queue_.empty()) {
ncr_to_send_ = send_queue_.front();
// @todo start defense timer
        // If a send were to hang and we timed it out, then the timeout
        // handler would need to cycle through open/close?
// Call implementation dependent send.
doSend(ncr_to_send_);
}
}
void
NameChangeSender::invokeSendHandler(const NameChangeSender::Result result) {
// @todo reset defense timer
if (result == SUCCESS) {
// It shipped so pull it off the queue.
send_queue_.pop_front();
}
// Invoke the completion handler passing in the result and a pointer
// the request involved.
// Surround the invocation with a try-catch. The invoked handler is
// not supposed to throw, but in the event it does we will at least
// report it.
try {
send_handler_(result, ncr_to_send_);
} catch (const std::exception& ex) {
LOG_ERROR(dhcp_ddns_logger, DHCP_DDNS_UNCAUGHT_NCR_SEND_HANDLER_ERROR)
.arg(ex.what());
}
// Clear the pending ncr pointer.
ncr_to_send_.reset();
// Set up the next send
try {
if (amSending()) {
sendNext();
}
} catch (const isc::Exception& ex) {
        // It is possible, though unlikely, for sendNext to fail without
        // scheduling the send. While unlikely, it does mean the callback
// will not get called with a failure. A throw here would surface
// at the IOService::run (or run variant) invocation. So we will
// close the window by invoking the application handler with
// a failed result, and let the application layer sort it out.
LOG_ERROR(dhcp_ddns_logger, DHCP_DDNS_NCR_SEND_NEXT_ERROR)
.arg(ex.what());
// Invoke the completion handler passing in failed result.
// Surround the invocation with a try-catch. The invoked handler is
// not supposed to throw, but in the event it does we will at least
// report it.
try {
send_handler_(ERROR, ncr_to_send_);
} catch (const std::exception& ex) {
LOG_ERROR(dhcp_ddns_logger,
DHCP_DDNS_UNCAUGHT_NCR_SEND_HANDLER_ERROR).arg(ex.what());
}
}
}
void
NameChangeSender::skipNext() {
if (!send_queue_.empty()) {
// Discards the request at the front of the queue.
send_queue_.pop_front();
}
}
void
NameChangeSender::clearSendQueue() {
if (amSending()) {
isc_throw(NcrSenderError, "Cannot clear queue while sending");
}
send_queue_.clear();
}
void
NameChangeSender::setQueueMaxSize(const size_t new_max) {
if (new_max == 0) {
isc_throw(NcrSenderError, "NameChangeSender:"
" queue size must be greater than zero");
}
send_queue_max_ = new_max;
}
const NameChangeRequestPtr&
NameChangeSender::peekAt(const size_t index) const {
if (index >= getQueueSize()) {
isc_throw(NcrSenderError,
"NameChangeSender::peekAt peek beyond end of queue attempted"
<< " index: " << index << " queue size: " << getQueueSize());
}
return (send_queue_.at(index));
}
void
NameChangeSender::assumeQueue(NameChangeSender& source_sender) {
if (source_sender.amSending()) {
isc_throw(NcrSenderError, "Cannot assume queue:"
" source sender is actively sending");
}
if (amSending()) {
isc_throw(NcrSenderError, "Cannot assume queue:"
" target sender is actively sending");
}
if (getQueueMaxSize() < source_sender.getQueueSize()) {
isc_throw(NcrSenderError, "Cannot assume queue:"
" source queue count exceeds target queue max");
}
if (!send_queue_.empty()) {
isc_throw(NcrSenderError, "Cannot assume queue:"
" target queue is not empty");
}
send_queue_.swap(source_sender.getSendQueue());
}
int
NameChangeSender::getSelectFd() {
isc_throw(NotImplemented, "NameChangeSender::getSelectFd is not supported");
}
void
NameChangeSender::runReadyIO() {
if (!io_service_) {
isc_throw(NcrSenderError, "NameChangeSender::runReadyIO"
" sender io service is null");
}
// We shouldn't be here if IO isn't ready to execute.
    // By running poll we're guaranteed not to hang.
/// @todo Trac# 3325 requests that asiolink::IOService provide a
/// wrapper for poll().
io_service_->get_io_service().poll_one();
}
} // namespace isc::dhcp_ddns
} // namespace isc
|
\section{Technical Evaluation}
\label{sec:technical_evaluation}
The technical evaluation of this chapter includes several experiments carried
out to test the performance of the adaptation process. One of the most
important modules developed for AdaptUI is the Pellet reasoning engine port
for Android. This module deserves special emphasis in the evaluation, as it
leads the whole adaptation process and manages the knowledge represented
through the AdaptUIOnt ontology. Besides, several other experiments are
presented. Thus, in the following sections these experiments and their
results are detailed:
% This section gathers several experiments regarding the technical part of the
% evaluation. Within this section the following experiments are included:
\begin{enumerate}[label=\alph*)]
\item First, Section~\ref{sec:performance_evaluation} presents a series
of experiments regarding the performance of \textit{Pellet4Android} in
comparison with Pellet for Java. These experiments include the default
AdaptUIOnt ontology and different versions of AdaptUIOnt with several modifications
of the axiom sets. For more details on the \textit{Pellet4Android} mobile
reasoning engine, see Section~\ref{sec:pellet4android}.
\item Secondly, AdaptUI is compared to another user adaptation solution:
Imhotep. This comparison is detailed in Section~\ref{sec:imhotep_comparison}.
\item Next, several scenarios are presented to evaluate the adaptation
process of AdaptUI. Besides, a comparison with the Imhotep user adaptation
framework is also performed. The scenarios and their details are given in
Section~\ref{sec:scenarios}.
\item Finally, five developers with experience in developing Android based
applications have evaluated the provided \acs{api}. This experiment is detailed
in Section~\ref{sec:developers}.
\end{enumerate}
\subsection{Performance Evaluation: Pellet and Pellet4Android}
\label{sec:performance_evaluation}
In this section different experiments are presented in order to
discuss the results of using mobile reasoning engines. In this dissertation an
evaluation of the reasoning performance of \textit{Pellet4Android}, the Android
based version of Pellet ported for AdaptUI, is performed. To do so, this evaluation
uses Pellet, the desktop Java based reasoning engine, as a baseline against which
the performance results of both solutions are compared. This evaluation has been
divided into three different experiments:
\begin{enumerate}
\item The first experiment evaluates the default AdaptUIOnt ontology with both
reasoning engines. When the ontology is loaded, several rules are processed and
triggered. As a result of this experiment the corresponding performance
results are compared.
\item For the second experiment the set of ABox axioms of the AdaptUIOnt
ontology is increased. The ABox gathers the knowledge about the
individuals, including concepts and roles assertions, and individuals
equality and inequality~\citep{krotzsch_description_2012}. In other words,
it describes the attributes of instances (or individuals), the roles between
instances, and other assertions about instances regarding their class
membership with the TBox concepts~\citep{abox_tbox}. The ABox, RBox and TBox
belong to the \ac{owl} 2 Description Logic (DL). Since several modifications
are carried out in these axiom sets for these experiments, a more concrete
description of the concepts represented by each set is given in
Table~\ref{tbl:dl_terminology} (an illustrative example is also given after
this list). Hence, increasing the number of individuals might result in
different performance results on the two platforms.
\item Next, the set of \ac{swrl} axioms of the AdaptUIOnt ontology is modified
by increasing its number of axioms. This axiom set collects axioms related to
the rules included in the ontology. Therefore, this experiment focuses on the
reasoning capabilities of each version of Pellet.
\end{enumerate}
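To make the ABox/TBox distinction concrete, consider an illustrative example
(the concept and individual names are placeholders, not actual AdaptUIOnt
identifiers): a TBox axiom states terminological knowledge such as
$\mathit{BlindUser} \sqsubseteq \mathit{User}$, while an ABox axiom asserts a
fact about an individual such as $\mathit{User}(\mathit{john})$. The
experiments below only enlarge the ABox and \ac{swrl} sets.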
\input{5_experiments_and_results/table_abox_tbox}
% For these experiments several tables and the corresponding charts are included,
% showing the results obtained during the tests. In these tables the ontology
% triples, ABox and \ac{swrl} axioms sets, mean, median and deviation are shown. The
% triples are obtained by a SPARQL query (see Listing~\ref{lst:fuseki}). On the
% other hand the axioms are requested through the \ac{owl}-API.
%
% \lstset{label=lst:fuseki, language=java, basicstyle=\footnotesize, frame=single,
% keywordstyle=\color{blue}, captionpos=b, caption={SPARQL query for obtaining the
% number of triples of the ontology. Between the $WHERE$ braces there is:
% $?s$, which represents the subject; $?p$, representing the predicate; and $?o$,
% which represents the object.}, breakatwhitespace=false, breaklines=true}
% \begin{lstlisting}
% SELECT (COUNT(*) AS ?no) WHERE { ?s ?p ?o }
% \end{lstlisting}
The cited experiments have been performed using two different types of devices.
The ones running Pellet have been launched in a desktop environment. On the
contrary, as \textit{Pellet4Android} is an Android based version of the reasoning
engine, it has been tested on several mobile devices. The characteristics of all the
devices used for these experiments are detailed in Table~\ref{tbl:devices_specs}.
\input{5_experiments_and_results/devices_specs}
The mobile devices shown in the previous table have not been chosen arbitrarily.
As can be seen in Figure~\ref{fig:android_market}, as of February 2014 Samsung
devices represented a 65\% share of all Android devices.
\begin{figure}
\centering
\includegraphics[width=0.75\textwidth]{android_market.pdf}
\caption{Global Android share~\citep{samsung_share_2014}. As this pie chart
illustrates, Samsung devices represent 65\% of the worldwide Android market
share.}
\label{fig:android_market}
\end{figure}
Following this outline, each experiment is described and evaluated below.
First, the default AdaptUIOnt reasoning performance comparing Pellet and
\textit{Pellet4Android} is introduced in Section~\ref{sec:eval_default_ont}.
Next, these reasoning engines are tested using a modification of the default
ontology that increases the ABox axiom set. This experiment is described in
Section~\ref{sec:eval_abox}. Finally, the \ac{swrl} axiom set of the default
AdaptUIOnt ontology is increased to evaluate the performance of both reasoning
engines when dealing with larger sets of rules (see Section~\ref{sec:eval_swrl}).
\subsubsection{Using the Default AdaptUIOnt Ontology}
\label{sec:eval_default_ont}
In this experiment the default version of the AdaptUIOnt ontology is used as
input to the reasoning engines in order to evaluate their performance. As
AdaptUIOnt has been designed as a lightweight ontology suitable for mobile
reasoning engines, this experiment shows how \textit{Pellet4Android} performs
in comparison with the results obtained by Pellet in a desktop environment.
These results are shown in Table~\ref{tbl:eval_default_ont}.
\begin{table}
\caption{Pellet and \textit{Pellet4Android} comparison loading the default
AdaptUIOnt ontology.}
\label{tbl:eval_default_ont}
\footnotesize
\centering
\begin{tabular}{l l r r r r r r}
\hline
& & \multicolumn{2}{c}{\textbf{Axioms}} &
\multicolumn{3}{c}{\textbf{Results}} \\
\textbf{Device} & \textbf{Triples}& \textbf{ABox} & \textbf{\ac{swrl}}
& \textbf{Mean} & \textbf{Median} & \textbf{Deviation} \\
\hline
Acer laptop & 2,779 & 37 & 13 & 0.946 & 0.951 & 0.017 \\
(Pellet) \\
Galaxy SIII Mini& 2,779& 37 & 13 & 2.764 & 2.737 & 0.127 \\
(\textit{Pellet4Android}) \\
Galaxy SIII & 2,779 & 37 & 13 & 1.649 & 1.652 & 0.076 \\
(\textit{Pellet4Android}) \\
Nexus 10 & 2,779 & 37 & 13 & 5.147 & 5.122 & 0.205 \\
(\textit{Pellet4Android}) \\
\hline
\end{tabular}
\end{table}
Surprisingly, Table~\ref{tbl:eval_default_ont} reveals that the Nexus 10 performs
worse than the other two mobile devices. This is usually due to the memory that
the Java runtime allocates for launching the required reasoning tasks.
Table~\ref{tbl:eval_abox}, for instance, shows a more usual behaviour of
the Nexus 10, one that matches its hardware specifications.
Figure~\ref{fig:pellet_default} illustrates the differences between Pellet and
\textit{Pellet4Android} when loading the default ABox and \ac{swrl} axiom sets in
AdaptUIOnt. As shown in Figure~\ref{fig:pellet_default} and in
Table~\ref{tbl:eval_default_ont}, the performance of Pellet in a Java desktop
environment is better in every case than that of the Android based
version. However, although there are differences depending on the
mobile device used, these differences are small. This is especially remarkable in
the case of the Samsung Galaxy SIII, which completes the task in a remarkable
1.649 seconds, just approximately 0.7 seconds slower than Pellet for Java.
\begin{figure}
\centering
\includegraphics[width=0.50\textwidth]{pellet_default.pdf}
\caption{Pellet and \textit{Pellet4Android} performance comparison using the
default AdaptUIOnt ontology. See Table~\ref{tbl:eval_default_ont}.}
\label{fig:pellet_default}
\end{figure}
\subsubsection{Incrementing the ABox Axiom Set}
\label{sec:eval_abox}
In this case the experiment consists of incrementing the ABox axiom set of
the AdaptUIOnt ontology. As stated before, the ABox describes the attributes of
instances, the roles between instances, and other assertions about instances
regarding their class membership with the TBox concepts~\citep{abox_tbox}.
The modification of the ABox has been carried out by increasing the number of
instances to 5,000, 10,000, 15,000 and finally 20,000. Thus,
with this experiment we want to evaluate how increasing the number of
individuals results in a performance penalty, especially in the case
of \textit{Pellet4Android}. Table~\ref{tbl:eval_abox} shows the results of this
experiment.
\begin{table}
\caption{Pellet and \textit{Pellet4Android} comparison loading the AdaptUIOnt
ontology with an increment in the ABox axiom set.}
\label{tbl:eval_abox}
\footnotesize
\centering
\begin{tabular}{l l r r r r r r}
\hline
\textbf{Device} & & \multicolumn{2}{c}{\textbf{Axioms}} &
\multicolumn{3}{c}{\textbf{Results}} \\
\textbf{(Reasoner)} & \textbf{Triples}& \textbf{ABox} & \textbf{\ac{swrl}}
& \textbf{Mean} & \textbf{Median} & \textbf{Deviation} \\
\hline
Acer laptop & 12,779 & 5,000 & 13 & 3.014 & 3.012 & 0.034 \\
(Pellet) & 22,779 & 10,000 & 13 & 3.903 & 3.905 & 0.052 \\
& 32,779 & 15,000 & 13 & 4.228 & 4.231 & 0.036 \\
& 42,779 & 20,000 & 13 & 4.539 & 4.541 & 0.042 \\
\hline
Galaxy SIII Mini& 12,779& 5,000& 13& 59.412& 59.508& 0.708 \\
(\textit{Pellet4Android}) & 22,779 & 10,000 & 13 & 30.321 & 30.327 & 0.347 \\
& 32,779 & 15,000 & 13 & 87.957 & 87.018 & 1.108 \\
& 42,779 & 20,000 & 13 & 183.882&183.879 & 2.101 \\
\hline
Galaxy SIII & 12,779 & 5,000 & 13 & 36.336 & 36.194 & 0.668 \\
(\textit{Pellet4Android})& 22,779 & 10,000 & 13 & 16.471 & 16.439 & 0.288\\
& 32,779 & 15,000 & 13 & 45.387 & 45.593 & 0.729\\
& 42,779 & 20,000 & 13 & 97.151 & 97.440 & 1.120\\
\hline
Nexus 10 & 12,779 & 5,000 & 13 & 14.171 & 14.428 & 0.525\\
(\textit{Pellet4Android})& 22,779 & 10,000 & 13 & 9.024 & 9.065 & 0.291\\
& 32,779 & 15,000 & 13 & 17.944& 17.969 & 0.496\\
& 42,779 & 20,000 & 13 & 32.070 & 32.019 & 0.588\\
\hline
\end{tabular}
\end{table}
Figure~\ref{fig:pellet_abox} illustrates the differences between Pellet and
\textit{Pellet4Android} when loading the AdaptUIOnt ontology with an increment
applied to the ABox axiom set. As shown in the chart (in accordance with
Table~\ref{tbl:eval_abox}), the performance of Pellet for Java environments
remains better than that of the Android based version. While Pellet
keeps its response time under 5 seconds, the Android devices require much more
time to perform the same reasoning. In fact, the Samsung Galaxy SIII
Mini in particular requires more than 180 seconds to evaluate the corresponding
20,000 instances, while the Samsung Galaxy SIII needs approximately 97 seconds
and the Nexus 10 no more than 32.
\begin{figure}
\centering
\includegraphics[width=0.80\textwidth]{pellet_abox.pdf}
\caption{Pellet and \textit{Pellet4Android} performance comparison using the
AdaptUIOnt ontology, increasing the ABox axiom set. See Table~\ref{tbl:eval_abox}.}
\label{fig:pellet_abox}
\end{figure}
\subsubsection{Incrementing the \ac{swrl} Axiom Set}
\label{sec:eval_swrl}
Finally, this last experiment consists of incrementing the \ac{swrl} axiom set of
the AdaptUIOnt ontology, which collects the axioms related to the rules
included in the ontology. Using 5,000, 10,000, 15,000 and
finally 20,000 rules, we aim to evaluate how increasing the number of rules
penalizes the performance of Pellet, especially in the case of
\textit{Pellet4Android}. Table~\ref{tbl:eval_swrl} shows the results of this
experiment.
\begin{table}
\caption{Pellet and \textit{Pellet4Android} comparison loading the AdaptUIOnt
ontology with an increment in the \ac{swrl} axiom set.}
\label{tbl:eval_swrl}
\footnotesize
\centering
\begin{tabular}{l l r r r r r r}
\hline
& & \multicolumn{2}{c}{\textbf{Axioms}} &
\multicolumn{3}{c}{\textbf{Results}} \\
\textbf{Device} & \textbf{Triples}& \textbf{ABox} & \textbf{\ac{swrl}}
& \textbf{Mean} & \textbf{Median} & \textbf{Deviation} \\
\hline
Acer laptop & 82,779 & 37 & 5,013 & 4.770 & 4.732 & 0.141 \\
(Pellet) & 162,779 & 37 & 10,013 & 6.327 & 6.296 & 0.164 \\
& 242,779 & 37 & 15,013 & 7.427 & 7.194 & 0.444 \\
& 322,779 & 37 & 20,013 & 8.147 & 8.117 & 0.105 \\
\hline
Galaxy SIII Mini& 82,779 & 37 & 5,013 & 96.878 & 96.988 & 0.109 \\
(\textit{Pellet4Android}) & 162,779& 37 & 10,013 & 101.656 & 101.899 & 0.322 \\
& 242,779 & 37 & 15,013 & 243.981 & 244.011 & 0.298 \\
& 322,779 & 37 & 20,013 & 331.433 & 331.894 & 0.110 \\
\hline
Galaxy SIII & 82,779 & 37 & 5,013 & 84.121 & 84.121 & 0.869 \\
(\textit{Pellet4Android})& 162,779 & 37 & 10,013 & 74.248 & 74.103 & 0.250\\
& 242,779 & 37 & 15,013 & 209.431 & 208.628 & 1.699 \\
& 322,779 & 37 & 20,013 & 216.005 & 216.077 & 1.202 \\
\hline
Nexus 10 & 82,779 & 37 & 5,013 & 22.317 & 22.471 & 0.333 \\
(\textit{Pellet4Android})& 162,779& 37 & 10,013& 45.193 & 44.736 & 1.312 \\
& 242,779& 37 & 15,013& 85.543 & 88.134 & 5.490 \\
& 322,779& 37 & 20,013& 107.151& 106.131& 2.749 \\
\hline
\end{tabular}
\end{table}
Figure~\ref{fig:pellet_swrl} illustrates the differences between Pellet and
\textit{Pellet4Android} when running the different sets of \ac{swrl} axioms of the
AdaptUIOnt ontology. As shown in the chart, evaluating the set of rules takes
more time than evaluating the instances. In fact, the scale of the Y axis
reaches 350 seconds, while in Figure~\ref{fig:pellet_abox} the maximum mean
value stays below 200 seconds.
\begin{figure}
\centering
\includegraphics[width=0.80\textwidth]{pellet_swrl.pdf}
\caption{Pellet and \textit{Pellet4Android} performance comparison using the
AdaptUIOnt ontology, increasing the \ac{swrl} axiom set. See Table~\ref{tbl:eval_swrl}.}
\label{fig:pellet_swrl}
\end{figure}
\subsubsection{Discussion}
\label{sec:performance_discussion}
During these experiments, and as shown in Table~\ref{tbl:eval_default_ont},
Table~\ref{tbl:eval_abox} and Table~\ref{tbl:eval_swrl}, neither the TBox nor the
RBox axiom sets have been modified. These collections belong to what is called
the terminological knowledge in \ac{owl} 2, and they do not affect the performance
of the reasoning process. Hence, the TBox and the RBox contain 266 and 22 axioms
respectively throughout the experiments.
On the contrary, sets of 5,000, 10,000, 15,000 and 20,000 axioms have been added
to the ABox and \ac{swrl} axiom sets to demonstrate the performance penalty
when using large axiom sets with the tested reasoning engines. These modifications
of the AdaptUIOnt ontology have been performed through several Python scripts,
which modify the default \textit{adaptui.owl} file, as sketched below.
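As an illustration, the following listing shows a minimal sketch of such a
script; it is not the actual code used in the evaluation.
\lstset{label=lst:abox_script, language=Python, basicstyle=\footnotesize,
frame=single, keywordstyle=\color{blue}, captionpos=b, caption={Illustrative
sketch of an ABox padding script. It assumes the \textit{rdflib} Python
library; the namespace and the \textit{User} class name are placeholders, not
the actual AdaptUIOnt identifiers.}, breakatwhitespace=false, breaklines=true}
\begin{lstlisting}
# Illustrative sketch (assumes rdflib; the namespace and class
# name are placeholders, not the real AdaptUIOnt ones).
from rdflib import Graph, Namespace, RDF, URIRef

ADAPTUI = Namespace("http://example.org/adaptui#")

def pad_abox(src, dst, n):
    g = Graph()
    g.parse(src, format="xml")  # load the default ontology
    for i in range(n):
        ind = URIRef(ADAPTUI["Individual%d" % i])
        g.add((ind, RDF.type, ADAPTUI.User))  # one ABox assertion each
    g.serialize(destination=dst, format="xml")

pad_abox("adaptui.owl", "adaptui_5000.owl", 5000)
\end{lstlisting}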
\subsubsection{Conclusions}
\label{sec:performance_conclusions}
These results demonstrate that, although managing semantics on mobile devices is
possible, the results are still far from those obtained in a desktop environment
when dealing with complex reasoning tasks. Nonetheless, \textit{Pellet4Android}
is a good approximation of a reasoner to run on mobile devices. A native Pellet
port for Android written in C++ would probably improve these results. But one of
the AdaptUIOnt ontology's benefits is its lightness. Using fewer than 400 axioms,
this ontology is able to model a full adaptation domain, considering the user and
his/her capabilities, the surrounding environment and the device characteristics.
The results shown in Table~\ref{tbl:eval_default_ont} show how \textit{Pellet4Android}
responds properly when dealing with the AdaptUIOnt ontology. However, further
efforts in mobile reasoners would benefit these systems.
% Review thoroughly
Regarding the ABox collection of axioms, the first conclusion extracted
from the results shown in Table~\ref{tbl:eval_abox} is that Pellet running on a
PC responds very efficiently. For example, increasing the number of instances
to 20,000 only raises the response time to around 4.5 seconds
(see Figure~\ref{fig:pellet_abox}). Increasing the ABox slows down the final
performance, increasing the time needed to load the ontology. On
the contrary, running this experiment on the mobile devices reveals a lack of
efficiency, where each device's hardware capabilities have to be taken into
account. The first mobile device, the Samsung Galaxy SIII~Mini, needs around 2.764
seconds to load the default AdaptUI ontology, which consists of 37 ABox and
13 \ac{swrl} axioms. This same case takes 1.649 seconds on the Samsung Galaxy
SIII and 5.147 seconds on the Nexus 10. Although these figures might appear
acceptable considering we are dealing with limited hardware, increasing the
axiom sets in the same way as for the Acer laptop results in unmanageable
response times. For instance, loading 5,032 ABox axioms takes 3.014 seconds with
Pellet on the laptop, while it takes 59.412 seconds on the Samsung Galaxy SIII
Mini, 36.336 seconds on the Samsung Galaxy SIII and 14.171 seconds on the Nexus 10.
Nevertheless, these differences are more significant when dealing with the \ac{swrl}
axiom set. In this case, although Pellet's performance does not exceed 9
seconds, the differences with respect to the ABox experiment are much bigger for
\textit{Pellet4Android}. In the best case, the Nexus 10 needs around 22 seconds
to reason over 5,000 rules, while the Samsung Galaxy SIII Mini needs more
than 90. This illustrates how much performance differs depending on the
Android device chosen.
As a final conclusion, taking into account the presented results, \textit{Pellet4Android}
shows promising performance when dealing with the AdaptUIOnt ontology. The
lightness of the ontology (considering its number of axioms) ensures a more
than acceptable performance. |
module Builtin.Size where
open import Agda.Builtin.Size public
|
Aston Villa's arch-rivals are Birmingham City, with games between the two clubs known as the Second City Derby. Historically though, West Bromwich Albion have arguably been Villa's greatest rivals, a view highlighted in a fan survey conducted in 2003. The two teams contested three FA Cup finals in the late 19th century. Villa also enjoy less heated local rivalries with Wolverhampton Wanderers and Coventry City. Through the relegation of West Brom and Birmingham City to the Football League Championship in the 2005–06 season, at the start of the 2006–07 Premiership season Villa were the only Midlands club in that league. The nearest opposing team Villa faced during that season was Sheffield United, who played 62 miles (100 km) away in South Yorkshire. For the 2010–11 season, West Bromwich Albion were promoted and joined Aston Villa, Wolverhampton Wanderers, and Birmingham City in the Premier League. This marked the first time that the "West Midlands' Big Four" clubs have been in the Premier League at the same time, and the first time together in the top flight since the 1983–84 season. Birmingham were relegated at the end of the 2010–11 season, ending this period.
|
module Main
%default total
%logging declare.data.parameters 20
%logging eval.eta 10
-- explicit
data Value : (value : Nat -> Type) -> Type where
EmptyV : {0 value : Nat -> Type} -> Value (\ n => value n)
data TValue : Nat -> Type where
MkTupleV : Value (\n => TValue n) -> TValue n
|
(*
* Copyright 2023, Proofcraft Pty Ltd
* Copyright 2014, General Dynamics C4 Systems
*
* SPDX-License-Identifier: GPL-2.0-only
*)
(*
Lemmas on arch get/set object etc
*)
theory ArchAcc_AI
imports
SubMonad_AI
"Lib.Crunch_Instances_NonDet"
begin
context Arch begin global_naming ARM
bundle unfold_objects =
obj_at_def[simp]
kernel_object.splits[split]
arch_kernel_obj.splits[split]
get_object_wp [wp]
bundle unfold_objects_asm =
obj_at_def[simp]
kernel_object.split_asm[split]
arch_kernel_obj.split_asm[split]
definition
"valid_asid asid s \<equiv> arm_asid_map (arch_state s) asid \<noteq> None"
lemma get_asid_pool_wp [wp]:
"\<lbrace>\<lambda>s. \<forall>pool. ko_at (ArchObj (ASIDPool pool)) p s \<longrightarrow> Q pool s\<rbrace>
get_asid_pool p
\<lbrace>Q\<rbrace>"
apply (simp add: get_asid_pool_def get_object_def)
apply (wp|wpc)+
apply (clarsimp simp: obj_at_def)
done
lemma set_asid_pool_typ_at [wp]:
"set_asid_pool ptr pool \<lbrace>\<lambda>s. P (typ_at T p s)\<rbrace>"
apply (simp add: set_asid_pool_def set_object_def get_object_def)
apply wp
including unfold_objects
by clarsimp
lemmas set_asid_pool_typ_ats [wp] = abs_typ_at_lifts [OF set_asid_pool_typ_at]
lemma get_pd_wp [wp]:
"\<lbrace>\<lambda>s. \<forall>pd. ko_at (ArchObj (PageDirectory pd)) p s \<longrightarrow> Q pd s\<rbrace> get_pd p \<lbrace>Q\<rbrace>"
unfolding get_pd_def including unfold_objects by wpsimp
lemma get_pde_wp:
"\<lbrace>\<lambda>s. \<forall>pd. ko_at (ArchObj (PageDirectory pd)) (p && ~~ mask pd_bits) s \<longrightarrow>
Q (pd (ucast (p && mask pd_bits >> 2))) s\<rbrace>
get_pde p
\<lbrace>Q\<rbrace>"
by (simp add: get_pde_def) wp
lemma get_pde_inv [wp]: "get_pde p \<lbrace>P\<rbrace>"
by (wpsimp wp: get_pde_wp)
bundle pagebits =
pd_bits_def[simp] pt_bits_def[simp]
pageBits_def[simp] mask_lower_twice[simp]
and.assoc[where ?'a = \<open>'a::len word\<close>,symmetric,simp] obj_at_def[simp]
pde.splits[split]
pte.splits[split]
lemma get_master_pde_wp:
"\<lbrace>\<lambda>s. \<forall>pd. ko_at (ArchObj (PageDirectory pd)) (p && ~~ mask pd_bits) s
\<longrightarrow> Q (case (pd (ucast (p && ~~ mask 6 && mask pd_bits >> 2))) of
SuperSectionPDE x xa xb \<Rightarrow> pd (ucast (p && ~~ mask 6 && mask pd_bits >> 2))
| _ \<Rightarrow> pd (ucast (p && mask pd_bits >> 2))) s\<rbrace>
get_master_pde p
\<lbrace>Q\<rbrace>"
apply (simp add: get_master_pde_def)
apply (wp get_pde_wp | wpc)+
including pagebits
by auto
lemma store_pde_typ_at [wp]:
"store_pde ptr pde \<lbrace>\<lambda>s. P (typ_at T p s)\<rbrace>"
apply (simp add: store_pde_def set_pd_def set_object_def get_object_def)
apply (wpsimp simp: obj_at_def a_type_def)
done
lemmas store_pde_typ_ats [wp] = abs_typ_at_lifts [OF store_pde_typ_at]
lemma get_pt_wp [wp]:
"\<lbrace>\<lambda>s. \<forall>pt. ko_at (ArchObj (PageTable pt)) p s \<longrightarrow> Q pt s\<rbrace> get_pt p \<lbrace>Q\<rbrace>"
apply (simp add: get_pt_def get_object_def)
apply (wpsimp simp: obj_at_def)
done
lemma get_pte_wp:
"\<lbrace>\<lambda>s. \<forall>pt. ko_at (ArchObj (PageTable pt)) (p && ~~mask pt_bits) s \<longrightarrow>
Q (pt (ucast (p && mask pt_bits >> 2))) s\<rbrace>
get_pte p
\<lbrace>Q\<rbrace>"
by (simp add: get_pte_def) wp
lemma get_pte_inv [wp]:
"\<lbrace>P\<rbrace> get_pte p \<lbrace>\<lambda>_. P\<rbrace>"
by (wpsimp wp: get_pte_wp)
lemma get_master_pte_wp:
"\<lbrace>\<lambda>s. \<forall>pt. ko_at (ArchObj (PageTable pt)) (p && ~~ mask pt_bits) s \<longrightarrow>
Q (case pt (ucast (p && ~~ mask 6 && mask pt_bits >> 2)) of
LargePagePTE x xa xb \<Rightarrow>
pt (ucast (p && ~~ mask 6 && mask pt_bits >> 2))
| _ \<Rightarrow> pt (ucast (p && mask pt_bits >> 2)))
s\<rbrace>
get_master_pte p \<lbrace>Q\<rbrace>"
apply (simp add: get_master_pte_def)
apply (wp get_pte_wp | wpc)+
including pagebits
by auto
lemma store_pte_typ_at:
"store_pte ptr pte \<lbrace>\<lambda>s. P (typ_at T p s)\<rbrace>"
apply (simp add: store_pte_def set_pt_def set_object_def get_object_def)
apply (wpsimp simp: obj_at_def a_type_def)
done
lemmas store_pte_typ_ats [wp] = abs_typ_at_lifts [OF store_pte_typ_at]
lemma lookup_pt_slot_inv:
"lookup_pt_slot pd vptr \<lbrace>P\<rbrace>"
apply (simp add: lookup_pt_slot_def)
apply (wp get_pde_wp|wpc)+
apply clarsimp
done
lemma lookup_pt_slot_inv_any:
"\<lbrace>\<lambda>s. \<forall>x. Q x s\<rbrace> lookup_pt_slot pd vptr \<lbrace>Q\<rbrace>,-"
"\<lbrace>E\<rbrace> lookup_pt_slot pd vptr -, \<lbrace>\<lambda>ft. E\<rbrace>"
apply (simp_all add: lookup_pt_slot_def)
apply (wpsimp wp: get_pde_wp)+
done
crunch cte_wp_at[wp]: set_irq_state "\<lambda>s. P (cte_wp_at P' p s)"
lemma set_pt_cte_wp_at:
"set_pt ptr val \<lbrace>\<lambda>s. P (cte_wp_at P' p s)\<rbrace>"
apply (simp add: set_pt_def)
apply (wpsimp wp: set_object_wp_strong)
apply (subst cte_wp_at_after_update')
apply (clarsimp simp: a_type_def obj_at_def split: if_splits kernel_object.splits)+
done
lemma set_pd_cte_wp_at:
"set_pd ptr val \<lbrace>\<lambda>s. P (cte_wp_at P' p s)\<rbrace>"
apply (simp add: set_pd_def)
apply (wpsimp wp: set_object_wp_strong)
apply (subst cte_wp_at_after_update')
including unfold_objects
apply (clarsimp simp: a_type_def split: if_splits)+
done
lemma set_asid_pool_cte_wp_at:
"set_asid_pool ptr val \<lbrace>\<lambda>s. P (cte_wp_at P' p s)\<rbrace>"
apply (simp add: set_asid_pool_def)
including unfold_objects_asm
by (wpsimp wp: set_object_wp_strong
simp: a_type_def cte_wp_at_after_update'
split: if_splits)
lemma set_pt_pred_tcb_at[wp]:
"set_pt ptr val \<lbrace>pred_tcb_at proj P t\<rbrace>"
apply (simp add: set_pt_def)
apply (wpsimp wp: set_object_wp_strong)
apply (clarsimp simp: pred_tcb_at_def obj_at_def)
done
lemma set_pd_pred_tcb_at[wp]:
"set_pd ptr val \<lbrace>pred_tcb_at proj P t\<rbrace>"
apply (simp add: set_pd_def)
apply (wpsimp wp: set_object_wp_strong)
apply (clarsimp simp: pred_tcb_at_def obj_at_def)
done
lemma set_asid_pool_pred_tcb_at[wp]:
"set_asid_pool ptr val \<lbrace>pred_tcb_at proj P t\<rbrace>"
apply (subst set_asid_pool_def)
by (wpsimp wp: set_object_wp_strong
simp: pred_tcb_at_def obj_at_def)
lemma mask_pd_bits_inner_beauty:
"is_aligned p 2 \<Longrightarrow>
(p && ~~ mask pd_bits) + (ucast ((ucast (p && mask pd_bits >> 2))::12 word) << 2) = (p::word32)"
by (rule mask_split_aligned; simp add: pd_bits_def pageBits_def)
lemma more_pd_inner_beauty:
fixes x :: "12 word"
fixes p :: word32
assumes x: "x \<noteq> ucast (p && mask pd_bits >> 2)"
shows "(p && ~~ mask pd_bits) + (ucast x << 2) = p \<Longrightarrow> False"
by (rule mask_split_aligned_neg[OF _ _ x]; simp add: pd_bits_def pageBits_def)
lemma mask_pt_bits_inner_beauty:
"is_aligned p 2 \<Longrightarrow>
(p && ~~ mask pt_bits) + (ucast ((ucast (p && mask pt_bits >> 2))::word8) << 2) = (p::word32)"
by (rule mask_split_aligned; simp add: pt_bits_def pageBits_def)
lemma more_pt_inner_beauty:
fixes x :: "word8"
fixes p :: word32
assumes x: "x \<noteq> ucast (p && mask pt_bits >> 2)"
shows "(p && ~~ mask pt_bits) + (ucast x << 2) = p \<Longrightarrow> False"
by (rule mask_split_aligned_neg[OF _ _ x]; simp add: pt_bits_def pageBits_def)
lemma set_pd_aligned [wp]:
"set_pd base pd \<lbrace>pspace_aligned\<rbrace>"
by (wpsimp simp: set_pd_def)
crunch aligned [wp]: store_pde pspace_aligned
(wp: hoare_drop_imps)
lemmas undefined_validE_R = hoare_FalseE_R[where f=undefined]
lemma arch_derive_cap_valid_cap:
"\<lbrace>valid_cap (cap.ArchObjectCap arch_cap)\<rbrace>
arch_derive_cap arch_cap
\<lbrace>valid_cap\<rbrace>, -"
apply(simp add: arch_derive_cap_def)
apply(cases arch_cap, simp_all add: arch_derive_cap_def o_def)
apply(rule hoare_pre, wpc?, wp+;
clarsimp simp add: cap_aligned_def valid_cap_def split: option.splits)+
done
lemma arch_derive_cap_inv:
"arch_derive_cap arch_cap \<lbrace>P\<rbrace>"
apply(simp add: arch_derive_cap_def, cases arch_cap, simp_all)
apply(rule hoare_pre, wpc?, wp+; simp)+
done
definition
"valid_mapping_entries m \<equiv> case m of
Inl (InvalidPTE, _) \<Rightarrow> \<top>
| Inl (LargePagePTE _ _ _, xs) \<Rightarrow> \<lambda>s. \<forall>p \<in> set xs. pte_at p s
| Inl (SmallPagePTE _ _ _, xs) \<Rightarrow> \<lambda>s. \<forall>p \<in> set xs. pte_at p s
| Inr (InvalidPDE, _) \<Rightarrow> \<top>
| Inr (PageTablePDE _ _ _, _) \<Rightarrow> \<bottom>
| Inr (SectionPDE _ _ _ _, xs) \<Rightarrow> \<lambda>s. \<forall>p \<in> set xs. pde_at p s
| Inr (SuperSectionPDE _ _ _, xs) \<Rightarrow> \<lambda>s. \<forall>p \<in> set xs. pde_at p s"
definition "invalid_pte_at p \<equiv> obj_at (\<lambda>ko. \<exists>pt. ko = (ArchObj (PageTable pt))
\<and> pt (ucast (p && mask pt_bits) >> 2) = pte.InvalidPTE) (p && ~~ mask pt_bits)"
definition "invalid_pde_at p \<equiv> obj_at (\<lambda>ko. \<exists>pd. ko = (ArchObj (PageDirectory pd))
\<and> pd (ucast (p && mask pd_bits) >> 2) = pde.InvalidPDE) (p && ~~ mask pd_bits)"
definition
"valid_slots m \<equiv> case m of
Inl (pte, xs) \<Rightarrow>
\<lambda>s. xs \<noteq> [] \<and>
(\<forall>p \<in> set xs. (\<exists>\<rhd> (p && ~~ mask pt_bits) and pte_at p) s) \<and>
wellformed_pte pte \<and> valid_pte pte s
| Inr (pde, xs) \<Rightarrow>
\<lambda>s. xs \<noteq> [] \<and>
(\<forall>p \<in> set xs. (\<exists>\<rhd> (p && ~~ mask pd_bits) and pde_at p) s \<and>
ucast (p && mask pd_bits >> 2) \<notin> kernel_mapping_slots) \<and>
wellformed_pde pde \<and> valid_pde pde s"
crunch inv[wp]: get_master_pte P
crunch inv[wp]: get_master_pde P
lemma ucast_mask_asid_low_bits [simp]:
"ucast ((asid::word32) && mask asid_low_bits) = (ucast asid :: 10 word)"
by (word_eqI_solve simp: asid_low_bits_def)
lemma ucast_ucast_asid_high_bits [simp]:
"ucast (ucast (asid_high_bits_of asid)::word32) = asid_high_bits_of asid"
by word_eqI_solve
lemma mask_asid_low_bits_ucast_ucast:
"((asid::word32) && mask asid_low_bits) = ucast (ucast asid :: 10 word)"
by (word_eqI_solve simp: asid_low_bits_def)
lemma set_asid_pool_cur [wp]:
"set_asid_pool p a \<lbrace>\<lambda>s. P (cur_thread s)\<rbrace>"
unfolding set_asid_pool_def by (wpsimp wp: get_object_wp)
lemma set_asid_pool_cur_tcb [wp]:
"set_asid_pool p a \<lbrace>\<lambda>s. cur_tcb s\<rbrace>"
unfolding cur_tcb_def
by (rule hoare_lift_Pf [where f=cur_thread]; wp)
crunch arch [wp]: set_asid_pool "\<lambda>s. P (arch_state s)"
(wp: get_object_wp)
lemma set_asid_pool_valid_arch [wp]:
"set_asid_pool p a \<lbrace>valid_arch_state\<rbrace>"
by (rule valid_arch_state_lift) (wp set_asid_pool_typ_at)+
lemma set_asid_pool_valid_objs [wp]:
"set_asid_pool p a \<lbrace>valid_objs\<rbrace>"
apply (simp add: set_asid_pool_def)
apply (wp set_object_valid_objs get_object_wp)
including unfold_objects
by (clarsimp simp: a_type_def valid_obj_def arch_valid_obj_def)
lemma pde_at_aligned_vptr:
"\<lbrakk>x \<in> set [0 , 4 .e. 0x3C]; page_directory_at pd s;
pspace_aligned s; is_aligned vptr 24 \<rbrakk>
\<Longrightarrow> pde_at (x + lookup_pd_slot pd vptr) s"
apply (clarsimp simp: lookup_pd_slot_def Let_def
obj_at_def pde_at_def)
apply (drule(1) pspace_alignedD[rotated])
apply (clarsimp simp: a_type_def
split: kernel_object.split_asm
arch_kernel_obj.split_asm if_split_asm
cong: kernel_object.case_cong)
apply (prop_tac "is_aligned x 2")
subgoal
apply (clarsimp simp: upto_enum_step_def word_shift_by_2)
by (rule is_aligned_shiftl_self)
apply (simp add: aligned_add_aligned word_bits_conv
is_aligned_shiftl_self)+
apply (prop_tac "pd = (x + (pd + (vptr >> 20 << 2)) && ~~ mask pd_bits)")
subgoal
supply bit_simps[simp del]
apply (subst mask_lower_twice[symmetric, where n=6])
apply (simp add: pd_bits_def pageBits_def)
apply (subst add.commute, subst add_mask_lower_bits)
apply (erule aligned_add_aligned)
apply (intro is_aligned_shiftl is_aligned_shiftr)
apply simp
apply (simp add: word_bits_conv)
apply simp
apply (subst upper_bits_unset_is_l2p_32[unfolded word_bits_conv])
apply simp
apply (clarsimp simp: upto_enum_step_def word_shift_by_2)
apply (rule shiftl_less_t2n[where m=6, simplified])
apply (rule word_leq_minus_one_le)
apply simp+
apply (rule sym, rule add_mask_lower_bits)
apply (simp add: pd_bits_def pageBits_def)
apply simp
apply (subst upper_bits_unset_is_l2p_32[unfolded word_bits_conv])
apply (simp add: pd_bits_def pageBits_def)
apply (rule shiftl_less_t2n)
apply (rule shiftr_less_t2n')
apply (simp add: pd_bits_def pageBits_def)
by (simp add: pd_bits_def pageBits_def)+
apply simp
done
lemma pde_shifting:
"\<lbrakk>is_aligned (vptr::word32) 24; x \<le> 0xF\<rbrakk> \<Longrightarrow> x + (vptr >> 20) < 0x1000"
apply (rule order_less_le_trans)
apply (subst upper_bits_unset_is_l2p_32 [where n=12, symmetric])
apply (clarsimp simp: word_bits_def)
prefer 2
apply simp
apply (clarsimp simp: word_bits_def)
subgoal premises prems for n'
proof -
have H: "(0xF::word32) < 2 ^ 4" by simp
from prems show ?thesis
apply (subst (asm) word_plus_and_or_coroll)
apply word_eqI
subgoal for n
apply (spec "20 + n")
apply (simp add: word_size)
apply (insert H)
apply (drule (1) order_le_less_trans)
apply (drule bang_is_le)
apply (drule_tac z="2 ^ 4" in order_le_less_trans, assumption)
apply (drule word_power_increasing)
by simp+
apply (clarsimp simp: word_size nth_shiftl nth_shiftr is_aligned_nth)
apply (erule disjE)
apply (insert H)[1]
apply (drule (1) order_le_less_trans)
apply (drule bang_is_le)
apply (drule order_le_less_trans[where z="2 ^ 4"], assumption)
apply (drule word_power_increasing; simp)
apply (spec "20 + n'")
apply (frule test_bit_size)
by (simp add: word_size)
qed
done
lemma p_le_0xF_helper:
"((p::word32) \<le> 0xF) = (\<forall>n'\<ge>4. n'< word_bits \<longrightarrow> \<not> p !! n')"
apply (subst upper_bits_unset_is_l2p_32)
apply (simp add: word_bits_def)
apply (auto intro: plus_one_helper dest: plus_one_helper2)
done
lemma pd_shifting:
"is_aligned (pd::word32) 14 \<Longrightarrow> pd + (vptr >> 20 << 2) && ~~ mask pd_bits = pd"
apply (rule word_eqI[rule_format])
apply (subst word_plus_and_or_coroll)
apply (rule word_eqI)
subgoal for \<dots> na
apply (clarsimp simp: word_size nth_shiftr nth_shiftl is_aligned_nth)
apply (spec na)
apply (simp add: linorder_not_less)
apply (drule test_bit_size)+
by (simp add: word_size)
subgoal for n
apply (clarsimp simp: word_size nth_shiftr nth_shiftl is_aligned_nth word_ops_nth_size
pd_bits_def pageBits_def linorder_not_less)
apply (rule iffI)
apply clarsimp
apply (drule test_bit_size)+
apply (simp add: word_size)
apply clarsimp
apply (spec n)
by simp
done
lemma pd_shifting_dual:
"is_aligned (pd::word32) 14 \<Longrightarrow> pd + (vptr >> 20 << 2) && mask pd_bits = vptr >> 20 << 2"
apply (simp add: pd_bits_def pageBits_def)
apply (subst word_plus_and_or_coroll)
apply (rule word_eqI)
subgoal for n
apply (clarsimp simp: word_size nth_shiftr nth_shiftl is_aligned_nth)
apply (spec n)
apply (simp add: linorder_not_less)
apply (drule test_bit_size)+
by (simp add: word_size)
apply (rule word_eqI)
apply (clarsimp simp: word_size nth_shiftr nth_shiftl is_aligned_nth word_ops_nth_size
pd_bits_def pageBits_def linorder_not_less)
apply (rule iffI)
apply clarsimp
apply clarsimp
apply (drule test_bit_size)+
apply (simp add: word_size)
done
lemma pd_shifting_at:
"\<lbrakk> page_directory_at pd s; pspace_aligned s \<rbrakk> \<Longrightarrow>
pd + (vptr >> 20 << 2) && ~~ mask pd_bits = pd"
apply (rule pd_shifting)
apply (clarsimp simp: pspace_aligned_def obj_at_def)
apply (drule bspec, blast)
including unfold_objects
by (clarsimp simp: a_type_def)
lemma kernel_mapping_slots_empty_pdeI:
"\<lbrakk>equal_kernel_mappings s; valid_global_objs s; valid_arch_state s;
kheap s p = Some (ArchObj (PageDirectory pd)); x \<in> kernel_mapping_slots\<rbrakk> \<Longrightarrow>
(\<forall>r. pde_ref (pd x) = Some r \<longrightarrow> r \<in> set (second_level_tables (arch_state s))) \<and> valid_pde_mappings (pd x)"
apply (clarsimp simp: invs_def valid_state_def equal_kernel_mappings_def valid_global_objs_def)
apply (erule_tac x=p in allE, erule_tac x="arm_global_pd (arch_state s)" in allE)
including unfold_objects
apply clarsimp
by (simp add: empty_table_def valid_arch_state_def a_type_def)
lemma invs_valid_global_pts:
"invs s \<Longrightarrow> valid_global_pts s"
by (clarsimp simp: invs_def valid_state_def valid_arch_state_def)
lemma is_aligned_pt:
"page_table_at pt s \<Longrightarrow> pspace_aligned s
\<Longrightarrow> is_aligned pt pt_bits"
apply (clarsimp simp: obj_at_def)
apply (drule(1) pspace_alignedD)
apply (simp add: pt_bits_def pageBits_def)
done
lemma is_aligned_global_pt:
"\<lbrakk>x \<in> set (arm_global_pts (arch_state s)); pspace_aligned s; valid_arch_state s\<rbrakk>
\<Longrightarrow> is_aligned x pt_bits"
by (metis valid_arch_state_def valid_global_pts_def
is_aligned_pt)
lemma data_at_aligned:
"\<lbrakk> data_at sz p s; pspace_aligned s \<rbrakk> \<Longrightarrow> is_aligned p (pageBitsForSize sz)"
by (erule pspace_alignedE[where x=p]; fastforce simp: data_at_def obj_at_def)
lemma page_table_pte_at_diffE:
"\<lbrakk> page_table_at p s; q - p = x << 2;
x < 2^(pt_bits - 2); pspace_aligned s \<rbrakk> \<Longrightarrow> pte_at q s"
apply (clarsimp simp: diff_eq_eq add.commute)
apply (erule(2) page_table_pte_atI)
done
lemma pte_at_aligned_vptr:
"\<lbrakk>x \<in> set [0 , 4 .e. 0x3C]; page_table_at pt s;
pspace_aligned s; is_aligned vptr 16 \<rbrakk>
\<Longrightarrow> pte_at (x + (pt + (((vptr >> 12) && 0xFF) << 2))) s"
apply (erule page_table_pte_at_diffE[where x="(x >> 2) + ((vptr >> 12) && 0xFF)"];simp?)
apply (simp add: word_shiftl_add_distrib upto_enum_step_def)
apply (clarsimp simp: word_shift_by_2 shiftr_shiftl1 is_aligned_shift)
apply (subst add.commute, rule is_aligned_add_less_t2n)
apply (rule is_aligned_andI1[where n=4], rule is_aligned_shiftr, simp)
apply (rule shiftr_less_t2n)
apply (clarsimp dest!: upto_enum_step_subset[THEN subsetD])
apply (erule order_le_less_trans, simp)
apply (simp add: pt_bits_def pageBits_def)
apply (simp add: pt_bits_def pageBits_def)
apply (rule order_le_less_trans, rule word_and_le1, simp)
done
lemma lookup_pt_slot_ptes_aligned_valid:
"\<lbrace>valid_vspace_objs and valid_arch_state
and equal_kernel_mappings and pspace_aligned
and valid_global_objs
and \<exists>\<rhd> pd and page_directory_at pd
and K (is_aligned vptr 16)\<rbrace>
lookup_pt_slot pd vptr
\<lbrace>\<lambda>r s. is_aligned r 6 \<and> (\<forall>x\<in>set [0 , 4 .e. 0x3C]. pte_at (x + r) s)\<rbrace>, -"
apply (simp add: lookup_pt_slot_def)
apply (wp get_pde_wp|wpc)+
apply (clarsimp simp: lookup_pd_slot_def Let_def)
apply (simp add: pd_shifting_at)
apply (frule (2) valid_vspace_objsD)
apply (clarsimp simp: )
subgoal for s _ _ x
apply (prop_tac "page_table_at (ptrFromPAddr x) s")
subgoal
apply (bspec "(ucast (pd + (vptr >> 20 << 2) && mask pd_bits >> 2))";clarsimp)
apply (frule kernel_mapping_slots_empty_pdeI)
apply ((simp add: obj_at_def pte_at_def;fail)+)[4]
by (clarsimp simp: pde_ref_def valid_global_pts_def valid_arch_state_def second_level_tables_def)
apply (rule conjI)
apply (rule is_aligned_add)
apply (rule is_aligned_weaken, erule(1) is_aligned_pt)
apply (simp add: pt_bits_def pageBits_def)
apply (rule is_aligned_shiftl)
apply (rule is_aligned_andI1)
apply (rule is_aligned_shiftr, simp)
apply clarsimp
by (erule(1) pte_at_aligned_vptr, simp+)
done
lemma p_0x3C_shift:
"is_aligned (p :: word32) 6 \<Longrightarrow>
(\<forall>p\<in>set [p , p + 4 .e. p + 0x3C]. f p) = (\<forall>x\<in>set [0, 4 .e. 0x3C]. f (x + p))"
apply (clarsimp simp: upto_enum_step_def add.commute)
apply (frule is_aligned_no_overflow, simp add: word_bits_def)
apply (simp add: linorder_not_le [symmetric])
apply (erule notE)
apply (simp add: add.commute)
apply (erule word_random)
apply simp
done
lemma lookup_pt_slot_pte [wp]:
"\<lbrace>pspace_aligned and valid_vspace_objs and valid_arch_state
and equal_kernel_mappings and valid_global_objs
and \<exists>\<rhd> pd and page_directory_at pd\<rbrace>
lookup_pt_slot pd vptr \<lbrace>pte_at\<rbrace>,-"
apply (simp add: lookup_pt_slot_def)
apply (wp get_pde_wp|wpc)+
apply (clarsimp simp: lookup_pd_slot_def Let_def)
apply (simp add: pd_shifting_at)
apply (drule (2) valid_vspace_objsD)
apply clarsimp
apply (bspec "ucast (pd + (vptr >> 20 << 2) && mask pd_bits >> 2)")
apply clarsimp
apply (erule page_table_pte_atI, simp_all)
apply (simp add: pt_bits_def pageBits_def)
apply (rule order_le_less_trans, rule word_and_le1, simp)
apply (frule kernel_mapping_slots_empty_pdeI)
apply (simp add: obj_at_def)+
apply (clarsimp simp: pde_ref_def)
apply (rule page_table_pte_atI, simp_all)
apply (simp add: valid_arch_state_def valid_global_pts_def second_level_tables_def)
apply (simp add: pt_bits_def pageBits_def)
apply (rule order_le_less_trans, rule word_and_le1, simp)
done
lemma shiftr_w2p:
"x < len_of TYPE('a) \<Longrightarrow>
2 ^ x = (2^(len_of TYPE('a) - 1) >> (len_of TYPE('a) - 1 - x) :: 'a :: len word)"
apply simp
apply (rule word_eqI)
apply (auto simp: word_size nth_shiftr nth_w2p)
done
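(* For instance, at 'a = 32 and x = 4 this reads 2 ^ 4 = (2 ^ 31 >> 27). *)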
lemma vptr_shiftr_le_2p:
"(vptr :: word32) >> 20 < 2 ^ pageBits"
apply (rule le_less_trans[rotated])
apply (rule and_mask_less' [where w=max_word])
apply (simp add: pageBits_def)
apply (rule word_leI)
apply (simp add: word_size nth_shiftr)
apply (drule test_bit_size)
apply (simp add: pageBits_def word_size)
done
lemma page_directory_pde_at_lookupI:
"\<lbrakk>page_directory_at pd s; pspace_aligned s\<rbrakk> \<Longrightarrow> pde_at (lookup_pd_slot pd vptr) s"
apply (simp add: lookup_pd_slot_def Let_def)
apply (erule (1) page_directory_pde_atI[rotated 2])
apply (rule vptr_shiftr_le_2p)
done
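(* The page-table index (vptr >> 12) && 0xFF selects one of the 256 entries of
   an ARM page table: pt_bits - 2 = 8, so the bound below is 2 ^ 8. *)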
lemma vptr_shiftr_le_2pt:
"((vptr :: word32) >> 12) && 0xFF < 2 ^ (pt_bits - 2)"
apply (clarsimp simp: word_FF_is_mask pt_bits_def pageBits_def)
apply (rule and_mask_less_size[where n=8, simplified])
apply (clarsimp simp: word_size)
done
lemma page_table_pte_at_lookupI:
"\<lbrakk>page_table_at pt s; pspace_aligned s\<rbrakk> \<Longrightarrow> pte_at (lookup_pt_slot_no_fail pt vptr) s"
apply (simp add: lookup_pt_slot_no_fail_def)
apply (erule (1) page_table_pte_atI[rotated 2])
apply (rule vptr_shiftr_le_2pt)
done
lemmas lookup_pt_slot_ptes[wp] =
lookup_pt_slot_ptes_aligned_valid
[@ \<open>post_asm \<open>thin_tac "is_aligned x y" for x y\<close>\<close>]
lemmas lookup_pt_slot_ptes2[wp] =
lookup_pt_slot_ptes_aligned_valid
[@ \<open>post_asm \<open>drule (1) p_0x3C_shift[THEN iffD2], thin_tac _\<close>\<close>]
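(* create_mapping_entries only emits entries whose slots exist: pte_at for
   small and large pages, pde_at for sections and super-sections, given the
   stated vptr alignment for the two larger sizes. *)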
lemma create_mapping_entries_valid [wp]:
"\<lbrace>pspace_aligned and valid_arch_state and valid_vspace_objs
and equal_kernel_mappings and valid_global_objs
and \<exists>\<rhd> pd and page_directory_at pd and
K ((sz = ARMLargePage \<longrightarrow> is_aligned vptr 16) \<and>
(sz = ARMSuperSection \<longrightarrow> is_aligned vptr 24)) \<rbrace>
create_mapping_entries base vptr sz vm_rights attrib pd
\<lbrace>\<lambda>m. valid_mapping_entries m\<rbrace>, -"
apply (cases sz)
apply (rule hoare_pre)
apply (wp|simp add: valid_mapping_entries_def largePagePTE_offsets_def)+
apply clarsimp
apply (erule (1) page_directory_pde_at_lookupI)
apply (rule hoare_pre)
apply (clarsimp simp add: valid_mapping_entries_def)
apply wp
apply (simp add: lookup_pd_slot_def Let_def)
apply (prop_tac "is_aligned pd 14")
apply (clarsimp simp: obj_at_def add.commute invs_def valid_state_def valid_pspace_def pspace_aligned_def)
apply (drule bspec, blast)
apply (clarsimp simp: a_type_def split: kernel_object.splits arch_kernel_obj.splits if_split_asm)
apply (clarsimp simp: superSectionPDE_offsets_def)
apply (clarsimp simp: upto_enum_step_def word_shift_by_2)
apply (clarsimp simp: pde_at_def)
apply (simp add: add.commute add.left_commute)
apply (subst add_mask_lower_bits)
apply (simp add: pd_bits_def pageBits_def)
apply (clarsimp simp: pd_bits_def pageBits_def)
apply (subst (asm) word_plus_and_or_coroll)
prefer 2
apply (clarsimp simp: word_size nth_shiftr nth_shiftl is_aligned_nth p_le_0xF_helper word_bits_def)
apply (drule test_bit_size)+
apply (simp add: word_size)
apply (rule word_eqI)
apply (clarsimp simp: word_size nth_shiftr nth_shiftl is_aligned_nth p_le_0xF_helper word_bits_def)
apply (frule_tac w=vptr in test_bit_size)
apply (simp add: word_size)
apply (thin_tac "All _")
subgoal for \<dots> n
apply (spec "18+n")
by simp
apply (clarsimp simp: a_type_simps)
apply (rule aligned_add_aligned is_aligned_shiftl_self
| simp add: word_bits_conv)+
done
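(* Preservation of the basic heap invariants by the raw object setters
   set_pt, set_pd and set_asid_pool. *)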
lemma set_pt_distinct [wp]:
"set_pt p pt \<lbrace>pspace_distinct\<rbrace>"
apply (simp add: set_pt_def)
apply (wpsimp wp: set_object_wp_strong)
apply (clarsimp simp: obj_at_def a_type_def pspace_distinct_same_type
split: kernel_object.splits arch_kernel_obj.splits if_splits)
done
lemma set_pd_distinct [wp]:
"set_pd p pd \<lbrace>pspace_distinct\<rbrace>"
apply (simp add: set_pd_def)
apply (wp set_object_distinct[THEN hoare_set_object_weaken_pre] get_object_wp)
apply (clarsimp simp: obj_at_def a_type_def
split: kernel_object.splits arch_kernel_obj.splits)
done
lemma store_pte_valid_objs [wp]:
"\<lbrace>(%s. wellformed_pte pte) and valid_objs\<rbrace> store_pte p pte \<lbrace>\<lambda>_. valid_objs\<rbrace>"
apply (simp add: store_pte_def set_pt_def get_pt_def bind_assoc set_object_def get_object_def)
apply (rule hoare_pre)
apply (wp|wpc)+
apply (clarsimp simp: valid_objs_def dom_def simp del: fun_upd_apply)
subgoal for \<dots> ptr _
apply (rule valid_obj_same_type)
apply (cases "ptr = p && ~~ mask pt_bits")
apply (erule allE, erule impE, blast)
apply (clarsimp simp: valid_obj_def arch_valid_obj_def)
apply clarsimp
apply fastforce
apply (erule allE, erule impE, blast)
apply (clarsimp simp: valid_obj_def arch_valid_obj_def)
apply assumption
by (simp add: a_type_def)
done
lemma set_pt_caps_of_state [wp]:
"set_pt p pt \<lbrace>\<lambda>s. P (caps_of_state s)\<rbrace>"
apply (simp add: set_pt_def)
apply (wpsimp wp: set_object_wp_strong simp: obj_at_def a_type_simps)
apply (subst cte_wp_caps_of_lift)
prefer 2
apply assumption
apply (auto simp: cte_wp_at_cases a_type_def)
done
lemma set_pd_caps_of_state [wp]:
"set_pd p pd \<lbrace>\<lambda>s. P (caps_of_state s)\<rbrace>"
apply (simp add: set_pd_def bind_assoc)
apply (wpsimp wp: set_object_wp_strong simp: obj_at_def)
apply (subst cte_wp_caps_of_lift)
prefer 2
apply assumption
by (case_tac ko; simp add: cte_wp_at_cases a_type_simps split: if_splits)
lemma store_pte_aligned [wp]:
"store_pte pt p \<lbrace>pspace_aligned\<rbrace>"
apply (simp add: store_pte_def set_pt_def)
apply (wp set_object_aligned)
including unfold_objects
by (clarsimp simp: a_type_def)
lemma store_pde_valid_objs [wp]:
"\<lbrace>(%s. wellformed_pde pde) and valid_objs\<rbrace> store_pde p pde \<lbrace>\<lambda>_. valid_objs\<rbrace>"
apply (simp add: store_pde_def set_pd_def get_pd_def bind_assoc set_object_def get_object_def)
apply (rule hoare_pre)
apply (wp|wpc)+
apply (clarsimp simp: valid_objs_def dom_def simp del: fun_upd_apply)
subgoal for \<dots> ptr _
apply (rule valid_obj_same_type)
apply (cases "ptr = p && ~~ mask pd_bits")
apply (erule allE, erule impE, blast)
apply (clarsimp simp: valid_obj_def arch_valid_obj_def)
apply clarsimp
apply fastforce
apply (erule allE, erule impE, blast)
apply (clarsimp simp: valid_obj_def arch_valid_obj_def)
apply assumption
by (simp add: a_type_def)
done
lemma set_asid_pool_aligned [wp]:
"set_asid_pool p ptr \<lbrace>pspace_aligned\<rbrace>"
apply (simp add: set_asid_pool_def)
including unfold_objects
apply (wpsimp wp: set_object_wp_strong pspace_aligned_obj_update[rotated])
done
lemma set_asid_pool_distinct [wp]:
"set_asid_pool p ptr \<lbrace>pspace_distinct\<rbrace>"
apply (simp add: set_asid_pool_def)
including unfold_objects
by (wpsimp wp: set_object_wp_strong pspace_distinct_same_type)
lemma store_pde_arch [wp]:
"\<lbrace>\<lambda>s. P (arch_state s)\<rbrace> store_pde p pde \<lbrace>\<lambda>_ s. P (arch_state s)\<rbrace>"
by (simp add: store_pde_def set_pd_def get_object_def) wpsimp
lemma store_pte_valid_pte [wp]:
"\<lbrace>valid_pte pt\<rbrace> store_pte p pte \<lbrace>\<lambda>_. valid_pte pt\<rbrace>"
by (wp valid_pte_lift store_pte_typ_at)
lemma store_pde_valid_pde [wp]:
"\<lbrace>valid_pde pde\<rbrace> store_pde slot pde' \<lbrace>\<lambda>rv. valid_pde pde\<rbrace>"
by (wp valid_pde_lift store_pde_typ_at)
lemma set_pd_typ_at [wp]:
"\<lbrace>\<lambda>s. P (typ_at T p s)\<rbrace> set_pd ptr pd \<lbrace>\<lambda>_ s. P (typ_at T p s)\<rbrace>"
apply (simp add: set_pd_def)
by (wpsimp wp: set_object_wp_strong simp: obj_at_def)
lemma set_pd_valid_objs:
"\<lbrace>(%s. \<forall>i. wellformed_pde (pd i)) and valid_objs\<rbrace>
set_pd p pd
\<lbrace>\<lambda>_. valid_objs\<rbrace>"
apply (simp add: set_pd_def)
by (wpsimp wp: set_object_valid_objs simp: valid_obj_def)
lemma set_pd_iflive:
"\<lbrace>\<lambda>s. if_live_then_nonz_cap s\<rbrace>
set_pd p pd
\<lbrace>\<lambda>_ s. if_live_then_nonz_cap s\<rbrace>"
apply (subst set_pd_def)
including unfold_objects
by (wpsimp wp: set_object_iflive[THEN hoare_set_object_weaken_pre]
simp: live_def hyp_live_def a_type_def)
lemma set_pd_zombies:
"\<lbrace>\<lambda>s. zombies_final s\<rbrace>
set_pd p pd
\<lbrace>\<lambda>_ s. zombies_final s\<rbrace>"
apply (subst set_pd_def)
apply (wp set_object_zombies[THEN hoare_set_object_weaken_pre])
including unfold_objects
by (clarsimp simp: a_type_def)
lemma set_pd_zombies_state_refs:
"\<lbrace>\<lambda>s. P (state_refs_of s)\<rbrace>
set_pd p pd
\<lbrace>\<lambda>_ s. P (state_refs_of s)\<rbrace>"
apply (subst set_pd_def)
including unfold_objects
apply (wpsimp wp: set_object_wp_strong
simp: a_type_def)
apply (erule rsubst [where P=P], rule ext)
apply (simp add: state_refs_of_def)
done
lemma set_pd_zombies_state_hyp_refs:
"\<lbrace>\<lambda>s. P (state_hyp_refs_of s)\<rbrace>
set_pd p pd
\<lbrace>\<lambda>_ s. P (state_hyp_refs_of s)\<rbrace>"
apply (subst set_pd_def)
including unfold_objects
apply (wpsimp wp: set_object_wp_strong
simp: a_type_def)
apply (erule rsubst [where P=P], rule ext)
apply (simp add: state_hyp_refs_of_def)
done
lemma set_pd_cdt:
"\<lbrace>\<lambda>s. P (cdt s)\<rbrace> set_pd p pd \<lbrace>\<lambda>_ s. P (cdt s)\<rbrace>"
unfolding set_pd_def by (wpsimp wp: get_object_wp)
lemma set_pd_valid_mdb:
"\<lbrace>\<lambda>s. valid_mdb s\<rbrace>
set_pd p pd
\<lbrace>\<lambda>_ s. valid_mdb s\<rbrace>"
apply (rule valid_mdb_lift)
by (wpsimp wp: set_pd_cdt set_object_wp simp: set_pd_def)+
lemma set_pd_valid_idle:
"\<lbrace>\<lambda>s. valid_idle s\<rbrace> set_pd p pd \<lbrace>\<lambda>_ s. valid_idle s\<rbrace>"
by (wpsimp wp: valid_idle_lift set_object_wp simp: set_pd_def)
lemma set_pd_ifunsafe:
"\<lbrace>\<lambda>s. if_unsafe_then_cap s\<rbrace>
set_pd p pd
\<lbrace>\<lambda>_ s. if_unsafe_then_cap s\<rbrace>"
unfolding set_pd_def including unfold_objects
by (wpsimp wp: set_object_ifunsafe[THEN hoare_set_object_weaken_pre]
simp: a_type_def)
lemma set_pd_reply_caps:
"\<lbrace>\<lambda>s. valid_reply_caps s\<rbrace>
set_pd p pd
\<lbrace>\<lambda>_ s. valid_reply_caps s\<rbrace>"
by (wp valid_reply_caps_st_cte_lift)
lemma set_pd_reply_masters:
"\<lbrace>valid_reply_masters\<rbrace>
set_pd p pd
\<lbrace>\<lambda>_. valid_reply_masters\<rbrace>"
by (wp valid_reply_masters_cte_lift)
lemma global_refs_kheap [simp]:
"global_refs (kheap_update f s) = global_refs s"
by (simp add: global_refs_def)
crunch global_ref [wp]: set_pd "\<lambda>s. P (global_refs s)"
(wp: crunch_wps)
crunch arch [wp]: set_pd "\<lambda>s. P (arch_state s)"
(wp: crunch_wps)
crunch idle [wp]: set_pd "\<lambda>s. P (idle_thread s)"
(wp: crunch_wps)
crunch irq [wp]: set_pd "\<lambda>s. P (interrupt_irq_node s)"
(wp: crunch_wps)
lemma set_pd_valid_global:
"\<lbrace>\<lambda>s. valid_global_refs s\<rbrace>
set_pd p pd
\<lbrace>\<lambda>_ s. valid_global_refs s\<rbrace>"
by (wp valid_global_refs_cte_lift)
lemma set_pd_valid_arch:
"\<lbrace>\<lambda>s. valid_arch_state s\<rbrace>
set_pd p pd
\<lbrace>\<lambda>_ s. valid_arch_state s\<rbrace>"
by (wp valid_arch_state_lift)
lemma set_pd_cur:
"\<lbrace>\<lambda>s. cur_tcb s\<rbrace>
set_pd p pd
\<lbrace>\<lambda>_ s. cur_tcb s\<rbrace>"
apply (simp add: cur_tcb_def set_pd_def)
including unfold_objects
apply (wpsimp wp: set_object_wp_strong simp: a_type_simps)
apply (simp add: is_tcb_def)
done
crunch interrupt_states[wp]: set_pd "\<lambda>s. P (interrupt_states s)"
(wp: crunch_wps)
lemma set_pd_vspace_objs_unmap:
"\<lbrace>valid_vspace_objs and (\<lambda>s. (\<exists>\<rhd>p) s \<longrightarrow> valid_vspace_obj (PageDirectory pd') s) and
obj_at (\<lambda>ko. vs_refs (ArchObj (PageDirectory pd')) \<subseteq> vs_refs ko) p\<rbrace>
set_pd p pd' \<lbrace>\<lambda>_. valid_vspace_objs\<rbrace>"
apply (simp add: set_pd_def)
apply (wpsimp wp: set_object_vspace_objs[THEN hoare_set_object_weaken_pre])
including unfold_objects
apply (clarsimp simp: a_type_def)
done
declare graph_of_None_update[simp]
declare graph_of_Some_update[simp]
lemma set_pt_typ_at [wp]:
"\<lbrace>\<lambda>s. P (typ_at T p s)\<rbrace> set_pt ptr pt \<lbrace>\<lambda>_ s. P (typ_at T p s)\<rbrace>"
apply (simp add: set_pt_def)
by (wpsimp wp: set_object_wp_strong simp: obj_at_def)
lemma set_pt_valid_objs:
"\<lbrace>(%s. \<forall>i. wellformed_pte (pt i)) and valid_objs\<rbrace>
set_pt p pt
\<lbrace>\<lambda>_. valid_objs\<rbrace>"
apply (simp add: set_pt_def)
apply (wp set_object_valid_objs)
apply (clarsimp split: kernel_object.splits
arch_kernel_obj.splits)
apply (clarsimp simp: valid_obj_def obj_at_def a_type_def
arch_valid_obj_def)
done
lemma set_pt_iflive:
"\<lbrace>\<lambda>s. if_live_then_nonz_cap s\<rbrace>
set_pt p pt
\<lbrace>\<lambda>_ s. if_live_then_nonz_cap s\<rbrace>"
unfolding set_pt_def including unfold_objects
apply (wpsimp wp: set_object_iflive[THEN hoare_set_object_weaken_pre]
simp: live_def hyp_live_def a_type_def)
done
lemma set_pt_zombies:
"\<lbrace>\<lambda>s. zombies_final s\<rbrace>
set_pt p pt
\<lbrace>\<lambda>_ s. zombies_final s\<rbrace>"
unfolding set_pt_def including unfold_objects
apply (wpsimp wp: set_object_zombies[THEN hoare_set_object_weaken_pre]
simp: a_type_def)
done
lemma set_pt_zombies_state_refs:
"\<lbrace>\<lambda>s. P (state_refs_of s)\<rbrace>
set_pt p pt
\<lbrace>\<lambda>_ s. P (state_refs_of s)\<rbrace>"
unfolding set_pt_def including unfold_objects
apply (wpsimp wp: set_object_wp_strong simp: a_type_def)
apply (erule rsubst [where P=P])
apply (rule ext)
apply (clarsimp simp: state_refs_of_def)
done
lemma set_pt_zombies_state_hyp_refs:
"\<lbrace>\<lambda>s. P (state_hyp_refs_of s)\<rbrace>
set_pt p pt
\<lbrace>\<lambda>_ s. P (state_hyp_refs_of s)\<rbrace>"
unfolding set_pt_def including unfold_objects
apply (wpsimp wp: set_object_wp_strong simp: a_type_def)
apply (erule rsubst [where P=P])
apply (rule ext)
apply (clarsimp simp: state_hyp_refs_of_def)
done
lemma set_pt_cdt:
"\<lbrace>\<lambda>s. P (cdt s)\<rbrace> set_pt p pt \<lbrace>\<lambda>_ s. P (cdt s)\<rbrace>"
unfolding set_pt_def including unfold_objects by wpsimp
lemma set_pt_valid_mdb:
"\<lbrace>\<lambda>s. valid_mdb s\<rbrace> set_pt p pt \<lbrace>\<lambda>_ s. valid_mdb s\<rbrace>"
including unfold_objects
by (wpsimp wp: set_pt_cdt valid_mdb_lift simp: set_pt_def set_object_def)
lemma set_pt_valid_idle:
"\<lbrace>\<lambda>s. valid_idle s\<rbrace> set_pt p pt \<lbrace>\<lambda>_ s. valid_idle s\<rbrace>"
including unfold_objects
by (wpsimp wp: valid_idle_lift simp: set_pt_def)
lemma set_pt_ifunsafe:
"\<lbrace>\<lambda>s. if_unsafe_then_cap s\<rbrace> set_pt p pt \<lbrace>\<lambda>_ s. if_unsafe_then_cap s\<rbrace>"
including unfold_objects by (wpsimp wp: set_object_ifunsafe[THEN hoare_set_object_weaken_pre]
simp: set_pt_def a_type_def)
lemma set_pt_reply_caps:
"\<lbrace>\<lambda>s. valid_reply_caps s\<rbrace> set_pt p pt \<lbrace>\<lambda>_ s. valid_reply_caps s\<rbrace>"
by (wp valid_reply_caps_st_cte_lift)
lemma set_pt_reply_masters:
"\<lbrace>valid_reply_masters\<rbrace> set_pt p pt \<lbrace>\<lambda>_. valid_reply_masters\<rbrace>"
by (wp valid_reply_masters_cte_lift)
crunch global_ref [wp]: set_pt "\<lambda>s. P (global_refs s)"
(wp: crunch_wps)
crunch arch [wp]: set_pt "\<lambda>s. P (arch_state s)"
(wp: crunch_wps)
crunch idle [wp]: set_pt "\<lambda>s. P (idle_thread s)"
(wp: crunch_wps)
crunch irq [wp]: set_pt "\<lambda>s. P (interrupt_irq_node s)"
(wp: crunch_wps)
lemma set_pt_valid_global:
"\<lbrace>\<lambda>s. valid_global_refs s\<rbrace>
set_pt p pt
\<lbrace>\<lambda>_ s. valid_global_refs s\<rbrace>"
by (wp valid_global_refs_cte_lift)
lemma set_pt_valid_arch_state[wp]:
"\<lbrace>\<lambda>s. valid_arch_state s\<rbrace>
set_pt p pt
\<lbrace>\<lambda>_ s. valid_arch_state s\<rbrace>"
by (wp valid_arch_state_lift)
lemma set_pt_cur:
"\<lbrace>\<lambda>s. cur_tcb s\<rbrace>
set_pt p pt
\<lbrace>\<lambda>_ s. cur_tcb s\<rbrace>"
unfolding set_pt_def cur_tcb_def including unfold_objects
by (wpsimp wp: set_object_wp_strong simp: a_type_def is_tcb)
lemma set_pt_aligned [wp]:
"\<lbrace>pspace_aligned\<rbrace> set_pt p pt \<lbrace>\<lambda>_. pspace_aligned\<rbrace>"
unfolding set_pt_def including unfold_objects
by (wpsimp wp: set_object_aligned[THEN hoare_set_object_weaken_pre])
crunch interrupt_states[wp]: set_pt "\<lambda>s. P (interrupt_states s)"
(wp: crunch_wps)
lemma set_pt_vspace_objs [wp]:
"\<lbrace>valid_vspace_objs and (\<lambda>s. (\<exists>\<rhd>p) s \<longrightarrow> valid_vspace_obj (PageTable pt) s)\<rbrace>
set_pt p pt
\<lbrace>\<lambda>_. valid_vspace_objs\<rbrace>"
unfolding set_pt_def including unfold_objects
apply (wpsimp wp: set_object_vspace_objs[THEN hoare_set_object_weaken_pre]
simp: a_type_def)
by (simp add: vs_refs_def)
lemma set_pt_vs_lookup [wp]:
"\<lbrace>\<lambda>s. P (vs_lookup s)\<rbrace> set_pt p pt \<lbrace>\<lambda>x s. P (vs_lookup s)\<rbrace>"
unfolding set_pt_def including unfold_objects
apply (wpsimp wp: set_object_wp_strong simp: a_type_def)
apply (erule rsubst [where P=P])
apply (rule order_antisym)
apply (rule vs_lookup_sub)
apply (clarsimp simp: vs_refs_def)
prefer 3
apply (rule vs_lookup_sub)
apply (clarsimp simp: vs_refs_def split: if_split_asm)
apply blast+
apply auto
done
lemma store_pte_vs_lookup [wp]:
"\<lbrace>\<lambda>s. P (vs_lookup s)\<rbrace> store_pte x pte \<lbrace>\<lambda>_ s. P (vs_lookup s)\<rbrace>"
unfolding store_pte_def by wpsimp
lemma unique_table_caps_ptD:
"\<lbrakk> cs p = Some cap; cap_asid cap = None;
cs p' = Some cap'; is_pt_cap cap; is_pt_cap cap';
obj_refs cap' = obj_refs cap;
unique_table_caps cs\<rbrakk>
\<Longrightarrow> p = p'"
by (fastforce simp add: unique_table_caps_def)
lemma unique_table_caps_pdD:
"\<lbrakk> cs p = Some cap; cap_asid cap = None;
cs p' = Some cap'; is_pd_cap cap; is_pd_cap cap';
obj_refs cap' = obj_refs cap;
unique_table_caps cs\<rbrakk>
\<Longrightarrow> p = p'"
by (fastforce simp add: unique_table_caps_def)
lemma valid_objs_caps:
"valid_objs s \<Longrightarrow> valid_caps (caps_of_state s) s"
apply (clarsimp simp: valid_caps_def)
apply (erule (1) caps_of_state_valid_cap)
done
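(* A simpler relational characterisation of set_pt: it succeeds exactly when a
   page table is already at p, and then overwrites that object in place. *)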
lemma simpler_set_pt_def:
"set_pt p pt =
(\<lambda>s. if \<exists>pt. kheap s p = Some (ArchObj (PageTable pt)) then
({((), s\<lparr>kheap := kheap s(p \<mapsto> ArchObj (PageTable pt))\<rparr>)}, False)
else ({}, True))"
apply (rule ext)
apply (clarsimp simp: set_pt_def set_object_def get_object_def assert_def
put_def get_def simpler_gets_def bind_def return_def fail_def)
apply (rule conjI)
apply (clarsimp simp: set_pt_def set_object_def get_object_def assert_def
put_def get_def simpler_gets_def bind_def
return_def fail_def a_type_def
split: kernel_object.splits
arch_kernel_obj.splits)
using a_type_def aa_type_APageTableE apply fastforce
done
lemma valid_set_ptI:
"(!!s opt. \<lbrakk>P s; kheap s p = Some (ArchObj (PageTable opt))\<rbrakk>
\<Longrightarrow> Q () (s\<lparr>kheap := kheap s(p \<mapsto> ArchObj (PageTable pt))\<rparr>))
\<Longrightarrow> \<lbrace>P\<rbrace> set_pt p pt \<lbrace>Q\<rbrace>"
by (rule validI) (clarsimp simp: simpler_set_pt_def split: if_split_asm)
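(* valid_set_ptI turns a Hoare triple about set_pt into a direct obligation on
   the explicitly updated kheap; set_pt_table_caps below is a typical use. *)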
lemma set_pt_table_caps [wp]:
"\<lbrace>valid_table_caps and (\<lambda>s. valid_caps (caps_of_state s) s) and
(\<lambda>s. ((\<exists>slot. caps_of_state s slot =
Some (ArchObjectCap (PageTableCap p None))) \<longrightarrow>
pt = (\<lambda>x. InvalidPTE)) \<or>
(\<forall>slot. \<exists>asid. caps_of_state s slot =
Some (ArchObjectCap (PageTableCap p (Some asid)))))\<rbrace>
set_pt p pt
\<lbrace>\<lambda>rv. valid_table_caps\<rbrace>"
unfolding valid_table_caps_def
apply (rule valid_set_ptI)
apply (intro allI impI, simp add: obj_at_def del: HOL.imp_disjL)
apply (cut_tac s=s and val= "ArchObj (PageTable pt)" and p=p
in caps_of_state_after_update[folded fun_upd_def])
apply (simp add: obj_at_def)
apply (clarsimp simp del: HOL.imp_disjL)
apply (thin_tac "ALL x. P x" for P)
apply (case_tac cap, simp_all add: is_pd_cap_def is_pt_cap_def)
apply (erule disjE)
apply (simp add: valid_caps_def)
apply ((drule spec)+, erule impE, assumption)
apply (rename_tac arch_cap)
apply (case_tac arch_cap,
simp_all add: valid_cap_def obj_at_def aa_type_simps)
apply clarsimp
apply (erule impE, fastforce simp: cap_asid_def split: option.splits)
apply (erule disjE, simp add: empty_table_def)
apply (drule_tac x=a in spec, drule_tac x=b in spec)
apply (clarsimp simp add: cap_asid_def split: option.splits)
done
lemma set_object_caps_of_state:
"\<lbrace>(\<lambda>s. \<not>(tcb_at p s) \<and> \<not>(\<exists>n. cap_table_at n p s)) and
K ((\<forall>x y. obj \<noteq> CNode x y) \<and> (\<forall>x. obj \<noteq> TCB x)) and
(\<lambda>s. P (caps_of_state s))\<rbrace>
set_object p obj
\<lbrace>\<lambda>_ s. P (caps_of_state s)\<rbrace>"
apply (wpsimp wp: set_object_wp_strong)
apply (erule rsubst[where P=P])
apply (rule ext)
apply (simp add: caps_of_state_cte_wp_at obj_at_def is_cap_table_def
is_tcb_def)
apply (auto simp: cte_wp_at_cases)
done
lemma set_pt_valid_vspace_objs[wp]:
"valid (\<lambda>s. valid_vspace_objs s \<and> ((\<exists>\<rhd> p) s \<longrightarrow> (\<forall>x. valid_pte (pt x) s)))
(set_pt p pt) (\<lambda>_. valid_vspace_objs)"
apply (rule valid_set_ptI)
apply (clarsimp simp: valid_vspace_objs_def)
subgoal for s opt pa rs ao
apply (spec pa)
apply (prop_tac "(\<exists>\<rhd> pa) s")
apply (rule exI[where x=rs])
apply (erule vs_lookupE)
apply clarsimp
apply (erule vs_lookupI)
apply (erule rtrancl.induct, simp)
subgoal for \<dots> b c
apply (prop_tac "(b \<rhd>1 c) s")
apply (thin_tac "_ : rtrancl _")+
apply (clarsimp simp add: vs_lookup1_def obj_at_def vs_refs_def
split: if_split_asm)
by simp
apply simp
apply (spec ao)
apply (cases "pa = p")
apply (clarsimp simp: obj_at_def)
subgoal for _ x
apply (drule_tac x=x in spec)
by (cases "pt x"; clarsimp simp: data_at_def obj_at_def a_type_simps)
apply (cases ao; simp add: obj_at_def a_type_simps)
apply clarsimp
apply (drule bspec, assumption, clarsimp)
apply clarsimp
subgoal for "fun" _ x
apply (spec x)
by (cases "fun x"; clarsimp simp: obj_at_def data_at_def a_type_simps)
apply clarsimp
apply (drule bspec,fastforce)
subgoal for "fun" x
by (cases "fun x"; clarsimp simp: data_at_def obj_at_def a_type_simps)
done
done
lemma set_pt_valid_vs_lookup [wp]:
"\<lbrace>\<lambda>s. valid_vs_lookup s \<and> valid_arch_state s \<and>
valid_vspace_objs s \<and> ((\<exists>\<rhd> p) s \<longrightarrow> (\<forall>x. valid_pte (pt x) s)) \<and>
(\<forall>ref. (ref \<unrhd> p) s \<longrightarrow>
(\<forall>x p. pte_ref_pages (pt x) = Some p \<longrightarrow>
(\<exists>p' cap. caps_of_state s p' = Some cap \<and>
p \<in> obj_refs cap \<and>
vs_cap_ref cap =
Some (VSRef (ucast x) (Some APageTable) # ref))))\<rbrace>
set_pt p pt
\<lbrace>\<lambda>rv. valid_vs_lookup\<rbrace>"
using set_pt_valid_vspace_objs[of p pt] set_pt_valid_arch_state[of p pt]
apply (clarsimp simp: valid_def simpler_set_pt_def)
apply (drule_tac x=s in spec)+
apply (clarsimp simp: valid_vs_lookup_def split: if_split_asm)
apply (erule (1) vs_lookup_pagesE_alt)
apply (clarsimp simp: valid_arch_state_def valid_asid_table_def
fun_upd_def)
apply (drule_tac x=pa in spec)
apply (drule_tac x="[VSRef (ucast a) None]" in spec)+
apply simp
apply (drule vs_lookup_pages_atI)
apply simp
apply (subst caps_of_state_after_update, simp add: obj_at_def)
apply simp
apply (drule_tac x=pa in spec)
apply (drule_tac x="[VSRef (ucast b) (Some AASIDPool),
VSRef (ucast a) None]" in spec)+
apply simp
apply (drule vs_lookup_pages_apI)
apply (simp split: if_split_asm)
apply simp+
apply (subst caps_of_state_after_update, simp add: obj_at_def)
apply simp
apply (drule_tac x=pa in spec)
apply (drule_tac x="[VSRef (ucast c) (Some APageDirectory),
VSRef (ucast b) (Some AASIDPool),
VSRef (ucast a) None]" in spec)+
apply simp
apply (drule vs_lookup_pages_pdI)
apply (simp split: if_split_asm)+
apply (subst caps_of_state_after_update, simp add: obj_at_def)
apply fastforce
apply (clarsimp simp: fun_upd_def split: if_split_asm)
apply (thin_tac "valid_vspace_objs s" for s, thin_tac "valid_arch_state s" for s)
apply (subst caps_of_state_after_update, simp add: obj_at_def)
apply (thin_tac "\<forall>p ref. P p ref" for P)
apply (drule_tac x="[VSRef (ucast c) (Some APageDirectory),
VSRef (ucast b) (Some AASIDPool),
VSRef (ucast a) None]" in spec)
apply (thin_tac "valid_pte pte s" for pte s)
apply (erule impE, fastforce intro: vs_lookup_pdI)
apply (drule_tac x=d in spec)
apply (erule impE)
apply (erule (5) vs_lookup_pdI[THEN vs_lookup_pages_vs_lookupI])
apply (drule spec, drule spec, erule impE, assumption)
apply assumption
apply (thin_tac "valid_vspace_objs s" for s, thin_tac "valid_arch_state s" for s)
apply (subst caps_of_state_after_update, simp add: obj_at_def)
apply (thin_tac "\<forall>ref. (ref \<unrhd> p) s \<longrightarrow> P ref" for P)
apply (drule_tac x=pa in spec)
apply (drule_tac x="[VSRef (ucast d) (Some APageTable),
VSRef (ucast c) (Some APageDirectory),
VSRef (ucast b) (Some AASIDPool),
VSRef (ucast a) None]" in spec)
apply (thin_tac "(\<exists>\<rhd> p) s \<longrightarrow> P" for P)
apply (erule impE, fastforce intro: vs_lookup_pages_ptI)
apply simp
done
lemma set_pt_arch_caps [wp]:
"\<lbrace>valid_arch_caps and valid_arch_state and valid_vspace_objs and
(\<lambda>s. valid_caps (caps_of_state s) s) and
(\<lambda>s. ((\<exists>slot. caps_of_state s slot =
Some (ArchObjectCap (PageTableCap p None))) \<longrightarrow>
pt = (\<lambda>x. InvalidPTE)) \<or>
(\<forall>slot. \<exists>asid. caps_of_state s slot =
Some (ArchObjectCap (PageTableCap p (Some asid))))) and
(\<lambda>s. ((\<exists>\<rhd> p) s \<longrightarrow> (\<forall>x. valid_pte (pt x) s)) \<and>
(\<forall>ref. (ref \<unrhd> p) s \<longrightarrow>
(\<forall>x p. pte_ref_pages (pt x) = Some p \<longrightarrow>
(\<exists>p' cap. caps_of_state s p' = Some cap \<and>
p \<in> obj_refs cap \<and>
vs_cap_ref cap =
Some (VSRef (ucast x) (Some APageTable) # ref)))))\<rbrace>
set_pt p pt \<lbrace>\<lambda>_. valid_arch_caps\<rbrace>"
unfolding valid_arch_caps_def
apply (rule hoare_pre)
apply (wp set_pt_valid_vs_lookup)
apply clarsimp
done
lemma valid_global_refsD2:
"\<lbrakk> caps_of_state s ptr = Some cap; valid_global_refs s \<rbrakk>
\<Longrightarrow> global_refs s \<inter> cap_range cap = {}"
by (cases ptr,
simp add: valid_global_refs_def valid_refs_def
cte_wp_at_caps_of_state)
lemma valid_global_refsD:
"\<lbrakk> valid_global_refs s; cte_wp_at ((=) cap) ptr s;
r \<in> global_refs s \<rbrakk>
\<Longrightarrow> r \<notin> cap_range cap"
apply (clarsimp simp: cte_wp_at_caps_of_state)
apply (drule(1) valid_global_refsD2)
apply fastforce
done
lemma set_pt_global_objs [wp]:
"\<lbrace>valid_global_objs and valid_arch_state and
(\<lambda>s. p \<in> set (arm_global_pts (arch_state s)) \<longrightarrow>
(\<forall>x. aligned_pte (pt x)))\<rbrace>
set_pt p pt
\<lbrace>\<lambda>rv. valid_global_objs\<rbrace>"
apply (rule valid_set_ptI)
apply (clarsimp simp: valid_global_objs_def valid_arch_state_def valid_vspace_obj_def
valid_vso_at_def obj_at_def empty_table_def)
done
crunch v_ker_map[wp]: set_pt "valid_kernel_mappings"
(ignore: set_object wp: set_object_v_ker_map crunch_wps)
lemma set_pt_asid_map [wp]:
"\<lbrace>valid_asid_map\<rbrace> set_pt p pt \<lbrace>\<lambda>_. valid_asid_map\<rbrace>"
apply (simp add: valid_asid_map_def vspace_at_asid_def)
apply (rule hoare_lift_Pf2 [where f="arch_state"])
apply wp+
done
lemma set_pt_only_idle [wp]:
"\<lbrace>only_idle\<rbrace> set_pt p pt \<lbrace>\<lambda>_. only_idle\<rbrace>"
by (wp only_idle_lift)
lemma set_pt_equal_mappings [wp]:
"\<lbrace>equal_kernel_mappings\<rbrace> set_pt p pt \<lbrace>\<lambda>rv. equal_kernel_mappings\<rbrace>"
by (simp add: set_pt_def | wp set_object_equal_mappings get_object_wp)+
lemma set_pt_valid_global_vspace_mappings:
"\<lbrace>\<lambda>s. valid_global_vspace_mappings s \<and> valid_global_objs s \<and> p \<notin> global_refs s\<rbrace>
set_pt p pt
\<lbrace>\<lambda>rv. valid_global_vspace_mappings\<rbrace>"
unfolding set_pt_def including unfold_objects
by (wpsimp wp: set_object_global_vspace_mappings)
lemma set_pt_kernel_window[wp]:
"\<lbrace>pspace_in_kernel_window\<rbrace> set_pt p pt \<lbrace>\<lambda>rv. pspace_in_kernel_window\<rbrace>"
unfolding set_pt_def including unfold_objects
by (wpsimp wp: set_object_pspace_in_kernel_window[THEN hoare_set_object_weaken_pre])
lemma set_pt_respects_device_region[wp]:
"\<lbrace>pspace_respects_device_region\<rbrace> set_pt p pt \<lbrace>\<lambda>rv. pspace_respects_device_region\<rbrace>"
unfolding set_pt_def including unfold_objects
by (wpsimp wp: set_object_pspace_respects_device_region[THEN hoare_set_object_weaken_pre])
lemma set_pt_caps_in_kernel_window[wp]:
"\<lbrace>cap_refs_in_kernel_window\<rbrace> set_pt p pt \<lbrace>\<lambda>rv. cap_refs_in_kernel_window\<rbrace>"
unfolding set_pt_def including unfold_objects
by (wpsimp wp: set_object_cap_refs_in_kernel_window[THEN hoare_set_object_weaken_pre]
simp: a_type_def)
lemma set_pt_caps_respects_device_region[wp]:
"\<lbrace>cap_refs_respects_device_region\<rbrace> set_pt p pt \<lbrace>\<lambda>rv. cap_refs_respects_device_region\<rbrace>"
unfolding set_pt_def including unfold_objects
by (wpsimp wp: set_object_cap_refs_respects_device_region[THEN hoare_set_object_weaken_pre]
simp: a_type_def)
lemma set_pt_valid_ioc[wp]:
"\<lbrace>valid_ioc\<rbrace> set_pt p pt \<lbrace>\<lambda>_. valid_ioc\<rbrace>"
unfolding set_pt_def including unfold_objects
by (wpsimp wp: set_object_valid_ioc_no_caps[THEN hoare_set_object_weaken_pre]
simp: is_tcb is_cap_table)
lemma valid_machine_stateE:
assumes vm: "valid_machine_state s"
assumes e: "\<lbrakk>in_user_frame p s
\<or> underlying_memory (machine_state s) p = 0\<rbrakk> \<Longrightarrow> E"
shows E
using vm
apply (clarsimp simp: valid_machine_state_def)
apply (drule_tac x = p in spec)
apply (rule e)
apply auto
done
lemma in_user_frame_same_type_upd:
"\<lbrakk>typ_at type p s; type = a_type obj; in_user_frame q s\<rbrakk>
\<Longrightarrow> in_user_frame q (s\<lparr>kheap := kheap s(p \<mapsto> obj)\<rparr>)"
apply (clarsimp simp: in_user_frame_def obj_at_def)
apply (rule_tac x=sz in exI)
apply (auto simp: a_type_simps)
done
lemma in_device_frame_same_type_upd:
"\<lbrakk>typ_at type p s; type = a_type obj ; in_device_frame q s\<rbrakk>
\<Longrightarrow> in_device_frame q (s\<lparr>kheap := kheap s(p \<mapsto> obj)\<rparr>)"
apply (clarsimp simp: in_device_frame_def obj_at_def)
apply (rule_tac x=sz in exI)
apply (auto simp: a_type_simps)
done
lemma store_word_offs_in_user_frame[wp]:
"\<lbrace>\<lambda>s. in_user_frame p s\<rbrace> store_word_offs a x w \<lbrace>\<lambda>_ s. in_user_frame p s\<rbrace>"
unfolding in_user_frame_def
by (wp hoare_vcg_ex_lift)
lemma store_word_offs_in_device_frame[wp]:
"\<lbrace>\<lambda>s. in_device_frame p s\<rbrace> store_word_offs a x w \<lbrace>\<lambda>_ s. in_device_frame p s\<rbrace>"
unfolding in_device_frame_def
by (wp hoare_vcg_ex_lift)
lemma as_user_in_user_frame[wp]:
"\<lbrace>\<lambda>s. in_user_frame p s\<rbrace> as_user t m \<lbrace>\<lambda>_ s. in_user_frame p s\<rbrace>"
unfolding in_user_frame_def
by (wp hoare_vcg_ex_lift)
lemma as_user_in_device_frame[wp]:
"\<lbrace>\<lambda>s. in_device_frame p s\<rbrace> as_user t m \<lbrace>\<lambda>_ s. in_device_frame p s\<rbrace>"
unfolding in_device_frame_def
by (wp hoare_vcg_ex_lift)
crunch obj_at[wp]: load_word_offs "\<lambda>s. P (obj_at Q p s)"
lemma load_word_offs_in_user_frame[wp]:
"\<lbrace>\<lambda>s. in_user_frame p s\<rbrace> load_word_offs a x \<lbrace>\<lambda>_ s. in_user_frame p s\<rbrace>"
unfolding in_user_frame_def
by (wp hoare_vcg_ex_lift)
lemma valid_machine_state_heap_updI:
assumes vm : "valid_machine_state s"
assumes tyat : "typ_at type p s"
shows "a_type obj = type \<Longrightarrow> valid_machine_state (s\<lparr>kheap := kheap s(p \<mapsto> obj)\<rparr>)"
apply (clarsimp simp: valid_machine_state_def)
subgoal for p
apply (rule valid_machine_stateE[OF vm,where p = p])
apply (elim disjE,simp_all)
apply (drule(1) in_user_frame_same_type_upd[OF tyat])
apply simp+
done
done
lemma set_pt_vms[wp]:
"\<lbrace>valid_machine_state\<rbrace> set_pt p pt \<lbrace>\<lambda>_. valid_machine_state\<rbrace>"
unfolding set_pt_def including unfold_objects
apply (wpsimp wp: set_object_wp_strong simp: a_type_def)
apply (erule valid_machine_state_heap_updI)
apply (fastforce simp: a_type_simps)+
done
crunch valid_irq_states[wp]: set_pt "valid_irq_states"
(wp: crunch_wps)
crunch valid_irq_states[wp]: set_pd "valid_irq_states"
(wp: crunch_wps)
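(* Main invariant preservation for set_pt: the new table must be wellformed,
   valid as a vspace object where reachable, covered by a PageTableCap, and
   every page it references must be backed by a cap with matching vs_cap_ref. *)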
lemma set_pt_invs:
"\<lbrace>invs and (\<lambda>s. \<forall>i. wellformed_pte (pt i)) and
(\<lambda>s. (\<exists>\<rhd>p) s \<longrightarrow> valid_vspace_obj (PageTable pt) s) and
(\<lambda>s. \<exists>slot asid. caps_of_state s slot =
Some (cap.ArchObjectCap (arch_cap.PageTableCap p asid)) \<and>
(pt = (\<lambda>x. InvalidPTE) \<or> asid \<noteq> None)) and
(\<lambda>s. \<forall>ref. (ref \<unrhd> p) s \<longrightarrow>
(\<forall>x p. pte_ref_pages (pt x) = Some p \<longrightarrow>
(\<exists>p' cap. caps_of_state s p' = Some cap \<and>
p \<in> obj_refs cap \<and>
vs_cap_ref cap =
Some (VSRef (ucast x) (Some APageTable) # ref))))\<rbrace>
set_pt p pt
\<lbrace>\<lambda>_. invs\<rbrace>"
apply (simp add: invs_def valid_state_def valid_pspace_def)
apply (rule hoare_pre)
apply (wp set_pt_valid_objs set_pt_iflive set_pt_zombies
set_pt_zombies_state_refs set_pt_zombies_state_hyp_refs set_pt_valid_mdb
set_pt_valid_idle set_pt_ifunsafe set_pt_reply_caps
set_pt_valid_arch_state set_pt_valid_global set_pt_cur
set_pt_reply_masters valid_irq_node_typ
valid_irq_handlers_lift
set_pt_valid_global_vspace_mappings)
apply (clarsimp dest!: valid_objs_caps)
apply (rule conjI[rotated])
apply (subgoal_tac "p \<notin> global_refs s", simp add: global_refs_def)
apply (frule (1) valid_global_refsD2)
apply (clarsimp simp add: cap_range_def is_pt_cap_def)
apply (thin_tac "ALL x. P x" for P)+
apply (clarsimp simp: valid_arch_caps_def unique_table_caps_def)
apply (drule_tac x=aa in spec, drule_tac x=ba in spec)
apply (drule_tac x=a in spec, drule_tac x=b in spec)
apply (clarsimp simp: is_pt_cap_def cap_asid_def)
done
lemma vs_lookup_pages_pt_eq:
"\<lbrakk>valid_vspace_objs s;
\<forall>p\<in>ran (arm_asid_table (arch_state s)). asid_pool_at p s;
page_table_at p s\<rbrakk>
\<Longrightarrow> (ref \<unrhd> p) s = (ref \<rhd> p) s"
apply (rule iffI[rotated])
apply (erule vs_lookup_pages_vs_lookupI)
apply (erule (2) vs_lookup_pagesE_alt)
apply (clarsimp simp: obj_at_def)+
apply (clarsimp simp: obj_at_def pde_ref_pages_def
split: pde.splits)
apply (erule (5) vs_lookup_pdI)
apply (auto simp: obj_at_def pte_ref_pages_def data_at_def
split: pte.splits)
done
(* NOTE: we use vs_lookup in the precondition because in this case the two
   relations are equivalent, but vs_lookup is generally preserved by
   store_pte while vs_lookup_pages might not be. *)
lemma store_pte_invs [wp]:
"\<lbrace>invs and (\<lambda>s. (\<exists>\<rhd>(p && ~~ mask pt_bits)) s \<longrightarrow> valid_pte pte s) and
(\<lambda>s. wellformed_pte pte) and
(\<lambda>s. \<exists>slot asid. caps_of_state s slot =
Some (ArchObjectCap
(PageTableCap (p && ~~ mask pt_bits) asid)) \<and>
(pte = InvalidPTE \<or> asid \<noteq> None)) and
(\<lambda>s. \<forall>ref. (ref \<rhd> (p && ~~ mask pt_bits)) s \<longrightarrow>
(\<forall>q. pte_ref_pages pte = Some q \<longrightarrow>
(\<exists>p' cap. caps_of_state s p' = Some cap \<and>
q \<in> obj_refs cap \<and>
vs_cap_ref cap =
Some (VSRef (p && mask pt_bits >> 2)
(Some APageTable) # ref))))\<rbrace>
store_pte p pte \<lbrace>\<lambda>_. invs\<rbrace>"
apply (simp add: store_pte_def)
apply (wp dmo_invs set_pt_invs)
apply clarsimp
apply (intro conjI)
apply (drule invs_valid_objs)
apply (fastforce simp: valid_objs_def dom_def obj_at_def valid_obj_def arch_valid_obj_def)
apply clarsimp
apply (drule (1) valid_vspace_objsD, fastforce)
apply simp
apply (thin_tac "All _")
apply (rule exI)+
apply (rule conjI, assumption)
subgoal premises prems for \<dots> asid
proof (cases asid)
case (Some a) from this show ?thesis
by fastforce
next
case None from this prems show ?thesis
apply clarsimp
apply (rule ext)
apply clarsimp
apply (frule invs_pd_caps)
apply (clarsimp simp add: valid_table_caps_def simp del: HOL.imp_disjL)
apply (spec "p && ~~ mask pt_bits")
apply (drule spec)+
apply (erule impE, assumption)
by (simp add: is_pt_cap_def cap_asid_def empty_table_def obj_at_def)
qed
apply (clarsimp simp: obj_at_def)
apply (intro impI conjI allI)
apply (drule (2) vs_lookup_pages_pt_eq[OF invs_vspace_objs invs_ran_asid_table,
THEN iffD1, rotated -1])
apply (clarsimp simp: obj_at_def a_type_simps)
apply (drule spec, erule impE, assumption)+
apply (erule exEI)+
apply clarsimp
apply (rule sym)
apply (rule ucast_ucast_len)
apply (rule shiftr_less_t2n)
using and_mask_less'[of 10 p]
apply (simp add: pt_bits_def pageBits_def)
subgoal for \<dots> pa
apply (thin_tac "All _", thin_tac "_ \<longrightarrow> _", thin_tac "_ \<or> _")
apply (frule invs_valid_vs_lookup)
apply (simp add: valid_vs_lookup_def)
apply (spec pa)
apply (drule spec, erule impE)
apply (erule vs_lookup_pages_step)
by (fastforce simp: vs_lookup_pages1_def obj_at_def
vs_refs_pages_def graph_of_def image_def) simp
done
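(* The corresponding invariant-preservation lemmas for ASID pool updates. *)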
lemma set_asid_pool_iflive [wp]:
"\<lbrace>\<lambda>s. if_live_then_nonz_cap s\<rbrace>
set_asid_pool p ap
\<lbrace>\<lambda>_ s. if_live_then_nonz_cap s\<rbrace>"
apply (simp add: set_asid_pool_def)
including unfold_objects
by (wpsimp wp: set_object_iflive[THEN hoare_set_object_weaken_pre]
simp: a_type_def live_def hyp_live_def)
lemma set_asid_pool_zombies [wp]:
"\<lbrace>\<lambda>s. zombies_final s\<rbrace>
set_asid_pool p ap
\<lbrace>\<lambda>_ s. zombies_final s\<rbrace>"
unfolding set_asid_pool_def including unfold_objects
by (wpsimp wp: set_object_zombies[THEN hoare_set_object_weaken_pre]
simp: a_type_def)
lemma set_asid_pool_zombies_state_refs [wp]:
"\<lbrace>\<lambda>s. P (state_refs_of s)\<rbrace>
set_asid_pool p ap
\<lbrace>\<lambda>_ s. P (state_refs_of s)\<rbrace>"
unfolding set_asid_pool_def including unfold_objects
apply (wpsimp wp: set_object_wp_strong
simp: a_type_def)
apply (erule rsubst [where P=P], rule ext)
apply (clarsimp simp: state_refs_of_def)
done
lemma set_asid_pool_zombies_state_hyp_refs [wp]:
"\<lbrace>\<lambda>s. P (state_hyp_refs_of s)\<rbrace>
set_asid_pool p ap
\<lbrace>\<lambda>_ s. P (state_hyp_refs_of s)\<rbrace>"
unfolding set_asid_pool_def including unfold_objects
apply (wpsimp wp: set_object_wp_strong
simp: a_type_def)
apply (erule rsubst [where P=P], rule ext)
apply (simp add: state_hyp_refs_of_def)
done
lemma set_asid_pool_cdt [wp]:
"\<lbrace>\<lambda>s. P (cdt s)\<rbrace>
set_asid_pool p ap
\<lbrace>\<lambda>_ s. P (cdt s)\<rbrace>"
unfolding set_asid_pool_def including unfold_objects
by wpsimp
lemma set_asid_pool_caps_of_state [wp]:
"\<lbrace>\<lambda>s. P (caps_of_state s)\<rbrace> set_asid_pool p ap \<lbrace>\<lambda>_ s. P (caps_of_state s)\<rbrace>"
unfolding set_asid_pool_def including unfold_objects
apply (wpsimp wp: set_object_wp_strong
simp: a_type_def)
apply (subst cte_wp_caps_of_lift)
prefer 2
apply assumption
apply (clarsimp simp: cte_wp_at_cases)
done
lemma set_asid_pool_valid_mdb [wp]:
"\<lbrace>\<lambda>s. valid_mdb s\<rbrace>
set_asid_pool p ap
\<lbrace>\<lambda>_ s. valid_mdb s\<rbrace>"
including unfold_objects
by (wpsimp wp: valid_mdb_lift simp: set_asid_pool_def set_object_def)
lemma set_asid_pool_valid_idle [wp]:
"\<lbrace>\<lambda>s. valid_idle s\<rbrace>
set_asid_pool p ap
\<lbrace>\<lambda>_ s. valid_idle s\<rbrace>"
including unfold_objects
by (wpsimp wp: valid_idle_lift simp: set_asid_pool_def)
lemma set_asid_pool_ifunsafe [wp]:
"\<lbrace>\<lambda>s. if_unsafe_then_cap s\<rbrace>
set_asid_pool p ap
\<lbrace>\<lambda>_ s. if_unsafe_then_cap s\<rbrace>"
unfolding set_asid_pool_def including unfold_objects
by (wpsimp wp: set_object_ifunsafe[THEN hoare_set_object_weaken_pre]
simp: a_type_def)
lemma set_asid_pool_reply_caps [wp]:
"\<lbrace>\<lambda>s. valid_reply_caps s\<rbrace>
set_asid_pool p ap
\<lbrace>\<lambda>_ s. valid_reply_caps s\<rbrace>"
by (wp valid_reply_caps_st_cte_lift)
lemma set_asid_pool_reply_masters [wp]:
"\<lbrace>valid_reply_masters\<rbrace>
set_asid_pool p ap
\<lbrace>\<lambda>_. valid_reply_masters\<rbrace>"
by (wp valid_reply_masters_cte_lift)
crunch global_ref [wp]: set_asid_pool "\<lambda>s. P (global_refs s)"
(wp: crunch_wps)
crunch idle [wp]: set_asid_pool "\<lambda>s. P (idle_thread s)"
(wp: crunch_wps)
crunch irq [wp]: set_asid_pool "\<lambda>s. P (interrupt_irq_node s)"
(wp: crunch_wps)
crunch valid_irq_states[wp]: set_asid_pool "valid_irq_states"
(wp: crunch_wps)
lemma set_asid_pool_valid_global [wp]:
"\<lbrace>\<lambda>s. valid_global_refs s\<rbrace>
set_asid_pool p ap
\<lbrace>\<lambda>_ s. valid_global_refs s\<rbrace>"
by (wp valid_global_refs_cte_lift)
crunch interrupt_states[wp]: set_asid_pool "\<lambda>s. P (interrupt_states s)"
(wp: crunch_wps)
lemma set_asid_pool_vspace_objs_unmap':
"\<lbrace>valid_vspace_objs and (\<lambda>s. (\<exists>\<rhd>p) s \<longrightarrow> valid_vspace_obj (ASIDPool ap) s) and
obj_at (\<lambda>ko. \<exists>ap'. ko = ArchObj (ASIDPool ap') \<and> graph_of ap \<subseteq> graph_of ap') p\<rbrace>
set_asid_pool p ap \<lbrace>\<lambda>_. valid_vspace_objs\<rbrace>"
unfolding set_asid_pool_def including unfold_objects
apply (wpsimp wp: set_object_vspace_objs simp: a_type_simps)
apply (fastforce simp: vs_refs_def)
done
lemma set_asid_pool_vspace_objs_unmap:
"\<lbrace>valid_vspace_objs and ko_at (ArchObj (ASIDPool ap)) p\<rbrace>
set_asid_pool p (ap |` S) \<lbrace>\<lambda>_. valid_vspace_objs\<rbrace>"
apply (wp set_asid_pool_vspace_objs_unmap')
apply (clarsimp simp: obj_at_def graph_of_restrict_map)
apply (drule valid_vspace_objsD, simp add: obj_at_def, assumption)
apply simp
by (auto simp: obj_at_def dest!: ran_restrictD)
lemma set_asid_pool_table_caps [wp]:
"\<lbrace>valid_table_caps\<rbrace> set_asid_pool p ap \<lbrace>\<lambda>_. valid_table_caps\<rbrace>"
apply (rule valid_table_caps_lift)
apply (rule set_asid_pool_caps_of_state)
apply wpsimp
unfolding set_asid_pool_def including unfold_objects
apply (wpsimp wp: set_object_wp_strong simp: a_type_def empty_table_def)
apply (metis kernel_object_exhaust)
done
lemma set_asid_pool_vs_lookup_unmap':
"\<lbrace>valid_vs_lookup and
obj_at (\<lambda>ko. \<exists>ap'. ko = ArchObj (ASIDPool ap') \<and> graph_of ap \<subseteq> graph_of ap') p\<rbrace>
set_asid_pool p ap \<lbrace>\<lambda>_. valid_vs_lookup\<rbrace>"
apply (simp add: valid_vs_lookup_def pred_conj_def)
apply (rule hoare_lift_Pf2 [where f=caps_of_state];wp?)
apply (simp add: set_asid_pool_def set_object_def)
apply (wp get_object_wp)
apply (clarsimp simp: obj_at_def simp del: fun_upd_apply del: disjCI
split: kernel_object.splits arch_kernel_obj.splits)
subgoal for \<dots> pa ref
apply (spec pa)
apply (spec ref)
apply (erule impE)
apply (erule vs_lookup_pages_stateI)
by (clarsimp simp: obj_at_def vs_refs_pages_def split: if_split_asm)
fastforce+
done
lemma set_asid_pool_vs_lookup_unmap:
"\<lbrace>valid_vs_lookup and ko_at (ArchObj (ASIDPool ap)) p\<rbrace>
set_asid_pool p (ap |` S) \<lbrace>\<lambda>_. valid_vs_lookup\<rbrace>"
apply (wp set_asid_pool_vs_lookup_unmap')
by (clarsimp simp: obj_at_def
elim!: subsetD [OF graph_of_restrict_map])
lemma valid_pte_typ_at:
"(\<And>T p. typ_at (AArch T) p s = typ_at (AArch T) p s') \<Longrightarrow>
valid_pte pte s = valid_pte pte s'"
by (case_tac pte, auto simp add: data_at_def)
lemma valid_pde_typ_at:
"(\<And>T p. typ_at (AArch T) p s = typ_at (AArch T) p s') \<Longrightarrow>
valid_pde pde s = valid_pde pde s'"
by (case_tac pde, auto simp add: data_at_def)
lemma valid_vspace_obj_same_type:
"\<lbrakk>valid_vspace_obj ao s; kheap s p = Some ko; a_type ko' = a_type ko\<rbrakk>
\<Longrightarrow> valid_vspace_obj ao (s\<lparr>kheap := kheap s(p \<mapsto> ko')\<rparr>)"
apply (rule hoare_to_pure_kheap_upd[OF valid_vspace_obj_typ])
by (auto simp: obj_at_def)
lemma set_asid_pool_global_objs [wp]:
"\<lbrace>valid_global_objs and valid_arch_state\<rbrace>
set_asid_pool p ap
\<lbrace>\<lambda>_. valid_global_objs\<rbrace>"
apply (simp add: set_asid_pool_def set_object_def a_type_def)
apply (wp get_object_wp)
apply (clarsimp simp del: fun_upd_apply
split: kernel_object.splits arch_kernel_obj.splits)
apply (clarsimp simp: valid_global_objs_def valid_vso_at_def)
apply (rule conjI)
apply (clarsimp simp: obj_at_def)
apply (rule conjI)
subgoal by (clarsimp simp: valid_arch_state_def obj_at_def a_type_def)
apply clarsimp
apply (erule (1) valid_vspace_obj_same_type)
subgoal by (simp add: a_type_def)
apply (rule conjI)
subgoal by (clarsimp simp: obj_at_def valid_arch_state_def a_type_def)
apply (clarsimp simp: obj_at_def)
apply (drule (1) bspec)
by clarsimp
crunch v_ker_map[wp]: set_asid_pool "valid_kernel_mappings"
(ignore: set_object wp: set_object_v_ker_map crunch_wps)
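(* Shrinking an ASID pool (restricting it to S, or deleting a single entry)
   preserves valid_asid_map provided the removed ASIDs have no arm_asid_map
   entry. *)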
lemma set_asid_pool_restrict_asid_map:
"\<lbrace>valid_asid_map and ko_at (ArchObj (ASIDPool ap)) p and
(\<lambda>s. \<forall>asid. asid \<le> mask asid_bits \<longrightarrow> ucast asid \<notin> S \<longrightarrow>
arm_asid_table (arch_state s) (asid_high_bits_of asid) = Some p \<longrightarrow>
arm_asid_map (arch_state s) asid = None)\<rbrace>
set_asid_pool p (ap |` S) \<lbrace>\<lambda>_. valid_asid_map\<rbrace>"
apply (simp add: set_asid_pool_def valid_asid_map_def set_object_def)
apply (wp get_object_wp)
apply (clarsimp split: kernel_object.splits arch_kernel_obj.splits
simp del: fun_upd_apply)
apply (drule(1) bspec)
apply (clarsimp simp: vspace_at_asid_def obj_at_def graph_of_def)
apply (drule subsetD, erule domI)
apply simp
apply (drule spec, drule(1) mp)
apply simp
apply (erule vs_lookupE)
apply (rule vs_lookupI, simp)
apply (clarsimp simp: vs_asid_refs_def graph_of_def)
apply (drule rtranclD)
apply (erule disjE, clarsimp)
apply clarsimp
apply (drule tranclD)
apply clarsimp
apply (rule r_into_rtrancl)
apply (drule vs_lookup1D)
apply clarsimp
apply (subst vs_lookup1_def)
apply (clarsimp simp: obj_at_def)
apply (erule rtranclE)
apply (clarsimp simp: vs_refs_def graph_of_def)
apply (rule image_eqI[where x="(_, _)"])
apply (simp add: split_def)
apply (clarsimp simp: restrict_map_def)
apply (drule ucast_up_inj, simp)
apply (simp add: mask_asid_low_bits_ucast_ucast)
apply (drule ucast_up_inj, simp)
apply clarsimp
apply clarsimp
apply (drule vs_lookup1_trans_is_append)
apply clarsimp
apply (drule vs_lookup1D)
by clarsimp
lemma set_asid_pool_asid_map_unmap:
"\<lbrace>valid_asid_map and ko_at (ArchObj (ASIDPool ap)) p and
(\<lambda>s. \<forall>asid. asid \<le> mask asid_bits \<longrightarrow>
ucast asid = x \<longrightarrow>
arm_asid_table (arch_state s) (asid_high_bits_of asid) = Some p \<longrightarrow>
arm_asid_map (arch_state s) asid = None)\<rbrace>
set_asid_pool p (ap(x := None)) \<lbrace>\<lambda>_. valid_asid_map\<rbrace>"
using set_asid_pool_restrict_asid_map[where S="- {x}"]
by (simp add: restrict_map_def fun_upd_def if_flip)
lemma set_asid_pool_vspace_objs_unmap_single:
"\<lbrace>valid_vspace_objs and ko_at (ArchObj (ASIDPool ap)) p\<rbrace>
set_asid_pool p (ap(x := None)) \<lbrace>\<lambda>_. valid_vspace_objs\<rbrace>"
using set_asid_pool_vspace_objs_unmap[where S="- {x}"]
by (simp add: restrict_map_def fun_upd_def if_flip)
lemma set_asid_pool_only_idle [wp]:
"\<lbrace>only_idle\<rbrace> set_asid_pool p ap \<lbrace>\<lambda>_. only_idle\<rbrace>"
by (wp only_idle_lift set_asid_pool_typ_at)
lemma set_asid_pool_equal_mappings [wp]:
"\<lbrace>equal_kernel_mappings\<rbrace> set_asid_pool p ap \<lbrace>\<lambda>rv. equal_kernel_mappings\<rbrace>"
by (simp add: set_asid_pool_def | wp set_object_equal_mappings get_object_wp)+
lemma set_asid_pool_valid_global_vspace_mappings[wp]:
"\<lbrace>valid_global_vspace_mappings\<rbrace>
set_asid_pool p ap \<lbrace>\<lambda>rv. valid_global_vspace_mappings\<rbrace>"
apply (simp add: set_asid_pool_def)
apply (wp set_object_global_vspace_mappings[THEN hoare_set_object_weaken_pre])
including unfold_objects
by (clarsimp simp: a_type_def)
lemma set_asid_pool_kernel_window[wp]:
"\<lbrace>pspace_in_kernel_window\<rbrace> set_asid_pool p ap \<lbrace>\<lambda>rv. pspace_in_kernel_window\<rbrace>"
unfolding set_asid_pool_def including unfold_objects
by (wpsimp wp: set_object_pspace_in_kernel_window[THEN hoare_set_object_weaken_pre])
lemma set_asid_pool_pspace_respects_device_region[wp]:
"\<lbrace>pspace_respects_device_region\<rbrace> set_asid_pool p ap \<lbrace>\<lambda>rv. pspace_respects_device_region\<rbrace>"
unfolding set_asid_pool_def including unfold_objects
by (wpsimp wp: set_object_pspace_respects_device_region[THEN hoare_set_object_weaken_pre])
lemma set_asid_pool_caps_kernel_window[wp]:
"\<lbrace>cap_refs_in_kernel_window\<rbrace> set_asid_pool p ap \<lbrace>\<lambda>rv. cap_refs_in_kernel_window\<rbrace>"
unfolding set_asid_pool_def including unfold_objects
by (wpsimp wp: set_object_cap_refs_in_kernel_window[THEN hoare_set_object_weaken_pre]
simp: a_type_def)
lemma set_asid_pool_caps_respects_device_region[wp]:
"\<lbrace>cap_refs_respects_device_region\<rbrace> set_asid_pool p ap \<lbrace>\<lambda>rv. cap_refs_respects_device_region\<rbrace>"
apply (simp add: set_asid_pool_def)
apply (wp set_object_cap_refs_respects_device_region[THEN hoare_set_object_weaken_pre])
including unfold_objects
by (simp add: a_type_def)
lemma set_asid_pool_valid_ioc[wp]:
"\<lbrace>valid_ioc\<rbrace> set_asid_pool p ap \<lbrace>\<lambda>_. valid_ioc\<rbrace>"
apply (simp add: set_asid_pool_def)
apply (wp set_object_valid_ioc_no_caps[THEN hoare_set_object_weaken_pre])
including unfold_objects
by (clarsimp simp: valid_def get_object_def simpler_gets_def assert_def
return_def fail_def bind_def
a_type_simps is_tcb is_cap_table)
lemma set_asid_pool_vms[wp]:
"\<lbrace>valid_machine_state\<rbrace> set_asid_pool p S \<lbrace>\<lambda>_. valid_machine_state\<rbrace>"
apply (simp add: set_asid_pool_def set_object_def)
apply (wp get_object_wp)
apply clarify
apply (erule valid_machine_state_heap_updI)
apply (fastforce simp: a_type_def obj_at_def
split: kernel_object.splits arch_kernel_obj.splits)+
done
lemma set_asid_pool_invs_restrict:
"\<lbrace>invs and ko_at (ArchObj (ASIDPool ap)) p and
(\<lambda>s. \<forall>asid. asid \<le> mask asid_bits \<longrightarrow> ucast asid \<notin> S \<longrightarrow>
arm_asid_table (arch_state s) (asid_high_bits_of asid) = Some p \<longrightarrow>
arm_asid_map (arch_state s) asid = None)\<rbrace>
set_asid_pool p (ap |` S) \<lbrace>\<lambda>_. invs\<rbrace>"
apply (simp add: invs_def valid_state_def valid_pspace_def
valid_arch_caps_def)
apply (wp valid_irq_node_typ set_asid_pool_typ_at
set_asid_pool_vspace_objs_unmap valid_irq_handlers_lift
set_asid_pool_vs_lookup_unmap set_asid_pool_restrict_asid_map)
apply simp
done
lemmas set_asid_pool_cte_wp_at1[wp]
= hoare_cte_wp_caps_of_state_lift [OF set_asid_pool_caps_of_state]
lemma mdb_cte_at_set_asid_pool[wp]:
"\<lbrace>\<lambda>s. mdb_cte_at (swp (cte_wp_at ((\<noteq>) cap.NullCap)) s) (cdt s)\<rbrace>
set_asid_pool y pool
\<lbrace>\<lambda>r s. mdb_cte_at (swp (cte_wp_at ((\<noteq>) cap.NullCap)) s) (cdt s)\<rbrace>"
apply (clarsimp simp:mdb_cte_at_def)
apply (simp only: imp_conv_disj)
apply (wp hoare_vcg_disj_lift hoare_vcg_all_lift)
done
lemma set_asid_pool_invs_unmap:
"\<lbrace>invs and ko_at (ArchObj (ASIDPool ap)) p and
(\<lambda>s. \<forall>asid. asid \<le> mask asid_bits \<longrightarrow> ucast asid = x \<longrightarrow>
arm_asid_table (arch_state s) (asid_high_bits_of asid) = Some p \<longrightarrow>
arm_asid_map (arch_state s) asid = None)\<rbrace>
set_asid_pool p (ap(x := None)) \<lbrace>\<lambda>_. invs\<rbrace>"
using set_asid_pool_invs_restrict[where S="- {x}"]
by (simp add: restrict_map_def fun_upd_def if_flip)
lemma valid_slots_typ_at:
assumes x: "\<And>T p. \<lbrace>typ_at (AArch T) p\<rbrace> f \<lbrace>\<lambda>rv. typ_at (AArch T) p\<rbrace>"
assumes y: "\<And>p. \<lbrace>\<exists>\<rhd> p\<rbrace> f \<lbrace>\<lambda>rv. \<exists>\<rhd> p\<rbrace>"
shows "\<lbrace>valid_slots m\<rbrace> f \<lbrace>\<lambda>rv. valid_slots m\<rbrace>"
unfolding valid_slots_def
by (cases m; clarsimp; wp x y hoare_vcg_const_Ball_lift valid_pte_lift
valid_pde_lift pte_at_atyp pde_at_atyp)
lemma ucast_ucast_id:
"(len_of TYPE('a)) < (len_of TYPE('b)) \<Longrightarrow> ucast ((ucast (x::('a::len) word))::('b::len) word) = x"
by (auto intro: ucast_up_ucast_id simp: is_up_def source_size_def target_size_def word_size)
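(* For instance, casting an 8-bit word up to 32 bits and back down again
   yields the original word, since 8 < 32. *)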
lemma kernel_base_kernel_mapping_slots:
"x < kernel_base \<Longrightarrow> ucast (x >> 20) \<notin> kernel_mapping_slots"
apply (simp add: kernel_mapping_slots_def kernel_base_def)
apply (subst ucast_le_ucast[symmetric, where 'a=12 and 'b=32])
apply simp
apply (subst ucast_ucast_mask)
apply (simp add: ucast_def)
apply (subst less_mask_eq)
apply (rule vptr_shiftr_le_2p[unfolded pageBits_def])
apply (subst word_not_le)
apply word_bitwise
done
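(* vs_lookup reachability of the slots returned by lookup_pt_slot, for
   vptrs below kernel_base. *)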
lemma lookup_pt_slot_looks_up [wp]:
"\<lbrace>ref \<rhd> pd and K (is_aligned pd 14 \<and> vptr < kernel_base)
and valid_arch_state and valid_vspace_objs and equal_kernel_mappings
and pspace_aligned and valid_global_objs\<rbrace>
lookup_pt_slot pd vptr
\<lbrace>\<lambda>pt_slot. (VSRef (vptr >> 20 << 2 >> 2) (Some APageDirectory) # ref) \<rhd> (pt_slot && ~~ mask pt_bits)\<rbrace>, -"
apply (simp add: lookup_pt_slot_def)
apply (wp get_pde_wp|wpc)+
apply clarsimp
apply (rule vs_lookup_step, assumption)
apply (clarsimp simp: vs_lookup1_def lookup_pd_slot_def Let_def pd_shifting pd_shifting_dual)
apply (rule exI, rule conjI, assumption)
subgoal for s _ x
apply (prop_tac "ptrFromPAddr x + ((vptr >> 12) && 0xFF << 2) && ~~ mask pt_bits = ptrFromPAddr x")
apply (prop_tac "is_aligned (ptrFromPAddr x) 10")
apply (drule (2) valid_vspace_objsD)
apply clarsimp
apply (erule_tac x="ucast (vptr >> 20 << 2 >> 2)" in ballE)
apply (thin_tac "obj_at P x s" for P x)+
apply (clarsimp simp: obj_at_def invs_def valid_state_def valid_pspace_def pspace_aligned_def)
apply (drule bspec, blast)
apply (clarsimp simp: a_type_def
split: kernel_object.splits arch_kernel_obj.splits if_split_asm)
apply (frule kernel_mapping_slots_empty_pdeI)
apply ((simp add: obj_at_def)+)[4]
apply (clarsimp simp: pde_ref_def second_level_tables_def)
apply (erule is_aligned_global_pt[unfolded pt_bits_def pageBits_def, simplified])
apply simp+
apply (subgoal_tac "(vptr >> 12) && 0xFF << 2 < 2 ^ 10")
apply (subst is_aligned_add_or, (simp add: pt_bits_def pageBits_def)+)
apply (subst word_ao_dist)
apply (subst mask_out_sub_mask [where x="(vptr >> 12) && 0xFF << 2"])
apply (subst less_mask_eq, simp)
apply (subst is_aligned_neg_mask_eq, simp)
apply (clarsimp simp: valid_arch_state_def valid_global_pts_def)
apply (rule shiftl_less_t2n, simp)
apply (rule and_mask_less'[where n=8, unfolded mask_def, simplified], (simp)+)
apply (subst shiftl_shiftr_id)
apply (simp add: word_bits_def)+
apply word_bitwise
apply (subst (asm) shiftl_shiftr_id)
apply (simp add: word_bits_def)+
apply word_bitwise
apply (erule vs_refs_pdI)
apply (erule kernel_base_kernel_mapping_slots)
apply (intro allI impI)
apply (simp add: nth_shiftr)
apply (rule bang_big[simplified])
by (simp add: word_size)
done
lemma lookup_pt_slot_reachable [wp]:
"\<lbrace>\<exists>\<rhd> pd and K (is_aligned pd 14 \<and> vptr < kernel_base)
and valid_arch_state and valid_vspace_objs and equal_kernel_mappings
and pspace_aligned and valid_global_objs\<rbrace>
lookup_pt_slot pd vptr
\<lbrace>\<lambda>pt_slot. \<exists>\<rhd> (pt_slot && ~~ mask pt_bits)\<rbrace>, -"
apply (simp add: pred_conj_def ex_simps [symmetric] del: ex_simps)
apply (rule hoare_vcg_ex_lift_R1)
apply (rule hoare_pre)
apply (rule hoare_post_imp_R)
apply (rule lookup_pt_slot_looks_up)
prefer 2
apply clarsimp
apply assumption
apply fastforce
done
lemma lookup_pt_slot_reachable2 [wp]:
"\<lbrace>\<exists>\<rhd> pd and K (is_aligned pd 14 \<and> is_aligned vptr 16 \<and> vptr < kernel_base)
and valid_arch_state and valid_vspace_objs and equal_kernel_mappings
and pspace_aligned and valid_global_objs\<rbrace>
lookup_pt_slot pd vptr
\<lbrace>\<lambda>rv s. \<forall>x\<in>set [0 , 4 .e. 0x3C]. (\<exists>\<rhd> (x + rv && ~~ mask pt_bits)) s\<rbrace>, -"
apply (simp add: lookup_pt_slot_def)
apply (wp get_pde_wp|wpc)+
apply clarsimp
apply (rule exI)
apply (rule vs_lookup_step, assumption)
apply (clarsimp simp: vs_lookup1_def lookup_pd_slot_def Let_def pd_shifting pd_shifting_dual
add.commute add.left_commute)
apply (rule exI, rule conjI, assumption)
apply (rule_tac x="VSRef (vptr >> 20 << 2 >> 2) (Some APageDirectory)" in exI)
apply (subgoal_tac "ptrFromPAddr x + (xa + ((vptr >> 12) && 0xFF << 2)) && ~~ mask pt_bits = ptrFromPAddr x")
prefer 2
apply (subgoal_tac "is_aligned (ptrFromPAddr x) 10")
prefer 2
apply (drule (2) valid_vspace_objsD)
apply clarsimp
apply (erule_tac x="ucast (vptr >> 20 << 2 >> 2)" in ballE)
apply (thin_tac "obj_at P x s" for P x)+
apply (clarsimp simp: obj_at_def pspace_aligned_def)
apply (drule bspec, blast)
apply (clarsimp simp: a_type_def
split: kernel_object.splits arch_kernel_obj.splits if_split_asm)
apply (frule kernel_mapping_slots_empty_pdeI)
apply (simp add: obj_at_def)+
apply clarsimp
apply (erule_tac x="ptrFromPAddr x" in allE)
apply (clarsimp simp: pde_ref_def second_level_tables_def)
apply (rule is_aligned_global_pt[unfolded pt_bits_def pageBits_def, simplified])
apply simp+
apply (subst add_mask_lower_bits)
apply (simp add: pt_bits_def pageBits_def)
prefer 2
apply simp
apply (clarsimp simp: pt_bits_def pageBits_def)
apply (clarsimp simp: upto_enum_step_def word_shift_by_2 p_le_0xF_helper)
apply (thin_tac "pda x = t" for x t)
apply (subst (asm) word_plus_and_or_coroll)
apply (rule word_eqI)
apply (clarsimp simp: word_size word_bits_def nth_shiftr nth_shiftl is_aligned_nth word_FF_is_mask)
apply (erule_tac x="n - 2" in allE)
apply simp
apply (clarsimp simp: word_size nth_shiftr nth_shiftl is_aligned_nth word_FF_is_mask word_bits_def)
apply (rule conjI, rule refl)
apply (simp add: add.commute add.left_commute)
apply (rule vs_refs_pdI)
prefer 3
apply (clarsimp simp: word_ops_nth_size word_size nth_shiftr nth_shiftl)
apply (drule test_bit_size)
apply (simp add: word_size)
apply fastforce
apply (subst shiftl_shiftr_id)
apply (simp add: word_bits_def)+
apply word_bitwise
apply (erule kernel_base_kernel_mapping_slots)
done
lemma lookup_pt_slot_reachable3 [wp]:
"\<lbrace>\<exists>\<rhd> pd and K (is_aligned pd 14 \<and> is_aligned vptr 16 \<and> vptr < kernel_base)
and valid_arch_state and valid_vspace_objs and equal_kernel_mappings
and pspace_aligned and valid_global_objs\<rbrace>
lookup_pt_slot pd vptr
\<lbrace>\<lambda>p s. \<forall>x\<in>set [p, p + 4 .e. p + 0x3C]. (\<exists>\<rhd> (x && ~~ mask pt_bits)) s\<rbrace>, -"
apply (simp add: lookup_pt_slot_def)
apply (wp get_pde_wp|wpc)+
apply (clarsimp del: ballI)
apply (clarsimp simp: lookup_pd_slot_def Let_def del: ballI)
apply (simp add: pd_shifting)
apply (frule (2) valid_vspace_objsD)
apply (clarsimp del: ballI)
apply (erule_tac x="(ucast (pd + (vptr >> 20 << 2) && mask pd_bits >> 2))" in ballE)
apply (clarsimp del: ballI)
apply (subgoal_tac "is_aligned (ptrFromPAddr x) 10")
prefer 2
apply (thin_tac "ko_at P p s" for P p)+
apply (clarsimp simp: obj_at_def add.commute add.left_commute pspace_aligned_def)
apply (drule bspec, blast)
apply (clarsimp simp: a_type_def split: kernel_object.splits arch_kernel_obj.splits if_split_asm)
apply (subst p_0x3C_shift)
apply (rule aligned_add_aligned, assumption)
apply (clarsimp intro!: is_aligned_andI1 is_aligned_shiftl is_aligned_shiftr)
apply simp
apply clarsimp
apply (rule exI)
apply (rule vs_lookup_step, assumption)
apply (clarsimp simp: vs_lookup1_def lookup_pd_slot_def Let_def pd_shifting pd_shifting_dual add.commute add.left_commute)
apply (rule exI, rule conjI, assumption)
apply (rule_tac x="VSRef (vptr >> 20 << 2 >> 2) (Some APageDirectory)" in exI)
apply (rule conjI, rule refl)
apply (subgoal_tac "ptrFromPAddr x + (xc + ((vptr >> 12) && 0xFF << 2)) && ~~ mask pt_bits = ptrFromPAddr x")
prefer 2
apply (subst add_mask_lower_bits)
apply (simp add: pt_bits_def pageBits_def)
prefer 2
apply simp
apply (clarsimp simp: pt_bits_def pageBits_def)
apply (clarsimp simp: upto_enum_step_def word_shift_by_2 p_le_0xF_helper)
apply (thin_tac "pda x = t" for x t)
apply (subst (asm) word_plus_and_or_coroll)
apply (rule word_eqI)
apply (clarsimp simp: word_size word_bits_def nth_shiftr nth_shiftl is_aligned_nth word_FF_is_mask)
apply (erule_tac x="n - 2" in allE)
apply simp
apply (clarsimp simp: word_size nth_shiftr nth_shiftl is_aligned_nth word_FF_is_mask word_bits_def)
apply (simp add: add.commute add.left_commute)
apply (rule vs_refs_pdI)
prefer 3
apply (clarsimp simp: word_ops_nth_size word_size nth_shiftr nth_shiftl)
apply (drule test_bit_size)
apply (simp add: word_size)
apply fastforce
apply (subst shiftl_shiftr_id)
apply (simp add: word_bits_def)+
apply word_bitwise
apply (erule kernel_base_kernel_mapping_slots)
apply clarsimp
apply (subst (asm) mask_add_aligned, simp add: pd_bits_def pageBits_def)+
apply (simp add: shiftr_over_and_dist)
apply (subst (asm) shiftl_shiftr_id, (simp add: word_bits_conv)+, word_bitwise)+
apply (subst (asm) shiftr_mask2, (simp add: pd_bits_def pageBits_def)+)+
apply (simp add: shiftr_mask_eq[where x=vptr and n=20, unfolded word_size, simplified])
apply (drule kernel_base_kernel_mapping_slots, simp)
done
lemma pd_aligned:
"\<lbrakk>pspace_aligned s; page_directory_at pd s\<rbrakk> \<Longrightarrow> is_aligned pd 14"
apply (clarsimp simp: pspace_aligned_def obj_at_def)
apply (drule bspec, blast)
apply (clarsimp simp: a_type_def split: kernel_object.splits arch_kernel_obj.splits if_split_asm)
done
lemma shiftr_shiftl_mask_pd_bits:
"(((vptr :: word32) >> 20) << 2) && mask pd_bits = (vptr >> 20) << 2"
apply (rule iffD2 [OF mask_eq_iff_w2p])
apply (simp add: pd_bits_def pageBits_def word_size)
apply (rule shiftl_less_t2n)
apply (rule shiftr_less_t2n3,
simp_all add: pd_bits_def word_bits_def pageBits_def)
done
lemma triple_shift_fun:
"x >> 20 << 2 >> 2 = (x :: ('a :: len) word) >> 20"
apply (rule word_eqI)
apply (simp add: word_size nth_shiftr nth_shiftl)
apply safe
apply (drule test_bit_size)
apply (simp add: word_size)
done
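(* Intuition (added): the "<< 2" introduces two zero low bits and the final
   ">> 2" removes exactly those; no high bits are lost, since ">> 20" has
   already cleared the top 20 bit positions. *)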
lemma shiftr_20_unat_ucast:
"unat (ucast (x >> 20 :: word32) :: 12 word) = unat (x >> 20)"
using vptr_shiftr_le_2p[where vptr=x]
apply (simp only: unat_ucast)
apply (rule mod_less)
apply (rule unat_less_power)
apply (simp add: word_bits_def)
apply (simp add: pageBits_def)
done
lemma shiftr_20_less:
"((ucast (x >> 20) :: 12 word) < ucast (y >> 20)) = ((x >> 20 :: word32) < y >> 20)"
"((ucast (x >> 20) :: 12 word) \<le> ucast (y >> 20)) = ((x >> 20 :: word32) \<le> y >> 20)"
by (simp add: word_less_nat_alt word_le_nat_alt shiftr_20_unat_ucast)+
lemma kernel_base_aligned_pageBits:
"is_aligned kernel_base pageBits"
by (simp add: is_aligned_def kernel_base_def pageBits_def)
lemma kernel_base_ge_observation:
"(kernel_base \<le> x) = (x && ~~ mask 29 = kernel_base)"
apply (subst mask_in_range)
apply (simp add: kernel_base_def is_aligned_def)
apply (simp add: kernel_base_def)
done
lemma kernel_base_less_observation:
"(x < kernel_base) = (x && ~~ mask 29 \<noteq> kernel_base)"
apply (simp add: linorder_not_le[symmetric] kernel_base_ge_observation)
done
lemma vptr_shifting_helper_magic:
"(x = 0) \<or> (x < 2 ^ 4 \<and> vmsz_aligned (vptr::word32) ARMSuperSection)
\<Longrightarrow> (x << 2) + (vptr >> 20 << 2) = ((vptr + (x << 20)) >> 20 << 2)"
apply (erule disjE, simp_all)
apply (clarsimp simp: vmsz_aligned_def)
apply (subst is_aligned_add_or, assumption)
apply (rule shiftl_less_t2n)
apply simp
apply simp
apply (simp add: shiftl_over_or_dist shiftr_over_or_dist)
apply (subst shiftl_shiftr_id)
apply (simp add: word_bits_def)
apply (simp add: word_bits_def)
apply unat_arith
apply (subst field_simps, rule is_aligned_add_or[where n=6])
apply (intro is_aligned_shiftl is_aligned_shiftr)
apply simp
apply (rule shiftl_less_t2n, simp_all)
done
lemma less_kernel_base_mapping_slots_both:
"\<lbrakk> vptr < kernel_base; is_aligned pd pd_bits;
(x = 0)
\<or> (x < 2 ^ 4 \<and> vmsz_aligned vptr ARMSuperSection) \<rbrakk>
\<Longrightarrow> ucast ((x << 2) + lookup_pd_slot pd vptr && mask pd_bits >> 2)
\<notin> kernel_mapping_slots"
apply (simp add: lookup_pd_slot_def Let_def)
apply (subst field_simps, subst mask_add_aligned, assumption)
apply (subst vptr_shifting_helper_magic)
apply simp
apply (simp add: shiftr_shiftl_mask_pd_bits triple_shift_fun)
apply (simp add: kernel_mapping_slots_def linorder_not_le
shiftr_20_less)
apply (rule le_m1_iff_lt[THEN iffD1,THEN iffD1])
apply (simp add:kernel_base_def)
apply (erule disjE)
apply (drule word_less_sub_1)
apply simp
apply (drule le_shiftr[where n=20])
apply (clarsimp simp :kernel_base_def vmsz_aligned_def)+
apply (drule(1) gap_between_aligned)
apply (simp add:is_aligned_def)
apply simp
apply (rule order.trans[OF le_shiftr])
apply (rule word_plus_mono_right[OF _ is_aligned_no_wrap'[where off = "2^24-1"]])
apply (rule word_less_sub_1)
apply (rule shiftl_less_t2n)
apply simp+
apply (clarsimp dest!:word_less_sub_1)
apply (erule order.trans[OF le_shiftr])
apply simp
done
lemmas less_kernel_base_mapping_slots
= less_kernel_base_mapping_slots_both[where x=0, simplified]
lemma is_aligned_lookup_pd_slot:
"\<lbrakk>is_aligned vptr 24; is_aligned pd 6\<rbrakk>
\<Longrightarrow> is_aligned (lookup_pd_slot pd vptr) 6"
apply (clarsimp simp: lookup_pd_slot_def)
apply (erule aligned_add_aligned)
apply (rule is_aligned_shiftl)
apply (rule is_aligned_shiftr)
apply simp
apply (simp add: word_bits_conv)
done
lemma lookup_pd_slot_eq:
"is_aligned pd pd_bits \<Longrightarrow>
(lookup_pd_slot pd vptr && ~~ mask pd_bits) = pd"
apply (clarsimp simp: lookup_pd_slot_def)
apply (erule conjunct2[OF is_aligned_add_helper])
apply (rule shiftl_less_t2n)
apply (rule shiftr_less_t2n3)
apply (simp_all add: pd_bits_def pageBits_def)
done
lemma is_aligned_lookup_pt_slot_no_fail:
"\<lbrakk>is_aligned vptr 16; is_aligned pt 6\<rbrakk>
\<Longrightarrow> is_aligned (lookup_pt_slot_no_fail pt vptr) 6"
apply (clarsimp simp: lookup_pt_slot_no_fail_def)
apply (erule aligned_add_aligned)
apply (rule is_aligned_shiftl)
apply (rule is_aligned_andI1)
apply (rule is_aligned_shiftr)
apply simp
apply simp
done
lemma lookup_pt_slot_non_empty:
"\<lbrace>valid_vspace_objs and \<exists>\<rhd> pd and page_directory_at pd and pspace_aligned
and K (is_aligned vptr 16 \<and> vptr < kernel_base)\<rbrace>
lookup_pt_slot pd vptr \<lbrace>\<lambda>rv s. [rv , rv + 4 .e. rv + 0x3C] \<noteq> []\<rbrace>, -"
apply (simp add:lookup_pt_slot_def)
apply (wp get_pde_wp| wpc | clarsimp)+
apply (simp add:valid_vspace_objs_def)
apply (drule_tac x = "(lookup_pd_slot pd vptr && ~~ mask pd_bits)" in spec)
apply (erule impE)
apply (subst lookup_pd_slot_eq)
apply (clarsimp simp: obj_at_def)
apply (drule_tac p = pd in pspace_alignedD)
apply simp
apply (simp add:obj_bits_def pageBits_def pd_bits_def)
apply fastforce
apply (drule spec)
apply (erule(1) impE)
apply (clarsimp simp:)
apply (drule_tac x = "(ucast (lookup_pd_slot pd vptr && mask pd_bits >> 2))" in bspec)
apply (drule less_kernel_base_mapping_slots)
apply (clarsimp simp: obj_at_def)
apply (drule_tac p = pd in pspace_alignedD)
apply simp
apply (simp add:obj_bits_def pageBits_def pd_bits_def)
apply simp
apply (clarsimp simp: obj_at_def)
apply (drule_tac p = "(ptrFromPAddr x)" in pspace_alignedD)
apply simp
apply (drule arg_cong[where f = length])
apply (subst (asm) length_upto_enum_step)
apply (rule_tac sz = 6 in is_aligned_no_wrap'[rotated])
apply simp
apply (erule aligned_add_aligned)
apply (rule is_aligned_shiftl)
apply (rule is_aligned_andI1[OF is_aligned_shiftr])
apply simp
apply (simp add:word_bits_conv)
apply (simp add:word_bits_conv)
done
(* FIXME: move *)
lemma pd_bits: "pd_bits = 14"
by (simp add: pd_bits_def pageBits_def)
lemma word_shift_by_n:
"x * (2^n) = (x::'a::len word) << n"
by (simp add: shiftl_t2n)
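(* The n = 2 instance is the word_shift_by_2 form used elsewhere in this
   theory, i.e. x * 4 = x << 2. *)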
lemma create_mapping_entries_valid_slots [wp]:
"\<lbrace>valid_arch_state and valid_vspace_objs and equal_kernel_mappings
and pspace_aligned and valid_global_objs
and \<exists>\<rhd> pd and page_directory_at pd and data_at sz (ptrFromPAddr base) and
K (vmsz_aligned base sz \<and> vmsz_aligned vptr sz \<and> vptr < kernel_base
\<and> vm_rights \<in> valid_vm_rights)\<rbrace>
create_mapping_entries base vptr sz vm_rights attrib pd
\<lbrace>\<lambda>m. valid_slots m\<rbrace>, -"
apply (cases sz)
apply (rule hoare_pre)
apply (wp lookup_pt_slot_inv | simp add: valid_slots_def)+
apply (clarsimp simp: vmsz_aligned_def pd_aligned pageBits_def)
apply (rule hoare_pre)
apply (simp add: valid_slots_def largePagePTE_offsets_def pd_bits_def)
apply (wpsimp wp: lookup_pt_slot_inv lookup_pt_slot_non_empty
| simp add: valid_slots_def ball_conj_distrib largePagePTE_offsets_def)+
apply (clarsimp simp: pd_aligned vmsz_aligned_def upto_enum_def upto_enum_step_def
is_aligned_weaken pageBits_def)
apply (clarsimp simp add: valid_slots_def)
apply (rule hoare_pre)
apply wp
apply (clarsimp simp: valid_slots_def)
apply (rule conjI)
apply (simp add: lookup_pd_slot_def Let_def)
apply (fastforce simp: pd_shifting pd_aligned)
apply (simp add: page_directory_pde_at_lookupI)
apply (erule less_kernel_base_mapping_slots)
apply (simp add: pd_aligned pd_bits)
apply simp
apply (clarsimp simp: superSectionPDE_offsets_def)
apply (rule hoare_pre)
apply (clarsimp simp add: valid_slots_def)
apply wp
apply simp
apply (elim conjE)
apply (thin_tac "vmsz_aligned base b" for b)
apply (subgoal_tac "is_aligned pd 14")
prefer 2
apply (clarsimp simp: pd_aligned)
apply (clarsimp simp: upto_enum_step_def word_shift_by_2)
apply (clarsimp simp: obj_at_def pde_at_def)
apply (subgoal_tac "is_aligned pd pd_bits")
prefer 2
apply (simp add: pd_bits)
apply (rule conjI, simp add: upto_enum_def)
apply (intro allI impI)
apply (subst less_kernel_base_mapping_slots_both,assumption+)
apply (simp add: word_leq_minus_one_le)
apply (simp add: pd_bits vmsz_aligned_def)
apply (frule (1) is_aligned_lookup_pd_slot
[OF _ is_aligned_weaken[of _ 14 6, simplified]])
apply (subgoal_tac "(p<<2) + lookup_pd_slot pd vptr && ~~ mask 14 = pd")
prefer 2
apply (subst add.commute add.left_commute)
apply (subst and_not_mask_twice[where n=6 and m=14, simplified, symmetric])
apply (subst is_aligned_add_helper[THEN conjunct2], simp)
apply (rule shiftl_less_t2n)
apply (rule word_less_sub_le[THEN iffD1], simp+)
apply (erule lookup_pd_slot_eq[simplified pd_bits])
apply (simp add: a_type_simps)
apply (subst add.commute)
apply (fastforce intro!: aligned_add_aligned is_aligned_shiftl_self)
done
lemma store_pde_lookup_pd:
"\<lbrace>\<exists>\<rhd> pd and page_directory_at pd and valid_vspace_objs
and (\<lambda>s. valid_asid_table (arm_asid_table (arch_state s)) s)\<rbrace>
store_pde p pde \<lbrace>\<lambda>_. \<exists>\<rhd> pd\<rbrace>"
apply (simp add: store_pde_def set_pd_def set_object_def)
apply (wp get_object_wp)
apply clarsimp
apply (clarsimp simp: obj_at_def)
apply (erule vs_lookupE)
apply (clarsimp simp: vs_asid_refs_def graph_of_def)
apply (drule rtranclD)
apply (erule disjE)
apply clarsimp
apply (rule exI)
apply (rule vs_lookup_atI)
apply simp
apply clarsimp
apply (frule (1) valid_asid_tableD)
apply (frule vs_lookup_atI)
apply (frule (2) stronger_vspace_objsD)
apply (clarsimp simp: obj_at_def a_type_def)
apply (case_tac ao, simp_all, clarsimp)
apply (drule tranclD)
apply clarsimp
apply (drule rtranclD)
apply (erule disjE)
apply clarsimp
apply (rule_tac x=ref in exI)
apply (rule vs_lookup_step)
apply (rule vs_lookup_atI)
apply simp
apply (clarsimp simp: vs_lookup1_def)
apply (clarsimp simp: obj_at_def vs_refs_def graph_of_def)
apply clarsimp
apply (drule (1) vs_lookup_step)
apply (frule (2) stronger_vspace_objsD)
apply clarsimp
apply (drule vs_lookup1D)
apply clarsimp
apply (erule obj_atE)+
apply (clarsimp simp: vs_refs_def graph_of_def)
apply (drule bspec, blast)
apply (erule obj_atE)+
apply clarsimp
apply (drule tranclD)
apply clarsimp
apply (drule rtranclD)
apply clarsimp
apply (drule vs_lookup1D)
apply clarsimp
apply (erule obj_atE)+
apply (clarsimp simp: vs_refs_def graph_of_def)
apply (erule_tac x=ab in ballE)
apply (case_tac "pdb ab", simp_all add: pde_ref_def split: if_split_asm)
apply (erule obj_atE)
apply clarsimp
apply (erule disjE)
apply (clarsimp simp: a_type_def)
apply clarsimp
apply (drule tranclD)
apply clarsimp
apply (drule vs_lookup1D)
apply clarsimp
apply (erule obj_atE)+
apply (clarsimp simp: vs_refs_def graph_of_def)
done
lemma store_pde_vspace_objs_unmap:
"\<lbrace>valid_vspace_objs
and valid_pde pde
and K (pde_ref pde = None)\<rbrace>
store_pde p pde \<lbrace>\<lambda>_. valid_vspace_objs\<rbrace>"
apply (simp add: store_pde_def)
apply (wp set_pd_vspace_objs_unmap)
apply clarsimp
apply (rule conjI)
apply clarsimp
apply (drule (1) valid_vspace_objsD, fastforce)
apply (simp add:)
apply (clarsimp simp add: obj_at_def vs_refs_def)
apply (rule pair_imageI)
apply (simp add: graph_of_def split: if_split_asm)
done
(* FIXME: remove magic numbers in other lemmas, use in pde_at_aligned_vptr et al *)
lemma lookup_pd_slot_add_eq:
"\<lbrakk> is_aligned pd pd_bits; is_aligned vptr 24; x \<in> set [0 , 4 .e. 0x3C] \<rbrakk>
\<Longrightarrow> (x + lookup_pd_slot pd vptr && ~~ mask pd_bits) = pd"
apply (simp add: pageBits_def add.commute add.left_commute lookup_pd_slot_def Let_def)
apply (clarsimp simp: upto_enum_step_def word_shift_by_2)
apply (erule add_mask_lower_bits2)
apply (subgoal_tac "2 < pd_bits \<and> size vptr \<le> 18 + pd_bits")
apply (simp add: and_mask_0_iff_le_mask le_mask_iff)
apply (subst word_plus_and_or_coroll)
apply (subst word_bw_comms)
apply (rule aligned_mask_disjoint[where n=6])
apply (rule is_aligned_shiftl, rule is_aligned_shiftr, simp)
apply (rule order.trans, rule leq_high_bits_shiftr_low_bits_leq_bits[where high_bits=4])
apply (clarsimp simp: mask_def, simp)
apply (clarsimp simp: shiftr_over_or_dist word_or_zero)
apply (intro conjI)
apply (clarsimp simp: shiftl_shiftr3)
apply (subgoal_tac "xa >> pd_bits - 2 = 0", simp)
apply (rule shiftr_le_0, rule unat_less_helper)
apply (erule order.strict_trans1, simp)
apply (clarsimp simp: pd_bits_def pageBits_def)
apply (clarsimp simp: shiftl_shiftr2 shiftr_shiftr shiftr_zero_size)
apply (clarsimp simp: pd_bits_def pageBits_def word_bits_size word_bits_def)
done
lemma vs_lookup_arch_update:
"arm_asid_table (f (arch_state s)) = arm_asid_table (arch_state s) \<Longrightarrow>
vs_lookup (arch_state_update f s) = vs_lookup s"
apply (rule order_antisym)
apply (rule vs_lookup_sub)
apply (clarsimp simp: obj_at_def)
apply simp
apply (rule vs_lookup_sub)
apply (clarsimp simp: obj_at_def)
apply simp
done
lemma vs_lookup_pages_arch_update:
"arm_asid_table (f (arch_state s)) = arm_asid_table (arch_state s) \<Longrightarrow>
vs_lookup_pages (arch_state_update f s) = vs_lookup_pages s"
apply (rule order_antisym)
apply (rule vs_lookup_pages_sub)
apply (clarsimp simp: obj_at_def)
apply simp
apply (rule vs_lookup_pages_sub)
apply (clarsimp simp: obj_at_def)
apply simp
done
lemma vs_lookup_asid_map [iff]:
"vs_lookup (s\<lparr>arch_state := arm_asid_map_update f (arch_state s)\<rparr>) = vs_lookup s"
by (simp add: vs_lookup_arch_update)
lemma vs_lookup_hwasid_table [iff]:
"vs_lookup (s\<lparr>arch_state := arm_hwasid_table_update f (arch_state s)\<rparr>) = vs_lookup s"
by (simp add: vs_lookup_arch_update)
lemma vs_lookup_next_asid [iff]:
"vs_lookup (s\<lparr>arch_state := arm_next_asid_update f (arch_state s)\<rparr>) = vs_lookup s"
by (simp add: vs_lookup_arch_update)
lemma vs_lookup_pages_asid_map[iff]:
"vs_lookup_pages (s\<lparr>arch_state := arm_asid_map_update f (arch_state s)\<rparr>) =
vs_lookup_pages s"
by (simp add: vs_lookup_pages_arch_update)
lemma vs_lookup_pages_hwasid_table[iff]:
"vs_lookup_pages (s\<lparr>arch_state := arm_hwasid_table_update f (arch_state s)\<rparr>) =
vs_lookup_pages s"
by (simp add: vs_lookup_pages_arch_update)
lemma vs_lookup_pages_next_asid[iff]:
"vs_lookup_pages (s\<lparr>arch_state := arm_next_asid_update f (arch_state s)\<rparr>) =
vs_lookup_pages s"
by (simp add: vs_lookup_pages_arch_update)
lemma valid_vspace_objs_arch_update:
"arm_asid_table (f (arch_state s)) = arm_asid_table (arch_state s) \<Longrightarrow>
valid_vspace_objs (arch_state_update f s) = valid_vspace_objs s"
apply (rule iffI)
apply (erule valid_vspace_objs_stateI)
apply (clarsimp simp: obj_at_def)
apply simp
apply simp
apply (erule valid_vspace_objs_stateI)
apply (clarsimp simp: obj_at_def)
apply simp
apply simp
done
lemma store_pte_valid_vspace_objs[wp]:
"\<lbrace>valid_vspace_objs and valid_pte pte\<rbrace>
store_pte p pte
\<lbrace>\<lambda>_. (valid_vspace_objs)\<rbrace>"
unfolding store_pte_def
apply wp
apply clarsimp
apply (unfold valid_vspace_objs_def)
apply (erule_tac x="p && ~~ mask pt_bits" in allE)
apply auto
done
crunch valid_arch [wp]: store_pte valid_arch_state
lemma set_pd_vs_lookup_unmap:
"\<lbrace>valid_vs_lookup and
obj_at (\<lambda>ko. vs_refs_pages (ArchObj (PageDirectory pd)) \<subseteq> vs_refs_pages ko) p\<rbrace>
set_pd p pd
\<lbrace>\<lambda>_. valid_vs_lookup\<rbrace>"
apply (simp add: valid_vs_lookup_def pred_conj_def)
apply (rule hoare_lift_Pf2 [where f=caps_of_state])
prefer 2
apply wp
apply (simp add: set_pd_def set_object_def)
apply (wp get_object_wp)
apply (clarsimp simp del: fun_upd_apply del: disjCI
split: kernel_object.splits arch_kernel_obj.splits)
apply (erule allE)+
apply (erule impE)
apply (erule vs_lookup_pages_stateI)
apply (clarsimp simp: obj_at_def split: if_split_asm)
apply simp
apply simp
done
lemma unique_table_caps_pdE:
"\<lbrakk> unique_table_caps cs; cs p = Some cap; cap_asid cap = None;
cs p' = Some cap'; cap_asid cap' = Some v; is_pd_cap cap;
is_pd_cap cap'; obj_refs cap' = obj_refs cap \<rbrakk>
\<Longrightarrow> P"
apply (frule(6) unique_table_caps_pdD[where cs=cs])
apply simp
done
lemma set_pd_table_caps [wp]:
"\<lbrace>valid_table_caps and (\<lambda>s.
(obj_at (empty_table (set (second_level_tables (arch_state s)))) p s \<longrightarrow>
empty_table (set (second_level_tables (arch_state s))) (ArchObj (PageDirectory pd))) \<or>
(\<exists>slot cap. caps_of_state s slot = Some cap \<and> is_pd_cap cap \<and> p \<in> obj_refs cap \<and> cap_asid cap \<noteq> None) \<and>
valid_caps (caps_of_state s) s \<and>
unique_table_caps (caps_of_state s))\<rbrace>
set_pd p pd
\<lbrace>\<lambda>_. valid_table_caps\<rbrace>"
unfolding valid_table_caps_def
apply (simp add: pred_conj_def
del: split_paired_All split_paired_Ex imp_disjL)
apply (rule hoare_lift_Pf2 [where f=caps_of_state])
prefer 2
apply wp
apply (unfold set_pd_def set_object_def)
apply (wp get_object_wp)
apply (rule allI, intro impI)
apply (elim exE conjE)
apply (elim allEI)
apply (intro impI, simp)
apply (clarsimp simp: obj_at_def)
apply (erule disjE)
apply (erule(6) unique_table_caps_pdE)
apply (clarsimp simp: is_arch_cap_simps)
apply (simp add: valid_caps_def)
apply (erule_tac x=a in allE, erule allE, erule allE, erule (1) impE)
apply (clarsimp simp: is_arch_cap_simps valid_cap_def)
apply (clarsimp simp: obj_at_def)
done
lemma set_pd_global_objs[wp]:
"\<lbrace>valid_global_objs and valid_global_refs and
valid_arch_state and
(\<lambda>s. (obj_at (empty_table (set (second_level_tables (arch_state s)))) p s
\<longrightarrow> empty_table (set (second_level_tables (arch_state s)))
(ArchObj (PageDirectory pd)))
\<or> (\<exists>slot. cte_wp_at (\<lambda>cap. p \<in> obj_refs cap) slot s))\<rbrace>
set_pd p pd \<lbrace>\<lambda>rv. valid_global_objs\<rbrace>"
apply (simp add: set_pd_def second_level_tables_def)
apply (wpsimp wp: set_object_wp_strong)
apply (clarsimp simp add: valid_global_objs_def valid_vso_at_def
cte_wp_at_caps_of_state second_level_tables_def)
apply (intro conjI)
apply (clarsimp simp: obj_at_def
simp del: valid_vspace_obj.simps)
apply (intro conjI impI)
apply (clarsimp simp del: valid_vspace_obj.simps)
apply (erule disjE)
apply (drule(1) empty_table_is_valid)+
apply (rule valid_vspace_obj_same_type, (simp add: valid_vspace_obj_def)+)
apply (clarsimp simp: a_type_def)
apply (drule (1) valid_global_refsD2)
apply (simp add: cap_range_def global_refs_def)
apply (rule valid_vspace_obj_same_type, simp+)
apply (simp add: a_type_def)
apply (clarsimp simp: obj_at_def)
apply (drule (1) valid_global_refsD2)
apply (simp add: cap_range_def global_refs_def)
apply clarsimp
apply (clarsimp simp: obj_at_def
simp del: valid_vspace_obj.simps)
apply (drule(1) bspec, clarsimp simp: a_type_def)
done
lemma eq_ucast_word12[simp]:
"((ucast (x :: 12 word) :: word32) = ucast y) = (x = y)"
apply safe
apply (drule_tac f="ucast :: (word32 \<Rightarrow> 12 word)" in arg_cong)
apply (simp add: ucast_up_ucast_id is_up_def
source_size_def target_size_def word_size)
done
lemma set_pd_unmap_mappings:
"\<lbrace>valid_kernel_mappings and
obj_at (\<lambda>ko. vs_refs (ArchObj (PageDirectory pd)) \<subseteq> vs_refs ko) p
and obj_at (\<lambda>ko. \<exists>pd'. ko = ArchObj (PageDirectory pd')
\<and> (\<forall>x \<in> kernel_mapping_slots. pd x = pd' x)) p\<rbrace>
set_pd p pd
\<lbrace>\<lambda>rv. valid_kernel_mappings\<rbrace>"
apply (simp add: set_pd_def)
apply (wp set_object_v_ker_map get_object_wp)
apply (clarsimp simp: obj_at_def
split: kernel_object.split_asm
arch_kernel_obj.split_asm)
apply (simp add: vs_refs_def)
subgoal premises prems for s x r x3
apply (cases "x \<in> kernel_mapping_slots")
proof goal_cases
case False
with prems show ?thesis
apply -
apply (drule subsetD)
apply (rule image_eqI[rotated])
apply (rule pde_graph_ofI[rotated, rotated])
apply ((simp;fail)+)[4]
apply (clarsimp simp: valid_kernel_mappings_def
dest!: graph_ofD)
apply (drule bspec, erule ranI)
by (simp add: valid_kernel_mappings_if_pd_def)
next
case True
with prems show ?thesis
apply clarsimp
apply (bspec x)
apply (clarsimp simp: valid_kernel_mappings_def ran_def valid_kernel_mappings_if_pd_def)
apply (erule allE[where x="ArchObj (PageDirectory x3)"])
apply clarsimp
apply (erule impE)
apply (erule exI[where x=p])
apply (erule allE[where x=x], erule allE[where x=r])
by clarsimp+
qed
done
lemma set_pd_asid_map [wp]:
"\<lbrace>valid_asid_map\<rbrace> set_pd p pd \<lbrace>\<lambda>_. valid_asid_map\<rbrace>"
apply (simp add: set_pd_def set_object_def)
apply (wp get_object_wp)
apply (clarsimp simp: a_type_def simp del: fun_upd_apply
split: kernel_object.splits
arch_kernel_obj.splits)
apply (clarsimp simp: valid_asid_map_def)
apply (drule bspec, blast)
apply (clarsimp simp: vspace_at_asid_def obj_at_def)
apply (erule vs_lookupE)
apply (rule vs_lookupI, simp)
apply (clarsimp simp: vs_asid_refs_def dest!: graph_ofD)
apply (frule vs_lookup1_trans_is_append)
apply clarsimp
apply (drule rtranclD)
apply clarsimp
apply (drule tranclD)
apply clarsimp
apply (drule vs_lookup1D)
apply clarsimp
apply (rule rtrancl_trans)
apply (rule r_into_rtrancl)
apply (rule vs_lookup1I)
apply (clarsimp simp: obj_at_def)
apply (rule conjI, clarsimp)
prefer 2
apply clarsimp
apply (rule refl)
apply clarsimp
apply (clarsimp simp: vs_refs_def)
apply (drule vs_lookup1_trans_is_append)
apply clarsimp
apply assumption
apply (rule refl)
apply (frule vs_lookup1_trans_is_append, clarsimp)
apply (drule rtranclD)
apply (erule disjE, clarsimp)
apply clarsimp
apply (drule tranclD)
apply clarsimp
apply (drule vs_lookup1D)
apply clarsimp
apply (drule vs_lookup1_trans_is_append, clarsimp)
done
lemma set_pd_only_idle [wp]:
"\<lbrace>only_idle\<rbrace> set_pd p pd \<lbrace>\<lambda>_. only_idle\<rbrace>"
by (wp only_idle_lift)
lemma set_pd_equal_kernel_mappings_triv:
"\<lbrace>obj_at (\<lambda>ko. \<exists>pd'. ko = (ArchObj (PageDirectory pd'))
\<and> (\<forall>x \<in> kernel_mapping_slots. pd x = pd' x)) p
and equal_kernel_mappings\<rbrace>
set_pd p pd
\<lbrace>\<lambda>rv. equal_kernel_mappings\<rbrace>"
apply (simp add: set_pd_def)
apply (wp set_object_equal_mappings get_object_wp)
apply (clarsimp simp: obj_at_def)
apply (simp add: equal_kernel_mappings_def obj_at_def)
done
lemma set_pd_global_mappings[wp]:
"\<lbrace>\<lambda>s. valid_global_vspace_mappings s \<and> valid_global_objs s
\<and> p \<notin> global_refs s\<rbrace>
set_pd p pd
\<lbrace>\<lambda>rv. valid_global_vspace_mappings\<rbrace>"
apply (simp add: set_pd_def)
apply (wp set_object_global_vspace_mappings get_object_wp)
apply simp
done
lemma set_pd_kernel_window[wp]:
"\<lbrace>pspace_in_kernel_window\<rbrace> set_pd p pd \<lbrace>\<lambda>rv. pspace_in_kernel_window\<rbrace>"
apply (simp add: set_pd_def)
including unfold_objects
apply (wpsimp wp: set_object_pspace_in_kernel_window[THEN hoare_set_object_weaken_pre])
done
lemma set_pd_device_region[wp]:
"\<lbrace>pspace_respects_device_region\<rbrace> set_pd p pd \<lbrace>\<lambda>rv. pspace_respects_device_region\<rbrace>"
apply (simp add: set_pd_def)
including unfold_objects
apply (wpsimp wp: set_object_pspace_respects_device_region[THEN hoare_set_object_weaken_pre])
done
lemma set_pd_caps_kernel_window[wp]:
"\<lbrace>cap_refs_in_kernel_window\<rbrace> set_pd p pd \<lbrace>\<lambda>rv. cap_refs_in_kernel_window\<rbrace>"
apply (simp add: set_pd_def)
including unfold_objects
apply (wpsimp wp: set_object_cap_refs_in_kernel_window[THEN hoare_set_object_weaken_pre]
simp: a_type_def)
done
lemma set_pd_caps_respects_device_region[wp]:
"\<lbrace>cap_refs_respects_device_region\<rbrace> set_pd p pd \<lbrace>\<lambda>rv. cap_refs_respects_device_region\<rbrace>"
apply (simp add: set_pd_def)
including unfold_objects
apply (wpsimp wp: set_object_cap_refs_respects_device_region[THEN hoare_set_object_weaken_pre]
simp: a_type_def)
done
lemma set_pd_valid_ioc[wp]:
"\<lbrace>valid_ioc\<rbrace> set_pd p pt \<lbrace>\<lambda>_. valid_ioc\<rbrace>"
apply (simp add: set_pd_def)
including unfold_objects
apply (wpsimp wp: set_object_valid_ioc_no_caps[THEN hoare_set_object_weaken_pre]
simp: a_type_def is_tcb is_cap_table)
done
lemma set_pd_vms[wp]:
"\<lbrace>valid_machine_state\<rbrace> set_pd p pt \<lbrace>\<lambda>_. valid_machine_state\<rbrace>"
apply (simp add: set_pd_def)
apply (wp set_object_wp_strong)
apply clarify
apply (erule valid_machine_state_heap_updI)
apply (fastforce simp: a_type_def obj_at_def
split: Structures_A.kernel_object.splits arch_kernel_obj.splits)+
done
lemma vs_refs_pages_subset2:
"\<lbrakk>vs_refs_pages ko \<subseteq> vs_refs_pages ko';
(\<forall>ao. (ko = ArchObj ao) \<longrightarrow> valid_vspace_obj ao s);
(\<forall>ao'. (ko' = ArchObj ao') \<longrightarrow> valid_vspace_obj ao' s)\<rbrakk>
\<Longrightarrow> vs_refs ko \<subseteq> vs_refs ko'"
apply clarsimp
apply (drule (1) subsetD[OF _ subsetD[OF vs_refs_pages_subset]])
apply (case_tac ko; simp add: vs_refs_def)
subgoal for fstref b arch_kernel_obj
apply (cases arch_kernel_obj; simp add: vs_refs_def)
apply (cases ko'; simp add: vs_refs_pages_def)
subgoal for \<dots> arch_kernel_obja
by (cases arch_kernel_obja;clarsimp)
apply (cases ko'; simp add: vs_refs_pages_def)
subgoal for \<dots> arch_kernel_obja
apply (cases arch_kernel_obja; clarsimp)
apply (clarsimp simp: graph_of_def split: if_splits)
subgoal for "fun" a
apply (cut_tac
imageI[where
A="{(x, y). (if x \<in> kernel_mapping_slots then None else pde_ref (fun x)) = Some y}"
and f="(\<lambda>(r, y). (VSRef (ucast r) (Some APageDirectory), y))" and x="(a,b)"])
apply simp
apply (clarsimp simp: pde_ref_def pde_ref_pages_def
split: pde.splits)
apply (drule bspec,simp)+
apply (simp add: valid_pde_def)
apply (clarsimp simp: data_at_def obj_at_def a_type_def)
apply (drule bspec, simp split: if_splits)+
by (clarsimp simp: obj_at_def a_type_def data_at_def)
done
done
done
lemma set_pd_invs_unmap:
"\<lbrace>invs and (\<lambda>s. \<forall>i. wellformed_pde (pd i)) and
(\<lambda>s. (\<exists>\<rhd>p) s \<longrightarrow> valid_vspace_obj (PageDirectory pd) s) and
obj_at (\<lambda>ko. vs_refs_pages (ArchObj (PageDirectory pd)) \<subseteq> vs_refs_pages ko) p and
obj_at (\<lambda>ko. vs_refs (ArchObj (PageDirectory pd)) \<subseteq> vs_refs ko) p and
obj_at (\<lambda>ko. \<exists>pd'. ko = ArchObj (PageDirectory pd')
\<and> (\<forall>x \<in> kernel_mapping_slots. pd x = pd' x)) p and
(\<lambda>s. p \<notin> global_refs s) and
(\<lambda>s. (obj_at (empty_table (set (second_level_tables (arch_state s)))) p s \<longrightarrow>
empty_table (set (second_level_tables (arch_state s))) (ArchObj (PageDirectory pd))))\<rbrace>
set_pd p pd
\<lbrace>\<lambda>_. invs\<rbrace>"
apply (simp add: invs_def valid_state_def valid_pspace_def valid_arch_caps_def)
apply (rule hoare_pre)
apply (wp set_pd_valid_objs set_pd_iflive set_pd_zombies
set_pd_zombies_state_refs set_pd_valid_mdb
set_pd_valid_idle set_pd_ifunsafe set_pd_reply_caps
set_pd_valid_arch set_pd_valid_global set_pd_cur
set_pd_reply_masters valid_irq_node_typ set_pd_zombies_state_hyp_refs
set_pd_vspace_objs_unmap set_pd_vs_lookup_unmap
valid_irq_handlers_lift
set_pd_unmap_mappings set_pd_equal_kernel_mappings_triv)
apply (clarsimp simp: cte_wp_at_caps_of_state valid_arch_caps_def
del: disjCI)
done
lemma store_pde_invs_unmap:
"\<lbrace>invs and valid_pde pde and (\<lambda>s. wellformed_pde pde)
and K (ucast (p && mask pd_bits >> 2) \<notin> kernel_mapping_slots)
and (\<lambda>s. p && ~~ mask pd_bits \<notin> global_refs s)
and K (pde = InvalidPDE)\<rbrace>
store_pde p pde \<lbrace>\<lambda>_. invs\<rbrace>"
apply (simp add: store_pde_def del: split_paired_Ex)
apply (wp set_pd_invs_unmap)
apply (clarsimp simp del: split_paired_Ex del: exE)
apply (rule conjI)
apply (drule invs_valid_objs)
apply (fastforce simp: valid_objs_def dom_def obj_at_def valid_obj_def)
apply (rule conjI)
apply clarsimp
apply (drule (1) valid_vspace_objsD, fastforce)
apply simp
apply (rule conjI)
apply (clarsimp intro!: pair_imageI
simp: obj_at_def vs_refs_def vs_refs_pages_def map_conv_upd graph_of_def pde_ref_def pde_ref_pages_def
split: if_split_asm)+
apply (clarsimp simp: empty_table_def)
apply (cases pde, (auto simp: pde_ref_def valid_pde_mappings_def split:if_split_asm))
done
lemma store_pde_state_refs_of:
"\<lbrace>\<lambda>s. P (state_refs_of s)\<rbrace> store_pde ptr val \<lbrace>\<lambda>rv s. P (state_refs_of s)\<rbrace>"
apply (simp add: store_pde_def set_pd_def set_object_def)
apply (wp get_object_wp)
apply (clarsimp elim!: rsubst[where P=P] intro!: ext)
apply (clarsimp simp: state_refs_of_def obj_at_def)
done
lemma store_pde_state_hyp_refs_of:
"\<lbrace>\<lambda>s. P (state_hyp_refs_of s)\<rbrace> store_pde ptr val \<lbrace>\<lambda>rv s. P (state_hyp_refs_of s)\<rbrace>"
apply (simp add: store_pde_def set_pd_def set_object_def)
apply (wp get_object_wp)
apply (clarsimp elim!: rsubst[where P=P] intro!: ext)
apply (clarsimp simp: state_hyp_refs_of_def obj_at_def)
done
lemma valid_asid_map_next_asid [iff]:
"valid_asid_map (s\<lparr>arch_state := arm_next_asid_update f (arch_state s)\<rparr>) =
valid_asid_map s"
by (simp add: valid_asid_map_def vspace_at_asid_def)
lemma pspace_respects_device_region_dmo:
assumes valid_f: "\<And>P. \<lbrace>\<lambda>ms. P (device_state ms)\<rbrace> f \<lbrace>\<lambda>r ms. P (device_state ms)\<rbrace>"
shows "\<lbrace>pspace_respects_device_region\<rbrace>do_machine_op f\<lbrace>\<lambda>r. pspace_respects_device_region\<rbrace>"
apply (clarsimp simp: do_machine_op_def gets_def select_f_def simpler_modify_def bind_def valid_def
get_def return_def)
apply (drule_tac P1 = "(=) (device_state (machine_state s))" in use_valid[OF _ valid_f])
apply auto
done
lemma cap_refs_respects_device_region_dmo:
assumes valid_f: "\<And>P. \<lbrace>\<lambda>ms. P (device_state ms)\<rbrace> f \<lbrace>\<lambda>r ms. P (device_state ms)\<rbrace>"
shows "\<lbrace>cap_refs_respects_device_region\<rbrace>do_machine_op f\<lbrace>\<lambda>r. cap_refs_respects_device_region\<rbrace>"
apply (clarsimp simp: do_machine_op_def gets_def select_f_def simpler_modify_def bind_def valid_def
get_def return_def)
apply (drule_tac P1 = "(=) (device_state (machine_state s))" in use_valid[OF _ valid_f])
apply auto
done
lemma machine_op_lift_device_state[wp]:
"\<lbrace>\<lambda>ms. P (device_state ms)\<rbrace> machine_op_lift f \<lbrace>\<lambda>_ ms. P (device_state ms)\<rbrace>"
by (clarsimp simp: machine_op_lift_def NonDetMonadVCG.valid_def bind_def
machine_rest_lift_def gets_def simpler_modify_def get_def return_def
select_def ignore_failure_def select_f_def
split: if_splits)
crunches invalidateLocalTLB_ASID, invalidateLocalTLB_VAASID, setHardwareASID, isb, dsb,
set_current_pd, storeWord, cleanByVA_PoU, cleanL2Range
for device_state_inv[wp]: "\<lambda>ms. P (device_state ms)"
(simp: writeTTBR0_def
ignore_del: invalidateLocalTLB_ASID invalidateLocalTLB_VAASID setHardwareASID isb
dsb storeWord cleanByVA_PoU cleanL2Range)
lemma as_user_inv:
assumes x: "\<And>P. \<lbrace>P\<rbrace> f \<lbrace>\<lambda>x. P\<rbrace>"
shows "\<lbrace>P\<rbrace> as_user t f \<lbrace>\<lambda>x. P\<rbrace>"
proof -
have P: "\<And>a b input. (a, b) \<in> fst (f input) \<Longrightarrow> b = input"
by (rule use_valid [OF _ x], assumption, rule refl)
have Q: "\<And>s ps. ps (kheap s) = kheap s \<Longrightarrow> kheap_update ps s = s"
by simp
show ?thesis
apply (simp add: as_user_def gets_the_def assert_opt_def set_object_def get_object_def split_def)
apply wp
apply (clarsimp dest!: P)
apply (subst Q)
prefer 2
apply assumption
apply (rule ext)
apply (simp add: get_tcb_def)
apply (case_tac "kheap s t"; simp)
apply (case_tac a; simp)
apply (clarsimp simp: arch_tcb_context_set_def arch_tcb_context_get_def)
done
qed
crunches getRegister
for inv[wp]: P
(simp: getRegister_def)
lemmas user_getreg_inv[wp] = as_user_inv[OF getRegister_inv]
end
end
|
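# Context (added, hedged): revdep_check() rebuilds the packages that depend on
# this one against the local development version; the two calls below save and
# print a summary of any failures it found.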
devtools::revdep_check()
devtools::revdep_check_save_summary()
devtools::revdep_check_print_problems()
|
[STATEMENT]
lemma is_semialgebraicE:
assumes "is_semialgebraic n S"
shows "S \<in> semialg_sets n"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. S \<in> semialg_sets n
[PROOF STEP]
using assms is_semialgebraic_def
[PROOF STATE]
proof (prove)
using this:
is_semialgebraic n S
is_semialgebraic ?n ?S = (?S \<in> semialg_sets ?n)
goal (1 subgoal):
1. S \<in> semialg_sets n
[PROOF STEP]
by blast |
theory Chap2_2
imports Main
begin
datatype nat = Z | Suc nat
fun add :: "nat \<Rightarrow> nat \<Rightarrow> nat" where
"add Z n = n"
| "add (Suc m) n = Suc (add m n)"
lemma add_b: "add n Z = n"
apply (induction n)
apply (rule add.simps(1))
apply (subst add.simps(2))
apply (subst nat.inject)
apply assumption
done
lemma add_i:
assumes f1: "\<And>m. add m n = add n m"
shows "add m (Suc n) = add (Suc n) m"
apply (induction m)
apply (subst add.simps(1))
apply (rule add_b[symmetric])
apply (subst (1 2) add.simps(2))
apply (subst nat.inject)
apply (subst f1[symmetric])
apply (subst add.simps(2))
apply (subst f1)
apply (subst add.simps(2)[symmetric])
by assumption
lemma add_comm: "add m n = add n m"
apply (induction n arbitrary: m)
apply (subst add.simps(1))
apply (rule add_b)
apply (erule add_i)
done
value "1 + (2::Nat.nat)"
value "1 + (2::int)"
value "1 - (2::Nat.nat)"
value "1 - (2::int)"
fun double :: "nat \<Rightarrow> nat" where
"double Z = Z"
| "double (Suc n) = Suc (Suc (double n))"
lemma double_add: "double n = add n n"
apply (induction n)
apply simp+
apply (subst (2) add_comm)
by simp
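(* A quick sanity check (added): doubling 2 yields 4. *)
value "double (Suc (Suc Z))" (* Suc (Suc (Suc (Suc Z))) *)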
fun count :: "'a list \<Rightarrow> 'a \<Rightarrow> Nat.nat" where
"count [] y = 0"
| "count (x#xs) y = (if x = y then 1 else 0) + count xs y"
lemma count_leq_len: "count xs y \<le> length xs"
apply (induction xs)
by simp+
fun snoc :: "'a list \<Rightarrow> 'a \<Rightarrow> 'a list" where
"snoc [] y = [y]"
| "snoc (x#xs) y = x#(snoc xs y)"
fun rev :: "'a list \<Rightarrow> 'a list" where
"rev [] = []"
| "rev (x#xs) = snoc (rev xs) x"
lemma rev_snoc: "rev (snoc xs x) = x#(rev xs)"
apply (induction xs)
apply simp
apply (subst snoc.simps)
apply (subst rev.simps)
apply (rule_tac a="rev (snoc xs x)" and b="x#rev xs" in forw_subst)
apply assumption
apply (subst snoc.simps)
apply (subst rev.simps)
by (rule refl)
lemma rev_inv: "rev (rev xs) = xs"
apply (induction xs)
apply simp
by (simp add: rev_snoc)
fun sum_upto :: "Nat.nat \<Rightarrow> Nat.nat" where
"sum_upto 0 = 0"
| "sum_upto (Nat.Suc n) = n + 1 + sum_upto n"
lemma "sum_upto n = n * (n + 1) div 2"
apply (induction n)
apply simp
apply (subst sum_upto.simps)
apply (rule_tac a="sum_upto n" and b="n * (n+1) div 2" in forw_subst)
by simp+
end
|
Formal statement is: proposition homotopic_loops_imp_path: "homotopic_loops s p q \<Longrightarrow> path p \<and> path q" Informal statement is: If two loops are homotopic, then they are both paths. |
-- ------------------------------------------------------------ [ Literals.idr ]
||| Module : Literals.idr
||| Copyright : (c) Jan de Muijnck-Hughes
||| License : see LICENSE
|||
||| Deal with literal values at the type-level.
module Commons.Data.Literals
%default total
%access public export
||| Proof that the given value level `b` of type `ty` has value `a`.
data Literal : (ty : Type)
-> (a : ty)
-> Type
where
MkLiteral : (b : ty)
-> (prf : b = a)
-> Literal ty a
newLiteral : (b : ty) -> Literal ty b
newLiteral b = MkLiteral b Refl
||| Representation of String literals.
LitString : String -> Type
LitString str = Literal String str
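-- A small usage sketch (added; `exampleLit` is not part of the original
-- module): `newLiteral` provides the canonical inhabitant.
exampleLit : LitString "hello"
exampleLit = newLiteral "hello"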
||| Proof that the given natural `o` is the successor of `n`.
data Next : (n : Nat) -> Type where
MkNext : (o : Nat) -> (prf : o = S n) -> Next n
newNext : (o,n : Nat) -> (prf : o = S n) -> Next n
newNext o _ prf = MkNext o prf
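-- Likewise (added; illustrative only): a witness that 4 is the successor of 3.
exampleNext : Next 3
exampleNext = MkNext 4 Refl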
-- --------------------------------------------------------------------- [ EOF ]
|
import category_theory.abelian.ext
import for_mathlib.derived_functor
noncomputable theory
universe variables uᵣ v u
open category_theory opposite
namespace Ext
variables (R : Type uᵣ) [ring R] {C : Type u} [category.{v} C] [abelian C] [linear R C]
[enough_projectives C]
local notation `Ext` i `,` A `,` B := ((Ext R C i).obj (op A)).obj B
def δ (n : ℕ) (A : short_exact_sequence C) (B : C) :
(Ext n , A.1 , B) ⟶ (Ext (n+1) , A.3 , B) :=
let E := (((linear_yoneda R C).obj B).right_op.left_derived n),
E' := (((linear_yoneda R C).obj B).right_op.left_derived (n+1)) in
quiver.hom.unop (show E'.obj A.3 ⟶ E.obj A.1, from functor.left_derived.δ _ _ _)
lemma six_term_exact_seq (n : ℕ) (A : short_exact_sequence C) (B : C) :
exact_seq (Module.{v} R) [
((«Ext» R C n).map A.g.op).app B, ((«Ext» R C n).map A.f.op).app B,
δ R n A B,
((«Ext» R C (n+1)).map A.g.op).app B, ((«Ext» R C (n+1)).map A.f.op).app B
] :=
begin
apply exact_seq.of_op,
exact functor.left_derived.six_term_exact_seq _ n A,
end
end «Ext»
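-- For orientation (our reading of the lemma, not part of the source): for a
-- short exact sequence 0 ⟶ A₁ ⟶ A₂ ⟶ A₃ ⟶ 0 in C and any B : C, the sequence
--   Extⁿ(A₃,B) ⟶ Extⁿ(A₂,B) ⟶ Extⁿ(A₁,B) ⟶ Extⁿ⁺¹(A₃,B) ⟶ Extⁿ⁺¹(A₂,B) ⟶ Extⁿ⁺¹(A₁,B)
-- is exact, with the middle connecting map given by δ; Ext(−, B) is
-- contravariant, which is why A.g induces the map out of A₃.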
|
/*
// Copyright (c) 2000-2009, Texas Engineering Experiment Station (TEES), a
// component of the Texas A&M University System.
// All rights reserved.
// The information and source code contained herein is the exclusive
// property of TEES and may not be disclosed, examined or reproduced
// in whole or in part without explicit written authorization from TEES.
*/
#include <iostream>
#include <stapl/utility/tuple.hpp>
#include <stapl/domains/indexed.hpp>
#include <stapl/views/vector_view.hpp>
#include <stapl/containers/vector/vector.hpp>
#include <stapl/views/array_view.hpp>
#include <stapl/containers/array/array.hpp>
#include <stapl/views/map_view.hpp>
#include <stapl/containers/map/map.hpp>
#include <stapl/containers/unordered_multiset/unordered_multiset.hpp>
#include <stapl/skeletons/serial.hpp>
#include <stapl/algorithms/algorithm.hpp>
#include <stapl/algorithms/functional.hpp>
#include <stapl/runtime.hpp>
#include <stapl/stream.hpp>
#include <sstream>
#include <boost/bind.hpp>
#include <boost/program_options.hpp>
#include <boost/property_tree/ptree.hpp>
#include "json_parser.hpp"
using namespace std;
#include "testutil.hpp"
#include "rel_alpha_data.h"
// #define BIG_FILES // Uncomment when big and huge files are present
// #define TRIPLE_MAP_REDUCE // Uncomment when the bug is fixed
// Disable some tests by commenting the defines below:
// 2-lvl tests:
#define TEST_SET_2LVL_01
#define TEST_SET_2LVL_02
#define TEST_SET_2LVL_03
// #define TEST_SET_2LVL_04 // COMPILES BUT DOESN'T RUN : MAP BUG
#define TEST_SET_2LVL_05
#define TEST_SET_2LVL_06
// 3-lvl tests:
#define TEST_SET_3LVL_01
// #define TEST_SET_3LVL_02 // #ifdef TRIPLE_MAP_REDUCE
// #define TEST_SET_3LVL_03 // #ifdef TRIPLE_MAP_REDUCE
// #define TEST_SET_3LVL_04 // #ifdef TRIPLE_MAP_REDUCE
// #define TEST_SET_3LVL_05 // MAP BUG
// #define TEST_SET_3LVL_06 // MAP BUG
/*=========================================================================*/
#ifdef TEST_SET_2LVL_01
size_t nestpar_unordered_multiset_01(size_t, stapl::stream<ofstream>& );
#endif
#ifdef TEST_SET_2LVL_02
size_t nestpar_unordered_multiset_02(size_t, stapl::stream<ofstream>& );
#endif
#ifdef TEST_SET_2LVL_03
size_t nestpar_unordered_multiset_03(size_t, stapl::stream<ofstream>& );
#endif
#ifdef TEST_SET_2LVL_04
size_t nestpar_unordered_multiset_04(size_t, stapl::stream<ofstream>& );
#endif
#ifdef TEST_SET_2LVL_05
size_t nestpar_unordered_multiset_05(size_t, stapl::stream<ofstream>& );
#endif
#ifdef TEST_SET_2LVL_06
size_t nestpar_unordered_multiset_06(size_t,
stapl::stream<ifstream>&,
stapl::stream<ofstream>& );
#endif
#ifdef TEST_SET_3LVL_01
size_t nestpar_unordered_multiset_10(size_t, stapl::stream<ofstream>& );
#endif
#ifdef TEST_SET_3LVL_02
size_t nestpar_unordered_multiset_11(size_t, stapl::stream<ofstream>& );
#endif
#ifdef TEST_SET_3LVL_03
size_t nestpar_unordered_multiset_12(size_t, stapl::stream<ofstream>& );
#endif
#ifdef TEST_SET_3LVL_04
size_t nestpar_unordered_multiset_13(size_t, stapl::stream<ofstream>& );
#endif
#ifdef TEST_SET_3LVL_05
size_t nestpar_unordered_multiset_14(size_t, stapl::stream<ofstream>& );
#endif
#ifdef TEST_SET_3LVL_06
size_t nestpar_unordered_multiset_15(size_t, stapl::stream<ofstream>& );
#endif
/*=========================================================================*/
extern int prime100k[];
extern int rand100k[];
int fibo[20] = { 1, 2, 3, 5, 8,
13, 21, 34, 55, 89,
144, 233, 377, 610, 987,
1597, 2584, 4181, 6765, 10946 };
/*=========================================================================*/
typedef stapl::unordered_multiset<int> set_int_tp;
// -- 2-lvl set : --
typedef stapl::vector< set_int_tp > vec_set_int_tp;
typedef stapl::vector_view<vec_set_int_tp> vec_set_int_vw_tp;
typedef stapl::array< set_int_tp > ary_set_int_tp;
typedef stapl::array_view<ary_set_int_tp> ary_set_int_vw_tp;
typedef stapl::map< int, set_int_tp > map_set_int_tp;
typedef stapl::map_view<map_set_int_tp> map_set_int_vw_tp;
// 3 level :
// vec<vec<set>>
typedef stapl::vector<vec_set_int_tp> vec_vec_set_int_tp;
typedef stapl::vector_view<vec_vec_set_int_tp> vec_vec_set_int_vw_tp;
// ary<vec<set>>
typedef stapl::array<vec_set_int_tp> ary_vec_set_int_tp;
typedef stapl::array_view<ary_vec_set_int_tp> ary_vec_set_int_vw_tp;
// vec<ary<set>>
typedef stapl::vector<ary_set_int_tp> vec_ary_set_int_tp;
typedef stapl::vector_view<vec_ary_set_int_tp> vec_ary_set_int_vw_tp;
// vec<map<set>>
typedef stapl::vector<map_set_int_tp> vec_map_set_int_tp;
typedef stapl::vector_view<vec_map_set_int_tp> vec_map_set_int_vw_tp;
// map<ary<set>>
typedef stapl::map< int, ary_set_int_tp > map_ary_set_int_tp;
typedef stapl::map_view<map_ary_set_int_tp> map_ary_set_int_vw_tp;
// -----------------
typedef stapl::array<size_t> ary_sz_tp;
typedef stapl::array_view<ary_sz_tp> ary_sz_vw_tp;
typedef stapl::indexed_domain<int> ndx_dom_tp;
/*=========================================================================*/
typedef stapl::identity<int> id_int_wf;
typedef stapl::identity<size_t> id_un_wf;
typedef stapl::negate<int> neg_int_wf;
typedef stapl::plus<int> add_int_wf;
typedef stapl::minus<int> sub_int_wf;
typedef stapl::min<int> min_int_wf;
typedef stapl::max<int> max_int_wf;
typedef stapl::bit_xor<size_t> xor_un_wf;
typedef stapl::bit_or<size_t> ior_un_wf;
typedef stapl::bit_and<size_t> and_un_wf;
/*=========================================================================*/
void open_zin(int model, int test, stapl::stream<ifstream>& zin)
{
switch ( model ) {
case 1:
switch( test ) {
case 1:
zin.open("tiny_factors.zin");
break;
default:
zin.open("tiny_primes.zin");
break;
}
break;
case 100:
switch( test ) {
case 1:
zin.open("data/small_factors.zin");
break;
default:
zin.open("data/small_primes.zin");
break;
}
break;
case 10000:
switch( test ) {
case 1:
zin.open("data/medium_factors.zin");
break;
default:
zin.open("data/medium_primes.zin");
break;
}
break;
#ifdef BIG_FILES
case 1000000:
switch( test ) {
case 1:
zin.open("data/big_factors.zin");
break;
default:
zin.open("data/big_primes.zin");
break;
}
break;
case 100000000:
switch( test ) {
case 1:
zin.open("data/huge_factors.zin");
break;
default:
zin.open("data/huge_primes.zin");
break;
}
break;
#endif
}
}
struct put_val_wf
{
private:
// pointer to p_object is necessary to allow work function serialization.
stapl::stream<ofstream> m_zout;
public:
typedef void result_type;
put_val_wf(stapl::stream<ofstream> const& zout)
: m_zout(zout)
{ }
template <typename Ref>
void operator()(Ref val)
{
m_zout << val << " ";
}
void define_type(stapl::typer& t)
{
t.member(m_zout);
}
};
struct nestpar_cksum_wf
{
typedef size_t result_type;
template<typename Ref1>
size_t operator()(Ref1 v1)
{
return stapl::map_reduce(id_un_wf(), xor_un_wf(), v1);
}
};
struct map_set_cksum_wf
{
typedef int result_type;
template<typename Element>
int operator()(Element elem) const
{
return stapl::map_reduce(id_int_wf(), xor_un_wf(), elem.second);
}
};
struct map_outer_3lvl_cksum_wf
{
typedef int result_type;
template<typename Element>
int operator()(Element elem) const
{
return stapl::map_reduce(nestpar_cksum_wf(), xor_un_wf(), elem.second);
}
};
struct map_middle_3lvl_cksum_wf
{
typedef int result_type;
template<typename Element>
int operator()(Element elem) const
{
return stapl::map_reduce(map_set_cksum_wf(), xor_un_wf(), elem);
}
};
struct inner_set_show_wf
{
private:
// pointer to p_object is necessary to allow work function serialization.
stapl::stream<ofstream> m_zout;
public:
typedef void result_type;
inner_set_show_wf(stapl::stream<ofstream> const& zout)
: m_zout(zout)
{ }
template<typename Element>
void operator()(Element elem)
{
//FIXME, ss shouldn't be necessary
stringstream ss;
ss << "{" << elem << "}" << endl;
m_zout << ss.str();
}
void define_type(stapl::typer& t)
{
t.member(m_zout);
}
};
struct outer_map_show_inn_set_wf
{
private:
// pointer to p_object is necessary to allow work function serialization.
stapl::stream<ofstream> m_zout;
public:
typedef void result_type;
outer_map_show_inn_set_wf(stapl::stream<ofstream> const& zout)
: m_zout(zout)
{ }
template<typename Element>
void operator()(Element elem)
{
stapl::serial_io(inner_set_show_wf(m_zout), elem.second);
}
void define_type(stapl::typer& t)
{
t.member(m_zout);
}
};
struct roll_wf
{
typedef void result_type;
template <typename View1, typename View2>
void operator()(View1 length, View2 & limit) const
{
length = limit;
}
};
/*=========================================================================*/
int opt_test = -1;
char *opt_data = 0;
stapl::exit_code stapl_main(int argc, char **argv)
{
stapl::stream<ifstream> zin;
stapl::stream<ofstream> zout;
char *temp = 0;
for ( int argi = 1; argi < argc; ) {
char * opt = argv[argi++];
if ('-' == opt[0] ) {
switch ( opt[1] ) {
case 'h':
cerr << "Specify the data size with -d $X \n"
<< "With $X = "<< endl
<< " t for tiny" << endl
<< " s for small" << endl
<< " m for medium" << endl;
break;
case 'd':
opt_data = argv[argi++];
break;
}
} else {
cerr << "unknown command line argument " << opt << endl;
}
}
int model = -1;
if ( 0 == opt_data ) {
std::cerr << "usage: exe -d [t|s|m]\n";
exit(1);
}
switch ( opt_data[0] ) {
case 't':
model = 1;
break;
case 's':
model = 100;
break;
case 'm':
model = 10000;
break;
case 'b':
model = 1000000;
break;
case 'h':
model = 100000000;
break;
default:
cerr << "opt_data " << opt_data << endl;
break;
}
if (model == -1) {
std::cerr << "usage: exe -d [t|s|m]\n";
exit(1);
}
int first_test = 1;
int last_test = 36;
if (opt_test != -1 ) {
first_test = opt_test;
last_test = opt_test;
}
bool ok = true;
first_test = 1; last_test = 12; // run every currently-implemented test
if ( stapl::get_location_id() == 0 )
std::cout << "Nested Parallel Set " << endl;
for ( int test=first_test; test<=last_test; test++ ) {
if ( stapl::get_location_id() == 0 )
std::cerr << "Test #" << test << ": ";
size_t result = 0;
bool disabled = false;
switch ( test) {
case 1:
#ifdef TEST_SET_2LVL_01
result = nestpar_unordered_multiset_01(model, zout);
#else
disabled = true;
#endif
break;
case 2:
#ifdef TEST_SET_2LVL_02
result = nestpar_unordered_multiset_02(model, zout);
#else
disabled = true;
#endif
break;
case 3:
#ifdef TEST_SET_2LVL_03
result = nestpar_unordered_multiset_03(model, zout);
#else
disabled = true;
#endif
break;
case 4:
#ifdef TEST_SET_2LVL_04
result = nestpar_unordered_multiset_04(model, zout);
#else
disabled = true;
#endif
break;
case 5:
#ifdef TEST_SET_2LVL_05
result = nestpar_unordered_multiset_05(model, zout);
#else
disabled = true;
#endif
break;
case 6:
#ifdef TEST_SET_2LVL_06
result = nestpar_unordered_multiset_06(model, zin, zout);
#else
disabled = true;
#endif
break;
case 7:
#ifdef TEST_SET_3LVL_01
result = nestpar_unordered_multiset_10(model, zout);
#else
disabled = true;
#endif
break;
case 8:
#ifdef TEST_SET_3LVL_02
result = nestpar_unordered_multiset_11(model, zout);
#else
disabled = true;
#endif
break;
case 9:
#ifdef TEST_SET_3LVL_03
result = nestpar_unordered_multiset_12(model, zout);
#else
disabled = true;
#endif
break;
case 10:
#ifdef TEST_SET_3LVL_04
result = nestpar_unordered_multiset_13(model, zout);
#else
disabled = true;
#endif
break;
case 11:
#ifdef TEST_SET_3LVL_05
result = nestpar_unordered_multiset_14(model, zout);
#else
disabled = true;
#endif
break;
case 12:
#ifdef TEST_SET_3LVL_06
result = nestpar_unordered_multiset_15(model, zout);
#else
disabled = true;
#endif
break;
default:
std::cerr << endl
<< "-- test "
<< test
<< " not yet implemented --"
<< endl;
break;
}
bool passed;
passed = (result == 0);
if ( stapl::get_location_id() == 0) {
if (disabled)
std::cerr << "Disabled" << endl;
else
{
if (passed)
std::cerr << "[PASSED] " << endl;
else
std::cerr << "[FAILED] " << endl;
}
}
}
return EXIT_SUCCESS;
}
/*=========================================================================
* heterogeneous nested structures
*=========================================================================*/
// --------------
// ---- Fill ----
// --------------
struct insert_wf
{
typedef void result_type;
size_t m_size;
insert_wf(size_t sz)
: m_size(sz)
{ }
template <typename T,typename View>
void operator()(T i,View& vw)
{
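// Small models draw values straight from the random pool; larger models
// synthesize them as products of primes, folding the index into the
// 10000-entry prime table.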
if ( m_size <= 100000 )
vw.insert(rand_nums[i]);
else
{
size_t k = i % 10000;
vw.insert( prime_nums[k] * prime_nums[10000-k] );
}
}
void define_type(stapl::typer& t)
{
t.member(m_size);
}
};
struct nestpar_inner_set_fill_wf
{
typedef void result_type;
size_t m_size;
nestpar_inner_set_fill_wf(size_t sz)
: m_size(sz)
{ }
template <typename View1>
void operator()(View1 vw1) //FIXME: View1 & vw1 should work.
{
insert_wf insertwf(m_size);
stapl::map_func(insertwf,
stapl::counting_view<size_t>(m_size),
stapl::make_repeat_view(vw1));
}
void define_type(stapl::typer& t)
{
t.member(m_size);
}
};
struct nestpar_inner_set_fill_fromfile_wf
{
private:
size_t m_size;
// pointer to p_object is necessary to allow work function serialization.
stapl::stream<ifstream> m_zin;
public:
typedef void result_type;
nestpar_inner_set_fill_fromfile_wf(size_t sz,
stapl::stream<ifstream> const& zin)
: m_size(sz),m_zin(zin)
{ }
template <typename ViewOverSet>
void operator()(ViewOverSet vw_set)//FIXME: ViewOverSet & vw_set should work.
{
typename ViewOverSet::value_type t;
for ( size_t i = 0; i < m_size; i++ ) {
m_zin >> t;
vw_set.insert(t);
}
}
void define_type(stapl::typer& t)
{
t.member(m_size);
t.member(m_zin);
}
};
// --------------
// ---- Show ----
// --------------
struct nestpar_inner_set_show_wf
{
private:
// pointer to p_object is necessary to allow work function serialization.
stapl::stream<ofstream> m_zout;
public:
typedef void result_type;
nestpar_inner_set_show_wf(stapl::stream<ofstream> const& zout)
: m_zout(zout)
{ }
template <typename View1>
void operator()(View1 const &vw1)
{
stapl::serial_io(put_val_wf(m_zout), vw1);
}
void define_type(stapl::typer& t)
{
t.member(m_zout);
}
};
struct nestpar_outer_cksum_wf
{
typedef size_t result_type;
template<typename Ref1>
size_t operator()(Ref1 v1)
{
return stapl::map_reduce(nestpar_cksum_wf(), xor_un_wf(), v1);
}
};
// -------------
// -- Process --
// -------------
struct nestpar_size_process_wf
{
typedef long int result_type;
template <typename View1>
long int operator()(View1 const &vw1)
{
return vw1.size();
}
};
struct nestpar_map_reduce_process_wf
{
typedef long int result_type;
template <typename View1>
long int operator()(View1 const &vw1)
{
return stapl::map_reduce( id_int_wf(), add_int_wf(), vw1);
}
};
template<typename NestedContainer,
typename NestedContainerView,
typename FillWF = nestpar_inner_set_fill_wf >
class two_lvl_test_wrapper
{
public:
template <typename Filename,typename ProcessWF>
size_t run_test(size_t model,
Filename output_filename,
ProcessWF & proc_wf,
stapl::stream<ofstream>& zout)
{
size_t size = 100 * model;
size_t limit = 100 * model;
set_random_seed();
zout.open(output_filename);
stapl::counter<stapl::default_timer> ctr;
ctr.start();
NestedContainer a(size);
NestedContainerView a_vw(a);
stapl::map_func(FillWF(model*2), a_vw );
stapl::map_func(proc_wf, a_vw );
stapl::serial_io(nestpar_inner_set_show_wf(zout), a_vw );
ctr.stop();
double time1 = ctr.value();
zout.close();
size_t result = stapl::map_reduce(nestpar_cksum_wf(), xor_un_wf(), a_vw);
return result;
}
template <typename Filename,typename ProcessWF>
size_t run_test(size_t model,
Filename output_filename,
ProcessWF & proc_wf,
stapl::stream<ifstream>& zin,
stapl::stream<ofstream>& zout)
{
size_t size = 100 * model;
size_t limit = 100 * model;
set_random_seed();
zout.open(output_filename);
stapl::counter<stapl::default_timer> ctr;
ctr.start();
NestedContainer a(size);
NestedContainerView a_vw(a);
stapl::map_func(FillWF(model*2,zin), a_vw );
stapl::map_func(proc_wf, a_vw );
stapl::serial_io(nestpar_inner_set_show_wf(zout), a_vw );
ctr.stop();
double time1 = ctr.value();
zout.close();
size_t result = stapl::map_reduce(nestpar_cksum_wf(), xor_un_wf(), a_vw);
return result;
}
};
template<typename NestedContainer, typename NestedContainerView>
class two_lvl_test_wrapper_mapreduce
{
public:
template <typename Filename,typename ProcessWF>
size_t run_test(size_t model,
Filename output_filename,
ProcessWF & proc_wf,
stapl::stream<ofstream>& zout)
{
size_t size = 100 * model;
size_t limit = 100 * model;
set_random_seed();
zout.open(output_filename);
stapl::counter<stapl::default_timer> ctr;
ctr.start();
NestedContainer a(size);
NestedContainerView a_vw(a);
stapl::map_func(nestpar_inner_set_fill_wf(model*2), a_vw );
add_int_wf::result_type res=stapl::map_reduce( proc_wf, add_int_wf(), a_vw);
if (stapl::get_location_id() == 0)
{
//FIXME, ss shouldn't be necessary
stringstream ss;
ss << "map_reduce result (accumulate all elements from all sets):"
<< res << endl;
zout << ss.str();
}
stapl::serial_io(nestpar_inner_set_show_wf(zout), a_vw );
ctr.stop();
double time1 = ctr.value();
zout.close();
return stapl::map_reduce(nestpar_cksum_wf(), xor_un_wf(), a_vw);
}
};
// --------------------------
// - 3-level-deep functions -
// --------------------------
// --------------
// ---- Fill ----
// --------------
template <typename FillWF>
struct nestpar_outer_fill_wf
{
typedef void result_type;
size_t m_size;
nestpar_outer_fill_wf(size_t sz)
: m_size(sz)
{ }
template <typename View1>
void operator()(View1 const& vw1)
{
stapl::map_func(FillWF(m_size), vw1);
}
void define_type(stapl::typer& t)
{
t.member(m_size);
}
};
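// Fills the middle-level map keyed by entries of prime_nums; large models
// derive both keys and values from products of the prime/random tables.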
struct middle_insert_wf
{
typedef void result_type;
size_t m_size;
middle_insert_wf(size_t sz)
: m_size(sz)
{ }
template <typename T,typename View>
void operator()(T i,View& vw)
{
if ( m_size <= 100000 ) {
for ( size_t j = 0; j < m_size; j++ ) {
vw[ prime_nums[i] ].insert(rand_nums[j]);
}
} else {
for ( size_t j = 0; j < m_size; j+=10000 ) {
for ( size_t k = 0; k < 10000; k++ )
vw[ prime_nums[i%10000] * prime_nums[10000-(i%10000)] ].insert(
rand_nums[k] * rand_nums[10000-k]);
}
}
}
void define_type(stapl::typer& t)
{
t.member(m_size);
}
};
struct nestpar_middlemap_set_fill_wf
{
typedef void result_type;
size_t m_size;
nestpar_middlemap_set_fill_wf(size_t sz)
: m_size(sz)
{ }
template <typename View1>
void operator()(View1 const& map_set_vw1)
{
middle_insert_wf insertwf(m_size);
stapl::map_func(insertwf,
stapl::counting_view<size_t>(m_size),
stapl::make_repeat_view(map_set_vw1));
}
void define_type(stapl::typer& t)
{
t.member(m_size);
}
};
// --------------
// ---- Show ----
// --------------
struct nestpar_outer_show_wf
{
private:
// pointer to p_object is necessary to allow work function serialization.
stapl::stream<ofstream> m_zout;
public:
typedef void result_type;
nestpar_outer_show_wf(stapl::stream<ofstream> const& zout)
: m_zout(zout)
{ }
template <typename View1>
void operator()(View1 const& vw1)
{
stapl::serial_io(nestpar_inner_set_show_wf(m_zout), vw1);
}
void define_type(stapl::typer& t)
{
t.member(m_zout);
}
};
struct outer_map_3lvl_show_inn_set_wf
{
private:
// pointer to p_object is necessary to allow work function serialization.
stapl::stream<ofstream> m_zout;
public:
typedef void result_type;
outer_map_3lvl_show_inn_set_wf(stapl::stream<ofstream> const& zout)
: m_zout(zout)
{ }
template<typename Element>
void operator()(Element elem)
{
stapl::serial_io(nestpar_inner_set_show_wf(m_zout), elem.second);
}
void define_type(stapl::typer& t)
{
t.member(m_zout);
}
};
struct nestpar_middle_map_show_wf
{
private:
// pointer to p_object is necessary to allow work function serialization.
stapl::stream<ofstream> m_zout;
public:
typedef void result_type;
nestpar_middle_map_show_wf(stapl::stream<ofstream> const& zout)
: m_zout(zout)
{ }
template <typename View1>
void operator()(View1 const& vw1)
{
stapl::serial_io(outer_map_show_inn_set_wf(m_zout), vw1);
}
void define_type(stapl::typer& t)
{
t.member(m_zout);
}
};
// -------------
// -- Process --
// -------------
template <typename InnerProcWF>
struct nestpar_outer_process_wf
{
typedef void result_type;
InnerProcWF m_proc;
nestpar_outer_process_wf(InnerProcWF proc)
: m_proc(proc)
{ }
template <typename View1>
void operator()(View1 const& vw1)
{
stapl::map_func(m_proc, vw1);
}
void define_type(stapl::typer& t)
{
t.member(m_proc);
}
};
struct nestpar_outerMap_process_wf
{
typedef void result_type;
template <typename View1>
void operator()(View1 const& vw1)
{
stapl::map_func(nestpar_map_reduce_process_wf(), vw1.second);
}
};
struct nestpar_middle_map_reduce_process_wf
{
typedef int result_type;
template <typename View1>
int operator()(View1 const &vw1)
{
return stapl::map_reduce(nestpar_map_reduce_process_wf(),
add_int_wf(),
vw1);
}
};
struct nestpar_3lvl_cksum_wf
{
typedef size_t result_type;
template<typename Ref1>
size_t operator()(Ref1 v1)
{
return stapl::map_reduce(nestpar_outer_cksum_wf(), xor_un_wf(), v1);
}
};
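// Driver for the three-level tests: rolls a length array to size the middle
// level, fills and processes the nested container, prints with serial I/O,
// and returns the three-level checksum when TRIPLE_MAP_REDUCE is defined
// (otherwise 0).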
template<typename NestedContainer,
typename NestedContainerView,
typename FillWF = nestpar_inner_set_fill_wf>
class three_lvl_test_wrapper
{
public:
template <typename Filename,typename ProcessWF>
size_t run_test(size_t model,
Filename output_filename,
ProcessWF & proc_wf,
stapl::stream<ofstream>& zout)
{
size_t size = 100 * model;
size_t limit = 100 * model;
set_random_seed();
zout.open(output_filename);
stapl::counter<stapl::default_timer> ctr;
ctr.start();
//Array which specifies the size of the 2nd-lvl container:
ary_sz_tp len_1(model * 10);
ary_sz_vw_tp len_1_vw(len_1);
stapl::map_func(roll_wf(), len_1_vw, stapl::make_repeat_view(limit));
NestedContainer a(len_1_vw);
NestedContainerView a_vw(a);
stapl::map_func(nestpar_outer_fill_wf<FillWF>(model*2), a_vw );
stapl::map_func(nestpar_outer_process_wf<ProcessWF>(proc_wf), a_vw );
stapl::serial_io(nestpar_outer_show_wf(zout), a_vw);
ctr.stop();
double time1 = ctr.value();
zout.close();
#ifdef TRIPLE_MAP_REDUCE
size_t cksum = stapl::map_reduce(nestpar_3lvl_cksum_wf(),
xor_un_wf(),
a_vw);
return cksum;
#else
return 0;
#endif
}
};
template<typename NestedContainer,
typename NestedContainerView,
typename FillWF = nestpar_inner_set_fill_wf>
class three_lvl_test_wrapper_mapreduce
{
public:
template <typename Filename,typename ProcessWF>
size_t run_test(size_t model,
Filename output_filename,
ProcessWF & proc_wf,
stapl::stream<ofstream>& zout)
{
size_t size = 100 * model;
size_t limit = 100 * model;
set_random_seed();
zout.open(output_filename);
stapl::counter<stapl::default_timer> ctr;
ctr.start();
//Array which specifies the size of the 2nd-lvl container:
ary_sz_tp len_1(model * 10);
ary_sz_vw_tp len_1_vw(len_1);
stapl::map_func(roll_wf(), len_1_vw, stapl::make_repeat_view(limit));
NestedContainer a(len_1_vw);
NestedContainerView a_vw(a);
stapl::map_func(nestpar_outer_fill_wf<FillWF>(model*2), a_vw );
add_int_wf::result_type res = stapl::map_reduce(
proc_wf,
add_int_wf(),
a_vw);
stringstream ss;
ss << "map_reduce result (accumulate all elements from all sets):"
<< res << endl;
zout << ss.str();
stapl::serial_io(nestpar_outer_show_wf(zout), a_vw);
ctr.stop();
double time1 = ctr.value();
zout.close();
#ifdef TRIPLE_MAP_REDUCE
size_t cksum = stapl::map_reduce(nestpar_3lvl_cksum_wf(),
xor_un_wf(),
a_vw);
return cksum;
#else
return 0;
#endif
}
};
//////////////////////////////////////////////////////////////////////
// nested parallelism : vec(unordered_multiset(int))
// construct vector with generator, traverse with serial
//////////////////////////////////////////////////////////////////////
#ifdef TEST_SET_2LVL_01
size_t nestpar_unordered_multiset_01(size_t model,stapl::stream<ofstream>& zout)
{
two_lvl_test_wrapper <vec_set_int_tp, vec_set_int_vw_tp> test_case;
nestpar_size_process_wf proc;
return test_case.run_test(model, "np_unordered_multiset_01.zout", proc, zout);
}
#endif
//////////////////////////////////////////////////////////////////////
// nested parallelism : vec(unordered_multiset(int))
// construct vector with generator, traverse with mapreduce
//////////////////////////////////////////////////////////////////////
#ifdef TEST_SET_2LVL_02
size_t nestpar_unordered_multiset_02(size_t model,stapl::stream<ofstream>& zout)
{
two_lvl_test_wrapper <vec_set_int_tp, vec_set_int_vw_tp> test_case;
nestpar_map_reduce_process_wf proc;
return test_case.run_test(model, "np_unordered_multiset_02.zout", proc, zout);
}
#endif
//////////////////////////////////////////////////////////////////////
// nested parallelism : ary(unordered_multiset(int))
// construct array with generator, traverse with mapreduce
//////////////////////////////////////////////////////////////////////
#ifdef TEST_SET_2LVL_03
size_t nestpar_unordered_multiset_03(size_t model,stapl::stream<ofstream>& zout)
{
two_lvl_test_wrapper <ary_set_int_tp, ary_set_int_vw_tp> test_case;
nestpar_map_reduce_process_wf proc;
return test_case.run_test(model, "np_unordered_multiset_03.zout", proc, zout);
}
#endif
//////////////////////////////////////////////////////////////////////
// nested parallelism : map(unordered_multiset(int))
// construct map data from C arrays, traverse with mapreduce
//////////////////////////////////////////////////////////////////////
#ifdef TEST_SET_2LVL_04
struct nestpar_set_04_process_wf
{
typedef int result_type;
template <typename Element>
int operator()(Element elem)
{
return stapl::map_reduce( id_int_wf(), add_int_wf(), elem.second);
}
};
size_t nestpar_unordered_multiset_04(size_t model,stapl::stream<ofstream>& zout)
{
size_t size = 100 * model;
size_t limit = 100 * model;
set_random_seed();
zout.open("np_unordered_multiset_04.zout");
stapl::counter<stapl::default_timer> ctr;
ctr.start();
ary_sz_tp len(size);
ary_sz_vw_tp len_vw(len);
stapl::map_func(roll_wf(), len_vw, stapl::make_repeat_view(limit));
ndx_dom_tp map_dom(0, 100000);
map_set_int_tp a(map_dom);
map_set_int_vw_tp a_vw(a);
if ( stapl::get_location_id() == 0 ) {
if ( size <= 100000 ) {
for ( size_t i = 0; i < size; i++ ) {
for ( size_t j = 0; j < limit; j++ ) {
a[ prime_nums[i] ].insert(rand_nums[j]);
}
}
} else {
for ( size_t i = 0; i < size; i += 10000 ) {
for ( size_t k = 0; k < 10000; k++ ) {
for ( size_t jj = 0; jj < limit; jj += 10000 )
for ( size_t j = 0; j < 10000; j++ ) {
a[ prime_nums[k] * prime_nums[10000-k] ].insert(
rand_nums[j] * rand_nums[10000-j]);
}
}
}
}
}
stapl::rmi_fence();
stapl::map_func(nestpar_set_04_process_wf(), a_vw );
stapl::serial_io(outer_map_show_inn_set_wf(zout), a_vw );
ctr.stop();
double time1 = ctr.value();
zout.close();
return stapl::map_reduce(map_set_cksum_wf(), xor_un_wf(), a_vw);
}
#endif
//////////////////////////////////////////////////////////////////////
// nested parallelism : vec(unordered_multiset(int))
// construct vector with generator, process with stapl algorithm
// apply unary function in data parallel manner
//////////////////////////////////////////////////////////////////////
#ifdef TEST_SET_2LVL_05
struct nestpar_set_05_process_wf
{
typedef int result_type;
template <typename View1>
int operator()(View1 const &vw1)
{
return stapl::accumulate(vw1, -2000000);
}
};
size_t nestpar_unordered_multiset_05(size_t model,stapl::stream<ofstream>& zout)
{
two_lvl_test_wrapper_mapreduce <vec_set_int_tp, vec_set_int_vw_tp> test_case;
nestpar_set_05_process_wf proc;
return test_case.run_test(model, "np_unordered_multiset_05.zout", proc, zout);
}
#endif
#ifdef TEST_SET_2LVL_06
//////////////////////////////////////////////////////////////////////
// nested parallelism : vec(unordered_multiset(int))
// construct vector with serial I/O,
// process with map_reduce / work function, display with serial
//////////////////////////////////////////////////////////////////////
size_t nestpar_unordered_multiset_06(size_t model, stapl::stream<ifstream>& zin,
stapl::stream<ofstream>& zout)
{
open_zin(model,2,zin);
two_lvl_test_wrapper <vec_set_int_tp,
vec_set_int_vw_tp,
nestpar_inner_set_fill_fromfile_wf> test_case;
nestpar_map_reduce_process_wf proc;
return test_case.run_test(
model, "np_unordered_multiset_06.zout", proc, zin, zout);
}
#endif
//////////////////////////////////////////////////////////////////////
// nested parallelism : vec(vec(unordered_multiset(int)))
// construct nested vector with generator,
// process with mapreduce / work function, display with serial
//////////////////////////////////////////////////////////////////////
#ifdef TEST_SET_3LVL_01
size_t nestpar_unordered_multiset_10(size_t model,stapl::stream<ofstream>& zout)
{
three_lvl_test_wrapper <vec_vec_set_int_tp, vec_vec_set_int_vw_tp> test_case;
nestpar_size_process_wf proc;
return test_case.run_test(model, "np_unordered_multiset_10.zout", proc, zout);
}
#endif
//////////////////////////////////////////////////////////////////////
// nested parallelism : vec(vec(unordered_multiset(int)))
// construct nested vector with generator,
// process with stapl algorithm, display with serial
//////////////////////////////////////////////////////////////////////
#ifdef TEST_SET_3LVL_02
struct nestpar_set_11_process_wf
{
typedef long int result_type;
template <typename View1>
long int operator()(View1 const &vw1)
{
return stapl::accumulate(vw1, -2000000);
}
};
struct nestpar_set_11_middle_process_wf
{
typedef long int result_type;
template <typename View1>
long int operator()(View1 const &vw1)
{
return stapl::map_reduce(nestpar_set_11_process_wf(),
add_int_wf(),
vw1);
}
};
size_t nestpar_unordered_multiset_11(size_t model,stapl::stream<ofstream>& zout)
{
three_lvl_test_wrapper_mapreduce <vec_vec_set_int_tp,
vec_vec_set_int_vw_tp> test_case;
nestpar_set_11_middle_process_wf proc;
return test_case.run_test(model, "np_unordered_multiset_11.zout", proc, zout);
}
#endif
//////////////////////////////////////////////////////////////////////
// nested parallelism of various containers
// construct containers with generator,
// process with map_reduce, display with serial
//////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////
// vec<ary<unordered_multiset <int> > >
//////////////////////////////////////////////////////////////////////
#ifdef TEST_SET_3LVL_03
size_t nestpar_unordered_multiset_12(size_t model,stapl::stream<ofstream>& zout)
{
three_lvl_test_wrapper <vec_ary_set_int_tp, vec_ary_set_int_vw_tp> test_case;
nestpar_map_reduce_process_wf proc;
return test_case.run_test(model, "np_unordered_multiset_12.zout", proc, zout);
}
#endif
//////////////////////////////////////////////////////////////////////
// ary<vec<unordered_multiset <int> > >
//////////////////////////////////////////////////////////////////////
#ifdef TEST_SET_3LVL_04
size_t nestpar_unordered_multiset_13(size_t model,stapl::stream<ofstream>& zout)
{
three_lvl_test_wrapper <ary_vec_set_int_tp, ary_vec_set_int_vw_tp> test_case;
nestpar_map_reduce_process_wf proc;
return test_case.run_test(model, "np_unordered_multiset_13.zout", proc, zout);
}
#endif
//////////////////////////////////////////////////////////////////////
// vec<map<unordered_multiset <int> > >
//////////////////////////////////////////////////////////////////////
#ifdef TEST_SET_3LVL_05
struct nestpar_set_14_process_wf
{
typedef int result_type;
template <typename Element>
int operator()(Element elem)
{
return stapl::map_reduce( id_int_wf(), add_int_wf(), elem.second);
}
};
size_t nestpar_unordered_multiset_14(size_t model,stapl::stream<ofstream>& zout)
{
size_t size = 100 * model;
size_t limit = 100 * model;
set_random_seed();
zout.open("np_unordered_multiset_14.zout");
stapl::counter<stapl::default_timer> ctr;
ctr.start();
vec_map_set_int_tp a(model * 10);
vec_map_set_int_vw_tp a_vw(a);
stapl::map_func(nestpar_middlemap_set_fill_wf(model*2), a_vw );
nestpar_set_14_process_wf proc_wf;
stapl::map_func(nestpar_outer_process_wf<nestpar_set_14_process_wf>(proc_wf),
a_vw );
stapl::serial_io(nestpar_middle_map_show_wf(zout), a_vw);
ctr.stop();
double time1 = ctr.value();
zout.close();
return stapl::map_reduce(map_middle_3lvl_cksum_wf(), xor_un_wf(), a_vw);
}
#endif
//////////////////////////////////////////////////////////////////////
// map<ary<unordered_multiset <int> > >
//////////////////////////////////////////////////////////////////////
#ifdef TEST_SET_3LVL_06
size_t nestpar_unordered_multiset_15(size_t model,stapl::stream<ofstream>& zout)
{
size_t size = 100 * model;
size_t limit = 100 * model;
set_random_seed();
zout.open("np_unordered_multiset_15.zout");
stapl::counter<stapl::default_timer> ctr;
ctr.start();
ndx_dom_tp map_dom(0, 16277216);
map_ary_set_int_tp a(map_dom);
map_ary_set_int_vw_tp a_vw(a);
ary_sz_tp len(size);
ary_sz_vw_tp len_vw(len);
stapl::map_func(roll_wf(), len_vw, stapl::make_repeat_view(limit));
if ( stapl::get_location_id() == 0 ) {
if ( size <= 100000 ) {
for ( size_t i = 0; i < size; i++ ) {
a[ prime_nums[i]].resize(len_vw[i]);
}
for ( size_t i = 0; i < size; i++ ) {
for ( size_t j = 0; j < len_vw[i]; j++ ) {
int val = rand_nums[j];
a[ prime_nums[i] ][ val ].insert(rand_nums[j]);
}
}
} else {
for ( size_t i = 0; i < size; i += 10000 ) {
a[ prime_nums[i] ].resize(len_vw[i]);
}
for ( size_t i = 0; i < size; i += 10000 ) {
for ( size_t j = 0; j < len_vw[i]; j++ ) {
for ( size_t k = 0; k < 10000; k++ ) {
int val = rand_nums[j];
a[ prime_nums[k] * prime_nums[10000-k] ][ val ].insert(
rand_nums[j]);
}
}
}
}
}
stapl::rmi_fence();
stapl::map_func(nestpar_outerMap_process_wf(), a_vw );
stapl::serial_io(outer_map_3lvl_show_inn_set_wf(zout), a_vw);
ctr.stop();
double time1 = ctr.value();
zout.close();
return stapl::map_reduce(map_outer_3lvl_cksum_wf(), xor_un_wf(), a_vw);
}
#endif
|
using Statistics
using LinearAlgebra
using Distributions
using ToeplitzMatrices # assumed provider of Toeplitz used below; drop if an included file defines it
include(string(pwd(),"/src/gensys.jl"))
include(string(pwd(),"/hmc/diffs_estrut_v3.jl"))
include(string(pwd(),"/misc/matrix_no_kalman.jl"))
# See the Schmitt-Grohé paper on evaluating likelihoods without the Kalman filter to get the notation
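# dSx differentiates the state covariance Sx that solves the Lyapunov equation
# Sx = h*Sx*h' + Q. Vectorizing gives (I - kron(h,h))*vec(Sx) = vec(Q), and the
# differential is (I - kron(h,h))*dvec(Sx) =
#   (kron(h*Sx,I) + kron(I,h*Sx)*Kmm)*dvec(h) + D*dvech(Q).
# The signs and transposes in the code below follow the Jacobian conventions
# returned by diff_mod.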
function dSx(h,Sx,dh,dQ)
m = size(h,1)
Kmm = commutation_matrix(m,m)
D = duplication_matrix(m)
b1 = (I(m^2) - kron(h,h))
b2 = kron(h*Sx,I(m)) + kron(I(m),h*Sx)*Kmm
res = -inv(b1)*(b2*dh' - D*dQ')
return res
end
function kron_and_sum(M,k)
res = zeros(size(M).^2)
for j in 1:k
term = kron(M^(k-j),(M')^j)
res += term
end
return res
end
function dSy(g,h,Sx,dSx,dh,j) #j is the power of h
m = size(h,1)
Kmm = commutation_matrix(m,m)
h_pow = h^j
ks = kron_and_sum(h,j)
b1 = kron(g,g*h*Sx)*ks
b2 = kron(g*h_pow*Sx,g)
b3 = kron(g*h_pow,g*h)
res = (b1*Kmm+b2)*dh' + b3*dSx
return res
end
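# diff_ll is the derivative of the Gaussian log-likelihood with respect to the
# parameters, via the chain rule through vec(P):
#   d(-log|P|/2 - y'*inv(P)*y/2) =
#     (-vec(inv(P))'/2 + kron(y'*inv(P), y'*inv(P))/2) * dvec(P)
# where dP below stacks dvec(P)/dtheta.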
function diff_ll(P,y,dP)
P_inv = inv(P)
return -1/2*vec(P_inv)'*dP + 1/2*kron(y'*P_inv,y'*P_inv)*dP
end
function log_like_dsge(par,data;kalman_tol = 1e-10)
#order of entries in par:
#alfa
#beta
#epsilon
#theta
#sig
#sigma: this is the std dev of the innovation
#phi
#phi_pi
#phi_y
#rho_v
#data has dimension t x p: rows are periods, columns are variables
alfa = par[1]
bet = par[2]
epsilon = par[3]
theta = par[4]
sig = par[5]
#par[6] is used below (sigma, the innovation std dev)
phi = par[7]
phi_pi = par[8]
phi_y = par[9]
rho_v = par[10]
THETA = (1-alfa)/(1-alfa+alfa*epsilon)
lamb = (1-theta)*(1-bet*theta)/theta*THETA
kappa = lamb*(sig+(phi+alfa)/(1-alfa))
nobs = size(data,1)
l = size(data,2)
GAMMA_0 = [bet 0 0 0;
1 sig 0 0;
0 0 0 0;
0 0 0 1]
GAMMA_1 = [ 1 -kappa 0 0;
0 sig 1 0;
-phi_pi -phi_y 1 -1;
0 0 0 rho_v]
PSI = [0; 0; 0; 1]
PI = [bet 0;
1 sig;
0 0;
0 0]
p = size(GAMMA_1,1) #number of endogenous vars
sol = gensys(GAMMA_0,GAMMA_1,PSI,PI; verbose = false)
if sum(sol.eu) != 2
return -Inf, repeat([0],length(par))
end
#Sig = zeros(p,p)
#Sig[4,4] = par[6]
G = zeros(1,p)
G[1,2] = 1
A = sol.Theta1
R = [0] .+ 1e-8
Q = par[6]^2*sol.Theta2*sol.Theta2'
dA,dB = diff_mod(par)
#dB = dB[1:4,:]
dA = dA'*I
dG = zeros(10,4)
dQ = (kron(sol.Theta2,I(p)) + kron(I(p),sol.Theta2)*commutation_matrix(4,1))*dB
dQ = pinv(duplication_matrix(4))*dQ
dQ = dQ'
dR = zeros(size(par,1),1)
Sx = solve_lyapunov_vec(A,Q)
S = build_variance(G,A,Q,nobs)
S = copy(S)
dSx_mat = dSx(A,Q,dA,dQ)
dSy_foo(j) = dSy(G,A,Sx,dSx_mat,dA,j)
d_vecS = map(dSy_foo,1:(nobs))
sel_mat = Toeplitz(1:nobs,1:nobs)
sel_mat = Int.(sel_mat)
dS_mat = d_vecS[sel_mat]
dS_mat = vec(dS_mat)
dS_mat = mapreduce(x->dS_mat[x],vcat,1:nobs^2)
dll = diff_ll(S,data,dS_mat)
dist = MvNormal(S)
llh = logpdf(dist,data)
return llh,dll
end
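# Minimal usage sketch (illustrative only, kept commented out so the file stays
# a pure function library). It assumes the included files provide gensys,
# diff_mod, commutation_matrix, duplication_matrix, solve_lyapunov_vec and
# build_variance; the parameter values are placeholders, not a calibration.
# par = [0.33, 0.99, 6.0, 0.66, 1.0, 0.01, 1.0, 1.5, 0.125, 0.5]
# data = randn(200)                 # one observable over 200 periods
# llh, dll = log_like_dsge(par, data)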
|
module Flexidisc.Transformation.TransHeader
import Flexidisc.Dec.IsYes
import Flexidisc.Header.Type
import Flexidisc.OrdList
import Flexidisc.Transformation.Type
%default total
%access public export
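-- TransHeader wraps an OrdList of (label, MapValue) pairs, so that (::)
-- below inserts in key order instead of consing at the front.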
data TransHeader : (k : Type) -> Type where
T : (o : Ord k) => OrdList k o MapValue -> TransHeader k
Nil : Ord k => TransHeader k
Nil = T []
(::) : (k, MapValue) -> TransHeader k -> TransHeader k
(::) x (T h) = T (insert x h)
IsFresh : (DecEq label) => (l : label) -> (xs : TransHeader label) -> Type
IsFresh l (T xs) = IsYes (decFresh l xs)
toLabels : TransHeader k -> List k
toLabels (T xs) = toLabels xs
toSource : TransHeader k -> Header k
toSource (T xs) = H (toSource xs)
toTarget : TransHeader k -> Header k
toTarget (T xs) = H (toTarget xs)
|
The corn crake breeds from Britain and Ireland east through Europe to central Siberia. Although it has vanished from much of its historic range, this bird was once found in suitable habitats in Eurasia everywhere between latitudes 41° N and 62° N. There is also a sizable population in western China, but this species nests only rarely in northern Spain and in Turkey. Old claims of breeding in South Africa are incorrect, and result from misidentification of eggs in a museum collection which are actually those of the African rail.
|
[STATEMENT]
lemma partn_lst_less:
assumes M: "partn_lst r B \<alpha> n" and eq: "length \<alpha>' = length \<alpha>" and "List.set \<alpha>' \<subseteq> ON"
and le: "\<And>i. i < length \<alpha> \<Longrightarrow> \<alpha>'!i \<le> \<alpha>!i "
and r: "wf r" "trans r" "total_on B r" and "small B"
shows "partn_lst r B \<alpha>' n"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Partitions.partn_lst r B \<alpha>' n
[PROOF STEP]
proof (clarsimp simp: partn_lst_def)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>f. f \<in> [B]\<^bsup>n\<^esup> \<rightarrow> {..<length \<alpha>'} \<Longrightarrow> \<exists>i<length \<alpha>'. \<exists>H\<subseteq>B. ordertype H r = \<alpha>' ! i \<and> f ` [H]\<^bsup>n\<^esup> \<subseteq> {i}
[PROOF STEP]
fix f
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>f. f \<in> [B]\<^bsup>n\<^esup> \<rightarrow> {..<length \<alpha>'} \<Longrightarrow> \<exists>i<length \<alpha>'. \<exists>H\<subseteq>B. ordertype H r = \<alpha>' ! i \<and> f ` [H]\<^bsup>n\<^esup> \<subseteq> {i}
[PROOF STEP]
assume "f \<in> [B]\<^bsup>n\<^esup> \<rightarrow> {..<length \<alpha>'}"
[PROOF STATE]
proof (state)
this:
f \<in> [B]\<^bsup>n\<^esup> \<rightarrow> {..<length \<alpha>'}
goal (1 subgoal):
1. \<And>f. f \<in> [B]\<^bsup>n\<^esup> \<rightarrow> {..<length \<alpha>'} \<Longrightarrow> \<exists>i<length \<alpha>'. \<exists>H\<subseteq>B. ordertype H r = \<alpha>' ! i \<and> f ` [H]\<^bsup>n\<^esup> \<subseteq> {i}
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
f \<in> [B]\<^bsup>n\<^esup> \<rightarrow> {..<length \<alpha>'}
[PROOF STEP]
obtain i H where "i < length \<alpha>"
and "H \<subseteq> B" "small H" and H: "ordertype H r = (\<alpha>!i)"
and fi: "f ` nsets H n \<subseteq> {i}"
[PROOF STATE]
proof (prove)
using this:
f \<in> [B]\<^bsup>n\<^esup> \<rightarrow> {..<length \<alpha>'}
goal (1 subgoal):
1. (\<And>i H. \<lbrakk>i < length \<alpha>; H \<subseteq> B; small H; ordertype H r = \<alpha> ! i; f ` [H]\<^bsup>n\<^esup> \<subseteq> {i}\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
f \<in> [B]\<^bsup>n\<^esup> \<rightarrow> {..<length \<alpha>'}
Partitions.partn_lst r B \<alpha> n
length \<alpha>' = length \<alpha>
list.set \<alpha>' \<subseteq> ON
?i < length \<alpha> \<Longrightarrow> \<alpha>' ! ?i \<le> \<alpha> ! ?i
wf r
trans r
total_on B r
small B
goal (1 subgoal):
1. (\<And>i H. \<lbrakk>i < length \<alpha>; H \<subseteq> B; small H; ordertype H r = \<alpha> ! i; f ` [H]\<^bsup>n\<^esup> \<subseteq> {i}\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by (auto simp: partn_lst_def smaller_than_small)
[PROOF STATE]
proof (state)
this:
i < length \<alpha>
H \<subseteq> B
small H
ordertype H r = \<alpha> ! i
f ` [H]\<^bsup>n\<^esup> \<subseteq> {i}
goal (1 subgoal):
1. \<And>f. f \<in> [B]\<^bsup>n\<^esup> \<rightarrow> {..<length \<alpha>'} \<Longrightarrow> \<exists>i<length \<alpha>'. \<exists>H\<subseteq>B. ordertype H r = \<alpha>' ! i \<and> f ` [H]\<^bsup>n\<^esup> \<subseteq> {i}
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
i < length \<alpha>
H \<subseteq> B
small H
ordertype H r = \<alpha> ! i
f ` [H]\<^bsup>n\<^esup> \<subseteq> {i}
[PROOF STEP]
have bij: "bij_betw (ordermap H r) H (elts (\<alpha>!i))"
[PROOF STATE]
proof (prove)
using this:
i < length \<alpha>
H \<subseteq> B
small H
ordertype H r = \<alpha> ! i
f ` [H]\<^bsup>n\<^esup> \<subseteq> {i}
goal (1 subgoal):
1. bij_betw (ordermap H r) H (elts (\<alpha> ! i))
[PROOF STEP]
using ordermap_bij [of r H]
[PROOF STATE]
proof (prove)
using this:
i < length \<alpha>
H \<subseteq> B
small H
ordertype H r = \<alpha> ! i
f ` [H]\<^bsup>n\<^esup> \<subseteq> {i}
\<lbrakk>wf r; total_on H r; small H\<rbrakk> \<Longrightarrow> bij_betw (ordermap H r) H (elts (ordertype H r))
goal (1 subgoal):
1. bij_betw (ordermap H r) H (elts (\<alpha> ! i))
[PROOF STEP]
by (smt assms(8) in_mono r(1) r(3) smaller_than_small total_on_def)
[PROOF STATE]
proof (state)
this:
bij_betw (ordermap H r) H (elts (\<alpha> ! i))
goal (1 subgoal):
1. \<And>f. f \<in> [B]\<^bsup>n\<^esup> \<rightarrow> {..<length \<alpha>'} \<Longrightarrow> \<exists>i<length \<alpha>'. \<exists>H\<subseteq>B. ordertype H r = \<alpha>' ! i \<and> f ` [H]\<^bsup>n\<^esup> \<subseteq> {i}
[PROOF STEP]
define H' where "H' = inv_into H (ordermap H r) ` (elts (\<alpha>'!i))"
[PROOF STATE]
proof (state)
this:
H' = inv_into H (ordermap H r) ` elts (\<alpha>' ! i)
goal (1 subgoal):
1. \<And>f. f \<in> [B]\<^bsup>n\<^esup> \<rightarrow> {..<length \<alpha>'} \<Longrightarrow> \<exists>i<length \<alpha>'. \<exists>H\<subseteq>B. ordertype H r = \<alpha>' ! i \<and> f ` [H]\<^bsup>n\<^esup> \<subseteq> {i}
[PROOF STEP]
have "H' \<subseteq> H"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. H' \<subseteq> H
[PROOF STEP]
using bij \<open>i < length \<alpha>\<close> bij_betw_imp_surj_on le
[PROOF STATE]
proof (prove)
using this:
bij_betw (ordermap H r) H (elts (\<alpha> ! i))
i < length \<alpha>
bij_betw ?f ?A ?B \<Longrightarrow> ?f ` ?A = ?B
?i < length \<alpha> \<Longrightarrow> \<alpha>' ! ?i \<le> \<alpha> ! ?i
goal (1 subgoal):
1. H' \<subseteq> H
[PROOF STEP]
by (force simp: H'_def image_subset_iff intro: inv_into_into)
[PROOF STATE]
proof (state)
this:
H' \<subseteq> H
goal (1 subgoal):
1. \<And>f. f \<in> [B]\<^bsup>n\<^esup> \<rightarrow> {..<length \<alpha>'} \<Longrightarrow> \<exists>i<length \<alpha>'. \<exists>H\<subseteq>B. ordertype H r = \<alpha>' ! i \<and> f ` [H]\<^bsup>n\<^esup> \<subseteq> {i}
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
H' \<subseteq> H
goal (1 subgoal):
1. \<And>f. f \<in> [B]\<^bsup>n\<^esup> \<rightarrow> {..<length \<alpha>'} \<Longrightarrow> \<exists>i<length \<alpha>'. \<exists>H\<subseteq>B. ordertype H r = \<alpha>' ! i \<and> f ` [H]\<^bsup>n\<^esup> \<subseteq> {i}
[PROOF STEP]
have ot: "ordertype H' r = (\<alpha>'!i)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. ordertype H' r = \<alpha>' ! i
[PROOF STEP]
proof (subst ordertype_eq_iff)
[PROOF STATE]
proof (state)
goal (6 subgoals):
1. Ord (\<alpha>' ! i)
2. wf r
3. small H'
4. total_on H' r
5. trans r
6. \<exists>f. bij_betw f H' (elts (\<alpha>' ! i)) \<and> (\<forall>x\<in>H'. \<forall>y\<in>H'. (f x < f y) = ((x, y) \<in> r))
[PROOF STEP]
show "Ord (\<alpha>' ! i)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. Ord (\<alpha>' ! i)
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
Partitions.partn_lst r B \<alpha> n
length \<alpha>' = length \<alpha>
list.set \<alpha>' \<subseteq> ON
?i < length \<alpha> \<Longrightarrow> \<alpha>' ! ?i \<le> \<alpha> ! ?i
wf r
trans r
total_on B r
small B
goal (1 subgoal):
1. Ord (\<alpha>' ! i)
[PROOF STEP]
by (simp add: \<open>i < length \<alpha>\<close> subset_eq)
[PROOF STATE]
proof (state)
this:
Ord (\<alpha>' ! i)
goal (5 subgoals):
1. wf r
2. small H'
3. total_on H' r
4. trans r
5. \<exists>f. bij_betw f H' (elts (\<alpha>' ! i)) \<and> (\<forall>x\<in>H'. \<forall>y\<in>H'. (f x < f y) = ((x, y) \<in> r))
[PROOF STEP]
show "small H'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. small H'
[PROOF STEP]
by (simp add: H'_def)
[PROOF STATE]
proof (state)
this:
small H'
goal (4 subgoals):
1. wf r
2. total_on H' r
3. trans r
4. \<exists>f. bij_betw f H' (elts (\<alpha>' ! i)) \<and> (\<forall>x\<in>H'. \<forall>y\<in>H'. (f x < f y) = ((x, y) \<in> r))
[PROOF STEP]
show "\<exists>f. bij_betw f H' (elts (\<alpha>' ! i)) \<and> (\<forall>x\<in>H'. \<forall>y\<in>H'. (f x < f y) = ((x, y) \<in> r))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>f. bij_betw f H' (elts (\<alpha>' ! i)) \<and> (\<forall>x\<in>H'. \<forall>y\<in>H'. (f x < f y) = ((x, y) \<in> r))
[PROOF STEP]
proof (intro exI conjI ballI)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. bij_betw ?f H' (elts (\<alpha>' ! i))
2. \<And>x y. \<lbrakk>x \<in> H'; y \<in> H'\<rbrakk> \<Longrightarrow> (?f x < ?f y) = ((x, y) \<in> r)
[PROOF STEP]
show "bij_betw (ordermap H r) H' (elts (\<alpha>' ! i))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. bij_betw (ordermap H r) H' (elts (\<alpha>' ! i))
[PROOF STEP]
using \<open>H' \<subseteq> H\<close>
[PROOF STATE]
proof (prove)
using this:
H' \<subseteq> H
goal (1 subgoal):
1. bij_betw (ordermap H r) H' (elts (\<alpha>' ! i))
[PROOF STEP]
by (metis H'_def \<open>i < length \<alpha>\<close> bij bij_betw_inv_into_RIGHT bij_betw_subset le less_eq_V_def)
[PROOF STATE]
proof (state)
this:
bij_betw (ordermap H r) H' (elts (\<alpha>' ! i))
goal (1 subgoal):
1. \<And>x y. \<lbrakk>x \<in> H'; y \<in> H'\<rbrakk> \<Longrightarrow> (ordermap H r x < ordermap H r y) = ((x, y) \<in> r)
[PROOF STEP]
show "(ordermap H r x < ordermap H r y) = ((x, y) \<in> r)"
if "x \<in> H'" "y \<in> H'" for x y
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (ordermap H r x < ordermap H r y) = ((x, y) \<in> r)
[PROOF STEP]
proof (intro iffI ordermap_mono_less)
[PROOF STATE]
proof (state)
goal (7 subgoals):
1. ordermap H r x < ordermap H r y \<Longrightarrow> (x, y) \<in> r
2. (x, y) \<in> r \<Longrightarrow> (x, y) \<in> r
3. (x, y) \<in> r \<Longrightarrow> wf r
4. (x, y) \<in> r \<Longrightarrow> trans r
5. (x, y) \<in> r \<Longrightarrow> x \<in> H
6. (x, y) \<in> r \<Longrightarrow> y \<in> H
7. (x, y) \<in> r \<Longrightarrow> small H
[PROOF STEP]
assume "ordermap H r x < ordermap H r y"
[PROOF STATE]
proof (state)
this:
ordermap H r x < ordermap H r y
goal (7 subgoals):
1. ordermap H r x < ordermap H r y \<Longrightarrow> (x, y) \<in> r
2. (x, y) \<in> r \<Longrightarrow> (x, y) \<in> r
3. (x, y) \<in> r \<Longrightarrow> wf r
4. (x, y) \<in> r \<Longrightarrow> trans r
5. (x, y) \<in> r \<Longrightarrow> x \<in> H
6. (x, y) \<in> r \<Longrightarrow> y \<in> H
7. (x, y) \<in> r \<Longrightarrow> small H
[PROOF STEP]
then
[PROOF STATE]
proof (chain)
picking this:
ordermap H r x < ordermap H r y
[PROOF STEP]
show "(x, y) \<in> r"
[PROOF STATE]
proof (prove)
using this:
ordermap H r x < ordermap H r y
goal (1 subgoal):
1. (x, y) \<in> r
[PROOF STEP]
by (metis \<open>H \<subseteq> B\<close> assms(8) calculation in_mono leD ordermap_mono_le r smaller_than_small that total_on_def)
[PROOF STATE]
proof (state)
this:
(x, y) \<in> r
goal (6 subgoals):
1. (x, y) \<in> r \<Longrightarrow> (x, y) \<in> r
2. (x, y) \<in> r \<Longrightarrow> wf r
3. (x, y) \<in> r \<Longrightarrow> trans r
4. (x, y) \<in> r \<Longrightarrow> x \<in> H
5. (x, y) \<in> r \<Longrightarrow> y \<in> H
6. (x, y) \<in> r \<Longrightarrow> small H
[PROOF STEP]
qed (use assms that \<open>H' \<subseteq> H\<close> \<open>small H\<close> in auto)
[PROOF STATE]
proof (state)
this:
\<lbrakk>?x \<in> H'; ?y \<in> H'\<rbrakk> \<Longrightarrow> (ordermap H r ?x < ordermap H r ?y) = ((?x, ?y) \<in> r)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
\<exists>f. bij_betw f H' (elts (\<alpha>' ! i)) \<and> (\<forall>x\<in>H'. \<forall>y\<in>H'. (f x < f y) = ((x, y) \<in> r))
goal (3 subgoals):
1. wf r
2. total_on H' r
3. trans r
[PROOF STEP]
show "total_on H' r"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. total_on H' r
[PROOF STEP]
using r
[PROOF STATE]
proof (prove)
using this:
wf r
trans r
total_on B r
goal (1 subgoal):
1. total_on H' r
[PROOF STEP]
by (meson \<open>H \<subseteq> B\<close> \<open>H' \<subseteq> H\<close> subsetD total_on_def)
[PROOF STATE]
proof (state)
this:
total_on H' r
goal (2 subgoals):
1. wf r
2. trans r
[PROOF STEP]
qed (use r in auto)
[PROOF STATE]
proof (state)
this:
ordertype H' r = \<alpha>' ! i
goal (1 subgoal):
1. \<And>f. f \<in> [B]\<^bsup>n\<^esup> \<rightarrow> {..<length \<alpha>'} \<Longrightarrow> \<exists>i<length \<alpha>'. \<exists>H\<subseteq>B. ordertype H r = \<alpha>' ! i \<and> f ` [H]\<^bsup>n\<^esup> \<subseteq> {i}
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
H' \<subseteq> H
ordertype H' r = \<alpha>' ! i
[PROOF STEP]
show "\<exists>i<length \<alpha>'. \<exists>H\<subseteq>B. ordertype H r = \<alpha>' ! i \<and> f ` [H]\<^bsup>n\<^esup> \<subseteq> {i}"
[PROOF STATE]
proof (prove)
using this:
H' \<subseteq> H
ordertype H' r = \<alpha>' ! i
goal (1 subgoal):
1. \<exists>i<length \<alpha>'. \<exists>H\<subseteq>B. ordertype H r = \<alpha>' ! i \<and> f ` [H]\<^bsup>n\<^esup> \<subseteq> {i}
[PROOF STEP]
using \<open>H \<subseteq> B\<close> \<open>i < length \<alpha>\<close> fi assms
[PROOF STATE]
proof (prove)
using this:
H' \<subseteq> H
ordertype H' r = \<alpha>' ! i
H \<subseteq> B
i < length \<alpha>
f ` [H]\<^bsup>n\<^esup> \<subseteq> {i}
Partitions.partn_lst r B \<alpha> n
length \<alpha>' = length \<alpha>
list.set \<alpha>' \<subseteq> ON
?i < length \<alpha> \<Longrightarrow> \<alpha>' ! ?i \<le> \<alpha> ! ?i
wf r
trans r
total_on B r
small B
goal (1 subgoal):
1. \<exists>i<length \<alpha>'. \<exists>H\<subseteq>B. ordertype H r = \<alpha>' ! i \<and> f ` [H]\<^bsup>n\<^esup> \<subseteq> {i}
[PROOF STEP]
by (metis image_mono nsets_mono subset_trans)
[PROOF STATE]
proof (state)
this:
\<exists>i<length \<alpha>'. \<exists>H\<subseteq>B. ordertype H r = \<alpha>' ! i \<and> f ` [H]\<^bsup>n\<^esup> \<subseteq> {i}
goal:
No subgoals!
[PROOF STEP]
qed
|
IsLeapYear := function(n)
return (n mod 4 = 0) and ((n mod 100 <> 0) or (n mod 400 = 0));
end;
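# e.g. IsLeapYear(2000) = true, IsLeapYear(1900) = false, IsLeapYear(2012) = true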
# alternative using built-in function
IsLeapYear := function(n)
return DaysInYear(n) = 366;
end;
|
(* @TAG(OTHER_LGPL) *)
(*
Author: Norbert Schirmer
Maintainer: Norbert Schirmer, norbert.schirmer at web de
License: LGPL
*)
(* Title: Semantic.thy
Author: Norbert Schirmer, TU Muenchen
Copyright (C) 2004-2008 Norbert Schirmer
Some rights reserved, TU Muenchen
This library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation; either version 2.1 of the
License, or (at your option) any later version.
This library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
USA
*)
section {* Big-Step Semantics for Simpl *}
theory Semantic imports Language begin
notation
restrict_map ("_|\<^bsub>_\<^esub>" [90, 91] 90)
datatype ('s,'f) xstate = Normal 's | Abrupt 's | Fault 'f | Stuck
definition isAbr::"('s,'f) xstate \<Rightarrow> bool"
where "isAbr S = (\<exists>s. S=Abrupt s)"
lemma isAbr_simps [simp]:
"isAbr (Normal s) = False"
"isAbr (Abrupt s) = True"
"isAbr (Fault f) = False"
"isAbr Stuck = False"
by (auto simp add: isAbr_def)
lemma isAbrE [consumes 1, elim?]: "\<lbrakk>isAbr S; \<And>s. S=Abrupt s \<Longrightarrow> P\<rbrakk> \<Longrightarrow> P"
by (auto simp add: isAbr_def)
lemma not_isAbrD:
"\<not> isAbr s \<Longrightarrow> (\<exists>s'. s=Normal s') \<or> s = Stuck \<or> (\<exists>f. s=Fault f)"
by (cases s) auto
definition isFault:: "('s,'f) xstate \<Rightarrow> bool"
where "isFault S = (\<exists>f. S=Fault f)"
lemma isFault_simps [simp]:
"isFault (Normal s) = False"
"isFault (Abrupt s) = False"
"isFault (Fault f) = True"
"isFault Stuck = False"
by (auto simp add: isFault_def)
lemma isFaultE [consumes 1, elim?]: "\<lbrakk>isFault s; \<And>f. s=Fault f \<Longrightarrow> P\<rbrakk> \<Longrightarrow> P"
by (auto simp add: isFault_def)
lemma not_isFault_iff: "(\<not> isFault t) = (\<forall>f. t \<noteq> Fault f)"
by (auto elim: isFaultE)
(* ************************************************************************* *)
subsection {* Big-Step Execution: @{text "\<Gamma>\<turnstile>\<langle>c, s\<rangle> \<Rightarrow> t"} *}
(* ************************************************************************* *)
text {* The procedure environment *}
type_synonym ('s,'p,'f) body = "'p \<Rightarrow> ('s,'p,'f) com option"
inductive
"exec"::"[('s,'p,'f) body,('s,'p,'f) com,('s,'f) xstate,('s,'f) xstate]
\<Rightarrow> bool" ("_\<turnstile> \<langle>_,_\<rangle> \<Rightarrow> _" [60,20,98,98] 89)
for \<Gamma>::"('s,'p,'f) body"
where
Skip: "\<Gamma>\<turnstile>\<langle>Skip,Normal s\<rangle> \<Rightarrow> Normal s"
| Guard: "\<lbrakk>s\<in>g; \<Gamma>\<turnstile>\<langle>c,Normal s\<rangle> \<Rightarrow> t\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>Guard f g c,Normal s\<rangle> \<Rightarrow> t"
| GuardFault: "s\<notin>g \<Longrightarrow> \<Gamma>\<turnstile>\<langle>Guard f g c,Normal s\<rangle> \<Rightarrow> Fault f"
| FaultProp [intro,simp]: "\<Gamma>\<turnstile>\<langle>c,Fault f\<rangle> \<Rightarrow> Fault f"
| Basic: "\<Gamma>\<turnstile>\<langle>Basic f,Normal s\<rangle> \<Rightarrow> Normal (f s)"
| Spec: "(s,t) \<in> r
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>Spec r,Normal s\<rangle> \<Rightarrow> Normal t"
| SpecStuck: "\<forall>t. (s,t) \<notin> r
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>Spec r,Normal s\<rangle> \<Rightarrow> Stuck"
| Seq: "\<lbrakk>\<Gamma>\<turnstile>\<langle>c\<^sub>1,Normal s\<rangle> \<Rightarrow> s'; \<Gamma>\<turnstile>\<langle>c\<^sub>2,s'\<rangle> \<Rightarrow> t\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>Seq c\<^sub>1 c\<^sub>2,Normal s\<rangle> \<Rightarrow> t"
| CondTrue: "\<lbrakk>s \<in> b; \<Gamma>\<turnstile>\<langle>c\<^sub>1,Normal s\<rangle> \<Rightarrow> t\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>Cond b c\<^sub>1 c\<^sub>2,Normal s\<rangle> \<Rightarrow> t"
| CondFalse: "\<lbrakk>s \<notin> b; \<Gamma>\<turnstile>\<langle>c\<^sub>2,Normal s\<rangle> \<Rightarrow> t\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>Cond b c\<^sub>1 c\<^sub>2,Normal s\<rangle> \<Rightarrow> t"
| WhileTrue: "\<lbrakk>s \<in> b; \<Gamma>\<turnstile>\<langle>c,Normal s\<rangle> \<Rightarrow> s'; \<Gamma>\<turnstile>\<langle>While b c,s'\<rangle> \<Rightarrow> t\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>While b c,Normal s\<rangle> \<Rightarrow> t"
| WhileFalse: "\<lbrakk>s \<notin> b\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>While b c,Normal s\<rangle> \<Rightarrow> Normal s"
| Call: "\<lbrakk>\<Gamma> p=Some bdy;\<Gamma>\<turnstile>\<langle>bdy,Normal s\<rangle> \<Rightarrow> t\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>Call p,Normal s\<rangle> \<Rightarrow> t"
| CallUndefined: "\<lbrakk>\<Gamma> p=None\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>Call p,Normal s\<rangle> \<Rightarrow> Stuck"
| StuckProp [intro,simp]: "\<Gamma>\<turnstile>\<langle>c,Stuck\<rangle> \<Rightarrow> Stuck"
| DynCom: "\<lbrakk>\<Gamma>\<turnstile>\<langle>(c s),Normal s\<rangle> \<Rightarrow> t\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>DynCom c,Normal s\<rangle> \<Rightarrow> t"
| Throw: "\<Gamma>\<turnstile>\<langle>Throw,Normal s\<rangle> \<Rightarrow> Abrupt s"
| AbruptProp [intro,simp]: "\<Gamma>\<turnstile>\<langle>c,Abrupt s\<rangle> \<Rightarrow> Abrupt s"
| CatchMatch: "\<lbrakk>\<Gamma>\<turnstile>\<langle>c\<^sub>1,Normal s\<rangle> \<Rightarrow> Abrupt s'; \<Gamma>\<turnstile>\<langle>c\<^sub>2,Normal s'\<rangle> \<Rightarrow> t\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>Catch c\<^sub>1 c\<^sub>2,Normal s\<rangle> \<Rightarrow> t"
| CatchMiss: "\<lbrakk>\<Gamma>\<turnstile>\<langle>c\<^sub>1,Normal s\<rangle> \<Rightarrow> t; \<not>isAbr t\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>Catch c\<^sub>1 c\<^sub>2,Normal s\<rangle> \<Rightarrow> t"
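(* Example: if \<Gamma> p = Some Skip, the rules Call and Skip derive
\<Gamma>\<turnstile>\<langle>Call p,Normal s\<rangle> \<Rightarrow> Normal s. *)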
inductive_cases exec_elim_cases [cases set]:
"\<Gamma>\<turnstile>\<langle>c,Fault f\<rangle> \<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>c,Stuck\<rangle> \<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>c,Abrupt s\<rangle> \<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Skip,s\<rangle> \<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Seq c1 c2,s\<rangle> \<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Guard f g c,s\<rangle> \<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Basic f,s\<rangle> \<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Spec r,s\<rangle> \<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Cond b c1 c2,s\<rangle> \<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>While b c,s\<rangle> \<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Call p,s\<rangle> \<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>DynCom c,s\<rangle> \<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Throw,s\<rangle> \<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Catch c1 c2,s\<rangle> \<Rightarrow> t"
inductive_cases exec_Normal_elim_cases [cases set]:
"\<Gamma>\<turnstile>\<langle>c,Fault f\<rangle> \<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>c,Stuck\<rangle> \<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>c,Abrupt s\<rangle> \<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Skip,Normal s\<rangle> \<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Guard f g c,Normal s\<rangle> \<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Basic f,Normal s\<rangle> \<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Spec r,Normal s\<rangle> \<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Seq c1 c2,Normal s\<rangle> \<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Cond b c1 c2,Normal s\<rangle> \<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>While b c,Normal s\<rangle> \<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Call p,Normal s\<rangle> \<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>DynCom c,Normal s\<rangle> \<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Throw,Normal s\<rangle> \<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Catch c1 c2,Normal s\<rangle> \<Rightarrow> t"
lemma exec_block:
"\<lbrakk>\<Gamma>\<turnstile>\<langle>bdy,Normal (init s)\<rangle> \<Rightarrow> Normal t; \<Gamma>\<turnstile>\<langle>c s t,Normal (return s t)\<rangle> \<Rightarrow> u\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>block init bdy return c,Normal s\<rangle> \<Rightarrow> u"
apply (unfold block_def)
by (fastforce intro: exec.intros)
lemma exec_blockAbrupt:
"\<lbrakk>\<Gamma>\<turnstile>\<langle>bdy,Normal (init s)\<rangle> \<Rightarrow> Abrupt t\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>block init bdy return c,Normal s\<rangle> \<Rightarrow> Abrupt (return s t)"
apply (unfold block_def)
by (fastforce intro: exec.intros)
lemma exec_blockFault:
"\<lbrakk>\<Gamma>\<turnstile>\<langle>bdy,Normal (init s)\<rangle> \<Rightarrow> Fault f\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>block init bdy return c,Normal s\<rangle> \<Rightarrow> Fault f"
apply (unfold block_def)
by (fastforce intro: exec.intros)
lemma exec_blockStuck:
"\<lbrakk>\<Gamma>\<turnstile>\<langle>bdy,Normal (init s)\<rangle> \<Rightarrow> Stuck\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>block init bdy return c,Normal s\<rangle> \<Rightarrow> Stuck"
apply (unfold block_def)
by (fastforce intro: exec.intros)
lemma exec_call:
"\<lbrakk>\<Gamma> p=Some bdy;\<Gamma>\<turnstile>\<langle>bdy,Normal (init s)\<rangle> \<Rightarrow> Normal t; \<Gamma>\<turnstile>\<langle>c s t,Normal (return s t)\<rangle> \<Rightarrow> u\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>call init p return c,Normal s\<rangle> \<Rightarrow> u"
apply (simp add: call_def)
apply (rule exec_block)
apply (erule (1) Call)
apply assumption
done
lemma exec_callAbrupt:
"\<lbrakk>\<Gamma> p=Some bdy;\<Gamma>\<turnstile>\<langle>bdy,Normal (init s)\<rangle> \<Rightarrow> Abrupt t\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>call init p return c,Normal s\<rangle> \<Rightarrow> Abrupt (return s t)"
apply (simp add: call_def)
apply (rule exec_blockAbrupt)
apply (erule (1) Call)
done
lemma exec_callFault:
"\<lbrakk>\<Gamma> p=Some bdy; \<Gamma>\<turnstile>\<langle>bdy,Normal (init s)\<rangle> \<Rightarrow> Fault f\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>call init p return c,Normal s\<rangle> \<Rightarrow> Fault f"
apply (simp add: call_def)
apply (rule exec_blockFault)
apply (erule (1) Call)
done
lemma exec_callStuck:
"\<lbrakk>\<Gamma> p=Some bdy; \<Gamma>\<turnstile>\<langle>bdy,Normal (init s)\<rangle> \<Rightarrow> Stuck\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>call init p return c,Normal s\<rangle> \<Rightarrow> Stuck"
apply (simp add: call_def)
apply (rule exec_blockStuck)
apply (erule (1) Call)
done
lemma exec_callUndefined:
"\<lbrakk>\<Gamma> p=None\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>call init p return c,Normal s\<rangle> \<Rightarrow> Stuck"
apply (simp add: call_def)
apply (rule exec_blockStuck)
apply (erule CallUndefined)
done
lemma Fault_end: assumes exec: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t" and s: "s=Fault f"
shows "t=Fault f"
using exec s by (induct) auto
lemma Stuck_end: assumes exec: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t" and s: "s=Stuck"
shows "t=Stuck"
using exec s by (induct) auto
lemma Abrupt_end: assumes exec: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t" and s: "s=Abrupt s'"
shows "t=Abrupt s'"
using exec s by (induct) auto
lemma exec_Call_body_aux:
"\<Gamma> p=Some bdy \<Longrightarrow>
\<Gamma>\<turnstile>\<langle>Call p,s\<rangle> \<Rightarrow> t = \<Gamma>\<turnstile>\<langle>bdy,s\<rangle> \<Rightarrow> t"
apply (rule)
apply (fastforce elim: exec_elim_cases )
apply (cases s)
apply (cases t)
apply (auto intro: exec.intros dest: Fault_end Stuck_end Abrupt_end)
done
lemma exec_Call_body':
"p \<in> dom \<Gamma> \<Longrightarrow>
\<Gamma>\<turnstile>\<langle>Call p,s\<rangle> \<Rightarrow> t = \<Gamma>\<turnstile>\<langle>the (\<Gamma> p),s\<rangle> \<Rightarrow> t"
apply clarsimp
by (rule exec_Call_body_aux)
lemma exec_block_Normal_elim [consumes 1]:
assumes exec_block: "\<Gamma>\<turnstile>\<langle>block init bdy return c,Normal s\<rangle> \<Rightarrow> t"
assumes Normal:
"\<And>t'.
\<lbrakk>\<Gamma>\<turnstile>\<langle>bdy,Normal (init s)\<rangle> \<Rightarrow> Normal t';
\<Gamma>\<turnstile>\<langle>c s t',Normal (return s t')\<rangle> \<Rightarrow> t\<rbrakk>
\<Longrightarrow> P"
assumes Abrupt:
"\<And>t'.
\<lbrakk>\<Gamma>\<turnstile>\<langle>bdy,Normal (init s)\<rangle> \<Rightarrow> Abrupt t';
t = Abrupt (return s t')\<rbrakk>
\<Longrightarrow> P"
assumes Fault:
"\<And>f.
\<lbrakk>\<Gamma>\<turnstile>\<langle>bdy,Normal (init s)\<rangle> \<Rightarrow> Fault f;
t = Fault f\<rbrakk>
\<Longrightarrow> P"
assumes Stuck:
"\<lbrakk>\<Gamma>\<turnstile>\<langle>bdy,Normal (init s)\<rangle> \<Rightarrow> Stuck;
t = Stuck\<rbrakk>
\<Longrightarrow> P"
assumes
"\<lbrakk>\<Gamma> p = None; t = Stuck\<rbrakk> \<Longrightarrow> P"
shows "P"
using exec_block
apply (unfold block_def)
apply (elim exec_Normal_elim_cases)
apply simp_all
apply (case_tac s')
apply simp_all
apply (elim exec_Normal_elim_cases)
apply simp
apply (drule Abrupt_end) apply simp
apply (erule exec_Normal_elim_cases)
apply simp
apply (rule Abrupt,assumption+)
apply (drule Fault_end) apply simp
apply (erule exec_Normal_elim_cases)
apply simp
apply (drule Stuck_end) apply simp
apply (erule exec_Normal_elim_cases)
apply simp
apply (case_tac s')
apply simp_all
apply (elim exec_Normal_elim_cases)
apply simp
apply (rule Normal, assumption+)
apply (drule Fault_end) apply simp
apply (rule Fault,assumption+)
apply (drule Stuck_end) apply simp
apply (rule Stuck,assumption+)
done
lemma exec_call_Normal_elim [consumes 1]:
assumes exec_call: "\<Gamma>\<turnstile>\<langle>call init p return c,Normal s\<rangle> \<Rightarrow> t"
assumes Normal:
"\<And>bdy t'.
\<lbrakk>\<Gamma> p = Some bdy; \<Gamma>\<turnstile>\<langle>bdy,Normal (init s)\<rangle> \<Rightarrow> Normal t';
\<Gamma>\<turnstile>\<langle>c s t',Normal (return s t')\<rangle> \<Rightarrow> t\<rbrakk>
\<Longrightarrow> P"
assumes Abrupt:
"\<And>bdy t'.
\<lbrakk>\<Gamma> p = Some bdy; \<Gamma>\<turnstile>\<langle>bdy,Normal (init s)\<rangle> \<Rightarrow> Abrupt t';
t = Abrupt (return s t')\<rbrakk>
\<Longrightarrow> P"
assumes Fault:
"\<And>bdy f.
\<lbrakk>\<Gamma> p = Some bdy; \<Gamma>\<turnstile>\<langle>bdy,Normal (init s)\<rangle> \<Rightarrow> Fault f;
t = Fault f\<rbrakk>
\<Longrightarrow> P"
assumes Stuck:
"\<And>bdy.
\<lbrakk>\<Gamma> p = Some bdy; \<Gamma>\<turnstile>\<langle>bdy,Normal (init s)\<rangle> \<Rightarrow> Stuck;
t = Stuck\<rbrakk>
\<Longrightarrow> P"
assumes Undef:
"\<lbrakk>\<Gamma> p = None; t = Stuck\<rbrakk> \<Longrightarrow> P"
shows "P"
using exec_call
apply (unfold call_def)
apply (cases "\<Gamma> p")
apply (erule exec_block_Normal_elim)
apply (elim exec_Normal_elim_cases)
apply simp
apply simp
apply (elim exec_Normal_elim_cases)
apply simp
apply simp
apply (elim exec_Normal_elim_cases)
apply simp
apply simp
apply (elim exec_Normal_elim_cases)
apply simp
apply (rule Undef,assumption,assumption)
apply (rule Undef,assumption+)
apply (erule exec_block_Normal_elim)
apply (elim exec_Normal_elim_cases)
apply simp
apply (rule Normal,assumption+)
apply simp
apply (elim exec_Normal_elim_cases)
apply simp
apply (rule Abrupt,assumption+)
apply simp
apply (elim exec_Normal_elim_cases)
apply simp
apply (rule Fault, assumption+)
apply simp
apply (elim exec_Normal_elim_cases)
apply simp
apply (rule Stuck,assumption,assumption,assumption)
apply simp
apply (rule Undef,assumption+)
done
lemma exec_dynCall:
"\<lbrakk>\<Gamma>\<turnstile>\<langle>call init (p s) return c,Normal s\<rangle> \<Rightarrow> t\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>dynCall init p return c,Normal s\<rangle> \<Rightarrow> t"
apply (simp add: dynCall_def)
by (rule DynCom)
lemma exec_dynCall_Normal_elim:
assumes exec: "\<Gamma>\<turnstile>\<langle>dynCall init p return c,Normal s\<rangle> \<Rightarrow> t"
assumes call: "\<Gamma>\<turnstile>\<langle>call init (p s) return c,Normal s\<rangle> \<Rightarrow> t \<Longrightarrow> P"
shows "P"
using exec
apply (simp add: dynCall_def)
apply (erule exec_Normal_elim_cases)
apply (rule call,assumption)
done
lemma exec_Seq': "\<lbrakk>\<Gamma>\<turnstile>\<langle>c1,s\<rangle> \<Rightarrow> s'; \<Gamma>\<turnstile>\<langle>c2,s'\<rangle> \<Rightarrow> s''\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>Seq c1 c2,s\<rangle> \<Rightarrow> s''"
apply (cases s)
apply (fastforce intro: exec.intros)
apply (fastforce dest: Abrupt_end)
apply (fastforce dest: Fault_end)
apply (fastforce dest: Stuck_end)
done
lemma exec_assoc: "\<Gamma>\<turnstile>\<langle>Seq c1 (Seq c2 c3),s\<rangle> \<Rightarrow> t = \<Gamma>\<turnstile>\<langle>Seq (Seq c1 c2) c3,s\<rangle> \<Rightarrow> t"
by (blast elim!: exec_elim_cases intro: exec_Seq' )
(* ************************************************************************* *)
subsection {* Big-Step Execution with Recursion Limit: @{text "\<Gamma>\<turnstile>\<langle>c, s\<rangle> =n\<Rightarrow> t"} *}
(* ************************************************************************* *)
inductive "execn"::"[('s,'p,'f) body,('s,'p,'f) com,('s,'f) xstate,nat,('s,'f) xstate]
\<Rightarrow> bool" ("_\<turnstile> \<langle>_,_\<rangle> =_\<Rightarrow> _" [60,20,98,65,98] 89)
for \<Gamma>::"('s,'p,'f) body"
where
Skip: "\<Gamma>\<turnstile>\<langle>Skip,Normal s\<rangle> =n\<Rightarrow> Normal s"
| Guard: "\<lbrakk>s\<in>g; \<Gamma>\<turnstile>\<langle>c,Normal s\<rangle> =n\<Rightarrow> t\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>Guard f g c,Normal s\<rangle> =n\<Rightarrow> t"
| GuardFault: "s\<notin>g \<Longrightarrow> \<Gamma>\<turnstile>\<langle>Guard f g c,Normal s\<rangle> =n\<Rightarrow> Fault f"
| FaultProp [intro,simp]: "\<Gamma>\<turnstile>\<langle>c,Fault f\<rangle> =n\<Rightarrow> Fault f"
| Basic: "\<Gamma>\<turnstile>\<langle>Basic f,Normal s\<rangle> =n\<Rightarrow> Normal (f s)"
| Spec: "(s,t) \<in> r
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>Spec r,Normal s\<rangle> =n\<Rightarrow> Normal t"
| SpecStuck: "\<forall>t. (s,t) \<notin> r
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>Spec r,Normal s\<rangle> =n\<Rightarrow> Stuck"
| Seq: "\<lbrakk>\<Gamma>\<turnstile>\<langle>c\<^sub>1,Normal s\<rangle> =n\<Rightarrow> s'; \<Gamma>\<turnstile>\<langle>c\<^sub>2,s'\<rangle> =n\<Rightarrow> t\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>Seq c\<^sub>1 c\<^sub>2,Normal s\<rangle> =n\<Rightarrow> t"
| CondTrue: "\<lbrakk>s \<in> b; \<Gamma>\<turnstile>\<langle>c\<^sub>1,Normal s\<rangle> =n\<Rightarrow> t\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>Cond b c\<^sub>1 c\<^sub>2,Normal s\<rangle> =n\<Rightarrow> t"
| CondFalse: "\<lbrakk>s \<notin> b; \<Gamma>\<turnstile>\<langle>c\<^sub>2,Normal s\<rangle> =n\<Rightarrow> t\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>Cond b c\<^sub>1 c\<^sub>2,Normal s\<rangle> =n\<Rightarrow> t"
| WhileTrue: "\<lbrakk>s \<in> b; \<Gamma>\<turnstile>\<langle>c,Normal s\<rangle> =n\<Rightarrow> s';
\<Gamma>\<turnstile>\<langle>While b c,s'\<rangle> =n\<Rightarrow> t\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>While b c,Normal s\<rangle> =n\<Rightarrow> t"
| WhileFalse: "\<lbrakk>s \<notin> b\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>While b c,Normal s\<rangle> =n\<Rightarrow> Normal s"
| Call: "\<lbrakk>\<Gamma> p=Some bdy;\<Gamma>\<turnstile>\<langle>bdy,Normal s\<rangle> =n\<Rightarrow> t\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>Call p ,Normal s\<rangle> =Suc n\<Rightarrow> t"
| CallUndefined: "\<lbrakk>\<Gamma> p=None\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>Call p ,Normal s\<rangle> =Suc n\<Rightarrow> Stuck"
| StuckProp [intro,simp]: "\<Gamma>\<turnstile>\<langle>c,Stuck\<rangle> =n\<Rightarrow> Stuck"
| DynCom: "\<lbrakk>\<Gamma>\<turnstile>\<langle>(c s),Normal s\<rangle> =n\<Rightarrow> t\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>DynCom c,Normal s\<rangle> =n\<Rightarrow> t"
| Throw: "\<Gamma>\<turnstile>\<langle>Throw,Normal s\<rangle> =n\<Rightarrow> Abrupt s"
| AbruptProp [intro,simp]: "\<Gamma>\<turnstile>\<langle>c,Abrupt s\<rangle> =n\<Rightarrow> Abrupt s"
| CatchMatch: "\<lbrakk>\<Gamma>\<turnstile>\<langle>c\<^sub>1,Normal s\<rangle> =n\<Rightarrow> Abrupt s'; \<Gamma>\<turnstile>\<langle>c\<^sub>2,Normal s'\<rangle> =n\<Rightarrow> t\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>Catch c\<^sub>1 c\<^sub>2,Normal s\<rangle> =n\<Rightarrow> t"
| CatchMiss: "\<lbrakk>\<Gamma>\<turnstile>\<langle>c\<^sub>1,Normal s\<rangle> =n\<Rightarrow> t; \<not>isAbr t\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>Catch c\<^sub>1 c\<^sub>2,Normal s\<rangle> =n\<Rightarrow> t"
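(* The index n bounds the procedure-call depth: a Call at depth Suc n executes
its body at depth n, while every other construct leaves n unchanged. *)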
inductive_cases execn_elim_cases [cases set]:
"\<Gamma>\<turnstile>\<langle>c,Fault f\<rangle> =n\<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>c,Stuck\<rangle> =n\<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>c,Abrupt s\<rangle> =n\<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Skip,s\<rangle> =n\<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Seq c1 c2,s\<rangle> =n\<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Guard f g c,s\<rangle> =n\<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Basic f,s\<rangle> =n\<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Spec r,s\<rangle> =n\<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Cond b c1 c2,s\<rangle> =n\<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>While b c,s\<rangle> =n\<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Call p ,s\<rangle> =n\<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>DynCom c,s\<rangle> =n\<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Throw,s\<rangle> =n\<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Catch c1 c2,s\<rangle> =n\<Rightarrow> t"
inductive_cases execn_Normal_elim_cases [cases set]:
"\<Gamma>\<turnstile>\<langle>c,Fault f\<rangle> =n\<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>c,Stuck\<rangle> =n\<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>c,Abrupt s\<rangle> =n\<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Skip,Normal s\<rangle> =n\<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Guard f g c,Normal s\<rangle> =n\<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Basic f,Normal s\<rangle> =n\<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Spec r,Normal s\<rangle> =n\<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Seq c1 c2,Normal s\<rangle> =n\<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Cond b c1 c2,Normal s\<rangle> =n\<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>While b c,Normal s\<rangle> =n\<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Call p,Normal s\<rangle> =n\<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>DynCom c,Normal s\<rangle> =n\<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Throw,Normal s\<rangle> =n\<Rightarrow> t"
"\<Gamma>\<turnstile>\<langle>Catch c1 c2,Normal s\<rangle> =n\<Rightarrow> t"
lemma execn_Skip': "\<Gamma>\<turnstile>\<langle>Skip,t\<rangle> =n\<Rightarrow> t"
by (cases t) (auto intro: execn.intros)
lemma execn_Fault_end: assumes exec: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t" and s: "s=Fault f"
shows "t=Fault f"
using exec s by (induct) auto
lemma execn_Stuck_end: assumes exec: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t" and s: "s=Stuck"
shows "t=Stuck"
using exec s by (induct) auto
lemma execn_Abrupt_end: assumes exec: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t" and s: "s=Abrupt s'"
shows "t=Abrupt s'"
using exec s by (induct) auto
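(* The three _end lemmas above say that a non-Normal start state merely
   propagates to the final state. The next group provides introduction
   rules for the derived constructs block and call: roughly, init sets
   up the procedure-local state, bdy is the body, return restores the
   caller's state, and c is the continuation. One rule is given per
   possible terminal state of the body. *)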
lemma execn_block:
"\<lbrakk>\<Gamma>\<turnstile>\<langle>bdy,Normal (init s)\<rangle> =n\<Rightarrow> Normal t; \<Gamma>\<turnstile>\<langle>c s t,Normal (return s t)\<rangle> =n\<Rightarrow> u\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>block init bdy return c,Normal s\<rangle> =n\<Rightarrow> u"
apply (unfold block_def)
by (fastforce intro: execn.intros)
lemma execn_blockAbrupt:
"\<lbrakk>\<Gamma>\<turnstile>\<langle>bdy,Normal (init s)\<rangle> =n\<Rightarrow> Abrupt t\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>block init bdy return c,Normal s\<rangle> =n\<Rightarrow> Abrupt (return s t)"
apply (unfold block_def)
by (fastforce intro: execn.intros)
lemma execn_blockFault:
"\<lbrakk>\<Gamma>\<turnstile>\<langle>bdy,Normal (init s)\<rangle> =n\<Rightarrow> Fault f\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>block init bdy return c,Normal s\<rangle> =n\<Rightarrow> Fault f"
apply (unfold block_def)
by (fastforce intro: execn.intros)
lemma execn_blockStuck:
"\<lbrakk>\<Gamma>\<turnstile>\<langle>bdy,Normal (init s)\<rangle> =n\<Rightarrow> Stuck\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>block init bdy return c,Normal s\<rangle> =n\<Rightarrow> Stuck"
apply (unfold block_def)
by (fastforce intro: execn.intros)
lemma execn_call:
"\<lbrakk>\<Gamma> p=Some bdy;\<Gamma>\<turnstile>\<langle>bdy,Normal (init s)\<rangle> =n\<Rightarrow> Normal t;
\<Gamma>\<turnstile>\<langle>c s t,Normal (return s t)\<rangle> =Suc n\<Rightarrow> u\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>call init p return c,Normal s\<rangle> =Suc n\<Rightarrow> u"
apply (simp add: call_def)
apply (rule execn_block)
apply (erule (1) Call)
apply assumption
done
lemma execn_callAbrupt:
"\<lbrakk>\<Gamma> p=Some bdy;\<Gamma>\<turnstile>\<langle>bdy,Normal (init s)\<rangle> =n\<Rightarrow> Abrupt t\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>call init p return c,Normal s\<rangle> =Suc n\<Rightarrow> Abrupt (return s t)"
apply (simp add: call_def)
apply (rule execn_blockAbrupt)
apply (erule (1) Call)
done
lemma execn_callFault:
"\<lbrakk>\<Gamma> p=Some bdy; \<Gamma>\<turnstile>\<langle>bdy,Normal (init s)\<rangle> =n\<Rightarrow> Fault f\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>call init p return c,Normal s\<rangle> =Suc n\<Rightarrow> Fault f"
apply (simp add: call_def)
apply (rule execn_blockFault)
apply (erule (1) Call)
done
lemma execn_callStuck:
"\<lbrakk>\<Gamma> p=Some bdy; \<Gamma>\<turnstile>\<langle>bdy,Normal (init s)\<rangle> =n\<Rightarrow> Stuck\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>call init p return c,Normal s\<rangle> =Suc n\<Rightarrow> Stuck"
apply (simp add: call_def)
apply (rule execn_blockStuck)
apply (erule (1) Call)
done
lemma execn_callUndefined:
"\<lbrakk>\<Gamma> p=None\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>call init p return c,Normal s\<rangle> =Suc n\<Rightarrow> Stuck"
apply (simp add: call_def)
apply (rule execn_blockStuck)
apply (erule CallUndefined)
done
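(* Conversely, the following elimination rules case-split an execution
   of block resp. call according to how the body terminated. For call
   the depth is necessarily a successor Suc i, since unfolding the
   procedure body consumes one level of the index. *)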
lemma execn_block_Normal_elim [consumes 1]:
assumes execn_block: "\<Gamma>\<turnstile>\<langle>block init bdy return c,Normal s\<rangle> =n\<Rightarrow> t"
assumes Normal:
"\<And>t'.
\<lbrakk>\<Gamma>\<turnstile>\<langle>bdy,Normal (init s)\<rangle> =n\<Rightarrow> Normal t';
\<Gamma>\<turnstile>\<langle>c s t',Normal (return s t')\<rangle> =n\<Rightarrow> t\<rbrakk>
\<Longrightarrow> P"
assumes Abrupt:
"\<And>t'.
\<lbrakk>\<Gamma>\<turnstile>\<langle>bdy,Normal (init s)\<rangle> =n\<Rightarrow> Abrupt t';
t = Abrupt (return s t')\<rbrakk>
\<Longrightarrow> P"
assumes Fault:
"\<And>f.
\<lbrakk>\<Gamma>\<turnstile>\<langle>bdy,Normal (init s)\<rangle> =n\<Rightarrow> Fault f;
t = Fault f\<rbrakk>
\<Longrightarrow> P"
assumes Stuck:
"\<lbrakk>\<Gamma>\<turnstile>\<langle>bdy,Normal (init s)\<rangle> =n\<Rightarrow> Stuck;
t = Stuck\<rbrakk>
\<Longrightarrow> P"
assumes Undef:
"\<lbrakk>\<Gamma> p = None; t = Stuck\<rbrakk> \<Longrightarrow> P"
shows "P"
using execn_block
apply (unfold block_def)
apply (elim execn_Normal_elim_cases)
apply simp_all
apply (case_tac s')
apply simp_all
apply (elim execn_Normal_elim_cases)
apply simp
apply (drule execn_Abrupt_end) apply simp
apply (erule execn_Normal_elim_cases)
apply simp
apply (rule Abrupt,assumption+)
apply (drule execn_Fault_end) apply simp
apply (erule execn_Normal_elim_cases)
apply simp
apply (drule execn_Stuck_end) apply simp
apply (erule execn_Normal_elim_cases)
apply simp
apply (case_tac s')
apply simp_all
apply (elim execn_Normal_elim_cases)
apply simp
apply (rule Normal,assumption+)
apply (drule execn_Fault_end) apply simp
apply (rule Fault,assumption+)
apply (drule execn_Stuck_end) apply simp
apply (rule Stuck,assumption+)
done
lemma execn_call_Normal_elim [consumes 1]:
assumes exec_call: "\<Gamma>\<turnstile>\<langle>call init p return c,Normal s\<rangle> =n\<Rightarrow> t"
assumes Normal:
"\<And>bdy i t'.
\<lbrakk>\<Gamma> p = Some bdy; \<Gamma>\<turnstile>\<langle>bdy,Normal (init s)\<rangle> =i\<Rightarrow> Normal t';
\<Gamma>\<turnstile>\<langle>c s t',Normal (return s t')\<rangle> =Suc i\<Rightarrow> t; n = Suc i\<rbrakk>
\<Longrightarrow> P"
assumes Abrupt:
"\<And>bdy i t'.
\<lbrakk>\<Gamma> p = Some bdy; \<Gamma>\<turnstile>\<langle>bdy,Normal (init s)\<rangle> =i\<Rightarrow> Abrupt t'; n = Suc i;
t = Abrupt (return s t')\<rbrakk>
\<Longrightarrow> P"
assumes Fault:
"\<And>bdy i f.
\<lbrakk>\<Gamma> p = Some bdy; \<Gamma>\<turnstile>\<langle>bdy,Normal (init s)\<rangle> =i\<Rightarrow> Fault f; n = Suc i;
t = Fault f\<rbrakk>
\<Longrightarrow> P"
assumes Stuck:
"\<And>bdy i.
\<lbrakk>\<Gamma> p = Some bdy; \<Gamma>\<turnstile>\<langle>bdy,Normal (init s)\<rangle> =i\<Rightarrow> Stuck; n = Suc i;
t = Stuck\<rbrakk>
\<Longrightarrow> P"
assumes Undef:
"\<And>i. \<lbrakk>\<Gamma> p = None; n = Suc i; t = Stuck\<rbrakk> \<Longrightarrow> P"
shows "P"
using exec_call
apply (unfold call_def)
apply (cases n)
apply (simp only: block_def)
apply (fastforce elim: execn_Normal_elim_cases)
apply (cases "\<Gamma> p")
apply (erule execn_block_Normal_elim)
apply (elim execn_Normal_elim_cases)
apply simp
apply simp
apply (elim execn_Normal_elim_cases)
apply simp
apply simp
apply (elim execn_Normal_elim_cases)
apply simp
apply simp
apply (elim execn_Normal_elim_cases)
apply simp
apply (rule Undef,assumption,assumption,assumption)
apply (rule Undef,assumption+)
apply (erule execn_block_Normal_elim)
apply (elim execn_Normal_elim_cases)
apply simp
apply (rule Normal,assumption+)
apply simp
apply (elim execn_Normal_elim_cases)
apply simp
apply (rule Abrupt,assumption+)
apply simp
apply (elim execn_Normal_elim_cases)
apply simp
apply (rule Fault,assumption+)
apply simp
apply (elim execn_Normal_elim_cases)
apply simp
apply (rule Stuck,assumption,assumption,assumption,assumption)
apply (rule Undef,assumption,assumption,assumption)
apply (rule Undef,assumption+)
done
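(* dynCall selects the procedure (p s) depending on the current state;
   its executions reduce to those of the corresponding static call. *)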
lemma execn_dynCall:
"\<lbrakk>\<Gamma>\<turnstile>\<langle>call init (p s) return c,Normal s\<rangle> =n\<Rightarrow> t\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>dynCall init p return c,Normal s\<rangle> =n\<Rightarrow> t"
apply (simp add: dynCall_def)
by (rule DynCom)
lemma execn_Seq':
"\<lbrakk>\<Gamma>\<turnstile>\<langle>c1,s\<rangle> =n\<Rightarrow> s'; \<Gamma>\<turnstile>\<langle>c2,s'\<rangle> =n\<Rightarrow> s''\<rbrakk>
\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>Seq c1 c2,s\<rangle> =n\<Rightarrow> s''"
apply (cases s)
apply (fastforce intro: execn.intros)
apply (fastforce dest: execn_Abrupt_end)
apply (fastforce dest: execn_Fault_end)
apply (fastforce dest: execn_Stuck_end)
done
lemma execn_mono:
assumes exec: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"
shows "\<And> m. n \<le> m \<Longrightarrow> \<Gamma>\<turnstile>\<langle>c,s\<rangle> =m\<Rightarrow> t"
using exec
by (induct) (auto intro: execn.intros dest: Suc_le_D)
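(* Monotonicity in the depth index: a run at depth n is also a run at
   any larger depth. This is what later allows two sub-derivations at
   different depths to be lifted to their maximum, cf. exec_to_execn
   below. A small corollary (execn_plus is an illustrative name,
   assuming only execn_mono and the library fact le_add1): *)
lemma execn_plus: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t \<Longrightarrow> \<Gamma>\<turnstile>\<langle>c,s\<rangle> =(n + k)\<Rightarrow> t"
  by (rule execn_mono [OF _ le_add1])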
lemma execn_Suc:
"\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t \<Longrightarrow> \<Gamma>\<turnstile>\<langle>c,s\<rangle> =Suc n\<Rightarrow> t"
by (rule execn_mono [OF _ le_refl [THEN le_SucI]])
lemma execn_assoc:
"\<Gamma>\<turnstile>\<langle>Seq c1 (Seq c2 c3),s\<rangle> =n\<Rightarrow> t = \<Gamma>\<turnstile>\<langle>Seq (Seq c1 c2) c3,s\<rangle> =n\<Rightarrow> t"
by (auto elim!: execn_elim_cases intro: execn_Seq')
lemma execn_to_exec:
assumes execn: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"
shows "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t"
using execn
by induct (auto intro: exec.intros)
lemma exec_to_execn:
assumes execn: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t"
shows "\<exists>n. \<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"
using execn
proof (induct)
case Skip thus ?case by (iprover intro: execn.intros)
next
case Guard thus ?case by (iprover intro: execn.intros)
next
case GuardFault thus ?case by (iprover intro: execn.intros)
next
case FaultProp thus ?case by (iprover intro: execn.intros)
next
case Basic thus ?case by (iprover intro: execn.intros)
next
case Spec thus ?case by (iprover intro: execn.intros)
next
case SpecStuck thus ?case by (iprover intro: execn.intros)
next
case (Seq c1 s s' c2 s'')
then obtain n m where
"\<Gamma>\<turnstile>\<langle>c1,Normal s\<rangle> =n\<Rightarrow> s'" "\<Gamma>\<turnstile>\<langle>c2,s'\<rangle> =m\<Rightarrow> s''"
by blast
then have
"\<Gamma>\<turnstile>\<langle>c1,Normal s\<rangle> =max n m\<Rightarrow> s'"
"\<Gamma>\<turnstile>\<langle>c2,s'\<rangle> =max n m\<Rightarrow> s''"
by (auto elim!: execn_mono intro: max.cobounded1 max.cobounded2)
thus ?case
by (iprover intro: execn.intros)
next
case CondTrue thus ?case by (iprover intro: execn.intros)
next
case CondFalse thus ?case by (iprover intro: execn.intros)
next
case (WhileTrue s b c s' s'')
then obtain n m where
"\<Gamma>\<turnstile>\<langle>c,Normal s\<rangle> =n\<Rightarrow> s'" "\<Gamma>\<turnstile>\<langle>While b c,s'\<rangle> =m\<Rightarrow> s''"
by blast
then have
"\<Gamma>\<turnstile>\<langle>c,Normal s\<rangle> =max n m\<Rightarrow> s'" "\<Gamma>\<turnstile>\<langle>While b c,s'\<rangle> =max n m\<Rightarrow> s''"
by (auto elim!: execn_mono intro: max.cobounded1 max.cobounded2)
with WhileTrue
show ?case
by (iprover intro: execn.intros)
next
case WhileFalse thus ?case by (iprover intro: execn.intros)
next
case Call thus ?case by (iprover intro: execn.intros)
next
case CallUndefined thus ?case by (iprover intro: execn.intros)
next
case StuckProp thus ?case by (iprover intro: execn.intros)
next
case DynCom thus ?case by (iprover intro: execn.intros)
next
case Throw thus ?case by (iprover intro: execn.intros)
next
case AbruptProp thus ?case by (iprover intro: execn.intros)
next
case (CatchMatch c1 s s' c2 s'')
then obtain n m where
"\<Gamma>\<turnstile>\<langle>c1,Normal s\<rangle> =n\<Rightarrow> Abrupt s'" "\<Gamma>\<turnstile>\<langle>c2,Normal s'\<rangle> =m\<Rightarrow> s''"
by blast
then have
"\<Gamma>\<turnstile>\<langle>c1,Normal s\<rangle> =max n m\<Rightarrow> Abrupt s'"
"\<Gamma>\<turnstile>\<langle>c2,Normal s'\<rangle> =max n m\<Rightarrow> s''"
by (auto elim!: execn_mono intro: max.cobounded1 max.cobounded2)
with CatchMatch.hyps show ?case
by (iprover intro: execn.intros)
next
case CatchMiss thus ?case by (iprover intro: execn.intros)
qed
theorem exec_iff_execn: "(\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t) = (\<exists>n. \<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t)"
by (iprover intro: exec_to_execn execn_to_exec)
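(* The depth-indexed and the plain big-step semantics thus coincide.
   The nontrivial direction is exec_to_execn: Seq, WhileTrue and
   CatchMatch each combine two sub-derivations whose depths may differ,
   so both are first lifted to their maximum via execn_mono. *)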
definition nfinal_notin:: "('s,'p,'f) body \<Rightarrow> ('s,'p,'f) com \<Rightarrow> ('s,'f) xstate \<Rightarrow> nat
\<Rightarrow> ('s,'f) xstate set \<Rightarrow> bool"
("_\<turnstile> \<langle>_,_\<rangle> =_\<Rightarrow>\<notin>_" [60,20,98,65,60] 89) where
"\<Gamma>\<turnstile> \<langle>c,s\<rangle> =n\<Rightarrow>\<notin>T = (\<forall>t. \<Gamma>\<turnstile> \<langle>c,s\<rangle> =n\<Rightarrow> t \<longrightarrow> t\<notin>T)"
definition final_notin:: "('s,'p,'f) body \<Rightarrow> ('s,'p,'f) com \<Rightarrow> ('s,'f) xstate
\<Rightarrow> ('s,'f) xstate set \<Rightarrow> bool"
("_\<turnstile> \<langle>_,_\<rangle> \<Rightarrow>\<notin>_" [60,20,98,60] 89) where
"\<Gamma>\<turnstile> \<langle>c,s\<rangle> \<Rightarrow>\<notin>T = (\<forall>t. \<Gamma>\<turnstile> \<langle>c,s\<rangle> \<Rightarrow>t \<longrightarrow> t\<notin>T)"
lemma final_notinI: "\<lbrakk>\<And>t. \<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t \<Longrightarrow> t \<notin> T\<rbrakk> \<Longrightarrow> \<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow>\<notin>T"
by (simp add: final_notin_def)
lemma noFaultStuck_Call_body': "p \<in> dom \<Gamma> \<Longrightarrow>
\<Gamma>\<turnstile>\<langle>Call p,Normal s\<rangle> \<Rightarrow>\<notin>({Stuck} \<union> Fault ` (-F)) =
\<Gamma>\<turnstile>\<langle>the (\<Gamma> p),Normal s\<rangle> \<Rightarrow>\<notin>({Stuck} \<union> Fault ` (-F))"
by (clarsimp simp add: final_notin_def exec_Call_body)
lemma noFault_startn:
assumes execn: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t" and t: "t\<noteq>Fault f"
shows "s\<noteq>Fault f"
using execn t by (induct) auto
lemma noFault_start:
assumes exec: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t" and t: "t\<noteq>Fault f"
shows "s\<noteq>Fault f"
using exec t by (induct) auto
lemma noStuck_startn:
assumes execn: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t" and t: "t\<noteq>Stuck"
shows "s\<noteq>Stuck"
using execn t by (induct) auto
lemma noStuck_start:
assumes exec: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t" and t: "t\<noteq>Stuck"
shows "s\<noteq>Stuck"
using exec t by (induct) auto
lemma noAbrupt_startn:
assumes execn: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t" and t: "\<forall>t'. t\<noteq>Abrupt t'"
shows "s\<noteq>Abrupt s'"
using execn t by (induct) auto
lemma noAbrupt_start:
assumes exec: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t" and t: "\<forall>t'. t\<noteq>Abrupt t'"
shows "s\<noteq>Abrupt s'"
using exec t by (induct) auto
lemma noFaultn_startD: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> Normal t \<Longrightarrow> s \<noteq> Fault f"
by (auto dest: noFault_startn)
lemma noFaultn_startD': "t\<noteq>Fault f \<Longrightarrow> \<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t \<Longrightarrow> s \<noteq> Fault f"
by (auto dest: noFault_startn)
lemma noFault_startD: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> Normal t \<Longrightarrow> s \<noteq> Fault f"
by (auto dest: noFault_start)
lemma noFault_startD': "t\<noteq>Fault f\<Longrightarrow> \<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t \<Longrightarrow> s \<noteq> Fault f"
by (auto dest: noFault_start)
lemma noStuckn_startD: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> Normal t \<Longrightarrow> s \<noteq> Stuck"
by (auto dest: noStuck_startn)
lemma noStuckn_startD': "t\<noteq>Stuck \<Longrightarrow> \<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t \<Longrightarrow> s \<noteq> Stuck"
by (auto dest: noStuck_startn)
lemma noStuck_startD: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> Normal t \<Longrightarrow> s \<noteq> Stuck"
by (auto dest: noStuck_start)
lemma noStuck_startD': "t\<noteq>Stuck \<Longrightarrow> \<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t \<Longrightarrow> s \<noteq> Stuck"
by (auto dest: noStuck_start)
lemma noAbruptn_startD: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> Normal t \<Longrightarrow> s \<noteq> Abrupt s'"
by (auto dest: noAbrupt_startn)
lemma noAbrupt_startD: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> Normal t \<Longrightarrow> s \<noteq> Abrupt s'"
by (auto dest: noAbrupt_start)
lemma noFaultnI: "\<lbrakk>\<And>t. \<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow>t \<Longrightarrow> t\<noteq>Fault f\<rbrakk> \<Longrightarrow> \<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow>\<notin>{Fault f}"
by (simp add: nfinal_notin_def)
lemma noFaultnI':
assumes contr: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> Fault f \<Longrightarrow> False"
shows "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow>\<notin>{Fault f}"
proof (rule noFaultnI)
fix t assume "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"
with contr show "t \<noteq> Fault f"
by (cases "t=Fault f") auto
qed
lemma noFaultn_def': "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow>\<notin>{Fault f} = (\<not>\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> Fault f)"
apply rule
apply (fastforce simp add: nfinal_notin_def)
apply (fastforce intro: noFaultnI')
done
lemma noStucknI: "\<lbrakk>\<And>t. \<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow>t \<Longrightarrow> t\<noteq>Stuck\<rbrakk> \<Longrightarrow> \<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow>\<notin>{Stuck}"
  by (simp add: nfinal_notin_def)
lemma noStucknI':
assumes contr: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> Stuck \<Longrightarrow> False"
shows "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow>\<notin>{Stuck}"
proof (rule noStucknI)
fix t assume "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"
with contr show "t \<noteq> Stuck"
by (cases t) auto
qed
lemma noStuckn_def': "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow>\<notin>{Stuck} = (\<not>\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> Stuck)"
apply rule
apply (fastforce simp add: nfinal_notin_def)
apply (fastforce intro: noStucknI')
done
lemma noFaultI: "\<lbrakk>\<And>t. \<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow>t \<Longrightarrow> t\<noteq>Fault f\<rbrakk> \<Longrightarrow> \<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow>\<notin>{Fault f}"
by (simp add: final_notin_def)
lemma noFaultI':
assumes contr: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> Fault f\<Longrightarrow> False"
shows "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow>\<notin>{Fault f}"
proof (rule noFaultI)
fix t assume "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t"
with contr show "t \<noteq> Fault f"
by (cases "t=Fault f") auto
qed
lemma noFault_def': "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow>\<notin>{Fault f} = (\<not>\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> Fault f)"
  apply rule
  apply (fastforce simp add: final_notin_def)
  apply (fastforce intro: noFaultI')
  done
lemma noFaultE:
"\<lbrakk>\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow>\<notin>{Fault f}; \<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> Fault f\<rbrakk> \<Longrightarrow> P"
by (auto simp add: final_notin_def)
lemma noStuckI: "\<lbrakk>\<And>t. \<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow>t \<Longrightarrow> t\<noteq>Stuck\<rbrakk> \<Longrightarrow> \<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow>\<notin>{Stuck}"
by (simp add: final_notin_def)
lemma noStuckI':
assumes contr: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> Stuck \<Longrightarrow> False"
shows "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow>\<notin>{Stuck}"
proof (rule noStuckI)
fix t assume "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t"
with contr show "t \<noteq> Stuck"
by (cases t) auto
qed
lemma noStuckE:
"\<lbrakk>\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow>\<notin>{Stuck}; \<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> Stuck\<rbrakk> \<Longrightarrow> P"
by (auto simp add: final_notin_def)
lemma noStuck_def': "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow>\<notin>{Stuck} = (\<not>\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> Stuck)"
apply rule
apply (fastforce simp add: final_notin_def)
apply (fastforce intro: noStuckI')
done
lemma noFaultn_execD: "\<lbrakk>\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow>\<notin>{Fault f}; \<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow>t\<rbrakk> \<Longrightarrow> t\<noteq>Fault f"
by (simp add: nfinal_notin_def)
lemma noFault_execD: "\<lbrakk>\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow>\<notin>{Fault f}; \<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow>t\<rbrakk> \<Longrightarrow> t\<noteq>Fault f"
by (simp add: final_notin_def)
lemma noFaultn_exec_startD: "\<lbrakk>\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow>\<notin>{Fault f}; \<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow>t\<rbrakk> \<Longrightarrow> s\<noteq>Fault f"
by (auto simp add: nfinal_notin_def dest: noFaultn_startD)
lemma noFault_exec_startD: "\<lbrakk>\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow>\<notin>{Fault f}; \<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow>t\<rbrakk> \<Longrightarrow> s\<noteq>Fault f"
by (auto simp add: final_notin_def dest: noFault_startD)
lemma noStuckn_execD: "\<lbrakk>\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow>\<notin>{Stuck}; \<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow>t\<rbrakk> \<Longrightarrow> t\<noteq>Stuck"
by (simp add: nfinal_notin_def)
lemma noStuck_execD: "\<lbrakk>\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow>\<notin>{Stuck}; \<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow>t\<rbrakk> \<Longrightarrow> t\<noteq>Stuck"
by (simp add: final_notin_def)
lemma noStuckn_exec_startD: "\<lbrakk>\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow>\<notin>{Stuck}; \<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow>t\<rbrakk> \<Longrightarrow> s\<noteq>Stuck"
by (auto simp add: nfinal_notin_def dest: noStuckn_startD)
lemma noStuck_exec_startD: "\<lbrakk>\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow>\<notin>{Stuck}; \<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow>t\<rbrakk> \<Longrightarrow> s\<noteq>Stuck"
by (auto simp add: final_notin_def dest: noStuck_startD)
lemma noFaultStuckn_execD:
"\<lbrakk>\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow>\<notin>{Fault True,Fault False,Stuck}; \<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow>t\<rbrakk> \<Longrightarrow>
t\<notin>{Fault True,Fault False,Stuck}"
by (simp add: nfinal_notin_def)
lemma noFaultStuck_execD: "\<lbrakk>\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow>\<notin>{Fault True,Fault False,Stuck}; \<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow>t\<rbrakk>
\<Longrightarrow> t\<notin>{Fault True,Fault False,Stuck}"
by (simp add: final_notin_def)
lemma noFaultStuckn_exec_startD:
"\<lbrakk>\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow>\<notin>{Fault True, Fault False,Stuck}; \<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow>t\<rbrakk>
\<Longrightarrow> s\<notin>{Fault True,Fault False,Stuck}"
by (auto simp add: nfinal_notin_def )
lemma noFaultStuck_exec_startD:
"\<lbrakk>\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow>\<notin>{Fault True, Fault False,Stuck}; \<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow>t\<rbrakk>
\<Longrightarrow> s\<notin>{Fault True,Fault False,Stuck}"
by (auto simp add: final_notin_def )
lemma noStuck_Call:
assumes noStuck: "\<Gamma>\<turnstile>\<langle>Call p,Normal s\<rangle> \<Rightarrow>\<notin>{Stuck}"
shows "p \<in> dom \<Gamma>"
proof (cases "p \<in> dom \<Gamma>")
case True thus ?thesis by simp
next
case False
hence "\<Gamma> p = None" by auto
hence "\<Gamma>\<turnstile>\<langle>Call p,Normal s\<rangle> \<Rightarrow>Stuck"
by (rule exec.CallUndefined)
with noStuck show ?thesis
by (auto simp add: final_notin_def)
qed
lemma Guard_noFaultStuckD:
assumes "\<Gamma>\<turnstile>\<langle>Guard f g c,Normal s\<rangle> \<Rightarrow>\<notin>({Stuck} \<union> Fault ` (-F))"
assumes "f \<notin> F"
shows "s \<in> g"
using assms
by (auto simp add: final_notin_def intro: exec.intros)
lemma final_notin_to_finaln:
assumes notin: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow>\<notin>T"
shows "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow>\<notin>T"
proof (clarsimp simp add: nfinal_notin_def)
fix t assume "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t" and "t\<in>T"
with notin show "False"
by (auto intro: execn_to_exec simp add: final_notin_def)
qed
lemma noFault_Call_body:
"\<Gamma> p=Some bdy\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>Call p ,Normal s\<rangle> \<Rightarrow>\<notin>{Fault f} =
\<Gamma>\<turnstile>\<langle>the (\<Gamma> p),Normal s\<rangle> \<Rightarrow>\<notin>{Fault f}"
by (simp add: noFault_def' exec_Call_body)
lemma noStuck_Call_body:
"\<Gamma> p=Some bdy\<Longrightarrow>
\<Gamma>\<turnstile>\<langle>Call p,Normal s\<rangle> \<Rightarrow>\<notin>{Stuck} =
\<Gamma>\<turnstile>\<langle>the (\<Gamma> p),Normal s\<rangle> \<Rightarrow>\<notin>{Stuck}"
by (simp add: noStuck_def' exec_Call_body)
lemma exec_final_notin_to_execn: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow>\<notin>T \<Longrightarrow> \<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow>\<notin>T"
by (auto simp add: final_notin_def nfinal_notin_def dest: execn_to_exec)
lemma execn_final_notin_to_exec: "\<forall>n. \<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow>\<notin>T \<Longrightarrow> \<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow>\<notin>T"
by (auto simp add: final_notin_def nfinal_notin_def dest: exec_to_execn)
lemma exec_final_notin_iff_execn: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow>\<notin>T = (\<forall>n. \<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow>\<notin>T)"
by (auto intro: exec_final_notin_to_execn execn_final_notin_to_exec)
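(* Hence a "notin" property transfers between the two semantics: it
   holds for the plain semantics iff it holds at every depth. *)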
lemma Seq_NoFaultStuckD2:
assumes noabort: "\<Gamma>\<turnstile>\<langle>Seq c1 c2,s\<rangle> \<Rightarrow>\<notin>({Stuck} \<union> Fault ` F)"
shows "\<forall>t. \<Gamma>\<turnstile>\<langle>c1,s\<rangle> \<Rightarrow> t \<longrightarrow> t\<notin> ({Stuck} \<union> Fault ` F) \<longrightarrow>
\<Gamma>\<turnstile>\<langle>c2,t\<rangle> \<Rightarrow>\<notin>({Stuck} \<union> Fault ` F)"
using noabort
by (auto simp add: final_notin_def intro: exec_Seq')
lemma Seq_NoFaultStuckD1:
assumes noabort: "\<Gamma>\<turnstile>\<langle>Seq c1 c2,s\<rangle> \<Rightarrow>\<notin>({Stuck} \<union> Fault ` F)"
shows "\<Gamma>\<turnstile>\<langle>c1,s\<rangle> \<Rightarrow>\<notin>({Stuck} \<union> Fault ` F)"
proof (rule final_notinI)
fix t
assume exec_c1: "\<Gamma>\<turnstile>\<langle>c1,s\<rangle> \<Rightarrow> t"
show "t \<notin> {Stuck} \<union> Fault ` F"
proof
assume "t \<in> {Stuck} \<union> Fault ` F"
moreover
{
assume "t = Stuck"
with exec_c1
have "\<Gamma>\<turnstile>\<langle>Seq c1 c2,s\<rangle> \<Rightarrow> Stuck"
by (auto intro: exec_Seq')
with noabort have False
by (auto simp add: final_notin_def)
}
moreover
{
assume "t \<in> Fault ` F"
then obtain f where
t: "t=Fault f" and f: "f \<in> F"
by auto
from t exec_c1
have "\<Gamma>\<turnstile>\<langle>Seq c1 c2,s\<rangle> \<Rightarrow> Fault f"
by (auto intro: exec_Seq')
with noabort f have False
by (auto simp add: final_notin_def)
}
ultimately show False by auto
qed
qed
lemma Seq_NoFaultStuckD2':
assumes noabort: "\<Gamma>\<turnstile>\<langle>Seq c1 c2,s\<rangle> \<Rightarrow>\<notin>({Stuck} \<union> Fault ` F)"
shows "\<forall>t. \<Gamma>\<turnstile>\<langle>c1,s\<rangle> \<Rightarrow> t \<longrightarrow> t\<notin> ({Stuck} \<union> Fault ` F) \<longrightarrow>
\<Gamma>\<turnstile>\<langle>c2,t\<rangle> \<Rightarrow>\<notin>({Stuck} \<union> Fault ` F)"
using noabort
by (auto simp add: final_notin_def intro: exec_Seq')
(* ************************************************************************* *)
subsection {* Lemmas about @{const "sequence"}, @{const "flatten"} and
@{const "normalize"} *}
(* ************************************************************************ *)
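(* Informally: flatten decomposes a command into the list of its
   sequential components, sequence Seq rebuilds a command from such a
   list, and normalize re-associates all Seq compositions accordingly.
   The lemmas below show that execution (at any depth) is invariant
   under these transformations. *)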
lemma execn_sequence_app: "\<And>s s' t.
\<lbrakk>\<Gamma>\<turnstile>\<langle>sequence Seq xs,Normal s\<rangle> =n\<Rightarrow> s'; \<Gamma>\<turnstile>\<langle>sequence Seq ys,s'\<rangle> =n\<Rightarrow> t\<rbrakk>
\<Longrightarrow> \<Gamma>\<turnstile>\<langle>sequence Seq (xs@ys),Normal s\<rangle> =n\<Rightarrow> t"
proof (induct xs)
case Nil
thus ?case by (auto elim: execn_Normal_elim_cases)
next
case (Cons x xs)
have exec_x_xs: "\<Gamma>\<turnstile>\<langle>sequence Seq (x # xs),Normal s\<rangle> =n\<Rightarrow> s'" by fact
have exec_ys: "\<Gamma>\<turnstile>\<langle>sequence Seq ys,s'\<rangle> =n\<Rightarrow> t" by fact
show ?case
proof (cases xs)
case Nil
with exec_x_xs have "\<Gamma>\<turnstile>\<langle>x,Normal s\<rangle> =n\<Rightarrow> s'"
by (auto elim: execn_Normal_elim_cases )
with Nil exec_ys show ?thesis
by (cases ys) (auto intro: execn.intros elim: execn_elim_cases)
next
case Cons
with exec_x_xs
obtain s'' where
exec_x: "\<Gamma>\<turnstile>\<langle>x,Normal s\<rangle> =n\<Rightarrow> s''" and
exec_xs: "\<Gamma>\<turnstile>\<langle>sequence Seq xs,s''\<rangle> =n\<Rightarrow> s'"
by (auto elim: execn_Normal_elim_cases )
show ?thesis
proof (cases s'')
case (Normal s''')
from Cons.hyps [OF exec_xs [simplified Normal] exec_ys]
have "\<Gamma>\<turnstile>\<langle>sequence Seq (xs @ ys),Normal s'''\<rangle> =n\<Rightarrow> t" .
with Cons exec_x Normal
show ?thesis
by (auto intro: execn.intros)
next
case (Abrupt s''')
with exec_xs have "s'=Abrupt s'''"
by (auto dest: execn_Abrupt_end)
with exec_ys have "t=Abrupt s'''"
by (auto dest: execn_Abrupt_end)
with exec_x Abrupt Cons show ?thesis
by (auto intro: execn.intros)
next
case (Fault f)
with exec_xs have "s'=Fault f"
by (auto dest: execn_Fault_end)
with exec_ys have "t=Fault f"
by (auto dest: execn_Fault_end)
with exec_x Fault Cons show ?thesis
by (auto intro: execn.intros)
next
case Stuck
with exec_xs have "s'=Stuck"
by (auto dest: execn_Stuck_end)
with exec_ys have "t=Stuck"
by (auto dest: execn_Stuck_end)
with exec_x Stuck Cons show ?thesis
by (auto intro: execn.intros)
qed
qed
qed
lemma execn_sequence_appD: "\<And>s t. \<Gamma>\<turnstile>\<langle>sequence Seq (xs @ ys),Normal s\<rangle> =n\<Rightarrow> t \<Longrightarrow>
\<exists>s'. \<Gamma>\<turnstile>\<langle>sequence Seq xs,Normal s\<rangle> =n\<Rightarrow> s' \<and> \<Gamma>\<turnstile>\<langle>sequence Seq ys,s'\<rangle> =n\<Rightarrow> t"
proof (induct xs)
case Nil
thus ?case
by (auto intro: execn.intros)
next
case (Cons x xs)
have exec_app: "\<Gamma>\<turnstile>\<langle>sequence Seq ((x # xs) @ ys),Normal s\<rangle> =n\<Rightarrow> t" by fact
show ?case
proof (cases xs)
case Nil
with exec_app show ?thesis
by (cases ys) (auto elim: execn_Normal_elim_cases intro: execn_Skip')
next
case Cons
with exec_app obtain s' where
exec_x: "\<Gamma>\<turnstile>\<langle>x,Normal s\<rangle> =n\<Rightarrow> s'" and
exec_xs_ys: "\<Gamma>\<turnstile>\<langle>sequence Seq (xs @ ys),s'\<rangle> =n\<Rightarrow> t"
by (auto elim: execn_Normal_elim_cases)
show ?thesis
proof (cases s')
case (Normal s'')
from Cons.hyps [OF exec_xs_ys [simplified Normal]] Normal exec_x Cons
show ?thesis
by (auto intro: execn.intros)
next
case (Abrupt s'')
with exec_xs_ys have "t=Abrupt s''"
by (auto dest: execn_Abrupt_end)
with Abrupt exec_x Cons
show ?thesis
by (auto intro: execn.intros)
next
case (Fault f)
with exec_xs_ys have "t=Fault f"
by (auto dest: execn_Fault_end)
with Fault exec_x Cons
show ?thesis
by (auto intro: execn.intros)
next
case Stuck
with exec_xs_ys have "t=Stuck"
by (auto dest: execn_Stuck_end)
with Stuck exec_x Cons
show ?thesis
by (auto intro: execn.intros)
qed
qed
qed
lemma execn_sequence_appE [consumes 1]:
"\<lbrakk>\<Gamma>\<turnstile>\<langle>sequence Seq (xs @ ys),Normal s\<rangle> =n\<Rightarrow> t;
\<And>s'. \<lbrakk>\<Gamma>\<turnstile>\<langle>sequence Seq xs,Normal s\<rangle> =n\<Rightarrow> s';\<Gamma>\<turnstile>\<langle>sequence Seq ys,s'\<rangle> =n\<Rightarrow> t\<rbrakk> \<Longrightarrow> P
\<rbrakk> \<Longrightarrow> P"
by (auto dest: execn_sequence_appD)
lemma execn_to_execn_sequence_flatten:
assumes exec: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"
shows "\<Gamma>\<turnstile>\<langle>sequence Seq (flatten c),s\<rangle> =n\<Rightarrow> t"
using exec
proof induct
case (Seq c1 c2 n s s' s'') thus ?case
by (auto intro: execn.intros execn_sequence_app)
qed (auto intro: execn.intros)
lemma execn_to_execn_normalize:
assumes exec: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"
shows "\<Gamma>\<turnstile>\<langle>normalize c,s\<rangle> =n\<Rightarrow> t"
using exec
proof induct
case (Seq c1 c2 n s s' s'') thus ?case
by (auto intro: execn_to_execn_sequence_flatten execn_sequence_app )
qed (auto intro: execn.intros)
lemma execn_sequence_flatten_to_execn:
shows "\<And>s t. \<Gamma>\<turnstile>\<langle>sequence Seq (flatten c),s\<rangle> =n\<Rightarrow> t \<Longrightarrow> \<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"
proof (induct c)
case (Seq c1 c2)
have exec_seq: "\<Gamma>\<turnstile>\<langle>sequence Seq (flatten (Seq c1 c2)),s\<rangle> =n\<Rightarrow> t" by fact
show ?case
proof (cases s)
case (Normal s')
with exec_seq obtain s'' where
"\<Gamma>\<turnstile>\<langle>sequence Seq (flatten c1),Normal s'\<rangle> =n\<Rightarrow> s''" and
"\<Gamma>\<turnstile>\<langle>sequence Seq (flatten c2),s''\<rangle> =n\<Rightarrow> t"
by (auto elim: execn_sequence_appE)
with Seq.hyps Normal
show ?thesis
by (fastforce intro: execn.intros)
next
case Abrupt
with exec_seq
show ?thesis by (auto intro: execn.intros dest: execn_Abrupt_end)
next
case Fault
with exec_seq
show ?thesis by (auto intro: execn.intros dest: execn_Fault_end)
next
case Stuck
with exec_seq
show ?thesis by (auto intro: execn.intros dest: execn_Stuck_end)
qed
qed auto
lemma execn_normalize_to_execn:
shows "\<And>s t n. \<Gamma>\<turnstile>\<langle>normalize c,s\<rangle> =n\<Rightarrow> t \<Longrightarrow> \<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"
proof (induct c)
case Skip thus ?case by simp
next
case Basic thus ?case by simp
next
case Spec thus ?case by simp
next
case (Seq c1 c2)
have "\<Gamma>\<turnstile>\<langle>normalize (Seq c1 c2),s\<rangle> =n\<Rightarrow> t" by fact
hence exec_norm_seq:
"\<Gamma>\<turnstile>\<langle>sequence Seq (flatten (normalize c1) @ flatten (normalize c2)),s\<rangle> =n\<Rightarrow> t"
by simp
show ?case
proof (cases s)
case (Normal s')
with exec_norm_seq obtain s'' where
exec_norm_c1: "\<Gamma>\<turnstile>\<langle>sequence Seq (flatten (normalize c1)),Normal s'\<rangle> =n\<Rightarrow> s''" and
exec_norm_c2: "\<Gamma>\<turnstile>\<langle>sequence Seq (flatten (normalize c2)),s''\<rangle> =n\<Rightarrow> t"
by (auto elim: execn_sequence_appE)
from execn_sequence_flatten_to_execn [OF exec_norm_c1]
execn_sequence_flatten_to_execn [OF exec_norm_c2] Seq.hyps Normal
show ?thesis
by (fastforce intro: execn.intros)
next
case (Abrupt s')
with exec_norm_seq have "t=Abrupt s'"
by (auto dest: execn_Abrupt_end)
with Abrupt show ?thesis
by (auto intro: execn.intros)
next
case (Fault f)
with exec_norm_seq have "t=Fault f"
by (auto dest: execn_Fault_end)
with Fault show ?thesis
by (auto intro: execn.intros)
next
case Stuck
with exec_norm_seq have "t=Stuck"
by (auto dest: execn_Stuck_end)
with Stuck show ?thesis
by (auto intro: execn.intros)
qed
next
case Cond thus ?case
by (auto intro: execn.intros elim!: execn_elim_cases)
next
case (While b c)
have "\<Gamma>\<turnstile>\<langle>normalize (While b c),s\<rangle> =n\<Rightarrow> t" by fact
hence exec_norm_w: "\<Gamma>\<turnstile>\<langle>While b (normalize c),s\<rangle> =n\<Rightarrow> t"
by simp
{
fix s t w
assume exec_w: "\<Gamma>\<turnstile>\<langle>w,s\<rangle> =n\<Rightarrow> t"
have "w=While b (normalize c) \<Longrightarrow> \<Gamma>\<turnstile>\<langle>While b c,s\<rangle> =n\<Rightarrow> t"
using exec_w
proof (induct)
case (WhileTrue s b' c' n w t)
from WhileTrue obtain
s_in_b: "s \<in> b" and
exec_c: "\<Gamma>\<turnstile>\<langle>normalize c,Normal s\<rangle> =n\<Rightarrow> w" and
hyp_w: "\<Gamma>\<turnstile>\<langle>While b c,w\<rangle> =n\<Rightarrow> t"
by simp
from While.hyps [OF exec_c]
have "\<Gamma>\<turnstile>\<langle>c,Normal s\<rangle> =n\<Rightarrow> w"
by simp
with hyp_w s_in_b
have "\<Gamma>\<turnstile>\<langle>While b c,Normal s\<rangle> =n\<Rightarrow> t"
by (auto intro: execn.intros)
with WhileTrue show ?case by simp
qed (auto intro: execn.intros)
}
from this [OF exec_norm_w]
show ?case
by simp
next
case Call thus ?case by simp
next
case DynCom thus ?case by (auto intro: execn.intros elim!: execn_elim_cases)
next
case Guard thus ?case by (auto intro: execn.intros elim!: execn_elim_cases)
next
case Throw thus ?case by simp
next
case Catch thus ?case by (fastforce intro: execn.intros elim!: execn_elim_cases)
qed
lemma execn_normalize_iff_execn:
"\<Gamma>\<turnstile>\<langle>normalize c,s\<rangle> =n\<Rightarrow> t = \<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"
by (auto intro: execn_to_execn_normalize execn_normalize_to_execn)
lemma exec_sequence_app:
assumes exec_xs: "\<Gamma>\<turnstile>\<langle>sequence Seq xs,Normal s\<rangle> \<Rightarrow> s'"
assumes exec_ys: "\<Gamma>\<turnstile>\<langle>sequence Seq ys,s'\<rangle> \<Rightarrow> t"
shows "\<Gamma>\<turnstile>\<langle>sequence Seq (xs@ys),Normal s\<rangle> \<Rightarrow> t"
proof -
from exec_to_execn [OF exec_xs]
obtain n where
execn_xs: "\<Gamma>\<turnstile>\<langle>sequence Seq xs,Normal s\<rangle> =n\<Rightarrow> s'"..
from exec_to_execn [OF exec_ys]
obtain m where
execn_ys: "\<Gamma>\<turnstile>\<langle>sequence Seq ys,s'\<rangle> =m\<Rightarrow> t"..
with execn_xs obtain
"\<Gamma>\<turnstile>\<langle>sequence Seq xs,Normal s\<rangle> =max n m\<Rightarrow> s'"
"\<Gamma>\<turnstile>\<langle>sequence Seq ys,s'\<rangle> =max n m\<Rightarrow> t"
by (auto intro: execn_mono max.cobounded1 max.cobounded2)
from execn_sequence_app [OF this]
have "\<Gamma>\<turnstile>\<langle>sequence Seq (xs @ ys),Normal s\<rangle> =max n m\<Rightarrow> t" .
thus ?thesis
by (rule execn_to_exec)
qed
lemma exec_sequence_appD:
assumes exec_xs_ys: "\<Gamma>\<turnstile>\<langle>sequence Seq (xs @ ys),Normal s\<rangle> \<Rightarrow> t"
shows "\<exists>s'. \<Gamma>\<turnstile>\<langle>sequence Seq xs,Normal s\<rangle> \<Rightarrow> s' \<and> \<Gamma>\<turnstile>\<langle>sequence Seq ys,s'\<rangle> \<Rightarrow> t"
proof -
from exec_to_execn [OF exec_xs_ys]
obtain n where "\<Gamma>\<turnstile>\<langle>sequence Seq (xs @ ys),Normal s\<rangle> =n\<Rightarrow> t"..
thus ?thesis
by (cases rule: execn_sequence_appE) (auto intro: execn_to_exec)
qed
lemma exec_sequence_appE [consumes 1]:
"\<lbrakk>\<Gamma>\<turnstile>\<langle>sequence Seq (xs @ ys),Normal s\<rangle> \<Rightarrow> t;
\<And>s'. \<lbrakk>\<Gamma>\<turnstile>\<langle>sequence Seq xs,Normal s\<rangle> \<Rightarrow> s';\<Gamma>\<turnstile>\<langle>sequence Seq ys,s'\<rangle> \<Rightarrow> t\<rbrakk> \<Longrightarrow> P
\<rbrakk> \<Longrightarrow> P"
by (auto dest: exec_sequence_appD)
lemma exec_to_exec_sequence_flatten:
assumes exec: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t"
shows "\<Gamma>\<turnstile>\<langle>sequence Seq (flatten c),s\<rangle> \<Rightarrow> t"
proof -
from exec_to_execn [OF exec]
obtain n where "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"..
from execn_to_execn_sequence_flatten [OF this]
show ?thesis
by (rule execn_to_exec)
qed
lemma exec_sequence_flatten_to_exec:
assumes exec_seq: "\<Gamma>\<turnstile>\<langle>sequence Seq (flatten c),s\<rangle> \<Rightarrow> t"
shows "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t"
proof -
from exec_to_execn [OF exec_seq]
obtain n where "\<Gamma>\<turnstile>\<langle>sequence Seq (flatten c),s\<rangle> =n\<Rightarrow> t"..
from execn_sequence_flatten_to_execn [OF this]
show ?thesis
by (rule execn_to_exec)
qed
lemma exec_to_exec_normalize:
assumes exec: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t"
shows "\<Gamma>\<turnstile>\<langle>normalize c,s\<rangle> \<Rightarrow> t"
proof -
from exec_to_execn [OF exec] obtain n where "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"..
hence "\<Gamma>\<turnstile>\<langle>normalize c,s\<rangle> =n\<Rightarrow> t"
by (rule execn_to_execn_normalize)
thus ?thesis
by (rule execn_to_exec)
qed
lemma exec_normalize_to_exec:
assumes exec: "\<Gamma>\<turnstile>\<langle>normalize c,s\<rangle> \<Rightarrow> t"
shows "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t"
proof -
from exec_to_execn [OF exec] obtain n where "\<Gamma>\<turnstile>\<langle>normalize c,s\<rangle> =n\<Rightarrow> t"..
hence "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"
by (rule execn_normalize_to_execn)
thus ?thesis
by (rule execn_to_exec)
qed
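(* c \<subseteq>\<^sub>g c' means that c arises from c' by dropping some guards, i.e.
   c' is the more defensive program. Executing c' from the same state
   either raises a fault that c omitted to check, or agrees with the
   result of c. *)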
lemma execn_to_execn_subseteq_guards: "\<And>c s t n. \<lbrakk>c \<subseteq>\<^sub>g c'; \<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t\<rbrakk>
\<Longrightarrow> \<exists>t'. \<Gamma>\<turnstile>\<langle>c',s\<rangle> =n\<Rightarrow> t' \<and>
(isFault t \<longrightarrow> isFault t') \<and> (\<not> isFault t' \<longrightarrow> t'=t)"
proof (induct c')
case Skip thus ?case
by (fastforce dest: subseteq_guardsD elim: execn_elim_cases)
next
case Basic thus ?case
by (fastforce dest: subseteq_guardsD elim: execn_elim_cases)
next
case Spec thus ?case
by (fastforce dest: subseteq_guardsD elim: execn_elim_cases)
next
case (Seq c1' c2')
have "c \<subseteq>\<^sub>g Seq c1' c2'" by fact
from subseteq_guards_Seq [OF this]
obtain c1 c2 where
c: "c = Seq c1 c2" and
c1_c1': "c1 \<subseteq>\<^sub>g c1'" and
c2_c2': "c2 \<subseteq>\<^sub>g c2'"
by blast
have exec: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t" by fact
with c obtain w where
exec_c1: "\<Gamma>\<turnstile>\<langle>c1,s\<rangle> =n\<Rightarrow> w" and
exec_c2: "\<Gamma>\<turnstile>\<langle>c2,w\<rangle> =n\<Rightarrow> t"
by (auto elim: execn_elim_cases)
from exec_c1 Seq.hyps c1_c1'
obtain w' where
exec_c1': "\<Gamma>\<turnstile>\<langle>c1',s\<rangle> =n\<Rightarrow> w'" and
w_Fault: "isFault w \<longrightarrow> isFault w'" and
w'_noFault: "\<not> isFault w' \<longrightarrow> w'=w"
by blast
show ?case
proof (cases "s")
case (Fault f)
with exec have "t=Fault f"
by (auto dest: execn_Fault_end)
with Fault show ?thesis
by auto
next
case Stuck
with exec have "t=Stuck"
by (auto dest: execn_Stuck_end)
with Stuck show ?thesis
by auto
next
case (Abrupt s')
with exec have "t=Abrupt s'"
by (auto dest: execn_Abrupt_end)
with Abrupt show ?thesis
by auto
next
case (Normal s')
show ?thesis
proof (cases "isFault w")
case True
then obtain f where w': "w=Fault f"..
moreover with exec_c2
have t: "t=Fault f"
by (auto dest: execn_Fault_end)
ultimately show ?thesis
using Normal w_Fault exec_c1'
by (fastforce intro: execn.intros elim: isFaultE)
next
case False
note noFault_w = this
show ?thesis
proof (cases "isFault w'")
case True
then obtain f' where w': "w'=Fault f'"..
with Normal exec_c1'
have exec: "\<Gamma>\<turnstile>\<langle>Seq c1' c2',s\<rangle> =n\<Rightarrow> Fault f'"
by (auto intro: execn.intros)
then show ?thesis
by auto
next
case False
with w'_noFault have w': "w'=w" by simp
from Seq.hyps exec_c2 c2_c2'
obtain t' where
"\<Gamma>\<turnstile>\<langle>c2',w\<rangle> =n\<Rightarrow> t'" and
"isFault t \<longrightarrow> isFault t'" and
"\<not> isFault t' \<longrightarrow> t'=t"
by blast
with Normal exec_c1' w'
show ?thesis
by (fastforce intro: execn.intros)
qed
qed
qed
next
case (Cond b c1' c2')
have exec: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t" by fact
have "c \<subseteq>\<^sub>g Cond b c1' c2'" by fact
from subseteq_guards_Cond [OF this]
obtain c1 c2 where
c: "c = Cond b c1 c2" and
c1_c1': "c1 \<subseteq>\<^sub>g c1'" and
c2_c2': "c2 \<subseteq>\<^sub>g c2'"
by blast
show ?case
proof (cases "s")
case (Fault f)
with exec have "t=Fault f"
by (auto dest: execn_Fault_end)
with Fault show ?thesis
by auto
next
case Stuck
with exec have "t=Stuck"
by (auto dest: execn_Stuck_end)
with Stuck show ?thesis
by auto
next
case (Abrupt s')
with exec have "t=Abrupt s'"
by (auto dest: execn_Abrupt_end)
with Abrupt show ?thesis
by auto
next
case (Normal s')
from exec [simplified c Normal]
show ?thesis
proof (cases)
assume s'_in_b: "s' \<in> b"
assume "\<Gamma>\<turnstile>\<langle>c1,Normal s'\<rangle> =n\<Rightarrow> t"
with c1_c1' Normal Cond.hyps obtain t' where
"\<Gamma>\<turnstile>\<langle>c1',Normal s'\<rangle> =n\<Rightarrow> t'"
"isFault t \<longrightarrow> isFault t'"
"\<not> isFault t' \<longrightarrow> t' = t"
by blast
with s'_in_b Normal show ?thesis
by (fastforce intro: execn.intros)
next
assume s'_notin_b: "s' \<notin> b"
assume "\<Gamma>\<turnstile>\<langle>c2,Normal s'\<rangle> =n\<Rightarrow> t"
with c2_c2' Normal Cond.hyps obtain t' where
"\<Gamma>\<turnstile>\<langle>c2',Normal s'\<rangle> =n\<Rightarrow> t'"
"isFault t \<longrightarrow> isFault t'"
"\<not> isFault t' \<longrightarrow> t' = t"
by blast
with s'_notin_b Normal show ?thesis
by (fastforce intro: execn.intros)
qed
qed
next
case (While b c')
have exec: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t" by fact
have "c \<subseteq>\<^sub>g While b c'" by fact
from subseteq_guards_While [OF this]
obtain c'' where
c: "c = While b c''" and
c''_c': "c'' \<subseteq>\<^sub>g c'"
by blast
{
fix c r w
assume exec: "\<Gamma>\<turnstile>\<langle>c,r\<rangle> =n\<Rightarrow> w"
assume c: "c=While b c''"
have "\<exists>w'. \<Gamma>\<turnstile>\<langle>While b c',r\<rangle> =n\<Rightarrow> w' \<and>
(isFault w \<longrightarrow> isFault w') \<and> (\<not> isFault w' \<longrightarrow> w'=w)"
using exec c
proof (induct)
case (WhileTrue r b' ca n u w)
have eqs: "While b' ca = While b c''" by fact
from WhileTrue have r_in_b: "r \<in> b" by simp
from WhileTrue have exec_c'': "\<Gamma>\<turnstile>\<langle>c'',Normal r\<rangle> =n\<Rightarrow> u" by simp
from While.hyps [OF c''_c' exec_c''] obtain u' where
exec_c': "\<Gamma>\<turnstile>\<langle>c',Normal r\<rangle> =n\<Rightarrow> u'" and
u_Fault: "isFault u \<longrightarrow> isFault u' "and
u'_noFault: "\<not> isFault u' \<longrightarrow> u' = u"
by blast
from WhileTrue obtain w' where
exec_w: "\<Gamma>\<turnstile>\<langle>While b c',u\<rangle> =n\<Rightarrow> w'" and
w_Fault: "isFault w \<longrightarrow> isFault w'" and
w'_noFault: "\<not> isFault w' \<longrightarrow> w' = w"
by blast
show ?case
proof (cases "isFault u'")
case True
with exec_c' r_in_b
show ?thesis
by (fastforce intro: execn.intros elim: isFaultE)
next
case False
with exec_c' r_in_b u'_noFault exec_w w_Fault w'_noFault
show ?thesis
by (fastforce intro: execn.intros)
qed
next
case WhileFalse thus ?case by (fastforce intro: execn.intros)
qed auto
}
from this [OF exec c]
show ?case .
next
case Call thus ?case
by (fastforce dest: subseteq_guardsD elim: execn_elim_cases)
next
case (DynCom C')
have exec: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t" by fact
have "c \<subseteq>\<^sub>g DynCom C'" by fact
from subseteq_guards_DynCom [OF this] obtain C where
c: "c = DynCom C" and
C_C': "\<forall>s. C s \<subseteq>\<^sub>g C' s"
by blast
show ?case
proof (cases "s")
case (Fault f)
with exec have "t=Fault f"
by (auto dest: execn_Fault_end)
with Fault show ?thesis
by auto
next
case Stuck
with exec have "t=Stuck"
by (auto dest: execn_Stuck_end)
with Stuck show ?thesis
by auto
next
case (Abrupt s')
with exec have "t=Abrupt s'"
by (auto dest: execn_Abrupt_end)
with Abrupt show ?thesis
by auto
next
case (Normal s')
from exec [simplified c Normal]
have "\<Gamma>\<turnstile>\<langle>C s',Normal s'\<rangle> =n\<Rightarrow> t"
by cases
from DynCom.hyps C_C' [rule_format] this obtain t' where
"\<Gamma>\<turnstile>\<langle>C' s',Normal s'\<rangle> =n\<Rightarrow> t'"
"isFault t \<longrightarrow> isFault t'"
"\<not> isFault t' \<longrightarrow> t' = t"
by blast
with Normal show ?thesis
by (fastforce intro: execn.intros)
qed
next
case (Guard f' g' c')
have exec: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t" by fact
have "c \<subseteq>\<^sub>g Guard f' g' c'" by fact
hence subset_cases: "(c \<subseteq>\<^sub>g c') \<or> (\<exists>c''. c = Guard f' g' c'' \<and> (c'' \<subseteq>\<^sub>g c'))"
by (rule subseteq_guards_Guard)
show ?case
proof (cases "s")
case (Fault f)
with exec have "t=Fault f"
by (auto dest: execn_Fault_end)
with Fault show ?thesis
by auto
next
case Stuck
with exec have "t=Stuck"
by (auto dest: execn_Stuck_end)
with Stuck show ?thesis
by auto
next
case (Abrupt s')
with exec have "t=Abrupt s'"
by (auto dest: execn_Abrupt_end)
with Abrupt show ?thesis
by auto
next
case (Normal s')
from subset_cases show ?thesis
proof
assume c_c': "c \<subseteq>\<^sub>g c'"
from Guard.hyps [OF this exec] Normal obtain t' where
exec_c': "\<Gamma>\<turnstile>\<langle>c',Normal s'\<rangle> =n\<Rightarrow> t'" and
t_Fault: "isFault t \<longrightarrow> isFault t'" and
t_noFault: "\<not> isFault t' \<longrightarrow> t' = t"
by blast
with Normal
show ?thesis
by (cases "s' \<in> g'") (fastforce intro: execn.intros)+
next
assume "\<exists>c''. c = Guard f' g' c'' \<and> (c'' \<subseteq>\<^sub>g c')"
then obtain c'' where
c: "c = Guard f' g' c''" and
c''_c': "c'' \<subseteq>\<^sub>g c'"
by blast
from c exec Normal
have exec_Guard': "\<Gamma>\<turnstile>\<langle>Guard f' g' c'',Normal s'\<rangle> =n\<Rightarrow> t"
by simp
thus ?thesis
proof (cases)
assume s'_in_g': "s' \<in> g'"
assume exec_c'': "\<Gamma>\<turnstile>\<langle>c'',Normal s'\<rangle> =n\<Rightarrow> t"
from Guard.hyps [OF c''_c' exec_c''] obtain t' where
exec_c': "\<Gamma>\<turnstile>\<langle>c',Normal s'\<rangle> =n\<Rightarrow> t'" and
t_Fault: "isFault t \<longrightarrow> isFault t'" and
t_noFault: "\<not> isFault t' \<longrightarrow> t' = t"
by blast
with Normal s'_in_g'
show ?thesis
by (fastforce intro: execn.intros)
next
assume "s' \<notin> g'" "t=Fault f'"
with Normal show ?thesis
by (fastforce intro: execn.intros)
qed
qed
qed
next
case Throw thus ?case
by (fastforce dest: subseteq_guardsD intro: execn.intros
elim: execn_elim_cases)
next
case (Catch c1' c2')
have "c \<subseteq>\<^sub>g Catch c1' c2'" by fact
from subseteq_guards_Catch [OF this]
obtain c1 c2 where
c: "c = Catch c1 c2" and
c1_c1': "c1 \<subseteq>\<^sub>g c1'" and
c2_c2': "c2 \<subseteq>\<^sub>g c2'"
by blast
have exec: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t" by fact
show ?case
proof (cases "s")
case (Fault f)
with exec have "t=Fault f"
by (auto dest: execn_Fault_end)
with Fault show ?thesis
by auto
next
case Stuck
with exec have "t=Stuck"
by (auto dest: execn_Stuck_end)
with Stuck show ?thesis
by auto
next
case (Abrupt s')
with exec have "t=Abrupt s'"
by (auto dest: execn_Abrupt_end)
with Abrupt show ?thesis
by auto
next
case (Normal s')
from exec [simplified c Normal]
show ?thesis
proof (cases)
fix w
assume exec_c1: "\<Gamma>\<turnstile>\<langle>c1,Normal s'\<rangle> =n\<Rightarrow> Abrupt w"
assume exec_c2: "\<Gamma>\<turnstile>\<langle>c2,Normal w\<rangle> =n\<Rightarrow> t"
from Normal exec_c1 c1_c1' Catch.hyps obtain w' where
exec_c1': "\<Gamma>\<turnstile>\<langle>c1',Normal s'\<rangle> =n\<Rightarrow> w'" and
w'_noFault: "\<not> isFault w' \<longrightarrow> w' = Abrupt w"
by blast
show ?thesis
proof (cases "isFault w'")
case True
with exec_c1' Normal show ?thesis
by (fastforce intro: execn.intros elim: isFaultE)
next
case False
with w'_noFault have w': "w'=Abrupt w" by simp
from Normal exec_c2 c2_c2' Catch.hyps obtain t' where
"\<Gamma>\<turnstile>\<langle>c2',Normal w\<rangle> =n\<Rightarrow> t'"
"isFault t \<longrightarrow> isFault t'"
"\<not> isFault t' \<longrightarrow> t' = t"
by blast
with exec_c1' w' Normal
show ?thesis
by (fastforce intro: execn.intros )
qed
next
assume exec_c1: "\<Gamma>\<turnstile>\<langle>c1,Normal s'\<rangle> =n\<Rightarrow> t"
assume t: "\<not> isAbr t"
from Normal exec_c1 c1_c1' Catch.hyps obtain t' where
exec_c1': "\<Gamma>\<turnstile>\<langle>c1',Normal s'\<rangle> =n\<Rightarrow> t'" and
t_Fault: "isFault t \<longrightarrow> isFault t'" and
t'_noFault: "\<not> isFault t' \<longrightarrow> t' = t"
by blast
show ?thesis
proof (cases "isFault t'")
case True
with exec_c1' Normal show ?thesis
by (fastforce intro: execn.intros elim: isFaultE)
next
case False
with exec_c1' Normal t_Fault t'_noFault t
show ?thesis
by (fastforce intro: execn.intros)
qed
qed
qed
qed
lemma exec_to_exec_subseteq_guards:
assumes c_c': "c \<subseteq>\<^sub>g c'"
assumes exec: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t"
shows "\<exists>t'. \<Gamma>\<turnstile>\<langle>c',s\<rangle> \<Rightarrow> t' \<and>
(isFault t \<longrightarrow> isFault t') \<and> (\<not> isFault t' \<longrightarrow> t'=t)"
proof -
from exec_to_execn [OF exec] obtain n where
"\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t" ..
from execn_to_execn_subseteq_guards [OF c_c' this]
show ?thesis
by (blast intro: execn_to_exec)
qed
(* ************************************************************************* *)
subsection {* Lemmas about @{const "merge_guards"} *}
(* ************************************************************************ *)
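(* merge_guards fuses directly nested guards that carry the same fault
   value, intersecting their guard sets: essentially, Guard f g\<^sub>1
   (Guard f g\<^sub>2 c) becomes Guard f (g\<^sub>1 \<inter> g\<^sub>2) c. The following results
   show that this transformation neither adds nor removes executions. *)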
theorem execn_to_execn_merge_guards:
assumes exec_c: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"
shows "\<Gamma>\<turnstile>\<langle>merge_guards c,s\<rangle> =n\<Rightarrow> t "
using exec_c
proof (induct)
case (Guard s g c n t f)
have s_in_g: "s \<in> g" by fact
have exec_merge_c: "\<Gamma>\<turnstile>\<langle>merge_guards c,Normal s\<rangle> =n\<Rightarrow> t" by fact
show ?case
proof (cases "\<exists>f' g' c'. merge_guards c = Guard f' g' c'")
case False
with exec_merge_c s_in_g
show ?thesis
by (cases "merge_guards c") (auto intro: execn.intros simp add: Let_def)
next
case True
then obtain f' g' c' where
merge_guards_c: "merge_guards c = Guard f' g' c'"
by iprover
show ?thesis
proof (cases "f=f'")
case False
from exec_merge_c s_in_g merge_guards_c False show ?thesis
by (auto intro: execn.intros simp add: Let_def)
next
case True
from exec_merge_c s_in_g merge_guards_c True show ?thesis
by (fastforce intro: execn.intros elim: execn.cases)
qed
qed
next
case (GuardFault s g f c n)
have s_notin_g: "s \<notin> g" by fact
show ?case
proof (cases "\<exists>f' g' c'. merge_guards c = Guard f' g' c'")
case False
with s_notin_g
show ?thesis
by (cases "merge_guards c") (auto intro: execn.intros simp add: Let_def)
next
case True
then obtain f' g' c' where
merge_guards_c: "merge_guards c = Guard f' g' c'"
by iprover
show ?thesis
proof (cases "f=f'")
case False
from s_notin_g merge_guards_c False show ?thesis
by (auto intro: execn.intros simp add: Let_def)
next
case True
from s_notin_g merge_guards_c True show ?thesis
by (fastforce intro: execn.intros)
qed
qed
qed (fastforce intro: execn.intros)+
lemma execn_merge_guards_to_execn_Normal:
"\<And>s n t. \<Gamma>\<turnstile>\<langle>merge_guards c,Normal s\<rangle> =n\<Rightarrow> t \<Longrightarrow> \<Gamma>\<turnstile>\<langle>c,Normal s\<rangle> =n\<Rightarrow> t"
proof (induct c)
case Skip thus ?case by auto
next
case Basic thus ?case by auto
next
case Spec thus ?case by auto
next
case (Seq c1 c2)
have "\<Gamma>\<turnstile>\<langle>merge_guards (Seq c1 c2),Normal s\<rangle> =n\<Rightarrow> t" by fact
hence exec_merge: "\<Gamma>\<turnstile>\<langle>Seq (merge_guards c1) (merge_guards c2),Normal s\<rangle> =n\<Rightarrow> t"
by simp
then obtain s' where
exec_merge_c1: "\<Gamma>\<turnstile>\<langle>merge_guards c1,Normal s\<rangle> =n\<Rightarrow> s'" and
exec_merge_c2: "\<Gamma>\<turnstile>\<langle>merge_guards c2,s'\<rangle> =n\<Rightarrow> t"
by cases
from exec_merge_c1
have exec_c1: "\<Gamma>\<turnstile>\<langle>c1,Normal s\<rangle> =n\<Rightarrow> s'"
by (rule Seq.hyps)
show ?case
proof (cases s')
case (Normal s'')
with exec_merge_c2
have "\<Gamma>\<turnstile>\<langle>c2,s'\<rangle> =n\<Rightarrow> t"
by (auto intro: Seq.hyps)
with exec_c1 show ?thesis
by (auto intro: execn.intros)
next
case (Abrupt s'')
with exec_merge_c2 have "t=Abrupt s''"
by (auto dest: execn_Abrupt_end)
with exec_c1 Abrupt
show ?thesis
by (auto intro: execn.intros)
next
case (Fault f)
with exec_merge_c2 have "t=Fault f"
by (auto dest: execn_Fault_end)
with exec_c1 Fault
show ?thesis
by (auto intro: execn.intros)
next
case Stuck
with exec_merge_c2 have "t=Stuck"
by (auto dest: execn_Stuck_end)
with exec_c1 Stuck
show ?thesis
by (auto intro: execn.intros)
qed
next
case Cond thus ?case
by (fastforce intro: execn.intros elim: execn_Normal_elim_cases)
next
case (While b c)
{
fix c' r w
assume exec_c': "\<Gamma>\<turnstile>\<langle>c',r\<rangle> =n\<Rightarrow> w"
assume c': "c'=While b (merge_guards c)"
have "\<Gamma>\<turnstile>\<langle>While b c,r\<rangle> =n\<Rightarrow> w"
using exec_c' c'
proof (induct)
case (WhileTrue r b' c'' n u w)
have eqs: "While b' c'' = While b (merge_guards c)" by fact
from WhileTrue
have r_in_b: "r \<in> b"
by simp
from WhileTrue While.hyps have exec_c: "\<Gamma>\<turnstile>\<langle>c,Normal r\<rangle> =n\<Rightarrow> u"
by simp
from WhileTrue have exec_w: "\<Gamma>\<turnstile>\<langle>While b c,u\<rangle> =n\<Rightarrow> w"
by simp
from r_in_b exec_c exec_w
show ?case
by (rule execn.WhileTrue)
next
case WhileFalse thus ?case by (auto intro: execn.WhileFalse)
qed auto
}
with While.prems show ?case
by (auto)
next
case Call thus ?case by simp
next
case DynCom thus ?case
by (fastforce intro: execn.intros elim: execn_Normal_elim_cases)
next
case (Guard f g c)
have exec_merge: "\<Gamma>\<turnstile>\<langle>merge_guards (Guard f g c),Normal s\<rangle> =n\<Rightarrow> t" by fact
show ?case
proof (cases "s \<in> g")
case False
with exec_merge have "t=Fault f"
by (auto split: com.splits split_if_asm elim: execn_Normal_elim_cases
simp add: Let_def is_Guard_def)
with False show ?thesis
by (auto intro: execn.intros)
next
case True
note s_in_g = this
show ?thesis
proof (cases "\<exists>f' g' c'. merge_guards c = Guard f' g' c'")
case False
then
have "merge_guards (Guard f g c) = Guard f g (merge_guards c)"
by (cases "merge_guards c") (auto simp add: Let_def)
with exec_merge s_in_g
obtain "\<Gamma>\<turnstile>\<langle>merge_guards c,Normal s\<rangle> =n\<Rightarrow> t"
by (auto elim: execn_Normal_elim_cases)
from Guard.hyps [OF this] s_in_g
show ?thesis
by (auto intro: execn.intros)
next
case True
then obtain f' g' c' where
merge_guards_c: "merge_guards c = Guard f' g' c'"
by iprover
show ?thesis
proof (cases "f=f'")
case False
with merge_guards_c
have "merge_guards (Guard f g c) = Guard f g (merge_guards c)"
by (simp add: Let_def)
with exec_merge s_in_g
obtain "\<Gamma>\<turnstile>\<langle>merge_guards c,Normal s\<rangle> =n\<Rightarrow> t"
by (auto elim: execn_Normal_elim_cases)
from Guard.hyps [OF this] s_in_g
show ?thesis
by (auto intro: execn.intros)
next
case True
note f_eq_f' = this
with merge_guards_c have
merge_guards_Guard: "merge_guards (Guard f g c) = Guard f (g \<inter> g') c'"
by simp
show ?thesis
proof (cases "s \<in> g'")
case True
with exec_merge merge_guards_Guard merge_guards_c s_in_g
have "\<Gamma>\<turnstile>\<langle>merge_guards c,Normal s\<rangle> =n\<Rightarrow> t"
by (auto intro: execn.intros elim: execn_Normal_elim_cases)
with Guard.hyps [OF this] s_in_g
show ?thesis
by (auto intro: execn.intros)
next
case False
with exec_merge merge_guards_Guard
have "t=Fault f"
by (auto elim: execn_Normal_elim_cases)
with merge_guards_c f_eq_f' False
have "\<Gamma>\<turnstile>\<langle>merge_guards c,Normal s\<rangle> =n\<Rightarrow> t"
by (auto intro: execn.intros)
from Guard.hyps [OF this] s_in_g
show ?thesis
by (auto intro: execn.intros)
qed
qed
qed
qed
next
case Throw thus ?case by simp
next
case (Catch c1 c2)
have "\<Gamma>\<turnstile>\<langle>merge_guards (Catch c1 c2),Normal s\<rangle> =n\<Rightarrow> t" by fact
hence "\<Gamma>\<turnstile>\<langle>Catch (merge_guards c1) (merge_guards c2),Normal s\<rangle> =n\<Rightarrow> t" by simp
thus ?case
by cases (auto intro: execn.intros Catch.hyps)
qed
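(* Non-Normal start states propagate unchanged, so the Normal case suffices. *)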
theorem execn_merge_guards_to_execn:
"\<Gamma>\<turnstile>\<langle>merge_guards c,s\<rangle> =n\<Rightarrow> t \<Longrightarrow> \<Gamma>\<turnstile>\<langle>c, s\<rangle> =n\<Rightarrow> t"
apply (cases s)
apply (fastforce intro: execn_merge_guards_to_execn_Normal)
apply (fastforce dest: execn_Abrupt_end)
apply (fastforce dest: execn_Fault_end)
apply (fastforce dest: execn_Stuck_end)
done
corollary execn_iff_execn_merge_guards:
"\<Gamma>\<turnstile>\<langle>c, s\<rangle> =n\<Rightarrow> t = \<Gamma>\<turnstile>\<langle>merge_guards c,s\<rangle> =n\<Rightarrow> t"
by (blast intro: execn_merge_guards_to_execn execn_to_execn_merge_guards)
theorem exec_iff_exec_merge_guards:
"\<Gamma>\<turnstile>\<langle>c, s\<rangle> \<Rightarrow> t = \<Gamma>\<turnstile>\<langle>merge_guards c,s\<rangle> \<Rightarrow> t"
by (blast dest: exec_to_execn intro: execn_to_exec
intro: execn_to_execn_merge_guards
execn_merge_guards_to_execn)
corollary exec_to_exec_merge_guards:
"\<Gamma>\<turnstile>\<langle>c, s\<rangle> \<Rightarrow> t \<Longrightarrow> \<Gamma>\<turnstile>\<langle>merge_guards c,s\<rangle> \<Rightarrow> t"
by (rule iffD1 [OF exec_iff_exec_merge_guards])
corollary exec_merge_guards_to_exec:
"\<Gamma>\<turnstile>\<langle>merge_guards c,s\<rangle> \<Rightarrow> t \<Longrightarrow> \<Gamma>\<turnstile>\<langle>c, s\<rangle> \<Rightarrow> t"
by (rule iffD2 [OF exec_iff_exec_merge_guards])
(* ************************************************************************* *)
subsection {* Lemmas about @{const "mark_guards"} *}
(* ************************************************************************* *)
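(* mark_guards f c overwrites the fault flags of all guards in c with f;
   fault-free executions are unaffected. *)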
lemma execn_to_execn_mark_guards:
assumes exec_c: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"
assumes t_not_Fault: "\<not> isFault t"
shows "\<Gamma>\<turnstile>\<langle>mark_guards f c,s\<rangle> =n\<Rightarrow> t "
using exec_c t_not_Fault [simplified not_isFault_iff]
by (induct) (auto intro: execn.intros dest: noFaultn_startD')
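(* A faulting execution is preserved by marking, although the resulting
   fault flag may differ. *)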
lemma execn_to_execn_mark_guards_Fault:
assumes exec_c: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"
shows "\<And>f. \<lbrakk>t=Fault f\<rbrakk> \<Longrightarrow> \<exists>f'. \<Gamma>\<turnstile>\<langle>mark_guards x c,s\<rangle> =n\<Rightarrow> Fault f'"
using exec_c
proof (induct)
case Skip thus ?case by auto
next
case Guard thus ?case by (fastforce intro: execn.intros)
next
case GuardFault thus ?case by (fastforce intro: execn.intros)
next
case FaultProp thus ?case by auto
next
case Basic thus ?case by auto
next
case Spec thus ?case by auto
next
case SpecStuck thus ?case by auto
next
case (Seq c1 s n w c2 t)
have exec_c1: "\<Gamma>\<turnstile>\<langle>c1,Normal s\<rangle> =n\<Rightarrow> w" by fact
have exec_c2: "\<Gamma>\<turnstile>\<langle>c2,w\<rangle> =n\<Rightarrow> t" by fact
have t: "t=Fault f" by fact
show ?case
proof (cases w)
case (Fault f')
with exec_c2 t have "f'=f"
by (auto dest: execn_Fault_end)
with Fault Seq.hyps obtain f'' where
"\<Gamma>\<turnstile>\<langle>mark_guards x c1,Normal s\<rangle> =n\<Rightarrow> Fault f''"
by auto
moreover have "\<Gamma>\<turnstile>\<langle>mark_guards x c2,Fault f''\<rangle> =n\<Rightarrow> Fault f''"
by auto
ultimately show ?thesis
by (auto intro: execn.intros)
next
case (Normal s')
with execn_to_execn_mark_guards [OF exec_c1]
have exec_mark_c1: "\<Gamma>\<turnstile>\<langle>mark_guards x c1,Normal s\<rangle> =n\<Rightarrow> w"
by simp
with Seq.hyps t obtain f' where
"\<Gamma>\<turnstile>\<langle>mark_guards x c2,w\<rangle> =n\<Rightarrow> Fault f'"
by blast
with exec_mark_c1 show ?thesis
by (auto intro: execn.intros)
next
case (Abrupt s')
with execn_to_execn_mark_guards [OF exec_c1]
have exec_mark_c1: "\<Gamma>\<turnstile>\<langle>mark_guards x c1,Normal s\<rangle> =n\<Rightarrow> w"
by simp
with Seq.hyps t obtain f' where
"\<Gamma>\<turnstile>\<langle>mark_guards x c2,w\<rangle> =n\<Rightarrow> Fault f'"
by (auto intro: execn.intros)
with exec_mark_c1 show ?thesis
by (auto intro: execn.intros)
next
case Stuck
with exec_c2 have "t=Stuck"
by (auto dest: execn_Stuck_end)
with t show ?thesis by simp
qed
next
case CondTrue thus ?case by (fastforce intro: execn.intros)
next
case CondFalse thus ?case by (fastforce intro: execn.intros)
next
case (WhileTrue s b c n w t)
have exec_c: "\<Gamma>\<turnstile>\<langle>c,Normal s\<rangle> =n\<Rightarrow> w" by fact
have exec_w: "\<Gamma>\<turnstile>\<langle>While b c,w\<rangle> =n\<Rightarrow> t" by fact
have t: "t = Fault f" by fact
have s_in_b: "s \<in> b" by fact
show ?case
proof (cases w)
case (Fault f')
with exec_w t have "f'=f"
by (auto dest: execn_Fault_end)
with Fault WhileTrue.hyps obtain f'' where
"\<Gamma>\<turnstile>\<langle>mark_guards x c,Normal s\<rangle> =n\<Rightarrow> Fault f''"
by auto
moreover have "\<Gamma>\<turnstile>\<langle>mark_guards x (While b c),Fault f''\<rangle> =n\<Rightarrow> Fault f''"
by auto
ultimately show ?thesis
using s_in_b by (auto intro: execn.intros)
next
case (Normal s')
with execn_to_execn_mark_guards [OF exec_c]
have exec_mark_c: "\<Gamma>\<turnstile>\<langle>mark_guards x c,Normal s\<rangle> =n\<Rightarrow> w"
by simp
with WhileTrue.hyps t obtain f' where
"\<Gamma>\<turnstile>\<langle>mark_guards x (While b c),w\<rangle> =n\<Rightarrow> Fault f'"
by blast
with exec_mark_c s_in_b show ?thesis
by (auto intro: execn.intros)
next
case (Abrupt s')
with execn_to_execn_mark_guards [OF exec_c]
have exec_mark_c: "\<Gamma>\<turnstile>\<langle>mark_guards x c,Normal s\<rangle> =n\<Rightarrow> w"
by simp
with WhileTrue.hyps t obtain f' where
"\<Gamma>\<turnstile>\<langle>mark_guards x (While b c),w\<rangle> =n\<Rightarrow> Fault f'"
by (auto intro: execn.intros)
with exec_mark_c s_in_b show ?thesis
by (auto intro: execn.intros)
next
case Stuck
with exec_w have "t=Stuck"
by (auto dest: execn_Stuck_end)
with t show ?thesis by simp
qed
next
case WhileFalse thus ?case by (fastforce intro: execn.intros)
next
case Call thus ?case by (fastforce intro: execn.intros)
next
case CallUndefined thus ?case by simp
next
case StuckProp thus ?case by simp
next
case DynCom thus ?case by (fastforce intro: execn.intros)
next
case Throw thus ?case by simp
next
case AbruptProp thus ?case by simp
next
case (CatchMatch c1 s n w c2 t)
have exec_c1: "\<Gamma>\<turnstile>\<langle>c1,Normal s\<rangle> =n\<Rightarrow> Abrupt w" by fact
have exec_c2: "\<Gamma>\<turnstile>\<langle>c2,Normal w\<rangle> =n\<Rightarrow> t" by fact
have t: "t = Fault f" by fact
from execn_to_execn_mark_guards [OF exec_c1]
have exec_mark_c1: "\<Gamma>\<turnstile>\<langle>mark_guards x c1,Normal s\<rangle> =n\<Rightarrow> Abrupt w"
by simp
with CatchMatch.hyps t obtain f' where
"\<Gamma>\<turnstile>\<langle>mark_guards x c2,Normal w\<rangle> =n\<Rightarrow> Fault f'"
by blast
with exec_mark_c1 show ?case
by (auto intro: execn.intros)
next
case CatchMiss thus ?case by (fastforce intro: execn.intros)
qed
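(* Converse simulation: the original program matches every execution of the
   marked program, faulting exactly when the marked one faults; if the
   original fault flag happens to be the marker f, the results coincide. *)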
lemma execn_mark_guards_to_execn:
"\<And>s n t. \<Gamma>\<turnstile>\<langle>mark_guards f c,s\<rangle> =n\<Rightarrow> t
\<Longrightarrow> \<exists>t'. \<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t' \<and>
(isFault t \<longrightarrow> isFault t') \<and>
(t' = Fault f \<longrightarrow> t'=t) \<and>
(isFault t' \<longrightarrow> isFault t) \<and>
(\<not> isFault t' \<longrightarrow> t'=t)"
proof (induct c)
case Skip thus ?case by auto
next
case Basic thus ?case by auto
next
case Spec thus ?case by auto
next
case (Seq c1 c2 s n t)
have exec_mark: "\<Gamma>\<turnstile>\<langle>mark_guards f (Seq c1 c2),s\<rangle> =n\<Rightarrow> t" by fact
then obtain w where
exec_mark_c1: "\<Gamma>\<turnstile>\<langle>mark_guards f c1,s\<rangle> =n\<Rightarrow> w" and
exec_mark_c2: "\<Gamma>\<turnstile>\<langle>mark_guards f c2,w\<rangle> =n\<Rightarrow> t"
by (auto elim: execn_elim_cases)
from Seq.hyps exec_mark_c1
obtain w' where
exec_c1: "\<Gamma>\<turnstile>\<langle>c1,s\<rangle> =n\<Rightarrow> w'" and
w_Fault: "isFault w \<longrightarrow> isFault w'" and
w'_Fault_f: "w' = Fault f \<longrightarrow> w'=w" and
w'_Fault: "isFault w' \<longrightarrow> isFault w" and
w'_noFault: "\<not> isFault w' \<longrightarrow> w'=w"
by blast
show ?case
proof (cases "s")
case (Fault f)
with exec_mark have "t=Fault f"
by (auto dest: execn_Fault_end)
with Fault show ?thesis
by auto
next
case Stuck
with exec_mark have "t=Stuck"
by (auto dest: execn_Stuck_end)
with Stuck show ?thesis
by auto
next
case (Abrupt s')
with exec_mark have "t=Abrupt s'"
by (auto dest: execn_Abrupt_end)
with Abrupt show ?thesis
by auto
next
case (Normal s')
show ?thesis
proof (cases "isFault w")
case True
then obtain f where w': "w=Fault f"..
moreover with exec_mark_c2
have t: "t=Fault f"
by (auto dest: execn_Fault_end)
ultimately show ?thesis
using Normal w_Fault w'_Fault_f exec_c1
by (fastforce intro: execn.intros elim: isFaultE)
next
case False
note noFault_w = this
show ?thesis
proof (cases "isFault w'")
case True
then obtain f' where w': "w'=Fault f'"..
with Normal exec_c1
have exec: "\<Gamma>\<turnstile>\<langle>Seq c1 c2,s\<rangle> =n\<Rightarrow> Fault f'"
by (auto intro: execn.intros)
from w'_Fault_f w' noFault_w
have "f' \<noteq> f"
by (cases w) auto
moreover
from w' w'_Fault exec_mark_c2 have "isFault t"
by (auto dest: execn_Fault_end elim: isFaultE)
ultimately
show ?thesis
using exec
by auto
next
case False
with w'_noFault have w': "w'=w" by simp
from Seq.hyps exec_mark_c2
obtain t' where
"\<Gamma>\<turnstile>\<langle>c2,w\<rangle> =n\<Rightarrow> t'" and
"isFault t \<longrightarrow> isFault t'" and
"t' = Fault f \<longrightarrow> t'=t" and
"isFault t' \<longrightarrow> isFault t" and
"\<not> isFault t' \<longrightarrow> t'=t"
by blast
with Normal exec_c1 w'
show ?thesis
by (fastforce intro: execn.intros)
qed
qed
qed
next
case (Cond b c1 c2 s n t)
have exec_mark: "\<Gamma>\<turnstile>\<langle>mark_guards f (Cond b c1 c2),s\<rangle> =n\<Rightarrow> t" by fact
show ?case
proof (cases s)
case (Fault f)
with exec_mark have "t=Fault f"
by (auto dest: execn_Fault_end)
with Fault show ?thesis
by auto
next
case Stuck
with exec_mark have "t=Stuck"
by (auto dest: execn_Stuck_end)
with Stuck show ?thesis
by auto
next
case (Abrupt s')
with exec_mark have "t=Abrupt s'"
by (auto dest: execn_Abrupt_end)
with Abrupt show ?thesis
by auto
next
case (Normal s')
show ?thesis
proof (cases "s'\<in> b")
case True
with Normal exec_mark
have "\<Gamma>\<turnstile>\<langle>mark_guards f c1 ,Normal s'\<rangle> =n\<Rightarrow> t"
by (auto elim: execn_Normal_elim_cases)
with Normal True Cond.hyps obtain t'
where "\<Gamma>\<turnstile>\<langle>c1,Normal s'\<rangle> =n\<Rightarrow> t'"
"isFault t \<longrightarrow> isFault t'"
"t' = Fault f \<longrightarrow> t'=t"
"isFault t' \<longrightarrow> isFault t"
"\<not> isFault t' \<longrightarrow> t' = t"
by blast
with Normal True
show ?thesis
by (blast intro: execn.intros)
next
case False
with Normal exec_mark
have "\<Gamma>\<turnstile>\<langle>mark_guards f c2 ,Normal s'\<rangle> =n\<Rightarrow> t"
by (auto elim: execn_Normal_elim_cases)
with Normal False Cond.hyps obtain t'
where "\<Gamma>\<turnstile>\<langle>c2,Normal s'\<rangle> =n\<Rightarrow> t'"
"isFault t \<longrightarrow> isFault t'"
"t' = Fault f \<longrightarrow> t'=t"
"isFault t' \<longrightarrow> isFault t"
"\<not> isFault t' \<longrightarrow> t' = t"
by blast
with Normal False
show ?thesis
by (blast intro: execn.intros)
qed
qed
next
case (While b c s n t)
have exec_mark: "\<Gamma>\<turnstile>\<langle>mark_guards f (While b c),s\<rangle> =n\<Rightarrow> t" by fact
show ?case
proof (cases s)
case (Fault f)
with exec_mark have "t=Fault f"
by (auto dest: execn_Fault_end)
with Fault show ?thesis
by auto
next
case Stuck
with exec_mark have "t=Stuck"
by (auto dest: execn_Stuck_end)
with Stuck show ?thesis
by auto
next
case (Abrupt s')
with exec_mark have "t=Abrupt s'"
by (auto dest: execn_Abrupt_end)
with Abrupt show ?thesis
by auto
next
case (Normal s')
{
fix c' r w
assume exec_c': "\<Gamma>\<turnstile>\<langle>c',r\<rangle> =n\<Rightarrow> w"
assume c': "c'=While b (mark_guards f c)"
have "\<exists>w'. \<Gamma>\<turnstile>\<langle>While b c,r\<rangle> =n\<Rightarrow> w' \<and> (isFault w \<longrightarrow> isFault w') \<and>
(w' = Fault f \<longrightarrow> w'=w) \<and> (isFault w' \<longrightarrow> isFault w) \<and>
(\<not> isFault w' \<longrightarrow> w'=w)"
using exec_c' c'
proof (induct)
case (WhileTrue r b' c'' n u w)
have eqs: "While b' c'' = While b (mark_guards f c)" by fact
from WhileTrue.hyps eqs
have r_in_b: "r\<in>b" by simp
from WhileTrue.hyps eqs
have exec_mark_c: "\<Gamma>\<turnstile>\<langle>mark_guards f c,Normal r\<rangle> =n\<Rightarrow> u" by simp
from WhileTrue.hyps eqs
have exec_mark_w: "\<Gamma>\<turnstile>\<langle>While b (mark_guards f c),u\<rangle> =n\<Rightarrow> w"
by simp
show ?case
proof -
from WhileTrue.hyps eqs have "\<Gamma>\<turnstile>\<langle>mark_guards f c,Normal r\<rangle> =n\<Rightarrow> u"
by simp
with While.hyps
obtain u' where
exec_c: "\<Gamma>\<turnstile>\<langle>c,Normal r\<rangle> =n\<Rightarrow> u'" and
u_Fault: "isFault u \<longrightarrow> isFault u'" and
u'_Fault_f: "u' = Fault f \<longrightarrow> u'=u" and
u'_Fault: "isFault u' \<longrightarrow> isFault u" and
u'_noFault: "\<not> isFault u' \<longrightarrow> u'=u"
by blast
show ?thesis
proof (cases "isFault u'")
case False
with u'_noFault have u': "u'=u" by simp
from WhileTrue.hyps eqs obtain w' where
"\<Gamma>\<turnstile>\<langle>While b c,u\<rangle> =n\<Rightarrow> w'"
"isFault w \<longrightarrow> isFault w'"
"w' = Fault f \<longrightarrow> w'=w"
"isFault w' \<longrightarrow> isFault w"
"\<not> isFault w' \<longrightarrow> w' = w"
by blast
with u' exec_c r_in_b
show ?thesis
by (blast intro: execn.WhileTrue)
next
case True
then obtain f' where u': "u'=Fault f'"..
with exec_c r_in_b
have exec: "\<Gamma>\<turnstile>\<langle>While b c,Normal r\<rangle> =n\<Rightarrow> Fault f'"
by (blast intro: execn.intros)
from True u'_Fault have "isFault u"
by simp
then obtain f where u: "u=Fault f"..
with exec_mark_w have "w=Fault f"
by (auto dest: execn_Fault_end)
with exec u' u u'_Fault_f
show ?thesis
by auto
qed
qed
next
case (WhileFalse r b' c'' n)
have eqs: "While b' c'' = While b (mark_guards f c)" by fact
from WhileFalse.hyps eqs
have r_not_in_b: "r\<notin>b" by simp
show ?case
proof -
from r_not_in_b
have "\<Gamma>\<turnstile>\<langle>While b c,Normal r\<rangle> =n\<Rightarrow> Normal r"
by (rule execn.WhileFalse)
thus ?thesis
by blast
qed
qed auto
} note hyp_while = this
show ?thesis
proof (cases "s'\<in>b")
case False
with Normal exec_mark
have "t=s"
by (auto elim: execn_Normal_elim_cases)
with Normal False show ?thesis
by (auto intro: execn.intros)
next
case True note s'_in_b = this
with Normal exec_mark obtain r where
exec_mark_c: "\<Gamma>\<turnstile>\<langle>mark_guards f c,Normal s'\<rangle> =n\<Rightarrow> r" and
exec_mark_w: "\<Gamma>\<turnstile>\<langle>While b (mark_guards f c),r\<rangle> =n\<Rightarrow> t"
by (auto elim: execn_Normal_elim_cases)
from While.hyps exec_mark_c obtain r' where
exec_c: "\<Gamma>\<turnstile>\<langle>c,Normal s'\<rangle> =n\<Rightarrow> r'" and
r_Fault: "isFault r \<longrightarrow> isFault r'" and
r'_Fault_f: "r' = Fault f \<longrightarrow> r'=r" and
r'_Fault: "isFault r' \<longrightarrow> isFault r" and
r'_noFault: "\<not> isFault r' \<longrightarrow> r'=r"
by blast
show ?thesis
proof (cases "isFault r'")
case False
with r'_noFault have r': "r'=r" by simp
from hyp_while exec_mark_w
obtain t' where
"\<Gamma>\<turnstile>\<langle>While b c,r\<rangle> =n\<Rightarrow> t'"
"isFault t \<longrightarrow> isFault t'"
"t' = Fault f \<longrightarrow> t'=t"
"isFault t' \<longrightarrow> isFault t"
"\<not> isFault t' \<longrightarrow> t'=t"
by blast
with r' exec_c Normal s'_in_b
show ?thesis
by (blast intro: execn.intros)
next
case True
then obtain f' where r': "r'=Fault f'"..
hence "\<Gamma>\<turnstile>\<langle>While b c,r'\<rangle> =n\<Rightarrow> Fault f'"
by auto
with Normal s'_in_b exec_c
have exec: "\<Gamma>\<turnstile>\<langle>While b c,Normal s'\<rangle> =n\<Rightarrow> Fault f'"
by (auto intro: execn.intros)
from True r'_Fault
have "isFault r"
by simp
then obtain f where r: "r=Fault f"..
with exec_mark_w have "t=Fault f"
by (auto dest: execn_Fault_end)
with Normal exec r' r r'_Fault_f
show ?thesis
by auto
qed
qed
qed
next
case Call thus ?case by auto
next
case DynCom thus ?case
by (fastforce elim!: execn_elim_cases intro: execn.intros)
next
case (Guard f' g c s n t)
have exec_mark: "\<Gamma>\<turnstile>\<langle>mark_guards f (Guard f' g c),s\<rangle> =n\<Rightarrow> t" by fact
show ?case
proof (cases s)
case (Fault f)
with exec_mark have "t=Fault f"
by (auto dest: execn_Fault_end)
with Fault show ?thesis
by auto
next
case Stuck
with exec_mark have "t=Stuck"
by (auto dest: execn_Stuck_end)
with Stuck show ?thesis
by auto
next
case (Abrupt s')
with exec_mark have "t=Abrupt s'"
by (auto dest: execn_Abrupt_end)
with Abrupt show ?thesis
by auto
next
case (Normal s')
show ?thesis
proof (cases "s'\<in>g")
case False
with Normal exec_mark have t: "t=Fault f"
by (auto elim: execn_Normal_elim_cases)
from False
have "\<Gamma>\<turnstile>\<langle>Guard f' g c,Normal s'\<rangle> =n\<Rightarrow> Fault f'"
by (blast intro: execn.intros)
with Normal t show ?thesis
by auto
next
case True
with exec_mark Normal
have "\<Gamma>\<turnstile>\<langle>mark_guards f c,Normal s'\<rangle> =n\<Rightarrow> t"
by (auto elim: execn_Normal_elim_cases)
with Guard.hyps obtain t' where
"\<Gamma>\<turnstile>\<langle>c,Normal s'\<rangle> =n\<Rightarrow> t'" and
"isFault t \<longrightarrow> isFault t'" and
"t' = Fault f \<longrightarrow> t'=t" and
"isFault t' \<longrightarrow> isFault t" and
"\<not> isFault t' \<longrightarrow> t'=t"
by blast
with Normal True
show ?thesis
by (blast intro: execn.intros)
qed
qed
next
case Throw thus ?case by auto
next
case (Catch c1 c2 s n t)
have exec_mark: "\<Gamma>\<turnstile>\<langle>mark_guards f (Catch c1 c2),s\<rangle> =n\<Rightarrow> t" by fact
show ?case
proof (cases "s")
case (Fault f)
with exec_mark have "t=Fault f"
by (auto dest: execn_Fault_end)
with Fault show ?thesis
by auto
next
case Stuck
with exec_mark have "t=Stuck"
by (auto dest: execn_Stuck_end)
with Stuck show ?thesis
by auto
next
case (Abrupt s')
with exec_mark have "t=Abrupt s'"
by (auto dest: execn_Abrupt_end)
with Abrupt show ?thesis
by auto
next
case (Normal s') note s=this
with exec_mark have
"\<Gamma>\<turnstile>\<langle>Catch (mark_guards f c1) (mark_guards f c2),Normal s'\<rangle> =n\<Rightarrow> t" by simp
thus ?thesis
proof (cases)
fix w
assume exec_mark_c1: "\<Gamma>\<turnstile>\<langle>mark_guards f c1,Normal s'\<rangle> =n\<Rightarrow> Abrupt w"
assume exec_mark_c2: "\<Gamma>\<turnstile>\<langle>mark_guards f c2,Normal w\<rangle> =n\<Rightarrow> t"
from exec_mark_c1 Catch.hyps
obtain w' where
exec_c1: "\<Gamma>\<turnstile>\<langle>c1,Normal s'\<rangle> =n\<Rightarrow> w'" and
w'_Fault_f: "w' = Fault f \<longrightarrow> w'=Abrupt w" and
w'_Fault: "isFault w' \<longrightarrow> isFault (Abrupt w)" and
w'_noFault: "\<not> isFault w' \<longrightarrow> w'=Abrupt w"
by fastforce
show ?thesis
proof (cases "w'")
case (Fault f')
with Normal exec_c1 have "\<Gamma>\<turnstile>\<langle>Catch c1 c2,s\<rangle> =n\<Rightarrow> Fault f'"
by (auto intro: execn.intros)
with w'_Fault Fault show ?thesis
by auto
next
case Stuck
with w'_noFault have False
by simp
thus ?thesis ..
next
case (Normal w'')
with w'_noFault have False by simp thus ?thesis ..
next
case (Abrupt w'')
with w'_noFault have w'': "w''=w" by simp
from exec_mark_c2 Catch.hyps
obtain t' where
"\<Gamma>\<turnstile>\<langle>c2,Normal w\<rangle> =n\<Rightarrow> t'"
"isFault t \<longrightarrow> isFault t'"
"t' = Fault f \<longrightarrow> t'=t"
"isFault t' \<longrightarrow> isFault t"
"\<not> isFault t' \<longrightarrow> t'=t"
by blast
with w'' Abrupt s exec_c1
show ?thesis
by (blast intro: execn.intros)
qed
next
assume t: "\<not> isAbr t"
assume "\<Gamma>\<turnstile>\<langle>mark_guards f c1,Normal s'\<rangle> =n\<Rightarrow> t"
with Catch.hyps
obtain t' where
exec_c1: "\<Gamma>\<turnstile>\<langle>c1,Normal s'\<rangle> =n\<Rightarrow> t'" and
t_Fault: "isFault t \<longrightarrow> isFault t'" and
t'_Fault_f: "t' = Fault f \<longrightarrow> t'=t" and
t'_Fault: "isFault t' \<longrightarrow> isFault t" and
t'_noFault: "\<not> isFault t' \<longrightarrow> t'=t"
by blast
show ?thesis
proof (cases "isFault t'")
case True
then obtain f' where t': "t'=Fault f'"..
with exec_c1 have "\<Gamma>\<turnstile>\<langle>Catch c1 c2,Normal s'\<rangle> =n\<Rightarrow> Fault f'"
by (auto intro: execn.intros)
with t'_Fault_f t'_Fault t' s show ?thesis
by auto
next
case False
with t'_noFault have "t'=t" by simp
with t exec_c1 s show ?thesis
by (blast intro: execn.intros)
qed
qed
qed
qed
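(* Transfer of the mark_guards simulations to the big-step semantics. *)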
lemma exec_to_exec_mark_guards:
assumes exec_c: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t"
assumes t_not_Fault: "\<not> isFault t"
shows "\<Gamma>\<turnstile>\<langle>mark_guards f c,s\<rangle> \<Rightarrow> t "
proof -
from exec_to_execn [OF exec_c] obtain n where
"\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t" ..
from execn_to_execn_mark_guards [OF this t_not_Fault]
show ?thesis
by (blast intro: execn_to_exec)
qed
lemma exec_to_exec_mark_guards_Fault:
assumes exec_c: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> Fault f"
shows "\<exists>f'. \<Gamma>\<turnstile>\<langle>mark_guards x c,s\<rangle> \<Rightarrow> Fault f'"
proof -
from exec_to_execn [OF exec_c] obtain n where
"\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> Fault f" ..
from execn_to_execn_mark_guards_Fault [OF this]
show ?thesis
by (blast intro: execn_to_exec)
qed
lemma exec_mark_guards_to_exec:
assumes exec_mark: "\<Gamma>\<turnstile>\<langle>mark_guards f c,s\<rangle> \<Rightarrow> t"
shows "\<exists>t'. \<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t' \<and>
(isFault t \<longrightarrow> isFault t') \<and>
(t' = Fault f \<longrightarrow> t'=t) \<and>
(isFault t' \<longrightarrow> isFault t) \<and>
(\<not> isFault t' \<longrightarrow> t'=t)"
proof -
from exec_to_execn [OF exec_mark] obtain n where
"\<Gamma>\<turnstile>\<langle>mark_guards f c,s\<rangle> =n\<Rightarrow> t" ..
from execn_mark_guards_to_execn [OF this]
show ?thesis
by (blast intro: execn_to_exec)
qed
(* ************************************************************************* *)
subsection {* Lemmas about @{const "strip_guards"} *}
(* ************************************************************************* *)
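(* strip_guards F removes the guards whose fault flag lies in F; fault-free
   executions are unaffected. *)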
lemma execn_to_execn_strip_guards:
assumes exec_c: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"
assumes t_not_Fault: "\<not> isFault t"
shows "\<Gamma>\<turnstile>\<langle>strip_guards F c,s\<rangle> =n\<Rightarrow> t "
using exec_c t_not_Fault [simplified not_isFault_iff]
by (induct) (auto intro: execn.intros dest: noFaultn_startD')
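(* Faults with a flag outside F survive the stripping of the guards in F. *)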
lemma execn_to_execn_strip_guards_Fault:
assumes exec_c: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"
shows "\<And>f. \<lbrakk>t=Fault f; f \<notin> F\<rbrakk> \<Longrightarrow> \<Gamma>\<turnstile>\<langle>strip_guards F c,s\<rangle> =n\<Rightarrow> Fault f"
using exec_c
proof (induct)
case Skip thus ?case by auto
next
case Guard thus ?case by (fastforce intro: execn.intros)
next
case GuardFault thus ?case by (fastforce intro: execn.intros)
next
case FaultProp thus ?case by auto
next
case Basic thus ?case by auto
next
case Spec thus ?case by auto
next
case SpecStuck thus ?case by auto
next
case (Seq c1 s n w c2 t)
have exec_c1: "\<Gamma>\<turnstile>\<langle>c1,Normal s\<rangle> =n\<Rightarrow> w" by fact
have exec_c2: "\<Gamma>\<turnstile>\<langle>c2,w\<rangle> =n\<Rightarrow> t" by fact
have t: "t=Fault f" by fact
have notinF: "f \<notin> F" by fact
show ?case
proof (cases w)
case (Fault f')
with exec_c2 t have "f'=f"
by (auto dest: execn_Fault_end)
with Fault notinF Seq.hyps
have "\<Gamma>\<turnstile>\<langle>strip_guards F c1,Normal s\<rangle> =n\<Rightarrow> Fault f"
by auto
moreover have "\<Gamma>\<turnstile>\<langle>strip_guards F c2,Fault f\<rangle> =n\<Rightarrow> Fault f"
by auto
ultimately show ?thesis
by (auto intro: execn.intros)
next
case (Normal s')
with execn_to_execn_strip_guards [OF exec_c1]
have exec_strip_c1: "\<Gamma>\<turnstile>\<langle>strip_guards F c1,Normal s\<rangle> =n\<Rightarrow> w"
by simp
with Seq.hyps t notinF
have "\<Gamma>\<turnstile>\<langle>strip_guards F c2,w\<rangle> =n\<Rightarrow> Fault f"
by blast
with exec_strip_c1 show ?thesis
by (auto intro: execn.intros)
next
case (Abrupt s')
with execn_to_execn_strip_guards [OF exec_c1]
have exec_strip_c1: "\<Gamma>\<turnstile>\<langle>strip_guards F c1,Normal s\<rangle> =n\<Rightarrow> w"
by simp
with Seq.hyps t notinF
have "\<Gamma>\<turnstile>\<langle>strip_guards F c2,w\<rangle> =n\<Rightarrow> Fault f"
by (auto intro: execn.intros)
with exec_strip_c1 show ?thesis
by (auto intro: execn.intros)
next
case Stuck
with exec_c2 have "t=Stuck"
by (auto dest: execn_Stuck_end)
with t show ?thesis by simp
qed
next
case CondTrue thus ?case by (fastforce intro: execn.intros)
next
case CondFalse thus ?case by (fastforce intro: execn.intros)
next
case (WhileTrue s b c n w t)
have exec_c: "\<Gamma>\<turnstile>\<langle>c,Normal s\<rangle> =n\<Rightarrow> w" by fact
have exec_w: "\<Gamma>\<turnstile>\<langle>While b c,w\<rangle> =n\<Rightarrow> t" by fact
have t: "t = Fault f" by fact
have notinF: "f \<notin> F" by fact
have s_in_b: "s \<in> b" by fact
show ?case
proof (cases w)
case (Fault f')
with exec_w t have "f'=f"
by (auto dest: execn_Fault_end)
with Fault notinF WhileTrue.hyps
have "\<Gamma>\<turnstile>\<langle>strip_guards F c,Normal s\<rangle> =n\<Rightarrow> Fault f"
by auto
moreover have "\<Gamma>\<turnstile>\<langle>strip_guards F (While b c),Fault f\<rangle> =n\<Rightarrow> Fault f"
by auto
ultimately show ?thesis
using s_in_b by (auto intro: execn.intros)
next
case (Normal s')
with execn_to_execn_strip_guards [OF exec_c]
have exec_strip_c: "\<Gamma>\<turnstile>\<langle>strip_guards F c,Normal s\<rangle> =n\<Rightarrow> w"
by simp
with WhileTrue.hyps t notinF
have "\<Gamma>\<turnstile>\<langle>strip_guards F (While b c),w\<rangle> =n\<Rightarrow> Fault f"
by blast
with exec_strip_c s_in_b show ?thesis
by (auto intro: execn.intros)
next
case (Abrupt s')
with execn_to_execn_strip_guards [OF exec_c]
have exec_strip_c: "\<Gamma>\<turnstile>\<langle>strip_guards F c,Normal s\<rangle> =n\<Rightarrow> w"
by simp
with WhileTrue.hyps t notinF
have "\<Gamma>\<turnstile>\<langle>strip_guards F (While b c),w\<rangle> =n\<Rightarrow> Fault f"
by (auto intro: execn.intros)
with exec_strip_c s_in_b show ?thesis
by (auto intro: execn.intros)
next
case Stuck
with exec_w have "t=Stuck"
by (auto dest: execn_Stuck_end)
with t show ?thesis by simp
qed
next
case WhileFalse thus ?case by (fastforce intro: execn.intros)
next
case Call thus ?case by (fastforce intro: execn.intros)
next
case CallUndefined thus ?case by simp
next
case StuckProp thus ?case by simp
next
case DynCom thus ?case by (fastforce intro: execn.intros)
next
case Throw thus ?case by simp
next
case AbruptProp thus ?case by simp
next
case (CatchMatch c1 s n w c2 t)
have exec_c1: "\<Gamma>\<turnstile>\<langle>c1,Normal s\<rangle> =n\<Rightarrow> Abrupt w" by fact
have exec_c2: "\<Gamma>\<turnstile>\<langle>c2,Normal w\<rangle> =n\<Rightarrow> t" by fact
have t: "t = Fault f" by fact
have notinF: "f \<notin> F" by fact
from execn_to_execn_strip_guards [OF exec_c1]
have exec_strip_c1: "\<Gamma>\<turnstile>\<langle>strip_guards F c1,Normal s\<rangle> =n\<Rightarrow> Abrupt w"
by simp
with CatchMatch.hyps t notinF
have "\<Gamma>\<turnstile>\<langle>strip_guards F c2,Normal w\<rangle> =n\<Rightarrow> Fault f"
by blast
with exec_strip_c1 show ?case
by (auto intro: execn.intros)
next
case CatchMiss thus ?case by (fastforce intro: execn.intros)
qed
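(* Combined form: any execution whose final state is not a fault in F is
   preserved. *)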
lemma execn_to_execn_strip_guards':
assumes exec_c: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"
assumes t_not_Fault: "t \<notin> Fault ` F"
shows "\<Gamma>\<turnstile>\<langle>strip_guards F c,s\<rangle> =n\<Rightarrow> t"
proof (cases t)
case (Fault f)
with t_not_Fault exec_c show ?thesis
by (auto intro: execn_to_execn_strip_guards_Fault)
qed (insert exec_c, auto intro: execn_to_execn_strip_guards)
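(* Converse simulation: the original program matches every execution of the
   stripped program; fault flags outside F are preserved exactly. *)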
lemma execn_strip_guards_to_execn:
"\<And>s n t. \<Gamma>\<turnstile>\<langle>strip_guards F c,s\<rangle> =n\<Rightarrow> t
\<Longrightarrow> \<exists>t'. \<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t' \<and>
(isFault t \<longrightarrow> isFault t') \<and>
(t' \<in> Fault ` (- F) \<longrightarrow> t'=t) \<and>
(\<not> isFault t' \<longrightarrow> t'=t)"
proof (induct c)
case Skip thus ?case by auto
next
case Basic thus ?case by auto
next
case Spec thus ?case by auto
next
case (Seq c1 c2 s n t)
have exec_strip: "\<Gamma>\<turnstile>\<langle>strip_guards F (Seq c1 c2),s\<rangle> =n\<Rightarrow> t" by fact
then obtain w where
exec_strip_c1: "\<Gamma>\<turnstile>\<langle>strip_guards F c1,s\<rangle> =n\<Rightarrow> w" and
exec_strip_c2: "\<Gamma>\<turnstile>\<langle>strip_guards F c2,w\<rangle> =n\<Rightarrow> t"
by (auto elim: execn_elim_cases)
from Seq.hyps exec_strip_c1
obtain w' where
exec_c1: "\<Gamma>\<turnstile>\<langle>c1,s\<rangle> =n\<Rightarrow> w'" and
w_Fault: "isFault w \<longrightarrow> isFault w'" and
w'_Fault: "w' \<in> Fault ` (- F) \<longrightarrow> w'=w" and
w'_noFault: "\<not> isFault w' \<longrightarrow> w'=w"
by blast
show ?case
proof (cases "s")
case (Fault f)
with exec_strip have "t=Fault f"
by (auto dest: execn_Fault_end)
with Fault show ?thesis
by auto
next
case Stuck
with exec_strip have "t=Stuck"
by (auto dest: execn_Stuck_end)
with Stuck show ?thesis
by auto
next
case (Abrupt s')
with exec_strip have "t=Abrupt s'"
by (auto dest: execn_Abrupt_end)
with Abrupt show ?thesis
by auto
next
case (Normal s')
show ?thesis
proof (cases "isFault w")
case True
then obtain f where w': "w=Fault f"..
moreover with exec_strip_c2
have t: "t=Fault f"
by (auto dest: execn_Fault_end)
ultimately show ?thesis
using Normal w_Fault w'_Fault exec_c1
by (fastforce intro: execn.intros elim: isFaultE)
next
case False
note noFault_w = this
show ?thesis
proof (cases "isFault w'")
case True
then obtain f' where w': "w'=Fault f'"..
with Normal exec_c1
have exec: "\<Gamma>\<turnstile>\<langle>Seq c1 c2,s\<rangle> =n\<Rightarrow> Fault f'"
by (auto intro: execn.intros)
from w'_Fault w' noFault_w
have "f' \<in> F"
by (cases w) auto
with exec
show ?thesis
by auto
next
case False
with w'_noFault have w': "w'=w" by simp
from Seq.hyps exec_strip_c2
obtain t' where
"\<Gamma>\<turnstile>\<langle>c2,w\<rangle> =n\<Rightarrow> t'" and
"isFault t \<longrightarrow> isFault t'" and
"t' \<in> Fault ` (-F) \<longrightarrow> t'=t" and
"\<not> isFault t' \<longrightarrow> t'=t"
by blast
with Normal exec_c1 w'
show ?thesis
by (fastforce intro: execn.intros)
qed
qed
qed
next
case (Cond b c1 c2 s n t)
have exec_strip: "\<Gamma>\<turnstile>\<langle>strip_guards F (Cond b c1 c2),s\<rangle> =n\<Rightarrow> t" by fact
show ?case
proof (cases s)
case (Fault f)
with exec_strip have "t=Fault f"
by (auto dest: execn_Fault_end)
with Fault show ?thesis
by auto
next
case Stuck
with exec_strip have "t=Stuck"
by (auto dest: execn_Stuck_end)
with Stuck show ?thesis
by auto
next
case (Abrupt s')
with exec_strip have "t=Abrupt s'"
by (auto dest: execn_Abrupt_end)
with Abrupt show ?thesis
by auto
next
case (Normal s')
show ?thesis
proof (cases "s'\<in> b")
case True
with Normal exec_strip
have "\<Gamma>\<turnstile>\<langle>strip_guards F c1 ,Normal s'\<rangle> =n\<Rightarrow> t"
by (auto elim: execn_Normal_elim_cases)
with Normal True Cond.hyps obtain t'
where "\<Gamma>\<turnstile>\<langle>c1,Normal s'\<rangle> =n\<Rightarrow> t'"
"isFault t \<longrightarrow> isFault t'"
"t' \<in> Fault ` (-F) \<longrightarrow> t'=t"
"\<not> isFault t' \<longrightarrow> t' = t"
by blast
with Normal True
show ?thesis
by (blast intro: execn.intros)
next
case False
with Normal exec_strip
have "\<Gamma>\<turnstile>\<langle>strip_guards F c2 ,Normal s'\<rangle> =n\<Rightarrow> t"
by (auto elim: execn_Normal_elim_cases)
with Normal False Cond.hyps obtain t'
where "\<Gamma>\<turnstile>\<langle>c2,Normal s'\<rangle> =n\<Rightarrow> t'"
"isFault t \<longrightarrow> isFault t'"
"t' \<in> Fault ` (-F) \<longrightarrow> t'=t"
"\<not> isFault t' \<longrightarrow> t' = t"
by blast
with Normal False
show ?thesis
by (blast intro: execn.intros)
qed
qed
next
case (While b c s n t)
have exec_strip: "\<Gamma>\<turnstile>\<langle>strip_guards F (While b c),s\<rangle> =n\<Rightarrow> t" by fact
show ?case
proof (cases s)
case (Fault f)
with exec_strip have "t=Fault f"
by (auto dest: execn_Fault_end)
with Fault show ?thesis
by auto
next
case Stuck
with exec_strip have "t=Stuck"
by (auto dest: execn_Stuck_end)
with Stuck show ?thesis
by auto
next
case (Abrupt s')
with exec_strip have "t=Abrupt s'"
by (auto dest: execn_Abrupt_end)
with Abrupt show ?thesis
by auto
next
case (Normal s')
{
fix c' r w
assume exec_c': "\<Gamma>\<turnstile>\<langle>c',r\<rangle> =n\<Rightarrow> w"
assume c': "c'=While b (strip_guards F c)"
have "\<exists>w'. \<Gamma>\<turnstile>\<langle>While b c,r\<rangle> =n\<Rightarrow> w' \<and> (isFault w \<longrightarrow> isFault w') \<and>
(w' \<in> Fault ` (-F) \<longrightarrow> w'=w) \<and>
(\<not> isFault w' \<longrightarrow> w'=w)"
using exec_c' c'
proof (induct)
case (WhileTrue r b' c'' n u w)
have eqs: "While b' c'' = While b (strip_guards F c)" by fact
from WhileTrue.hyps eqs
have r_in_b: "r\<in>b" by simp
from WhileTrue.hyps eqs
have exec_strip_c: "\<Gamma>\<turnstile>\<langle>strip_guards F c,Normal r\<rangle> =n\<Rightarrow> u" by simp
from WhileTrue.hyps eqs
have exec_strip_w: "\<Gamma>\<turnstile>\<langle>While b (strip_guards F c),u\<rangle> =n\<Rightarrow> w"
by simp
show ?case
proof -
from WhileTrue.hyps eqs have "\<Gamma>\<turnstile>\<langle>strip_guards F c,Normal r\<rangle> =n\<Rightarrow> u"
by simp
with While.hyps
obtain u' where
exec_c: "\<Gamma>\<turnstile>\<langle>c,Normal r\<rangle> =n\<Rightarrow> u'" and
u_Fault: "isFault u \<longrightarrow> isFault u'" and
u'_Fault: "u' \<in> Fault ` (-F) \<longrightarrow> u'=u" and
u'_noFault: "\<not> isFault u' \<longrightarrow> u'=u"
by blast
show ?thesis
proof (cases "isFault u'")
case False
with u'_noFault have u': "u'=u" by simp
from WhileTrue.hyps eqs obtain w' where
"\<Gamma>\<turnstile>\<langle>While b c,u\<rangle> =n\<Rightarrow> w'"
"isFault w \<longrightarrow> isFault w'"
"w' \<in> Fault ` (-F) \<longrightarrow> w'=w"
"\<not> isFault w' \<longrightarrow> w' = w"
by blast
with u' exec_c r_in_b
show ?thesis
by (blast intro: execn.WhileTrue)
next
case True
then obtain f' where u': "u'=Fault f'"..
with exec_c r_in_b
have exec: "\<Gamma>\<turnstile>\<langle>While b c,Normal r\<rangle> =n\<Rightarrow> Fault f'"
by (blast intro: execn.intros)
show ?thesis
proof (cases "isFault u")
case True
then obtain f where u: "u=Fault f"..
with exec_strip_w have "w=Fault f"
by (auto dest: execn_Fault_end)
with exec u' u u'_Fault
show ?thesis
by auto
next
case False
with u'_Fault u' have "f' \<in> F"
by (cases u) auto
with exec show ?thesis
by auto
qed
qed
qed
next
case (WhileFalse r b' c'' n)
have eqs: "While b' c'' = While b (strip_guards F c)" by fact
from WhileFalse.hyps eqs
have r_not_in_b: "r\<notin>b" by simp
show ?case
proof -
from r_not_in_b
have "\<Gamma>\<turnstile>\<langle>While b c,Normal r\<rangle> =n\<Rightarrow> Normal r"
by (rule execn.WhileFalse)
thus ?thesis
by blast
qed
qed auto
} note hyp_while = this
show ?thesis
proof (cases "s'\<in>b")
case False
with Normal exec_strip
have "t=s"
by (auto elim: execn_Normal_elim_cases)
with Normal False show ?thesis
by (auto intro: execn.intros)
next
case True note s'_in_b = this
with Normal exec_strip obtain r where
exec_strip_c: "\<Gamma>\<turnstile>\<langle>strip_guards F c,Normal s'\<rangle> =n\<Rightarrow> r" and
exec_strip_w: "\<Gamma>\<turnstile>\<langle>While b (strip_guards F c),r\<rangle> =n\<Rightarrow> t"
by (auto elim: execn_Normal_elim_cases)
from While.hyps exec_strip_c obtain r' where
exec_c: "\<Gamma>\<turnstile>\<langle>c,Normal s'\<rangle> =n\<Rightarrow> r'" and
r_Fault: "isFault r \<longrightarrow> isFault r'" and
r'_Fault: "r' \<in> Fault ` (-F) \<longrightarrow> r'=r" and
r'_noFault: "\<not> isFault r' \<longrightarrow> r'=r"
by blast
show ?thesis
proof (cases "isFault r'")
case False
with r'_noFault have r': "r'=r" by simp
from hyp_while exec_strip_w
obtain t' where
"\<Gamma>\<turnstile>\<langle>While b c,r\<rangle> =n\<Rightarrow> t'"
"isFault t \<longrightarrow> isFault t'"
"t' \<in> Fault ` (-F) \<longrightarrow> t'=t"
"\<not> isFault t' \<longrightarrow> t'=t"
by blast
with r' exec_c Normal s'_in_b
show ?thesis
by (blast intro: execn.intros)
next
case True
then obtain f' where r': "r'=Fault f'"..
hence "\<Gamma>\<turnstile>\<langle>While b c,r'\<rangle> =n\<Rightarrow> Fault f'"
by auto
with Normal s'_in_b exec_c
have exec: "\<Gamma>\<turnstile>\<langle>While b c,Normal s'\<rangle> =n\<Rightarrow> Fault f'"
by (auto intro: execn.intros)
show ?thesis
proof (cases "isFault r")
case True
then obtain f where r: "r=Fault f"..
with exec_strip_w have "t=Fault f"
by (auto dest: execn_Fault_end)
with Normal exec r' r r'_Fault
show ?thesis
by auto
next
case False
with r'_Fault r' have "f' \<in> F"
by (cases r) auto
with Normal exec show ?thesis
by auto
qed
qed
qed
qed
next
case Call thus ?case by auto
next
case DynCom thus ?case
by (fastforce elim!: execn_elim_cases intro: execn.intros)
next
case (Guard f g c s n t)
have exec_strip: "\<Gamma>\<turnstile>\<langle>strip_guards F (Guard f g c),s\<rangle> =n\<Rightarrow> t" by fact
show ?case
proof (cases s)
case (Fault f)
with exec_strip have "t=Fault f"
by (auto dest: execn_Fault_end)
with Fault show ?thesis
by auto
next
case Stuck
with exec_strip have "t=Stuck"
by (auto dest: execn_Stuck_end)
with Stuck show ?thesis
by auto
next
case (Abrupt s')
with exec_strip have "t=Abrupt s'"
by (auto dest: execn_Abrupt_end)
with Abrupt show ?thesis
by auto
next
case (Normal s')
show ?thesis
proof (cases "f\<in>F")
case True
with exec_strip Normal
have exec_strip_c: "\<Gamma>\<turnstile>\<langle>strip_guards F c,Normal s'\<rangle> =n\<Rightarrow> t"
by simp
with Guard.hyps obtain t' where
"\<Gamma>\<turnstile>\<langle>c,Normal s'\<rangle> =n\<Rightarrow> t'" and
"isFault t \<longrightarrow> isFault t'" and
"t' \<in> Fault ` (-F) \<longrightarrow> t'=t" and
"\<not> isFault t' \<longrightarrow> t'=t"
by blast
with Normal True
show ?thesis
by (cases "s'\<in> g") (fastforce intro: execn.intros)+
next
case False
note f_notin_F = this
show ?thesis
proof (cases "s'\<in>g")
case False
with Normal exec_strip f_notin_F have t: "t=Fault f"
by (auto elim: execn_Normal_elim_cases)
from False
have "\<Gamma>\<turnstile>\<langle>Guard f g c,Normal s'\<rangle> =n\<Rightarrow> Fault f"
by (blast intro: execn.intros)
with False Normal t show ?thesis
by auto
next
case True
with exec_strip Normal f_notin_F
have "\<Gamma>\<turnstile>\<langle>strip_guards F c,Normal s'\<rangle> =n\<Rightarrow> t"
by (auto elim: execn_Normal_elim_cases)
with Guard.hyps obtain t' where
"\<Gamma>\<turnstile>\<langle>c,Normal s'\<rangle> =n\<Rightarrow> t'" and
"isFault t \<longrightarrow> isFault t'" and
"t' \<in> Fault ` (-F) \<longrightarrow> t'=t" and
"\<not> isFault t' \<longrightarrow> t'=t"
by blast
with Normal True
show ?thesis
by (blast intro: execn.intros)
qed
qed
qed
next
case Throw thus ?case by auto
next
case (Catch c1 c2 s n t)
have exec_strip: "\<Gamma>\<turnstile>\<langle>strip_guards F (Catch c1 c2),s\<rangle> =n\<Rightarrow> t" by fact
show ?case
proof (cases "s")
case (Fault f)
with exec_strip have "t=Fault f"
by (auto dest: execn_Fault_end)
with Fault show ?thesis
by auto
next
case Stuck
with exec_strip have "t=Stuck"
by (auto dest: execn_Stuck_end)
with Stuck show ?thesis
by auto
next
case (Abrupt s')
with exec_strip have "t=Abrupt s'"
by (auto dest: execn_Abrupt_end)
with Abrupt show ?thesis
by auto
next
case (Normal s') note s=this
with exec_strip have
"\<Gamma>\<turnstile>\<langle>Catch (strip_guards F c1) (strip_guards F c2),Normal s'\<rangle> =n\<Rightarrow> t" by simp
thus ?thesis
proof (cases)
fix w
assume exec_strip_c1: "\<Gamma>\<turnstile>\<langle>strip_guards F c1,Normal s'\<rangle> =n\<Rightarrow> Abrupt w"
assume exec_strip_c2: "\<Gamma>\<turnstile>\<langle>strip_guards F c2,Normal w\<rangle> =n\<Rightarrow> t"
from exec_strip_c1 Catch.hyps
obtain w' where
exec_c1: "\<Gamma>\<turnstile>\<langle>c1,Normal s'\<rangle> =n\<Rightarrow> w'" and
w'_Fault: "w' \<in> Fault ` (-F) \<longrightarrow> w'=Abrupt w" and
w'_noFault: "\<not> isFault w' \<longrightarrow> w'=Abrupt w"
by blast
show ?thesis
proof (cases "w'")
case (Fault f')
with Normal exec_c1 have "\<Gamma>\<turnstile>\<langle>Catch c1 c2,s\<rangle> =n\<Rightarrow> Fault f'"
by (auto intro: execn.intros)
with w'_Fault Fault show ?thesis
by auto
next
case Stuck
with w'_noFault have False
by simp
thus ?thesis ..
next
case (Normal w'')
with w'_noFault have False by simp thus ?thesis ..
next
case (Abrupt w'')
with w'_noFault have w'': "w''=w" by simp
from exec_strip_c2 Catch.hyps
obtain t' where
"\<Gamma>\<turnstile>\<langle>c2,Normal w\<rangle> =n\<Rightarrow> t'"
"isFault t \<longrightarrow> isFault t'"
"t' \<in> Fault ` (-F) \<longrightarrow> t'=t"
"\<not> isFault t' \<longrightarrow> t'=t"
by blast
with w'' Abrupt s exec_c1
show ?thesis
by (blast intro: execn.intros)
qed
next
assume t: "\<not> isAbr t"
assume "\<Gamma>\<turnstile>\<langle>strip_guards F c1,Normal s'\<rangle> =n\<Rightarrow> t"
with Catch.hyps
obtain t' where
exec_c1: "\<Gamma>\<turnstile>\<langle>c1,Normal s'\<rangle> =n\<Rightarrow> t'" and
t_Fault: "isFault t \<longrightarrow> isFault t'" and
t'_Fault: "t' \<in> Fault ` (-F) \<longrightarrow> t'=t" and
t'_noFault: "\<not> isFault t' \<longrightarrow> t'=t"
by blast
show ?thesis
proof (cases "isFault t'")
case True
then obtain f' where t': "t'=Fault f'"..
with exec_c1 have "\<Gamma>\<turnstile>\<langle>Catch c1 c2,Normal s'\<rangle> =n\<Rightarrow> Fault f'"
by (auto intro: execn.intros)
with t'_Fault t' s show ?thesis
by auto
next
case False
with t'_noFault have "t'=t" by simp
with t exec_c1 s show ?thesis
by (blast intro: execn.intros)
qed
qed
qed
qed
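(* The analogous simulation when the guards are stripped from all procedure
   bodies in the context, i.e. under strip F \<Gamma>. *)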
lemma execn_strip_to_execn:
assumes exec_strip: "strip F \<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"
shows "\<exists>t'. \<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t' \<and>
(isFault t \<longrightarrow> isFault t') \<and>
(t' \<in> Fault ` (- F) \<longrightarrow> t'=t) \<and>
(\<not> isFault t' \<longrightarrow> t'=t)"
using exec_strip
proof (induct)
case Skip thus ?case by (blast intro: execn.intros)
next
case Guard thus ?case by (blast intro: execn.intros)
next
case GuardFault thus ?case by (blast intro: execn.intros)
next
case FaultProp thus ?case by (blast intro: execn.intros)
next
case Basic thus ?case by (blast intro: execn.intros)
next
case Spec thus ?case by (blast intro: execn.intros)
next
case SpecStuck thus ?case by (blast intro: execn.intros)
next
case Seq thus ?case by (blast intro: execn.intros elim: isFaultE)
next
case CondTrue thus ?case by (blast intro: execn.intros)
next
case CondFalse thus ?case by (blast intro: execn.intros)
next
case WhileTrue thus ?case by (blast intro: execn.intros elim: isFaultE)
next
case WhileFalse thus ?case by (blast intro: execn.intros)
next
case Call thus ?case
by simp (blast intro: execn.intros dest: execn_strip_guards_to_execn)
next
case CallUndefined thus ?case
by simp (blast intro: execn.intros)
next
case StuckProp thus ?case
by blast
next
case DynCom thus ?case by (blast intro: execn.intros)
next
case Throw thus ?case by (blast intro: execn.intros)
next
case AbruptProp thus ?case by (blast intro: execn.intros)
next
case (CatchMatch c1 s n r c2 t)
then obtain r' t' where
exec_c1: "\<Gamma>\<turnstile>\<langle>c1,Normal s\<rangle> =n\<Rightarrow> r'" and
r'_Fault: "r' \<in> Fault ` (-F) \<longrightarrow> r' = Abrupt r" and
r'_noFault: "\<not> isFault r' \<longrightarrow> r' = Abrupt r" and
exec_c2: "\<Gamma>\<turnstile>\<langle>c2,Normal r\<rangle> =n\<Rightarrow> t'" and
t_Fault: "isFault t \<longrightarrow> isFault t'" and
t'_Fault: "t' \<in> Fault ` (-F) \<longrightarrow> t' = t" and
t'_noFault: "\<not> isFault t' \<longrightarrow> t' = t"
by blast
show ?case
proof (cases "isFault r'")
case True
then obtain f' where r': "r'=Fault f'"..
with exec_c1 have "\<Gamma>\<turnstile>\<langle>Catch c1 c2,Normal s\<rangle> =n\<Rightarrow> Fault f'"
by (auto intro: execn.intros)
with r' r'_Fault show ?thesis
by (auto intro: execn.intros)
next
case False
with r'_noFault have "r'=Abrupt r" by simp
with exec_c1 exec_c2 t_Fault t'_noFault t'_Fault
show ?thesis
by (blast intro: execn.intros)
qed
next
case CatchMiss thus ?case by (fastforce intro: execn.intros elim: isFaultE)
qed
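(* Big-step counterparts of the stripping simulations. *)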
lemma exec_strip_guards_to_exec:
assumes exec_strip: "\<Gamma>\<turnstile>\<langle>strip_guards F c,s\<rangle> \<Rightarrow> t"
shows "\<exists>t'. \<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t' \<and>
(isFault t \<longrightarrow> isFault t') \<and>
(t' \<in> Fault ` (-F) \<longrightarrow> t'=t) \<and>
(\<not> isFault t' \<longrightarrow> t'=t)"
proof -
from exec_strip obtain n where
execn_strip: "\<Gamma>\<turnstile>\<langle>strip_guards F c,s\<rangle> =n\<Rightarrow> t"
by (auto simp add: exec_iff_execn)
then obtain t' where
"\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t'"
"isFault t \<longrightarrow> isFault t'" "t' \<in> Fault ` (-F) \<longrightarrow> t'=t" "\<not> isFault t' \<longrightarrow> t'=t"
by (blast dest: execn_strip_guards_to_execn)
thus ?thesis
by (blast intro: execn_to_exec)
qed
lemma exec_strip_to_exec:
assumes exec_strip: "strip F \<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t"
shows "\<exists>t'. \<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t' \<and>
(isFault t \<longrightarrow> isFault t') \<and>
(t' \<in> Fault ` (-F) \<longrightarrow> t'=t) \<and>
(\<not> isFault t' \<longrightarrow> t'=t)"
proof -
from exec_strip obtain n where
execn_strip: "strip F \<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"
by (auto simp add: exec_iff_execn)
then obtain t' where
"\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t'"
"isFault t \<longrightarrow> isFault t'" "t' \<in> Fault ` (-F) \<longrightarrow> t'=t" "\<not> isFault t' \<longrightarrow> t'=t"
by (blast dest: execn_strip_to_execn)
thus ?thesis
by (blast intro: execn_to_exec)
qed
lemma exec_to_exec_strip_guards:
assumes exec_c: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t"
assumes t_not_Fault: "\<not> isFault t"
shows "\<Gamma>\<turnstile>\<langle>strip_guards F c,s\<rangle> \<Rightarrow> t"
proof -
from exec_c obtain n where "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow>t"
by (auto simp add: exec_iff_execn)
from this t_not_Fault
have "\<Gamma>\<turnstile>\<langle>strip_guards F c,s\<rangle> =n\<Rightarrow> t"
by (rule execn_to_execn_strip_guards)
thus "\<Gamma>\<turnstile>\<langle>strip_guards F c,s\<rangle> \<Rightarrow> t"
by (rule execn_to_exec)
qed
lemma exec_to_exec_strip_guards':
assumes exec_c: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t"
assumes t_not_Fault: "t \<notin> Fault ` F"
shows "\<Gamma>\<turnstile>\<langle>strip_guards F c,s\<rangle> \<Rightarrow> t"
proof -
from exec_c obtain n where "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow>t"
by (auto simp add: exec_iff_execn)
from this t_not_Fault
have "\<Gamma>\<turnstile>\<langle>strip_guards F c,s\<rangle> =n\<Rightarrow> t"
by (rule execn_to_execn_strip_guards')
thus "\<Gamma>\<turnstile>\<langle>strip_guards F c,s\<rangle> \<Rightarrow> t"
by (rule execn_to_exec)
qed
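(* Stripping guards from every procedure body in the context preserves
   fault-free executions. *)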
lemma execn_to_execn_strip:
assumes exec_c: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"
assumes t_not_Fault: "\<not> isFault t"
shows "strip F \<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"
using exec_c t_not_Fault
proof (induct)
case (Call p bdy s n s')
have bdy: "\<Gamma> p = Some bdy" by fact
from Call have "strip F \<Gamma>\<turnstile>\<langle>bdy,Normal s\<rangle> =n\<Rightarrow> s'"
by blast
from execn_to_execn_strip_guards [OF this] Call
have "strip F \<Gamma>\<turnstile>\<langle>strip_guards F bdy,Normal s\<rangle> =n\<Rightarrow> s'"
by simp
moreover from bdy have "(strip F \<Gamma>) p = Some (strip_guards F bdy)"
by simp
ultimately
show ?case
by (blast intro: execn.intros)
next
case CallUndefined thus ?case by (auto intro: execn.CallUndefined)
qed (auto intro: execn.intros dest: noFaultn_startD' simp add: not_isFault_iff)
lemma execn_to_execn_strip':
assumes exec_c: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"
assumes t_not_Fault: "t \<notin> Fault ` F"
shows "strip F \<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"
using exec_c t_not_Fault
proof (induct)
case (Call p bdy s n s')
have bdy: "\<Gamma> p = Some bdy" by fact
from Call have "strip F \<Gamma>\<turnstile>\<langle>bdy,Normal s\<rangle> =n\<Rightarrow> s'"
by blast
from execn_to_execn_strip_guards' [OF this] Call
have "strip F \<Gamma>\<turnstile>\<langle>strip_guards F bdy,Normal s\<rangle> =n\<Rightarrow> s'"
by simp
moreover from bdy have "(strip F \<Gamma>) p = Some (strip_guards F bdy)"
by simp
ultimately
show ?case
by (blast intro: execn.intros)
next
case CallUndefined thus ?case by (auto intro: execn.CallUndefined)
next
case (Seq c1 s n s' c2 t)
show ?case
proof (cases "isFault s'")
case False
with Seq show ?thesis
by (auto intro: execn.intros simp add: not_isFault_iff)
next
case True
then obtain f' where s': "s'=Fault f'" by (auto simp add: isFault_def)
with Seq obtain "t=Fault f'" and "f' \<notin> F"
by (force dest: execn_Fault_end)
with Seq s' show ?thesis
by (auto intro: execn.intros)
qed
next
case (WhileTrue b c s n s' t)
show ?case
proof (cases "isFault s'")
case False
with WhileTrue show ?thesis
by (auto intro: execn.intros simp add: not_isFault_iff)
next
case True
then obtain f' where s': "s'=Fault f'" by (auto simp add: isFault_def)
with WhileTrue obtain "t=Fault f'" and "f' \<notin> F"
by (force dest: execn_Fault_end)
with WhileTrue s' show ?thesis
by (auto intro: execn.intros)
qed
qed (auto intro: execn.intros)
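(* Big-step versions of execn_to_execn_strip and execn_to_execn_strip'. *)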
lemma exec_to_exec_strip:
assumes exec_c: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t"
assumes t_not_Fault: "\<not> isFault t"
shows "strip F \<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t"
proof -
from exec_c obtain n where "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow>t"
by (auto simp add: exec_iff_execn)
from this t_not_Fault
have "strip F \<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"
by (rule execn_to_execn_strip)
thus "strip F \<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t"
by (rule execn_to_exec)
qed
lemma exec_to_exec_strip':
assumes exec_c: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t"
assumes t_not_Fault: "t \<notin> Fault ` F"
shows "strip F \<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t"
proof -
from exec_c obtain n where "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow>t"
by (auto simp add: exec_iff_execn)
from this t_not_Fault
have "strip F \<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"
by (rule execn_to_execn_strip')
thus "strip F \<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t"
by (rule execn_to_exec)
qed
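(* Big-step version of execn_to_execn_strip_guards_Fault. *)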
lemma exec_to_exec_strip_guards_Fault:
assumes exec_c: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> Fault f"
assumes f_notin_F: "f \<notin> F"
shows"\<Gamma>\<turnstile>\<langle>strip_guards F c,s\<rangle> \<Rightarrow> Fault f"
proof -
from exec_c obtain n where "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow>Fault f"
by (auto simp add: exec_iff_execn)
from execn_to_execn_strip_guards_Fault [OF this _ f_notin_F]
have "\<Gamma>\<turnstile>\<langle>strip_guards F c,s\<rangle> =n\<Rightarrow> Fault f"
by simp
thus "\<Gamma>\<turnstile>\<langle>strip_guards F c,s\<rangle> \<Rightarrow> Fault f"
by (rule execn_to_exec)
qed
(* ************************************************************************* *)
subsection {* Lemmas about @{term "c\<^sub>1 \<inter>\<^sub>g c\<^sub>2"} *}
(* ************************************************************************* *)
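(* A fault-free execution of a combined program c with (c1 \<inter>\<^sub>g c2) = Some c
   is also an execution of both components c1 and c2. *)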
lemma inter_guards_execn_Normal_noFault:
"\<And>c c2 s t n. \<lbrakk>(c1 \<inter>\<^sub>g c2) = Some c; \<Gamma>\<turnstile>\<langle>c,Normal s\<rangle> =n\<Rightarrow> t; \<not> isFault t\<rbrakk>
\<Longrightarrow> \<Gamma>\<turnstile>\<langle>c1,Normal s\<rangle> =n\<Rightarrow> t \<and> \<Gamma>\<turnstile>\<langle>c2,Normal s\<rangle> =n\<Rightarrow> t"
proof (induct c1)
case Skip
have "(Skip \<inter>\<^sub>g c2) = Some c" by fact
then obtain c2: "c2=Skip" and c: "c=Skip"
by (simp add: inter_guards_Skip)
have "\<Gamma>\<turnstile>\<langle>c,Normal s\<rangle> =n\<Rightarrow> t" by fact
with c have "t=Normal s"
by (auto elim: execn_Normal_elim_cases)
with Skip c2
show ?case
by (auto intro: execn.intros)
next
case (Basic f)
have "(Basic f \<inter>\<^sub>g c2) = Some c" by fact
then obtain c2: "c2=Basic f" and c: "c=Basic f"
by (simp add: inter_guards_Basic)
have "\<Gamma>\<turnstile>\<langle>c,Normal s\<rangle> =n\<Rightarrow> t" by fact
with c have "t=Normal (f s)"
by (auto elim: execn_Normal_elim_cases)
with Basic c2
show ?case
by (auto intro: execn.intros)
next
case (Spec r)
have "(Spec r \<inter>\<^sub>g c2) = Some c" by fact
then obtain c2: "c2=Spec r" and c: "c=Spec r"
by (simp add: inter_guards_Spec)
have "\<Gamma>\<turnstile>\<langle>c,Normal s\<rangle> =n\<Rightarrow> t" by fact
with c have "\<Gamma>\<turnstile>\<langle>Spec r,Normal s\<rangle> =n\<Rightarrow> t" by simp
from this Spec c2 show ?case
by (cases) (auto intro: execn.intros)
next
case (Seq a1 a2)
have noFault: "\<not> isFault t" by fact
have "(Seq a1 a2 \<inter>\<^sub>g c2) = Some c" by fact
then obtain b1 b2 d1 d2 where
c2: "c2=Seq b1 b2" and
d1: "(a1 \<inter>\<^sub>g b1) = Some d1" and d2: "(a2 \<inter>\<^sub>g b2) = Some d2" and
c: "c=Seq d1 d2"
by (auto simp add: inter_guards_Seq)
have "\<Gamma>\<turnstile>\<langle>c,Normal s\<rangle> =n\<Rightarrow> t" by fact
with c obtain s' where
exec_d1: "\<Gamma>\<turnstile>\<langle>d1,Normal s\<rangle> =n\<Rightarrow> s'" and
exec_d2: "\<Gamma>\<turnstile>\<langle>d2,s'\<rangle> =n\<Rightarrow> t"
by (auto elim: execn_Normal_elim_cases)
show ?case
proof (cases s')
case (Fault f')
with exec_d2 have "t=Fault f'"
by (auto intro: execn_Fault_end)
with noFault show ?thesis by simp
next
case (Normal s'')
with d1 exec_d1 Seq.hyps
obtain
"\<Gamma>\<turnstile>\<langle>a1,Normal s\<rangle> =n\<Rightarrow> Normal s''" and "\<Gamma>\<turnstile>\<langle>b1,Normal s\<rangle> =n\<Rightarrow> Normal s''"
by auto
moreover
from Normal d2 exec_d2 noFault Seq.hyps
obtain "\<Gamma>\<turnstile>\<langle>a2,Normal s''\<rangle> =n\<Rightarrow> t" and "\<Gamma>\<turnstile>\<langle>b2,Normal s''\<rangle> =n\<Rightarrow> t"
by auto
ultimately
show ?thesis
using Normal c2 by (auto intro: execn.intros)
next
case (Abrupt s'')
with exec_d2 have "t=Abrupt s''"
by (auto simp add: execn_Abrupt_end)
moreover
from Abrupt d1 exec_d1 Seq.hyps
obtain "\<Gamma>\<turnstile>\<langle>a1,Normal s\<rangle> =n\<Rightarrow> Abrupt s''" and "\<Gamma>\<turnstile>\<langle>b1,Normal s\<rangle> =n\<Rightarrow> Abrupt s''"
by auto
moreover
obtain
"\<Gamma>\<turnstile>\<langle>a2,Abrupt s''\<rangle> =n\<Rightarrow> Abrupt s''" and "\<Gamma>\<turnstile>\<langle>b2,Abrupt s''\<rangle> =n\<Rightarrow> Abrupt s''"
by auto
ultimately
show ?thesis
using Abrupt c2 by (auto intro: execn.intros)
next
case Stuck
with exec_d2 have "t=Stuck"
by (auto simp add: execn_Stuck_end)
moreover
from Stuck d1 exec_d1 Seq.hyps
obtain "\<Gamma>\<turnstile>\<langle>a1,Normal s\<rangle> =n\<Rightarrow> Stuck" and "\<Gamma>\<turnstile>\<langle>b1,Normal s\<rangle> =n\<Rightarrow> Stuck"
by auto
moreover
obtain
"\<Gamma>\<turnstile>\<langle>a2,Stuck\<rangle> =n\<Rightarrow> Stuck" and "\<Gamma>\<turnstile>\<langle>b2,Stuck\<rangle> =n\<Rightarrow> Stuck"
by auto
ultimately
show ?thesis
using Stuck c2 by (auto intro: execn.intros)
qed
next
case (Cond b t1 e1)
have noFault: "\<not> isFault t" by fact
have "(Cond b t1 e1 \<inter>\<^sub>g c2) = Some c" by fact
then obtain t2 e2 t3 e3 where
c2: "c2=Cond b t2 e2" and
t3: "(t1 \<inter>\<^sub>g t2) = Some t3" and
e3: "(e1 \<inter>\<^sub>g e2) = Some e3" and
c: "c=Cond b t3 e3"
by (auto simp add: inter_guards_Cond)
have "\<Gamma>\<turnstile>\<langle>c,Normal s\<rangle> =n\<Rightarrow> t" by fact
with c have "\<Gamma>\<turnstile>\<langle>Cond b t3 e3,Normal s\<rangle> =n\<Rightarrow> t"
by simp
then show ?case
proof (cases)
assume s_in_b: "s\<in>b"
assume "\<Gamma>\<turnstile>\<langle>t3,Normal s\<rangle> =n\<Rightarrow> t"
with Cond.hyps t3 noFault
obtain "\<Gamma>\<turnstile>\<langle>t1,Normal s\<rangle> =n\<Rightarrow> t" "\<Gamma>\<turnstile>\<langle>t2,Normal s\<rangle> =n\<Rightarrow> t"
by auto
with s_in_b c2 show ?thesis
by (auto intro: execn.intros)
next
assume s_notin_b: "s\<notin>b"
assume "\<Gamma>\<turnstile>\<langle>e3,Normal s\<rangle> =n\<Rightarrow> t"
with Cond.hyps e3 noFault
obtain "\<Gamma>\<turnstile>\<langle>e1,Normal s\<rangle> =n\<Rightarrow> t" "\<Gamma>\<turnstile>\<langle>e2,Normal s\<rangle> =n\<Rightarrow> t"
by auto
with s_notin_b c2 show ?thesis
by (auto intro: execn.intros)
qed
next
case (While b bdy1)
have noFault: "\<not> isFault t" by fact
have "(While b bdy1 \<inter>\<^sub>g c2) = Some c" by fact
then obtain bdy2 bdy where
c2: "c2=While b bdy2" and
bdy: "(bdy1 \<inter>\<^sub>g bdy2) = Some bdy" and
c: "c=While b bdy"
by (auto simp add: inter_guards_While)
have exec_c: "\<Gamma>\<turnstile>\<langle>c,Normal s\<rangle> =n\<Rightarrow> t" by fact
{
fix s t n w w1 w2
assume exec_w: "\<Gamma>\<turnstile>\<langle>w,Normal s\<rangle> =n\<Rightarrow> t"
assume w: "w=While b bdy"
assume noFault: "\<not> isFault t"
from exec_w w noFault
have "\<Gamma>\<turnstile>\<langle>While b bdy1,Normal s\<rangle> =n\<Rightarrow> t \<and>
\<Gamma>\<turnstile>\<langle>While b bdy2,Normal s\<rangle> =n\<Rightarrow> t"
proof (induct)
prefer 10
case (WhileTrue s b' bdy' n s' s'')
have eqs: "While b' bdy' = While b bdy" by fact
from WhileTrue have s_in_b: "s \<in> b" by simp
have noFault_s'': "\<not> isFault s''" by fact
from WhileTrue
have exec_bdy: "\<Gamma>\<turnstile>\<langle>bdy,Normal s\<rangle> =n\<Rightarrow> s'" by simp
from WhileTrue
have exec_w: "\<Gamma>\<turnstile>\<langle>While b bdy,s'\<rangle> =n\<Rightarrow> s''" by simp
show ?case
proof (cases s')
case (Fault f)
with exec_w have "s''=Fault f"
by (auto intro: execn_Fault_end)
with noFault_s'' show ?thesis by simp
next
case (Normal s''')
with exec_bdy bdy While.hyps
obtain "\<Gamma>\<turnstile>\<langle>bdy1,Normal s\<rangle> =n\<Rightarrow> Normal s'''"
"\<Gamma>\<turnstile>\<langle>bdy2,Normal s\<rangle> =n\<Rightarrow> Normal s'''"
by auto
moreover
from Normal WhileTrue
obtain
"\<Gamma>\<turnstile>\<langle>While b bdy1,Normal s'''\<rangle> =n\<Rightarrow> s''"
"\<Gamma>\<turnstile>\<langle>While b bdy2,Normal s'''\<rangle> =n\<Rightarrow> s''"
by simp
ultimately show ?thesis
using s_in_b Normal
by (auto intro: execn.intros)
next
case (Abrupt s''')
with exec_bdy bdy While.hyps
obtain "\<Gamma>\<turnstile>\<langle>bdy1,Normal s\<rangle> =n\<Rightarrow> Abrupt s'''"
"\<Gamma>\<turnstile>\<langle>bdy2,Normal s\<rangle> =n\<Rightarrow> Abrupt s'''"
by auto
moreover
from Abrupt WhileTrue
obtain
"\<Gamma>\<turnstile>\<langle>While b bdy1,Abrupt s'''\<rangle> =n\<Rightarrow> s''"
"\<Gamma>\<turnstile>\<langle>While b bdy2,Abrupt s'''\<rangle> =n\<Rightarrow> s''"
by simp
ultimately show ?thesis
using s_in_b Abrupt
by (auto intro: execn.intros)
next
case Stuck
with exec_bdy bdy While.hyps
obtain "\<Gamma>\<turnstile>\<langle>bdy1,Normal s\<rangle> =n\<Rightarrow> Stuck"
"\<Gamma>\<turnstile>\<langle>bdy2,Normal s\<rangle> =n\<Rightarrow> Stuck"
by auto
moreover
from Stuck WhileTrue
obtain
"\<Gamma>\<turnstile>\<langle>While b bdy1,Stuck\<rangle> =n\<Rightarrow> s''"
"\<Gamma>\<turnstile>\<langle>While b bdy2,Stuck\<rangle> =n\<Rightarrow> s''"
by simp
ultimately show ?thesis
using s_in_b Stuck
by (auto intro: execn.intros)
qed
next
case WhileFalse thus ?case by (auto intro: execn.intros)
qed (simp_all)
}
with this [OF exec_c c noFault] c2
show ?case
by auto
next
case Call thus ?case by (simp add: inter_guards_Call)
next
case (DynCom f1)
have noFault: "\<not> isFault t" by fact
have "(DynCom f1 \<inter>\<^sub>g c2) = Some c" by fact
then obtain f2 f where
c2: "c2=DynCom f2" and
f_defined: "\<forall>s. ((f1 s) \<inter>\<^sub>g (f2 s)) \<noteq> None" and
c: "c=DynCom (\<lambda>s. the ((f1 s) \<inter>\<^sub>g (f2 s)))"
by (auto simp add: inter_guards_DynCom)
have "\<Gamma>\<turnstile>\<langle>c,Normal s\<rangle> =n\<Rightarrow> t" by fact
with c have "\<Gamma>\<turnstile>\<langle>DynCom (\<lambda>s. the ((f1 s) \<inter>\<^sub>g (f2 s))),Normal s\<rangle> =n\<Rightarrow> t" by simp
then show ?case
proof (cases)
assume exec_f: "\<Gamma>\<turnstile>\<langle>the (f1 s \<inter>\<^sub>g f2 s),Normal s\<rangle> =n\<Rightarrow> t"
from f_defined obtain f where "(f1 s \<inter>\<^sub>g f2 s) = Some f"
by auto
with DynCom.hyps this exec_f c2 noFault
show ?thesis
using execn.DynCom by fastforce
qed
next
case Guard thus ?case
by (fastforce elim: execn_Normal_elim_cases intro: execn.intros
simp add: inter_guards_Guard)
next
case Throw thus ?case
by (fastforce elim: execn_Normal_elim_cases
simp add: inter_guards_Throw)
next
case (Catch a1 a2)
have noFault: "\<not> isFault t" by fact
have "(Catch a1 a2 \<inter>\<^sub>g c2) = Some c" by fact
then obtain b1 b2 d1 d2 where
c2: "c2=Catch b1 b2" and
d1: "(a1 \<inter>\<^sub>g b1) = Some d1" and d2: "(a2 \<inter>\<^sub>g b2) = Some d2" and
c: "c=Catch d1 d2"
by (auto simp add: inter_guards_Catch)
have "\<Gamma>\<turnstile>\<langle>c,Normal s\<rangle> =n\<Rightarrow> t" by fact
with c have "\<Gamma>\<turnstile>\<langle>Catch d1 d2,Normal s\<rangle> =n\<Rightarrow> t" by simp
then show ?case
proof (cases)
fix s'
assume "\<Gamma>\<turnstile>\<langle>d1,Normal s\<rangle> =n\<Rightarrow> Abrupt s'"
with d1 Catch.hyps
obtain "\<Gamma>\<turnstile>\<langle>a1,Normal s\<rangle> =n\<Rightarrow> Abrupt s'" and "\<Gamma>\<turnstile>\<langle>b1,Normal s\<rangle> =n\<Rightarrow> Abrupt s'"
by auto
moreover
assume "\<Gamma>\<turnstile>\<langle>d2,Normal s'\<rangle> =n\<Rightarrow> t"
with d2 Catch.hyps noFault
obtain "\<Gamma>\<turnstile>\<langle>a2,Normal s'\<rangle> =n\<Rightarrow> t" and "\<Gamma>\<turnstile>\<langle>b2,Normal s'\<rangle> =n\<Rightarrow> t"
by auto
ultimately
show ?thesis
using c2 by (auto intro: execn.intros)
next
assume "\<not> isAbr t"
moreover
assume "\<Gamma>\<turnstile>\<langle>d1,Normal s\<rangle> =n\<Rightarrow> t"
with d1 Catch.hyps noFault
obtain "\<Gamma>\<turnstile>\<langle>a1,Normal s\<rangle> =n\<Rightarrow> t" and "\<Gamma>\<turnstile>\<langle>b1,Normal s\<rangle> =n\<Rightarrow> t"
by auto
ultimately
show ?thesis
using c2 by (auto intro: execn.intros)
qed
qed
lemma inter_guards_execn_noFault:
assumes c: "(c1 \<inter>\<^sub>g c2) = Some c"
assumes exec_c: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"
assumes noFault: "\<not> isFault t"
shows "\<Gamma>\<turnstile>\<langle>c1,s\<rangle> =n\<Rightarrow> t \<and> \<Gamma>\<turnstile>\<langle>c2,s\<rangle> =n\<Rightarrow> t"
proof (cases s)
case (Fault f)
with exec_c have "t = Fault f"
by (auto intro: execn_Fault_end)
with noFault show ?thesis
by simp
next
case (Abrupt s')
with exec_c have "t=Abrupt s'"
by (simp add: execn_Abrupt_end)
with Abrupt show ?thesis by auto
next
case Stuck
with exec_c have "t=Stuck"
by (simp add: execn_Stuck_end)
with Stuck show ?thesis by auto
next
case (Normal s')
with exec_c noFault inter_guards_execn_Normal_noFault [OF c]
show ?thesis
by blast
qed
lemma inter_guards_exec_noFault:
assumes c: "(c1 \<inter>\<^sub>g c2) = Some c"
assumes exec_c: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t"
assumes noFault: "\<not> isFault t"
shows "\<Gamma>\<turnstile>\<langle>c1,s\<rangle> \<Rightarrow> t \<and> \<Gamma>\<turnstile>\<langle>c2,s\<rangle> \<Rightarrow> t"
proof -
from exec_c obtain n where "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"
by (auto simp add: exec_iff_execn)
from c this noFault
have "\<Gamma>\<turnstile>\<langle>c1,s\<rangle> =n\<Rightarrow> t \<and> \<Gamma>\<turnstile>\<langle>c2,s\<rangle> =n\<Rightarrow> t"
by (rule inter_guards_execn_noFault)
thus ?thesis
by (auto intro: execn_to_exec)
qed
lemma inter_guards_execn_Normal_Fault:
"\<And>c c2 s n. \<lbrakk>(c1 \<inter>\<^sub>g c2) = Some c; \<Gamma>\<turnstile>\<langle>c,Normal s\<rangle> =n\<Rightarrow> Fault f\<rbrakk>
\<Longrightarrow> (\<Gamma>\<turnstile>\<langle>c1,Normal s\<rangle> =n\<Rightarrow> Fault f \<or> \<Gamma>\<turnstile>\<langle>c2,Normal s\<rangle> =n\<Rightarrow> Fault f)"
proof (induct c1)
case Skip thus ?case by (fastforce simp add: inter_guards_Skip)
next
case (Basic f) thus ?case by (fastforce simp add: inter_guards_Basic)
next
case (Spec r) thus ?case by (fastforce simp add: inter_guards_Spec)
next
case (Seq a1 a2)
have "(Seq a1 a2 \<inter>\<^sub>g c2) = Some c" by fact
then obtain b1 b2 d1 d2 where
c2: "c2=Seq b1 b2" and
d1: "(a1 \<inter>\<^sub>g b1) = Some d1" and d2: "(a2 \<inter>\<^sub>g b2) = Some d2" and
c: "c=Seq d1 d2"
by (auto simp add: inter_guards_Seq)
have "\<Gamma>\<turnstile>\<langle>c,Normal s\<rangle> =n\<Rightarrow> Fault f" by fact
with c obtain s' where
exec_d1: "\<Gamma>\<turnstile>\<langle>d1,Normal s\<rangle> =n\<Rightarrow> s'" and
exec_d2: "\<Gamma>\<turnstile>\<langle>d2,s'\<rangle> =n\<Rightarrow> Fault f"
by (auto elim: execn_Normal_elim_cases)
show ?case
proof (cases s')
case (Fault f')
with exec_d2 have "f'=f"
by (auto dest: execn_Fault_end)
with Fault d1 exec_d1
have "\<Gamma>\<turnstile>\<langle>a1,Normal s\<rangle> =n\<Rightarrow> Fault f \<or> \<Gamma>\<turnstile>\<langle>b1,Normal s\<rangle> =n\<Rightarrow> Fault f"
by (auto dest: Seq.hyps)
thus ?thesis
proof (cases rule: disjE [consumes 1])
assume "\<Gamma>\<turnstile>\<langle>a1,Normal s\<rangle> =n\<Rightarrow> Fault f"
hence "\<Gamma>\<turnstile>\<langle>Seq a1 a2,Normal s\<rangle> =n\<Rightarrow> Fault f"
by (auto intro: execn.intros)
thus ?thesis
by simp
next
assume "\<Gamma>\<turnstile>\<langle>b1,Normal s\<rangle> =n\<Rightarrow> Fault f"
hence "\<Gamma>\<turnstile>\<langle>Seq b1 b2,Normal s\<rangle> =n\<Rightarrow> Fault f"
by (auto intro: execn.intros)
with c2 show ?thesis
by simp
qed
next
case Abrupt with exec_d2 show ?thesis by (auto dest: execn_Abrupt_end)
next
case Stuck with exec_d2 show ?thesis by (auto dest: execn_Stuck_end)
next
case (Normal s'')
with inter_guards_execn_noFault [OF d1 exec_d1] obtain
exec_a1: "\<Gamma>\<turnstile>\<langle>a1,Normal s\<rangle> =n\<Rightarrow> Normal s''" and
exec_b1: "\<Gamma>\<turnstile>\<langle>b1,Normal s\<rangle> =n\<Rightarrow> Normal s''"
by simp
moreover from d2 exec_d2 Normal
have "\<Gamma>\<turnstile>\<langle>a2,Normal s''\<rangle> =n\<Rightarrow> Fault f \<or> \<Gamma>\<turnstile>\<langle>b2,Normal s''\<rangle> =n\<Rightarrow> Fault f"
by (auto dest: Seq.hyps)
ultimately show ?thesis
using c2 by (auto intro: execn.intros)
qed
next
case (Cond b t1 e1)
have "(Cond b t1 e1 \<inter>\<^sub>g c2) = Some c" by fact
then obtain t2 e2 t e where
c2: "c2=Cond b t2 e2" and
t: "(t1 \<inter>\<^sub>g t2) = Some t" and
e: "(e1 \<inter>\<^sub>g e2) = Some e" and
c: "c=Cond b t e"
by (auto simp add: inter_guards_Cond)
have "\<Gamma>\<turnstile>\<langle>c,Normal s\<rangle> =n\<Rightarrow> Fault f" by fact
with c have "\<Gamma>\<turnstile>\<langle>Cond b t e,Normal s\<rangle> =n\<Rightarrow> Fault f" by simp
thus ?case
proof (cases)
assume "s \<in> b"
moreover assume "\<Gamma>\<turnstile>\<langle>t,Normal s\<rangle> =n\<Rightarrow> Fault f"
with t have "\<Gamma>\<turnstile>\<langle>t1,Normal s\<rangle> =n\<Rightarrow> Fault f \<or> \<Gamma>\<turnstile>\<langle>t2,Normal s\<rangle> =n\<Rightarrow> Fault f"
by (auto dest: Cond.hyps)
ultimately show ?thesis using c2 c by (fastforce intro: execn.intros)
next
assume "s \<notin> b"
moreover assume "\<Gamma>\<turnstile>\<langle>e,Normal s\<rangle> =n\<Rightarrow> Fault f"
with e have "\<Gamma>\<turnstile>\<langle>e1,Normal s\<rangle> =n\<Rightarrow> Fault f \<or> \<Gamma>\<turnstile>\<langle>e2,Normal s\<rangle> =n\<Rightarrow> Fault f"
by (auto dest: Cond.hyps)
ultimately show ?thesis using c2 c by (fastforce intro: execn.intros)
qed
next
case (While b bdy1)
have "(While b bdy1 \<inter>\<^sub>g c2) = Some c" by fact
then obtain bdy2 bdy where
c2: "c2=While b bdy2" and
bdy: "(bdy1 \<inter>\<^sub>g bdy2) = Some bdy" and
c: "c=While b bdy"
by (auto simp add: inter_guards_While)
have exec_c: "\<Gamma>\<turnstile>\<langle>c,Normal s\<rangle> =n\<Rightarrow> Fault f" by fact
{
fix s t n w w1 w2
assume exec_w: "\<Gamma>\<turnstile>\<langle>w,Normal s\<rangle> =n\<Rightarrow> t"
assume w: "w=While b bdy"
assume Fault: "t=Fault f"
from exec_w w Fault
have "\<Gamma>\<turnstile>\<langle>While b bdy1,Normal s\<rangle> =n\<Rightarrow> Fault f\<or>
\<Gamma>\<turnstile>\<langle>While b bdy2,Normal s\<rangle> =n\<Rightarrow> Fault f"
proof (induct)
case (WhileTrue s b' bdy' n s' s'')
have eqs: "While b' bdy' = While b bdy" by fact
from WhileTrue have s_in_b: "s \<in> b" by simp
have Fault_s'': "s''=Fault f" by fact
from WhileTrue
have exec_bdy: "\<Gamma>\<turnstile>\<langle>bdy,Normal s\<rangle> =n\<Rightarrow> s'" by simp
from WhileTrue
have exec_w: "\<Gamma>\<turnstile>\<langle>While b bdy,s'\<rangle> =n\<Rightarrow> s''" by simp
show ?case
proof (cases s')
case (Fault f')
with exec_w Fault_s'' have "f'=f"
by (auto dest: execn_Fault_end)
with Fault exec_bdy bdy While.hyps
have "\<Gamma>\<turnstile>\<langle>bdy1,Normal s\<rangle> =n\<Rightarrow> Fault f \<or> \<Gamma>\<turnstile>\<langle>bdy2,Normal s\<rangle> =n\<Rightarrow> Fault f"
by auto
with s_in_b show ?thesis
by (fastforce intro: execn.intros)
next
case (Normal s''')
with inter_guards_execn_noFault [OF bdy exec_bdy]
obtain "\<Gamma>\<turnstile>\<langle>bdy1,Normal s\<rangle> =n\<Rightarrow> Normal s'''"
"\<Gamma>\<turnstile>\<langle>bdy2,Normal s\<rangle> =n\<Rightarrow> Normal s'''"
by auto
moreover
from Normal WhileTrue
have "\<Gamma>\<turnstile>\<langle>While b bdy1,Normal s'''\<rangle> =n\<Rightarrow> Fault f \<or>
\<Gamma>\<turnstile>\<langle>While b bdy2,Normal s'''\<rangle> =n\<Rightarrow> Fault f"
by simp
ultimately show ?thesis
using s_in_b by (fastforce intro: execn.intros)
next
case (Abrupt s''')
with exec_w Fault_s'' show ?thesis by (fastforce dest: execn_Abrupt_end)
next
case Stuck
with exec_w Fault_s'' show ?thesis by (fastforce dest: execn_Stuck_end)
qed
next
case WhileFalse thus ?case by (auto intro: execn.intros)
qed (simp_all)
}
with this [OF exec_c c] c2
show ?case
by auto
next
case Call thus ?case by (fastforce simp add: inter_guards_Call)
next
case (DynCom f1)
have "(DynCom f1 \<inter>\<^sub>g c2) = Some c" by fact
then obtain f2 where
c2: "c2=DynCom f2" and
F_defined: "\<forall>s. ((f1 s) \<inter>\<^sub>g (f2 s)) \<noteq> None" and
c: "c=DynCom (\<lambda>s. the ((f1 s) \<inter>\<^sub>g (f2 s)))"
by (auto simp add: inter_guards_DynCom)
have "\<Gamma>\<turnstile>\<langle>c,Normal s\<rangle> =n\<Rightarrow> Fault f" by fact
with c have "\<Gamma>\<turnstile>\<langle>DynCom (\<lambda>s. the ((f1 s) \<inter>\<^sub>g (f2 s))),Normal s\<rangle> =n\<Rightarrow> Fault f" by simp
then show ?case
proof (cases)
assume exec_F: "\<Gamma>\<turnstile>\<langle>the (f1 s \<inter>\<^sub>g f2 s),Normal s\<rangle> =n\<Rightarrow> Fault f"
from F_defined obtain F where "(f1 s \<inter>\<^sub>g f2 s) = Some F"
by auto
with DynCom.hyps this exec_F c2
show ?thesis
by (fastforce intro: execn.intros)
qed
next
case (Guard m g1 bdy1)
have "(Guard m g1 bdy1 \<inter>\<^sub>g c2) = Some c" by fact
then obtain g2 bdy2 bdy where
c2: "c2=Guard m g2 bdy2" and
bdy: "(bdy1 \<inter>\<^sub>g bdy2) = Some bdy" and
c: "c=Guard m (g1 \<inter> g2) bdy"
by (auto simp add: inter_guards_Guard)
have "\<Gamma>\<turnstile>\<langle>c,Normal s\<rangle> =n\<Rightarrow> Fault f" by fact
with c have "\<Gamma>\<turnstile>\<langle>Guard m (g1 \<inter> g2) bdy,Normal s\<rangle> =n\<Rightarrow> Fault f"
by simp
thus ?case
proof (cases)
assume f_m: "Fault f = Fault m"
assume "s \<notin> g1 \<inter> g2"
hence "s\<notin>g1 \<or> s\<notin>g2"
by blast
with c2 f_m show ?thesis
by (auto intro: execn.intros)
next
assume "s \<in> g1 \<inter> g2"
moreover
assume "\<Gamma>\<turnstile>\<langle>bdy,Normal s\<rangle> =n\<Rightarrow> Fault f"
with bdy have "\<Gamma>\<turnstile>\<langle>bdy1,Normal s\<rangle> =n\<Rightarrow> Fault f \<or> \<Gamma>\<turnstile>\<langle>bdy2,Normal s\<rangle> =n\<Rightarrow> Fault f"
by (rule Guard.hyps)
ultimately show ?thesis
using c2
by (auto intro: execn.intros)
qed
next
case Throw thus ?case by (fastforce simp add: inter_guards_Throw)
next
case (Catch a1 a2)
have "(Catch a1 a2 \<inter>\<^sub>g c2) = Some c" by fact
then obtain b1 b2 d1 d2 where
c2: "c2=Catch b1 b2" and
d1: "(a1 \<inter>\<^sub>g b1) = Some d1" and d2: "(a2 \<inter>\<^sub>g b2) = Some d2" and
c: "c=Catch d1 d2"
by (auto simp add: inter_guards_Catch)
have "\<Gamma>\<turnstile>\<langle>c,Normal s\<rangle> =n\<Rightarrow> Fault f" by fact
with c have "\<Gamma>\<turnstile>\<langle>Catch d1 d2,Normal s\<rangle> =n\<Rightarrow> Fault f" by simp
thus ?case
proof (cases)
fix s'
assume "\<Gamma>\<turnstile>\<langle>d1,Normal s\<rangle> =n\<Rightarrow> Abrupt s'"
from inter_guards_execn_noFault [OF d1 this] obtain
exec_a1: "\<Gamma>\<turnstile>\<langle>a1,Normal s\<rangle> =n\<Rightarrow> Abrupt s'" and
exec_b1: "\<Gamma>\<turnstile>\<langle>b1,Normal s\<rangle> =n\<Rightarrow> Abrupt s'"
by simp
moreover assume "\<Gamma>\<turnstile>\<langle>d2,Normal s'\<rangle> =n\<Rightarrow> Fault f"
with d2
have "\<Gamma>\<turnstile>\<langle>a2,Normal s'\<rangle> =n\<Rightarrow> Fault f \<or> \<Gamma>\<turnstile>\<langle>b2,Normal s'\<rangle> =n\<Rightarrow> Fault f"
by (auto dest: Catch.hyps)
ultimately show ?thesis
using c2 by (fastforce intro: execn.intros)
next
assume "\<Gamma>\<turnstile>\<langle>d1,Normal s\<rangle> =n\<Rightarrow> Fault f"
with d1 have "\<Gamma>\<turnstile>\<langle>a1,Normal s\<rangle> =n\<Rightarrow> Fault f \<or> \<Gamma>\<turnstile>\<langle>b1,Normal s\<rangle> =n\<Rightarrow> Fault f"
by (auto dest: Catch.hyps)
with c2 show ?thesis
by (fastforce intro: execn.intros)
qed
qed
lemma inter_guards_execn_Fault:
assumes c: "(c1 \<inter>\<^sub>g c2) = Some c"
assumes exec_c: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> Fault f"
shows "\<Gamma>\<turnstile>\<langle>c1,s\<rangle> =n\<Rightarrow> Fault f \<or> \<Gamma>\<turnstile>\<langle>c2,s\<rangle> =n\<Rightarrow> Fault f"
proof (cases s)
case (Fault f)
with exec_c show ?thesis
by (auto dest: execn_Fault_end)
next
case (Abrupt s')
with exec_c show ?thesis
by (fastforce dest: execn_Abrupt_end)
next
case Stuck
with exec_c show ?thesis
by (fastforce dest: execn_Stuck_end)
next
case (Normal s')
with exec_c inter_guards_execn_Normal_Fault [OF c]
show ?thesis
by blast
qed
lemma inter_guards_exec_Fault:
assumes c: "(c1 \<inter>\<^sub>g c2) = Some c"
assumes exec_c: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> Fault f"
shows "\<Gamma>\<turnstile>\<langle>c1,s\<rangle> \<Rightarrow> Fault f \<or> \<Gamma>\<turnstile>\<langle>c2,s\<rangle> \<Rightarrow> Fault f"
proof -
from exec_c obtain n where "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> Fault f"
by (auto simp add: exec_iff_execn)
from c this
have "\<Gamma>\<turnstile>\<langle>c1,s\<rangle> =n\<Rightarrow> Fault f \<or> \<Gamma>\<turnstile>\<langle>c2,s\<rangle> =n\<Rightarrow> Fault f"
by (rule inter_guards_execn_Fault)
thus ?thesis
by (auto intro: execn_to_exec)
qed
(* ************************************************************************* *)
subsection "Restriction of Procedure Environment"
(* ************************************************************************* *)
lemma restrict_SomeD: "(m|\<^bsub>A\<^esub>) x = Some y \<Longrightarrow> m x = Some y"
by (auto simp add: restrict_map_def split: split_if_asm)
(* FIXME: To Map *)
lemma restrict_dom_same [simp]: "m|\<^bsub>dom m\<^esub> = m"
apply (rule ext)
apply (clarsimp simp add: restrict_map_def)
apply (simp only: not_None_eq [symmetric])
apply rule
apply (drule sym)
apply blast
done
lemma restrict_in_dom: "x \<in> A \<Longrightarrow> (m|\<^bsub>A\<^esub>) x = m x"
by (auto simp add: restrict_map_def)
lemma exec_restrict_to_exec:
assumes exec_restrict: "\<Gamma>|\<^bsub>A\<^esub>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t"
assumes notStuck: "t\<noteq>Stuck"
shows "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t"
using exec_restrict notStuck
by (induct) (auto intro: exec.intros dest: restrict_SomeD Stuck_end)
lemma execn_restrict_to_execn:
assumes exec_restrict: "\<Gamma>|\<^bsub>A\<^esub>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"
assumes notStuck: "t\<noteq>Stuck"
shows "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"
using exec_restrict notStuck
by (induct) (auto intro: execn.intros dest: restrict_SomeD execn_Stuck_end)
lemma restrict_NoneD: "m x = None \<Longrightarrow> (m|\<^bsub>A\<^esub>) x = None"
by (auto simp add: restrict_map_def split: split_if_asm)
lemma exec_to_exec_restrict:
assumes exec: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t"
shows "\<exists>t'. \<Gamma>|\<^bsub>P\<^esub>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t' \<and> (t=Stuck \<longrightarrow> t'=Stuck) \<and>
(\<forall>f. t=Fault f\<longrightarrow> t'\<in>{Fault f,Stuck}) \<and> (t'\<noteq>Stuck \<longrightarrow> t'=t)"
proof -
from exec obtain n where
execn_strip: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"
by (auto simp add: exec_iff_execn)
from execn_to_execn_restrict [where P=P,OF this]
obtain t' where
"\<Gamma>|\<^bsub>P\<^esub>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t'"
"t=Stuck \<longrightarrow> t'=Stuck" "\<forall>f. t=Fault f\<longrightarrow> t'\<in>{Fault f,Stuck}" "t'\<noteq>Stuck \<longrightarrow> t'=t"
by blast
thus ?thesis
by (blast intro: execn_to_exec)
qed
lemma notStuck_GuardD:
"\<lbrakk>\<Gamma>\<turnstile>\<langle>Guard m g c,Normal s\<rangle> \<Rightarrow>\<notin>{Stuck}; s \<in> g\<rbrakk> \<Longrightarrow> \<Gamma>\<turnstile>\<langle>c,Normal s\<rangle> \<Rightarrow>\<notin>{Stuck}"
by (auto simp add: final_notin_def dest: exec.Guard )
lemma notStuck_SeqD1:
"\<lbrakk>\<Gamma>\<turnstile>\<langle>Seq c1 c2,Normal s\<rangle> \<Rightarrow>\<notin>{Stuck}\<rbrakk> \<Longrightarrow> \<Gamma>\<turnstile>\<langle>c1,Normal s\<rangle> \<Rightarrow>\<notin>{Stuck}"
by (auto simp add: final_notin_def dest: exec.Seq )
lemma notStuck_SeqD2:
"\<lbrakk>\<Gamma>\<turnstile>\<langle>Seq c1 c2,Normal s\<rangle> \<Rightarrow>\<notin>{Stuck}; \<Gamma>\<turnstile>\<langle>c1,Normal s\<rangle> \<Rightarrow>s'\<rbrakk> \<Longrightarrow> \<Gamma>\<turnstile>\<langle>c2,s'\<rangle> \<Rightarrow>\<notin>{Stuck}"
by (auto simp add: final_notin_def dest: exec.Seq )
lemma notStuck_SeqD:
"\<lbrakk>\<Gamma>\<turnstile>\<langle>Seq c1 c2,Normal s\<rangle> \<Rightarrow>\<notin>{Stuck}\<rbrakk> \<Longrightarrow>
\<Gamma>\<turnstile>\<langle>c1,Normal s\<rangle> \<Rightarrow>\<notin>{Stuck} \<and> (\<forall>s'. \<Gamma>\<turnstile>\<langle>c1,Normal s\<rangle> \<Rightarrow>s' \<longrightarrow> \<Gamma>\<turnstile>\<langle>c2,s'\<rangle> \<Rightarrow>\<notin>{Stuck})"
by (auto simp add: final_notin_def dest: exec.Seq )
lemma notStuck_CondTrueD:
"\<lbrakk>\<Gamma>\<turnstile>\<langle>Cond b c1 c2,Normal s\<rangle> \<Rightarrow>\<notin>{Stuck}; s \<in> b\<rbrakk> \<Longrightarrow> \<Gamma>\<turnstile>\<langle>c1,Normal s\<rangle> \<Rightarrow>\<notin>{Stuck}"
by (auto simp add: final_notin_def dest: exec.CondTrue)
lemma notStuck_CondFalseD:
"\<lbrakk>\<Gamma>\<turnstile>\<langle>Cond b c1 c2,Normal s\<rangle> \<Rightarrow>\<notin>{Stuck}; s \<notin> b\<rbrakk> \<Longrightarrow> \<Gamma>\<turnstile>\<langle>c2,Normal s\<rangle> \<Rightarrow>\<notin>{Stuck}"
by (auto simp add: final_notin_def dest: exec.CondFalse)
lemma notStuck_WhileTrueD1:
"\<lbrakk>\<Gamma>\<turnstile>\<langle>While b c,Normal s\<rangle> \<Rightarrow>\<notin>{Stuck}; s \<in> b\<rbrakk>
\<Longrightarrow> \<Gamma>\<turnstile>\<langle>c,Normal s\<rangle> \<Rightarrow>\<notin>{Stuck}"
by (auto simp add: final_notin_def dest: exec.WhileTrue)
lemma notStuck_WhileTrueD2:
"\<lbrakk>\<Gamma>\<turnstile>\<langle>While b c,Normal s\<rangle> \<Rightarrow>\<notin>{Stuck}; \<Gamma>\<turnstile>\<langle>c,Normal s\<rangle> \<Rightarrow>s'; s \<in> b\<rbrakk>
\<Longrightarrow> \<Gamma>\<turnstile>\<langle>While b c,s'\<rangle> \<Rightarrow>\<notin>{Stuck}"
by (auto simp add: final_notin_def dest: exec.WhileTrue)
lemma notStuck_CallD:
"\<lbrakk>\<Gamma>\<turnstile>\<langle>Call p ,Normal s\<rangle> \<Rightarrow>\<notin>{Stuck}; \<Gamma> p = Some bdy\<rbrakk>
\<Longrightarrow> \<Gamma>\<turnstile>\<langle>bdy,Normal s\<rangle> \<Rightarrow>\<notin>{Stuck}"
by (auto simp add: final_notin_def dest: exec.Call)
lemma notStuck_CallDefinedD:
"\<lbrakk>\<Gamma>\<turnstile>\<langle>Call p,Normal s\<rangle> \<Rightarrow>\<notin>{Stuck}\<rbrakk>
\<Longrightarrow> \<Gamma> p \<noteq> None"
by (cases "\<Gamma> p")
(auto simp add: final_notin_def dest: exec.CallUndefined)
lemma notStuck_DynComD:
"\<lbrakk>\<Gamma>\<turnstile>\<langle>DynCom c,Normal s\<rangle> \<Rightarrow>\<notin>{Stuck}\<rbrakk>
\<Longrightarrow> \<Gamma>\<turnstile>\<langle>(c s),Normal s\<rangle> \<Rightarrow>\<notin>{Stuck}"
by (auto simp add: final_notin_def dest: exec.DynCom)
lemma notStuck_CatchD1:
"\<lbrakk>\<Gamma>\<turnstile>\<langle>Catch c1 c2,Normal s\<rangle> \<Rightarrow>\<notin>{Stuck}\<rbrakk> \<Longrightarrow> \<Gamma>\<turnstile>\<langle>c1,Normal s\<rangle> \<Rightarrow>\<notin>{Stuck}"
by (auto simp add: final_notin_def dest: exec.CatchMatch exec.CatchMiss )
lemma notStuck_CatchD2:
"\<lbrakk>\<Gamma>\<turnstile>\<langle>Catch c1 c2,Normal s\<rangle> \<Rightarrow>\<notin>{Stuck}; \<Gamma>\<turnstile>\<langle>c1,Normal s\<rangle> \<Rightarrow>Abrupt s'\<rbrakk>
\<Longrightarrow> \<Gamma>\<turnstile>\<langle>c2,Normal s'\<rangle> \<Rightarrow>\<notin>{Stuck}"
by (auto simp add: final_notin_def dest: exec.CatchMatch)
(* ************************************************************************* *)
subsection "Miscellaneous"
(* ************************************************************************* *)
lemma execn_noguards_no_Fault:
assumes execn: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"
assumes noguards_c: "noguards c"
assumes noguards_\<Gamma>: "\<forall>p \<in> dom \<Gamma>. noguards (the (\<Gamma> p))"
assumes s_no_Fault: "\<not> isFault s"
shows "\<not> isFault t"
using execn noguards_c s_no_Fault
proof (induct)
case (Call p bdy n s t) with noguards_\<Gamma> show ?case
apply -
apply (drule bspec [where x=p])
apply auto
done
qed (auto)
lemma exec_noguards_no_Fault:
assumes exec: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t"
assumes noguards_c: "noguards c"
assumes noguards_\<Gamma>: "\<forall>p \<in> dom \<Gamma>. noguards (the (\<Gamma> p))"
assumes s_no_Fault: "\<not> isFault s"
shows "\<not> isFault t"
using exec noguards_c s_no_Fault
proof (induct)
case (Call p bdy s t) with noguards_\<Gamma> show ?case
apply -
apply (drule bspec [where x=p])
apply auto
done
qed auto
lemma execn_nothrows_no_Abrupt:
assumes execn: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> =n\<Rightarrow> t"
assumes nothrows_c: "nothrows c"
assumes nothrows_\<Gamma>: "\<forall>p \<in> dom \<Gamma>. nothrows (the (\<Gamma> p))"
assumes s_no_Abrupt: "\<not>(isAbr s)"
shows "\<not>(isAbr t)"
using execn nothrows_c s_no_Abrupt
proof (induct)
case (Call p bdy n s t) with nothrows_\<Gamma> show ?case
apply -
apply (drule bspec [where x=p])
apply auto
done
qed (auto)
lemma exec_nothrows_no_Abrupt:
assumes exec: "\<Gamma>\<turnstile>\<langle>c,s\<rangle> \<Rightarrow> t"
assumes nothrows_c: "nothrows c"
assumes nothrows_\<Gamma>: "\<forall>p \<in> dom \<Gamma>. nothrows (the (\<Gamma> p))"
assumes s_no_Abrupt: "\<not>(isAbr s)"
shows "\<not>(isAbr t)"
using exec nothrows_c s_no_Abrupt
proof (induct)
case (Call p bdy s t) with nothrows_\<Gamma> show ?case
apply -
apply (drule bspec [where x=p])
apply auto
done
qed (auto)
end
This is an implementation of an efficient and simple model for the simulation of
high-resolution atomic force microscopy (AFM), scanning tunneling microscopy (STM)
and inelastic electron tunneling spectroscopy (IETS) images using classical force fields.
There are two versions of the code:
\begin{enumerate}
\item the currently developed Python/C++ version in PyProbe\_nonOrtho (branch master);
to get a quick sense of this model you can also try the web interface hosted here: http://nanosurf.fzu.cz/ppr/
for more details see the wiki page:
https://github.com/ProkopHapala/ProbeParticleModel/wiki
\item the legacy Fortran version in SHTM\_springTip2 (branch fortran);
a more detailed description of the Fortran version is here:
http://nanosurf.fzu.cz/wiki/doku.php?id=probe\_particle\_model
\end{enumerate}
\cite{phapalamechhighresol}
\cite{phapalaoriginhighresol}
\section{C++ \& Python version}
\subsection{Examples of results}
\subsection{How it works}
The probe particle (PP) interacts with the sample through three force contributions:
\begin{itemize}
\item Pauli repulsion ( currently approximated by the repulsive part of the
Lennard-Jones potential, $r^{-12}$ )
\item van der Waals attraction ( currently approximated by the attractive part
of the Lennard-Jones potential, $r^{-6}$ )
\item electrostatics ( currently computed either as a pairwise Coulomb
interaction between the PP and point charges at the centers of the sample atoms, or
by reading an electrostatic force field obtained as the derivative of the sample
Hartree potential, as described in the supplementary material of
\cite{phapalaoriginhighresol} ).
\end{itemize}
The computation of images is divided into two parts (a minimal sketch of this
scheme follows the list):
\begin{enumerate}
\item Precompute the vector force field ( Fx(x,y,z), Fy(x,y,z), Fz(x,y,z) ) over
the sample and store it on a 3D grid ( see the getLenardJonesFF and getCoulombFF
functions for more details ). After the individual components of the force field
are sampled ( i.e. Lennard-Jones and electrostatic ) they are summed up to form
one single total effective force field in which the Probe Particle moves.
\item Relax the Probe Particle attached to the tip under the influence of the total
effective force field. The relaxation of the Probe Particle is done using the ``Fast
Inertial Relaxation Engine'' (FIRE) algorithm
\cite{ebitzekstructrelaxmadesimple}, implemented in the
FIRE:move() function. In each relaxation step the force field on the grid
is interpolated in the interpolate3DvecWrap function, respecting periodic
boundary conditions. From Python the relaxation is called as the relaxTipStroke
function, providing a 1D array of tip positions and obtaining back a 1D array of
Probe-Particle positions after the relaxation and of the force between the tip,
Probe Particle and sample at each position. The lateral scan in (x,y) used to
obtain a stack of images is done by calling relaxTipStroke at different
(x,y) positions of the tip, where each call of relaxTipStroke does one approach
along the z-direction.
\end{enumerate}
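To make this two-step scheme concrete, the following minimal Python sketch
(independent of the ProbeParticle code itself; all atom positions, parameters
and names in it are purely illustrative) precomputes a Lennard-Jones force
field on a regular grid and then relaxes a probe particle in the trilinearly
interpolated field. For brevity it uses plain damped dynamics where the real
code uses FIRE.
\begin{verbatim}
import numpy as np

# --- part 1: precompute a Lennard-Jones force field on a regular grid ---
atoms = np.array([[0.0, 0.0, 0.0], [3.0, 0.0, 0.0]])  # sample atoms (illustrative)
eps, r0 = 0.01, 3.4                                   # illustrative LJ parameters
xs = np.linspace(-5.0, 8.0, 40)                       # same axis reused for x,y,z
X, Y, Z = np.meshgrid(xs, xs, xs, indexing='ij')
FF = np.zeros(X.shape + (3,))                         # (Fx,Fy,Fz) per voxel
for a in atoms:
    dx, dy, dz = X - a[0], Y - a[1], Z - a[2]
    r2 = dx**2 + dy**2 + dz**2 + 1e-12
    # F = -dV/dr * r_hat for V = eps*((r0/r)**12 - 2*(r0/r)**6)
    c = 12.0*eps*(r0**12/r2**7 - r0**6/r2**4)
    FF += np.stack([c*dx, c*dy, c*dz], axis=-1)

def interp_force(p):
    """Trilinear interpolation of the gridded force at point p."""
    t = (p - xs[0]) / (xs[1] - xs[0])
    i = np.clip(t.astype(int), 0, len(xs) - 2)
    f = t - i
    F = np.zeros(3)
    for ax in (0, 1):
        for ay in (0, 1):
            for az in (0, 1):
                w = ((1-f[0], f[0])[ax] *
                     (1-f[1], f[1])[ay] *
                     (1-f[2], f[2])[az])
                F += w * FF[i[0]+ax, i[1]+ay, i[2]+az]
    return F

# --- part 2: relax the probe particle in the interpolated field ---
tip = np.array([1.5, 0.0, 6.0])        # fixed tip apex position
pos, vel = tip.copy(), np.zeros(3)
k_spring, dt, damp = 0.5, 0.1, 0.9     # illustrative constants
for _ in range(200):                   # damped dynamics (real code: FIRE)
    F = interp_force(pos) + k_spring*(tip - pos)
    vel = damp*(vel + dt*F)
    pos = pos + dt*vel
print("relaxed probe-particle position:", pos)
\end{verbatim}
The expensive grid sampling is paid once; after that, each relaxation step
costs only a single interpolation.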
\subsection{Why is it split like this?}
This splitting of the computation ( first sampling the force field on the grid, and
then relaxing using interpolation ) has several advantages over straightforward
computation of the interaction on the fly during the relaxation process.
\begin{itemize}
\item \textbf{It is faster} - if the sample has $\sim$100 atoms, summing
each pairwise Lennard-Jones interaction is much slower than just
interpolating the force field from the grid. Because the force evaluation is done
several times for each voxel of the 3D scanning grid, it is more efficient
to precompute it just once per voxel, and then interpolate.
\item \textbf{It is more flexible and general} - decoupling the relaxation
process from the computation of the force field allows us to plug in any force field.
The original motivation was to use an electrostatic force field obtained from the
Hartree potential of a DFT calculation. However, it is not limited to that.
We can plug in e.g. the derivative of the potential energy of the probe particle
(i.e. Xe or CO) obtained by scanning it in a DFT code, in a similar way as Guo
did in \cite{chshguohighresolmodel}. The only limitation here is the computational
cost of obtaining such a potential from an ab initio calculation.
\end{itemize}
\subsection{Code structure}
The code is divided into Python and C++ parts, where performance-intensive
computations are done in C++ and Python is used as a convenient scripting
interface for tasks like file I/O, plotting and memory management.
Python and C++ are bound using the Python ctypes library.
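As a toy illustration of this ctypes binding pattern, the following sketch
calls a function from the standard C math library (it does not use the
ProbeParticle library itself):
\begin{verbatim}
import ctypes, ctypes.util

# Load a shared C library and declare the signature of one function.
libm = ctypes.CDLL(ctypes.util.find_library("m"))
libm.cos.restype = ctypes.c_double
libm.cos.argtypes = [ctypes.c_double]

print(libm.cos(0.0))   # 1.0 -- a C function called from Python
\end{verbatim}
ProbeParticle.cpp is compiled to a shared library and wrapped in the same way,
typically with numpy arrays passed to C as raw pointers.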
\subsubsection{Dependencies:}
\begin{itemize}
\item \textbf{C++} : g++ ( tested with g++ (Ubuntu 4.8.1-2ubuntu1~12.04) 4.8.1 )
\item \textbf{Python} : python ( 2.7.3 ), numpy (1.9.2), matplotlib ( 1.4.3 ), ctypes
( 1.1.0 )
\end{itemize}
\subsubsection{C++ source:}
\begin{itemize}
\item ProbeParticle.cpp - implementation of all performance-intensive parts of
the ProbeParticle model as a dynamic library which can be called from
Python.
\item Vec3.cpp, Mat3.cpp - math subroutines for operations with 3D vectors and
matrices
\end{itemize}
\subsubsection{Python source:}
\begin{itemize}
\item ProbeParticle.py - interface between the C++ core and Python using
ctypes. Some Python routines are also defined here, such as:
\begin{itemize}
\item conversion from force to frequency shift ( Fz2df ),
\item evaluation of Lennard-Jones coefficients ( getAtomsLJ )
\item copying the sample geometry to simulate periodic boundary conditions (
PBCAtoms )
\item automatic setup of the imaging area according to the geometry of a
nonperiodic sample ( autoGeom )
\item default simulation parameters ( params ) with a subroutine to
read these parameters from a file ( loadParams )
\end{itemize}
\item test2.py, testServer2.py - two examples of Python scripts which run
actual computations using the ProbeParticle library. The idea is that these files
can be modified by the user to match a particular task.
\item basUtils.py - routines for loading molecule geometry from the
xyz format ( loadAtoms ), finding bonds, and other tasks.
\item Element.py and elements.py - contain just the parameters of atoms from the
periodic table ( like effective radius, color for visualization etc. ). They
are used by basUtils.py.
\end{itemize}
# Dimensional Reduction
G. Richards (2016, 2018), based on materials from Ivezic, Connolly, Leighly, and VanderPlas
**Before class starts, please try to do the following:**
> find . -name "sdss_corrected_spectra.py" -print
> ./anaconda/lib/python2.7/site-packages/astroML/datasets/sdss_corrected_spectra.py
> emacs -nw ./anaconda/lib/python2.7/site-packages/astroML/datasets/sdss_corrected_spectra.py
> #DATA_URL = 'http://www.astro.washington.edu/users/vanderplas/spec4000.npz'
> DATA_URL = 'http://staff.washington.edu/jakevdp/spec4000.npz'
Just in case that doesn't work, I've put "spec4000.npz" in PHYS_T480_F18/data. Copy this to your "astroML_data" directory.
## Curse of Dimensionality
You want to buy a car. Right now--you don't want to wait. But you are picky and have certain things that you would like it to have. Each of those things has a probability between 0 and 1 of being on the car dealer's lot. You want a red car which has a probability of being on the lot of $p_{\rm red}$; you want good gas mileage, $p_{\rm gas}$; you want leather seats, $p_{\rm leather}$; and you want a sunroof, $p_{\rm sunroof}$. The probability that the dealer has a car on the lot that meets all of those requirements is
$$p_{\rm red} \, p_{\rm gas} \, p_{\rm leather} \, p_{\rm sunroof},$$
or $p^n$ where $n$ is the number of features (assuming equal probability for each).
If the probability of each of these is 50%, then the probability of you driving off with your car of choice is only $0.5*0.5*0.5*0.5 = 0.0625$. Not very good. Imagine if you also wanted other things. This is the [Curse of Dimensionality](https://en.wikipedia.org/wiki/Curse_of_dimensionality).
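A quick sanity check of that arithmetic (a trivial sketch; the 50% per-feature probability is the assumption from the text):
```python
# Probability that a car has all n desired features,
# each independently present with probability p.
p, n = 0.5, 4
print(p**n)  # 0.0625
```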
Let's illustrate the curse of dimensionality with two figures from [here.](https://medium.freecodecamp.org/the-curse-of-dimensionality-how-we-can-save-big-data-from-itself-d9fa0f872335)
In the first example we are trying to find which box hold some treasure, which gets harder and harder with more dimensions, despite there just being 5 boxes in each dimension:
In the next example we inscribe a circle in a square. The area outside of the circle grows larger and larger as the number of dimensions increase:
Mathematically we can describe this as: the more dimensions that your data span, the more points needed to uniformly sample the space.
For $D$ dimensions with coordinates $[-1,1]$, the fraction of points in a unit hypersphere (with radius $r$, as illustrated above) is
$$f_D = \frac{V_D(r)}{(2r)^D} = \frac{\pi^{D/2}}{D2^{D-1}\Gamma(D/2)}$$
which goes to $0$ as $D$ goes to infinity! Actually, as you can see from the plot below, it is effectively 0 much earlier than that!
```python
# Execute this cell
# from Andy Connolly
%matplotlib inline
import numpy as np
import scipy.special as sp
from matplotlib import pyplot as plt
def unitVolume(dimension, radius=1.):
return 2*(radius**dimension *np.pi**(dimension/2.))/(dimension*sp.gamma(dimension/2.))
dim = np.linspace(1,100)
#------------------------------------------------------------
# Plot the results
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(dim,unitVolume(dim)/2.**dim)
ax.set_yscale('log')
ax.set_xlabel('$Dimension$')
ax.set_ylabel('$Volume$')
plt.show()
```
Note that this works in the opposite direction too: let's say you want to find "rare" objects in 10 dimensions, where we'll define rare as <1% of the population. Then you'll need to accept objects from 63% of the distribution in all 10 dimensions! So are those really "rare" or are they just a particular 1% of the population?
```python
import numpy as np
p = 10**(np.log10(0.01)/10.0)
print(p)
```
0.6309573444801932
N.B. Dimensionality isn't just measuring $D$ parameters for $N$ objects. It could be a spectrum with $D$ values or an image with $D$ pixels, etc. In the book the examples used just happen to be spectra of galaxies from the SDSS project. But we can insert the data of our choice instead.
For example: the SDSS comprises a sample of 357 million sources:
- each source has 448 measured attributes
- selecting just 30 (e.g., magnitude, size..) and normalizing the data range $-1$ to $1$
yields a probability of having one of the 357 million sources reside within a unit hypersphere of 1 in 1.4$\times 10^5$.
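We can verify that number with the hypersphere fraction defined above (a short sketch; $D=30$ dimensions and the 357 million sources are the values quoted in the text):
```python
import numpy as np
import scipy.special as sp

D, N = 30, 357e6
# fraction of the [-1,1]^D cube that lies inside the unit hypersphere
f_D = np.pi**(D/2) / (D * 2**(D-1) * sp.gamma(D/2))
print(N * f_D)  # ~7.3e-06, i.e., odds of about 1 in 1.4e5
```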
## Principal Component Analysis (PCA)
In [Principal Component Analysis (PCA)](https://en.wikipedia.org/wiki/Principal_component_analysis) we seek to take a data set like the one shown below and apply a transform to the data such that the new axes are aligned with the maximal variance of the data. As can be seen in the Figure, this is basically just the same as doing regression by minimizing the square of the perpendicular distances to the new axes. Note that we haven't made any changes to the data, we have just defined new axes.
```python
# Execute this cell
# Ivezic, Figure 7.2
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
%matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.patches import Ellipse
#------------------------------------------------------------
# Set parameters and draw the random sample
np.random.seed(42)
r = 0.9
sigma1 = 0.25
sigma2 = 0.08
rotation = np.pi / 6
s = np.sin(rotation)
c = np.cos(rotation)
X = np.random.normal(0, [sigma1, sigma2], size=(100, 2)).T
R = np.array([[c, -s],[s, c]])
X = np.dot(R, X) #Same data, now rotated by R matrix.
#------------------------------------------------------------
# Plot the diagram
fig = plt.figure(figsize=(5, 5), facecolor='w')
ax = plt.axes((0, 0, 1, 1), xticks=[], yticks=[], frameon=False)
# draw axes
ax.annotate(r'$x$', (-r, 0), (r, 0),
ha='center', va='center',
arrowprops=dict(arrowstyle='<->', color='k', lw=1))
ax.annotate(r'$y$', (0, -r), (0, r),
ha='center', va='center',
arrowprops=dict(arrowstyle='<->', color='k', lw=1))
# draw rotated axes
ax.annotate(r'$x^\prime$', (-r * c, -r * s), (r * c, r * s),
ha='center', va='center',
arrowprops=dict(color='k', arrowstyle='<->', lw=1))
ax.annotate(r'$y^\prime$', (r * s, -r * c), (-r * s, r * c),
ha='center', va='center',
arrowprops=dict(color='k', arrowstyle='<->', lw=1))
# scatter points
ax.scatter(X[0], X[1], s=25, lw=0, c='k', zorder=2)
# draw lines
vnorm = np.array([s, -c])
for v in (X.T):
d = np.dot(v, vnorm)
v1 = v - d * vnorm
ax.plot([v[0], v1[0]], [v[1], v1[1]], '-k')
# draw ellipses
for sigma in (1, 2, 3):
ax.add_patch(Ellipse((0, 0), 2 * sigma * sigma1, 2 * sigma * sigma2,
rotation * 180. / np.pi,
ec='k', fc='gray', alpha=0.2, zorder=1))
ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)
plt.show()
```
Note that the points are correlated along a particular direction which doesn't align with the initial choice of axes. So, we should rotate our axes to align with this correlation.
We'll choose the rotation to maximize the ability to discriminate between the data points:
* the first axis, or **principal component**, is direction of maximal variance
* the second principal component is orthogonal to the first component and maximizes the residual variance
* ...
PCA is a dimensional reduction process because we can generally account for nearly "all" of the variance in the data set with fewer than the original $K$ dimensions. See more below.
We start with a data set $\{x_i\}$ which consists of $N$ objects for which we measure $K$ features. We begin by subtracting the mean for each feature in $\{x_i\}$ and write the result as an $N\times K$ matrix $X$.
The covariance of this matrix is
$$C_X=\frac{1}{N-1}X^TX.$$
There are off-diagonal terms if there are correlations between the measurements (e.g., maybe two of the features are temperature dependent and the measurements were taken at the same time).
If $R$ is a projection of the data that is aligned with the maximal variance, then we have $Y= X R$ with covariance
$$ C_{Y} = \frac{1}{N-1} R^T X^T X R = R^T C_X R.$$
$r_1$ is the first principal component of $R$, which can be derived using Lagrange multipliers with the following cost function:
$$ \phi(r_1,\lambda_1) = r_1^TC_X r_1 - \lambda_1(r_1^Tr_1-1). $$
If we take the derivative of $\phi(r_1,\lambda_1)$ with respect to $r_1$ and set it to 0, then we have
$$ C_Xr_1 - \lambda_1 r_1 = 0. $$
$\lambda_1$ (the largest eigenvalue of the matrix) is a root of the equation $\det(C_X -
\lambda_1 {\bf I})=0$, and the corresponding variance is
$$ \lambda_1 = r_1^T C_X r_1.$$
The columns of the full matrix, $R$ are the eigenvectors (known here as principal components).
The diagonal values of $C_Y$ are the variance contained within each component.
We aren't going to go through the linear algebra more than that here. But it would be a good group project for someone. See the end of 7.3.1 starting at the bottom on page 294 or go through [Karen Leighly's PCA lecture notes](http://seminar.ouml.org/lectures/principal-components-analysis/) if you want to walk through the math in more detail.
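If you want to see that machinery in action without the full derivation, here is a minimal numerical sketch: form the covariance matrix of centered data, eigendecompose it, and check the result against `scikit-learn` (eigenvectors can differ by an overall sign):
```python
import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
X = rng.normal(size=(100, 3)) @ rng.normal(size=(3, 5))  # 5-D data, 3 intrinsic dims
Xc = X - X.mean(axis=0)                                  # center each feature

C = Xc.T @ Xc / (len(X) - 1)              # covariance matrix C_X
evals, evecs = np.linalg.eigh(C)          # eigh returns ascending eigenvalues
order = np.argsort(evals)[::-1]           # sort by decreasing variance
evals, evecs = evals[order], evecs[:, order]

pca = PCA().fit(X)
print(np.allclose(evals[:3], pca.explained_variance_[:3]))           # same variances
print(np.allclose(np.abs(evecs[:, 0]), np.abs(pca.components_[0])))  # same axis, up to sign
```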
### Preparing data for PCA
* Subtract the mean of each dimension (to "center" the data)
* Divide by the variance in each dimension (to "whiten" the data)
* (For spectra and images) normalize each row to yield an integral of unity.
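Here is a minimal sketch of those preprocessing steps on a random data set (each step is shown independently; for real spectra the row normalization is usually applied before centering):
```python
import numpy as np

rng = np.random.RandomState(42)
X = rng.normal(10.0, 2.0, size=(100, 5))     # N=100 objects, K=5 features

X_center = X - X.mean(axis=0)                # center: zero mean per dimension
X_white = X_center / X_center.std(axis=0)    # whiten: unit variance per dimension
X_norm = X / X.sum(axis=1, keepdims=True)    # (spectra/images) unit sum per row
print(X_white.mean(axis=0).round(6), X_white.std(axis=0))
```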
Below is a typical call to the PCA algorithm. Note that this is somewhat backwards. We are starting with `X` and then we are making it higher dimensional--to create a mock high-$D$ data set. Then we are applying PCA as a dimensionality reduction technique.
```python
#Example call from 7.3.2
import numpy as np
from sklearn.decomposition import PCA
X = np.random.normal(size=(100,3)) # 100 points in 3D
R = np.random.random((3,10)) # projection matrix
X = np.dot(X,R) # X is now 10-dim, with 3 intrinsic dims
pca = PCA(n_components=4) # n_components can be optionally set
pca.fit(X)
comp = pca.transform(X) # project X onto the subspace: 4 coefficients for each of the 100 samples
mean = pca.mean_ # length-10 mean of the data
components = pca.components_ # 4x10 matrix of components
# Reconstruction of the first object from its coefficients:
X_reconstruct0 = mean + np.dot(comp[0], components)
```
To illustrate what is happening, here is a PCA reconstruction of handwritten "3s" from [Hastie et al.](https://web.stanford.edu/~hastie/ElemStatLearn/) :
[Scikit-Learn's decomposition module](http://scikit-learn.org/stable/modules/classes.html#module-sklearn.decomposition) has a number of [PCA type implementations](http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html#sklearn.decomposition.PCA).
Let's work through an example using spectra of galaxies taken during the Sloan Digital Sky Survey. In this sample there are 4000 spectra with flux measurements in 1000 bins. 15 example spectra are shown below and our example will use half of the spectra chosen at random.
```python
%matplotlib inline
# Example from Andy Connolly
# See Ivezic, Figure 7.4
import numpy as np
from matplotlib import pyplot as plt
from sklearn.decomposition import PCA
from astroML.datasets import sdss_corrected_spectra
#------------------------------------------------------------
# Download data
data = sdss_corrected_spectra.fetch_sdss_corrected_spectra()
spectra = sdss_corrected_spectra.reconstruct_spectra(data)
wavelengths = sdss_corrected_spectra.compute_wavelengths(data)
print(len(spectra), len(wavelengths))
#----------------------------------------------------------------------
# Compute PCA
np.random.seed(500)
nrows = 2000 # We'll just look at 2000 random spectra
n_components = 5 # Do the fit with 5 components, which is the mean plus 4
ind = np.random.randint(spectra.shape[0], size=nrows)
spec_mean = spectra[ind].mean(0) # Compute the mean spectrum, which is the first component
# spec_mean = spectra[:50].mean(0)
# use Randomized PCA for speed
#pca = RandomizedPCA(n_components - 1)
pca = PCA(n_components - 1,svd_solver='randomized')
pca.fit(spectra[ind])
pca_comp = np.vstack([spec_mean,pca.components_]) #Add the mean to the components
evals = pca.explained_variance_ratio_
print(evals)
```
downloading PCA-processed SDSS spectra from http://staff.washington.edu/jakevdp/spec4000.npz to /home/pranphy/astroML_data
Downloading http://staff.washington.edu/jakevdp/spec4000.npz
[=========================================] 27.15Mb / 27.15Mb
4000 1000
[0.889316 0.06058301 0.02481432 0.01012148]
Now let's plot the components. See also Ivezic, Figure 7.4. The left hand panels are just the first 5 spectra for comparison with the first 5 PCA components, which are shown on the right. They are ordered by the size of their eigenvalues.
```python
#Make plots
fig = plt.figure(figsize=(10, 8))
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05,
bottom=0.1, top=0.95, hspace=0.05)
titles = 'PCA components'
for j in range(n_components):
# plot the components
ax = fig.add_subplot(n_components, 2, 2*j+2)
ax.yaxis.set_major_formatter(plt.NullFormatter())
ax.xaxis.set_major_locator(plt.MultipleLocator(1000))
if j < n_components - 1:
ax.xaxis.set_major_formatter(plt.NullFormatter())
else:
ax.set_xlabel('wavelength (Angstroms)')
ax.plot(wavelengths, pca_comp[j], '-k', lw=1)
# plot zero line
xlim = [3000, 7999]
ax.plot(xlim, [0, 0], '-', c='gray', lw=1)
ax.set_xlim(xlim)
# adjust y limits
ylim = plt.ylim()
dy = 0.05 * (ylim[1] - ylim[0])
ax.set_ylim(ylim[0] - dy, ylim[1] + 4 * dy)
# plot the first j spectra
ax2 = fig.add_subplot(n_components, 2, 2*j+1)
ax2.yaxis.set_major_formatter(plt.NullFormatter())
ax2.xaxis.set_major_locator(plt.MultipleLocator(1000))
if j < n_components - 1:
ax2.xaxis.set_major_formatter(plt.NullFormatter())
else:
ax2.set_xlabel('wavelength (Angstroms)')
ax2.plot(wavelengths, spectra[j], '-k', lw=1)
# plot zero line
ax2.plot(xlim, [0, 0], '-', c='gray', lw=1)
ax2.set_xlim(xlim)
if j == 0:
ax.set_title(titles, fontsize='medium')
if j == 0:
label = 'mean'
else:
label = 'component %i' % j
# adjust y limits
ylim = plt.ylim()
dy = 0.05 * (ylim[1] - ylim[0])
ax2.set_ylim(ylim[0] - dy, ylim[1] + 4 * dy)
ax.text(0.02, 0.95, label, transform=ax.transAxes,
ha='left', va='top', bbox=dict(ec='w', fc='w'),
fontsize='small')
plt.show()
```
Now let's make "scree" plots. These plots tell us how much of the variance is explained as a function of eigenvector number. Our plot won't look much like Ivezic, Figure 7.5, so I've shown it below to explain where "scree" comes from.
```python
# Execute this cell
import numpy as np
from matplotlib import pyplot as plt
#----------------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(121)
ax.plot(np.arange(n_components-1), evals)
ax.set_xlabel("eigenvalue number")
ax.set_ylabel("eigenvalue ")
ax = fig.add_subplot(122)
ax.plot(np.arange(n_components-1), evals.cumsum())
ax.set_xlabel("eigenvalue number")
ax.set_ylabel("cumulative eigenvalue")
plt.show()
```
How much of the variance is explained by the first two components? How about all of the components?
```python
print("The first component explains {:.3f} of the variance in the data.".format(pca.explained_variance_ratio_[0]))
print("The second component explains {:.3f} of the variance in the data.".format(pca.explained_variance_ratio_[0]))
print("All components explain {:.3f} of the variance in the data.".format(sum(pca.explained_variance_ratio_)))
```
The first component explains 0.889 of the variance in the data.
The second component explains 0.061 of the variance in the data.
All components explain 0.996 of the variance in the data.
This is why PCA enables dimensionality reduction.
How many components would we need to explain 99.5% of the variance?
```python
for num_feats in np.arange(1,20, dtype = int):
pca = PCA(n_components=num_feats)
pca.fit(spectra[ind])
if (sum(pca.explained_variance_ratio_[:num_feats])>0.995):
break
print("{:d} features are needed to explain 99.5% of the variance".format(num_feats))
```
8 features are needed to explain 99.5% of the variance
Note that we would need 1000 components to encode *all* of the variance.
## Interpreting the PCA
- The output eigenvectors are ordered by their associated eigenvalues
- The eigenvalues reflect the variance within each eigenvector
- The sum of the eigenvalues is total variance of the system
- Projection of each spectrum onto the first few eigenspectra is a compression of the data
Once we have the eigenvectors, we can try to reconstruct an observed spectrum, ${x}(k)$, in the eigenvector basis, ${e}_i(k)$, as
$$
x_i(k) = \mu(k) + \sum_{j=1}^{R} \theta_{ij} e_j(k).
$$
That would give a full (perfect) reconstruction of the data since it uses all of the eigenvectors. But if we truncate (i.e., $r<R$), then we will have reduced the dimensionality while still reconstructing the data with relatively little loss of information.
For example, we started with $4000\times1000$ floating point numbers. If we can explain nearly all of the variance with 8 eigenvectors, then we have reduced the problem to $4000\times8 + 8\times1000$ floating point numbers!
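That bookkeeping is easy to check (simple arithmetic with the numbers from the text):
```python
raw = 4000 * 1000                    # original: 4000 spectra x 1000 bins
compressed = 4000 * 8 + 8 * 1000     # 8 coefficients per spectrum + 8 eigenspectra
print(raw, compressed, raw / compressed)  # 4000000 40000 100.0 -- a factor of 100
```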
Execute the next cell to see how the reconstruction improves by adding more components.
```python
# Execute this cell
import numpy as np
from matplotlib import pyplot as plt
from sklearn.decomposition import PCA
from astroML.datasets import sdss_corrected_spectra
#------------------------------------------------------------
# Download data
data = sdss_corrected_spectra.fetch_sdss_corrected_spectra()
spectra = sdss_corrected_spectra.reconstruct_spectra(data)
wavelengths = sdss_corrected_spectra.compute_wavelengths(data)
#------------------------------------------------------------
# Compute PCA components
# Eigenvalues can be computed using PCA as in the commented code below:
#from sklearn.decomposition import PCA
#pca = PCA()
#pca.fit(spectra)
#evals = pca.explained_variance_ratio_
#evals_cs = evals.cumsum()
# because the spectra have been reconstructed from masked values, this
# is not exactly correct in this case: we'll use the values computed
# in the file compute_sdss_pca.py
evals = data['evals'] ** 2
evals_cs = evals.cumsum()
evals_cs /= evals_cs[-1]
evecs = data['evecs']
spec_mean = spectra.mean(0)
#------------------------------------------------------------
# Find the coefficients of a particular spectrum
spec = spectra[1]
coeff = np.dot(evecs, spec - spec_mean)
#------------------------------------------------------------
# Plot the sequence of reconstructions
fig = plt.figure(figsize=(8, 8))
fig.subplots_adjust(hspace=0)
for i, n in enumerate([0, 4, 8, 20]):
ax = fig.add_subplot(411 + i)
ax.plot(wavelengths, spec, '-', c='gray')
ax.plot(wavelengths, spec_mean + np.dot(coeff[:n], evecs[:n]), '-k')
if i < 3:
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.set_ylim(-2, 21)
ax.set_ylabel('flux')
if n == 0:
text = "mean"
elif n == 1:
text = "mean + 1 component\n"
text += r"$(\sigma^2_{tot} = %.2f)$" % evals_cs[n - 1]
else:
text = "mean + %i components\n" % n
text += r"$(\sigma^2_{tot} = %.2f)$" % evals_cs[n - 1]
ax.text(0.01, 0.95, text, ha='left', va='top', transform=ax.transAxes)
fig.axes[-1].set_xlabel(r'${\rm wavelength\ (\AA)}$')
plt.show()
```
### Caveats I
PCA is a linear process, whereas the variations in the data may not be. So it may not always be appropriate to use, and/or it may require a relatively large number of components to fully describe any non-linearity.
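One common workaround is a non-linear variant such as Scikit-Learn's [`KernelPCA`](http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.KernelPCA.html). A minimal sketch on toy data (the concentric-circles data set and the RBF kernel are illustrative assumptions here, not part of the spectra example):
```python
import numpy as np
from sklearn.datasets import make_circles
from sklearn.decomposition import PCA, KernelPCA

# two concentric circles: no linear projection can separate them
X, y = make_circles(n_samples=400, factor=0.3, noise=0.05, random_state=0)

X_lin = PCA(n_components=2).fit_transform(X)  # linear PCA
X_rbf = KernelPCA(n_components=2, kernel='rbf', gamma=10).fit_transform(X)  # kernel PCA

# the first kernel-PCA component separates the two rings; the linear one does not
print(np.mean(X_rbf[y == 0, 0]), np.mean(X_rbf[y == 1, 0]))
```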
Note also that PCA can be very impractical for large data sets which exceed the memory per core, as the computational requirement goes as $\mathscr{O}(D^3)$ and the memory requirement goes as $\mathscr{O}(2D^2)$.
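For data sets that don't fit in memory, Scikit-Learn's [`IncrementalPCA`](http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.IncrementalPCA.html) processes the data in batches; here is a minimal sketch (the shapes and batch count are arbitrary):
```python
import numpy as np
from sklearn.decomposition import IncrementalPCA

ipca = IncrementalPCA(n_components=10)
# feed the data in manageable chunks instead of all at once
for chunk in np.array_split(np.random.random((10000, 100)), 20):
    ipca.partial_fit(chunk)
X_proj = ipca.transform(np.random.random((5, 100)))  # project new data as usual
```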
### Missing Data
We have assumed so far that there is no missing data (e.g., bad pixels in the spectrum, etc.). But often the data set is incomplete. Since PCA encodes the flux correlation with wavelength (or whatever parameters are in your data set), we can actually use it to determine missing values.
An example is shown below. Here, black are the observed spectra. Gray are the regions where we have no data. Blue is the PCA reconstruction, including the regions where there are no data. Awesome, isn't it?
```python
# Execute this cell
%matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import ticker
from astroML.datasets import fetch_sdss_corrected_spectra
from astroML.datasets import sdss_corrected_spectra
#------------------------------------------------------------
# Get spectra and eigenvectors used to reconstruct them
data = fetch_sdss_corrected_spectra()
spec = sdss_corrected_spectra.reconstruct_spectra(data)
lam = sdss_corrected_spectra.compute_wavelengths(data)
evecs = data['evecs']
mu = data['mu']
norms = data['norms']
mask = data['mask']
#------------------------------------------------------------
# plot the results
i_plot = ((lam > 5750) & (lam < 6350))
lam = lam[i_plot]
specnums = [20, 8, 9]
subplots = [311, 312, 313]
fig = plt.figure(figsize=(8, 10))
fig.subplots_adjust(hspace=0)
for subplot, i in zip(subplots, specnums):
ax = fig.add_subplot(subplot)
# compute eigen-coefficients
spec_i_centered = spec[i] / norms[i] - mu
coeffs = np.dot(spec_i_centered, evecs.T)
# blank out masked regions
spec_i = spec[i]
mask_i = mask[i]
spec_i[mask_i] = np.nan
# plot the raw masked spectrum
ax.plot(lam, spec_i[i_plot], '-', color='k', lw=2,
label='True spectrum')
# plot the reconstruction using 10 eigenvectors
for nev in [10]:
if nev == 0:
label = 'mean'
else:
label = 'N EV=%i' % nev
spec_i_recons = norms[i] * (mu + np.dot(coeffs[:nev], evecs[:nev]))
ax.plot(lam, spec_i_recons[i_plot], label=label)
# plot shaded background in masked region
ylim = ax.get_ylim()
mask_shade = ylim[0] + mask[i][i_plot].astype(float) * ylim[1]
plt.fill(np.concatenate([lam[:1], lam, lam[-1:]]),
np.concatenate([[ylim[0]], mask_shade, [ylim[0]]]),
lw=0, fc='k', alpha=0.2)
ax.set_xlim(lam[0], lam[-1])
ax.set_ylim(ylim)
ax.yaxis.set_major_formatter(ticker.NullFormatter())
if subplot == 311:
ax.legend(loc=1, prop=dict(size=14))
ax.set_xlabel('$\lambda\ (\AA)$')
ax.set_ylabel('normalized flux')
plt.show()
```
The example that we have been using above is "spectral" PCA. Some examples from the literature include:
- [Francis et al. 1992](http://articles.adsabs.harvard.edu/cgi-bin/nph-iarticle_query?1992ApJ...398..476F&data_type=PDF_HIGH&whole_paper=YES&type=PRINTER&filetype=.pdf)
- [Connolly et al. 1995](http://articles.adsabs.harvard.edu/cgi-bin/nph-iarticle_query?1995AJ....110.1071C&data_type=PDF_HIGH&whole_paper=YES&type=PRINTER&filetype=.pdf)
- [Yip et al. 2004](http://iopscience.iop.org/article/10.1086/425626/meta;jsessionid=31BB5F11B85D2BF4180834DC71BA0B85.c3.iopscience.cld.iop.org)
One can also do PCA on features that aren't ordered (as they were for the spectra). E.g., if you have $D$ different parameters measured for your objects. The classic example in astronomy is
[Boroson & Green 1992](http://articles.adsabs.harvard.edu/cgi-bin/nph-iarticle_query?1992ApJS...80..109B&data_type=PDF_HIGH&whole_paper=YES&type=PRINTER&filetype=.pdf).
### Caveats II
One of the things that I don't like about PCA is that the eigenvectors are defined relative to the mean. So they can be positive or negative, and they often don't look anything like the original data itself, whereas you might expect the components to look like, well, the physical components. For example, quasars are fundamentally galaxies: part of their flux comes from the galaxy that they live in. But PCA doesn't return any component that looks like a typical galaxy.
## Non-negative Matrix Factorization (NMF)
This is where [Non-negative Matrix Factorization (NMF)](https://en.wikipedia.org/wiki/Non-negative_matrix_factorization) comes in. Here we are treating the data as a linear sum of non-negative components.
NMF assumes any data matrix can be factored into two matrices, $W$ and $Y$, with
$$\begin{equation}
X=W Y,
\end{equation}
$$
where both $W$ and $Y$ are nonnegative.
So, $WY$ is an approximation of $X$, and nonnegative bases can be derived by iteratively minimizing the reconstruction error $||X - WY||^2$.
Note, however, that the iterative process is only guaranteed to find *local* minima (as with $K$-means and EM), so random initialization and cross-validation are used to search for the global minimum.
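To see that sensitivity to initialization directly, one can refit with different random seeds and compare the reconstruction error; a minimal sketch on random toy data (not the spectra):
```python
import numpy as np
from sklearn.decomposition import NMF

X = np.random.random((100, 10))  # 100 points in 10-D
for seed in range(3):
    nmf = NMF(n_components=3, init='random', random_state=seed)
    nmf.fit(X)
    # different starting points can converge to different local minima
    print(seed, nmf.reconstruction_err_)
```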
An example from the literature is [Allen et al. 2008](http://arxiv.org/abs/0810.4231)
In Scikit-Learn the [NMF implementation](http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.NMF.html) looks like:
```python
# Execute this cell
import numpy as np
from sklearn.decomposition import NMF
X = np.random.random((100,10)) # 100 points in 10-D
nmf = NMF(n_components=3)
nmf.fit(X)
proj = nmf.transform(X) # project to 3 dimensions
comp = nmf.components_ # 3x10 array of components
err = nmf.reconstruction_err_ # how well 3 components capture the data
```
An example (and comparison to PCA) is given below.
```python
# Execute the next 2 cells
# Example from Figure 7.4
# Author: Jake VanderPlas
# License: BSD
%matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
from sklearn.decomposition import NMF
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition import PCA
from astroML.datasets import sdss_corrected_spectra
from astroML.decorators import pickle_results
#------------------------------------------------------------
# Download data
data = sdss_corrected_spectra.fetch_sdss_corrected_spectra()
spectra = sdss_corrected_spectra.reconstruct_spectra(data)
wavelengths = sdss_corrected_spectra.compute_wavelengths(data)
```
```python
#----------------------------------------------------------------------
# Compute PCA, and NMF components
def compute_PCA_NMF(n_components=5):
spec_mean = spectra.mean(0)
# PCA: use randomized PCA for speed
#pca = RandomizedPCA(n_components - 1)
pca = PCA(n_components - 1,svd_solver='randomized')
pca.fit(spectra)
pca_comp = np.vstack([spec_mean, pca.components_])
# NMF requires all elements of the input to be greater than zero
spectra[spectra < 0] = 0
nmf = NMF(n_components)
nmf.fit(spectra)
nmf_comp = nmf.components_
return pca_comp, nmf_comp
n_components = 5
decompositions = compute_PCA_NMF(n_components)
#----------------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(10, 10))
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05,
bottom=0.1, top=0.95, hspace=0.05)
titles = ['PCA components', 'NMF components']
for i, comp in enumerate(decompositions):
for j in range(n_components):
ax = fig.add_subplot(n_components, 3, 3 * j + 1 + i)
ax.yaxis.set_major_formatter(plt.NullFormatter())
ax.xaxis.set_major_locator(plt.MultipleLocator(1000))
if j < n_components - 1:
ax.xaxis.set_major_formatter(plt.NullFormatter())
else:
ax.set_xlabel('wavelength (Angstroms)')
ax.plot(wavelengths, comp[j], '-k', lw=1)
# plot zero line
xlim = [3000, 7999]
ax.plot(xlim, [0, 0], '-', c='gray', lw=1)
ax.set_xlim(xlim)
if j == 0:
ax.set_title(titles[i])
if titles[i].startswith('PCA') or titles[i].startswith('ICA'):
if j == 0:
label = 'mean'
else:
label = 'component %i' % j
else:
label = 'component %i' % (j + 1)
ax.text(0.03, 0.94, label, transform=ax.transAxes,
ha='left', va='top')
for l in ax.get_xticklines() + ax.get_yticklines():
l.set_markersize(2)
# adjust y limits
ylim = plt.ylim()
dy = 0.05 * (ylim[1] - ylim[0])
ax.set_ylim(ylim[0] - dy, ylim[1] + 4 * dy)
plt.show()
```
## Independent Component Analysis (ICA)
For data where the components are statistically independent (or nearly so) [Independent Component Analysis (ICA)](https://en.wikipedia.org/wiki/Independent_component_analysis) has become a popular method for separating mixed components. The classical example is the so-called "cocktail party" problem. This is illustrated in the following figure from Hastie, Tibshirani, and Friedman (Figure 14.27 on page 497 in my copy, so they have clearly added some stuff!). Think of the "source signals" as two voices at a party. You are trying to concentrate on just one voice. What you hear is something like the "measured signals" pattern. You could run the data through PCA and that would do an excellent job of reconstructing the signal with reduced dimensionality, but it wouldn't actually isolate the different physical components (bottom-left panel). ICA on the other hand can (bottom-right panel).
![Source signals, measured signals, and the PCA and ICA reconstructions (from Hastie, Tibshirani, & Friedman).](../images/HastieFigure14_37.png)
[Hastie et al.](https://web.stanford.edu/~hastie/ElemStatLearn/): "ICA applied to multivariate data looks for a sequence of orthogonal projections such that the projected data look as far from Gaussian as possible. With pre-whitened data, this amounts to looking for
components that are as independent as possible."
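"Pre-whitening" here just means transforming the data so that its features are uncorrelated with unit variance, which PCA can do directly via `whiten=True`. A minimal sketch on correlated random data:
```python
import numpy as np
from sklearn.decomposition import PCA

X = np.random.multivariate_normal([0, 0], [[3.0, 1.5], [1.5, 1.0]], size=2000)
X_white = PCA(whiten=True).fit_transform(X)
print(np.cov(X_white.T).round(2))  # (nearly) the identity matrix
```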
In short, you want to find components that are maximally non-Gaussian, since the sum of two random variables will be more Gaussian than either of the components (remember the Central Limit Theorem); Hastie et al. illustrate this graphically.
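A quick numerical illustration of that claim, using excess kurtosis as a crude measure of non-Gaussianity (it is zero for a Gaussian):
```python
import numpy as np
from scipy.stats import kurtosis

rng = np.random.RandomState(0)
a = rng.uniform(-1, 1, 100000)  # decidedly non-Gaussian (excess kurtosis -1.2)
b = rng.uniform(-1, 1, 100000)

# the sum is already closer to Gaussian, as the Central Limit Theorem predicts
print(kurtosis(a), kurtosis(a + b))
```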
ICA is a good choice for a complex system with relatively independent components. For example, a galaxy is roughly a linear combination of cool stars and hot stars, and a quasar is just a galaxy with additional components from an accretion disk and emission-line regions. Ideally we want "eigenvectors" that are aligned with those physical traits/regions as opposed to mathematical constructs.
The basic call to the [FastICA algorithm](http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.FastICA.html) in Scikit-Learn looks like:
```python
# Execute this cell
import numpy as np
from sklearn.decomposition import FastICA
X = np.random.normal(size=(100,2)) # 100 objects in 2D
R = np.random.random((2,5)) # mixing matrix
X = np.dot(X,R) # Simulation of a 5D data space
ica = FastICA(2) # Now reproject to 2-D
ica.fit(X)
proj = ica.transform(X) # 100x2 projection of the data
comp = ica.components_ # 2x5 matrix of independent components
## sources = ica.sources_ # 100x2 matrix of sources
```
Execute the next 2 cells to produce a plot showing the ICA components.
```python
%matplotlib inline
#Example from Andy Connolly
import numpy as np
from matplotlib import pyplot as plt
from sklearn.decomposition import FastICA
from astroML.datasets import sdss_corrected_spectra
from astroML.decorators import pickle_results
#------------------------------------------------------------
# Download data
data = sdss_corrected_spectra.fetch_sdss_corrected_spectra()
spectra = sdss_corrected_spectra.reconstruct_spectra(data)
wavelengths = sdss_corrected_spectra.compute_wavelengths(data)
#----------------------------------------------------------------------
# Compute PCA
np.random.seed(500)
nrows = 500
n_components = 5
ind = np.random.randint(spectra.shape[0], size=nrows)
spec_mean = spectra[ind].mean(0)
# spec_mean = spectra[:50].mean(0)
ica = FastICA(n_components - 1)
ica.fit(spectra[ind])
ica_comp = np.vstack([spec_mean,ica.components_]) #Add the mean to the components
```
```python
#Make plots
fig = plt.figure(figsize=(10, 8))
fig.subplots_adjust(left=0.05, right=0.95, wspace=0.05,
bottom=0.1, top=0.95, hspace=0.05)
titles = 'ICA components'
for j in range(n_components):
# plot the components
ax = fig.add_subplot(n_components, 2, 2*j+2)
ax.yaxis.set_major_formatter(plt.NullFormatter())
ax.xaxis.set_major_locator(plt.MultipleLocator(1000))
if j < n_components - 1:
ax.xaxis.set_major_formatter(plt.NullFormatter())
else:
ax.set_xlabel(r'wavelength ${\rm (\AA)}$')
ax.plot(wavelengths, ica_comp[j], '-k', lw=1)
# plot zero line
xlim = [3000, 7999]
ax.plot(xlim, [0, 0], '-', c='gray', lw=1)
ax.set_xlim(xlim)
# adjust y limits
ylim = plt.ylim()
dy = 0.05 * (ylim[1] - ylim[0])
ax.set_ylim(ylim[0] - dy, ylim[1] + 4 * dy)
# plot the first j spectra
ax2 = fig.add_subplot(n_components, 2, 2*j+1)
ax2.yaxis.set_major_formatter(plt.NullFormatter())
ax2.xaxis.set_major_locator(plt.MultipleLocator(1000))
if j < n_components - 1:
ax2.xaxis.set_major_formatter(plt.NullFormatter())
else:
ax2.set_xlabel(r'wavelength ${\rm (\AA)}$')
ax2.plot(wavelengths, spectra[j], '-k', lw=1)
# plot zero line
ax2.plot(xlim, [0, 0], '-', c='gray', lw=1)
ax2.set_xlim(xlim)
if j == 0:
ax.set_title(titles, fontsize='medium')
if j == 0:
label = 'mean'
else:
label = 'component %i' % j
# adjust y limits
ylim = plt.ylim()
dy = 0.05 * (ylim[1] - ylim[0])
ax2.set_ylim(ylim[0] - dy, ylim[1] + 4 * dy)
ax.text(0.02, 0.95, label, transform=ax.transAxes,
ha='left', va='top', bbox=dict(ec='w', fc='w'),
fontsize='small')
plt.show()
```
As with PCA and NMF, we can similarly do a reconstruction:
```python
# Execute this cell
#------------------------------------------------------------
# Find the coefficients of a particular spectrum
spec = spectra[1]
evecs = data['evecs']
coeff = np.dot(evecs, spec - spec_mean)
#------------------------------------------------------------
# Plot the sequence of reconstructions
fig = plt.figure(figsize=(8, 8))
fig.subplots_adjust(hspace=0)
for i, n in enumerate([0, 2, 4, 8]):
ax = fig.add_subplot(411 + i)
ax.plot(wavelengths, spec, '-', c='gray')
ax.plot(wavelengths, spec_mean + np.dot(coeff[:n], evecs[:n]), '-k')
if i < 3:
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.set_ylim(-2, 21)
ax.set_ylabel('flux')
if n == 0:
text = "mean"
elif n == 1:
text = "mean + 1 component\n"
#text += r"$(\sigma^2_{tot} = %.2f)$" % evals_cs[n - 1]
else:
text = "mean + %i components\n" % n
#text += r"$(\sigma^2_{tot} = %.2f)$" % evals_cs[n - 1]
ax.text(0.01, 0.95, text, ha='left', va='top', transform=ax.transAxes)
fig.axes[-1].set_xlabel(r'${\rm wavelength\ (\AA)}$')
plt.show()
```
Ivezic, Figure 7.4 compares the components found by the PCA, ICA, and NMF algorithms. Their differences and similarities are quite interesting.
If you think that I was pulling your leg about the cocktail party problem, try it yourself!
Load the code instead of running it and see what effect changing some things has.
```python
# %load ../code/plot_ica_blind_source_separation.py
"""
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
ie. what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.decomposition import FastICA, PCA
###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
#
s3[time>4] = 0
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)
# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components
###############################################################################
# Plot results
plt.figure(figsize=(8,8))
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA recovered signals',
'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
plt.subplot(4, 1, ii)
plt.title(name)
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
```
Let's revisit the digits sample and see what PCA, NMF, and ICA do for it.
```python
## Execute this cell to load the digits sample
%matplotlib inline
import numpy as np
from sklearn.datasets import load_digits
from matplotlib import pyplot as plt
digits = load_digits()
grid_data = np.reshape(digits.data[0], (8,8)) #reshape to 8x8
plt.imshow(grid_data, interpolation = "nearest", cmap = "bone_r")
print(grid_data)
X = digits.data
y = digits.target
```
Do the PCA transform, projecting to 2 dimensions and plot the results.
```python
# PCA
from sklearn.decomposition import PCA
pca = PCA(n_components = 2)
X_reduced = pca.fit_transform(X)  # fit and project in one step
plt.scatter(X_reduced[:,0], X_reduced[:,1], c=y, cmap="nipy_spectral", edgecolor="None")
plt.colorbar()
```
Similarly for NMF and ICA
```python
from sklearn.decomposition import NMF
nmf = NMF(n_components=2)
nmf.fit(X)
X_reduced = nmf.transform(X)
plt.scatter(X_reduced[:,0], X_reduced[:,1], c=y, cmap="nipy_spectral", edgecolor="None")
plt.colorbar()
```
```python
# ICA
from sklearn.decomposition import FastICA
ica = FastICA(n_components=2)
ica.fit(X)
X_reduced = ica.transform(X)
plt.scatter(X_reduced[:,0],X_reduced[:,1], c=y, cmap="nipy_spectral", edgecolor="None")
plt.colorbar()
```
Take a second to think about what ICA is doing. What if you had digits from digital clocks instead of handwritten?
I wasn't going to introduce [Neural Networks](https://en.wikipedia.org/wiki/Artificial_neural_network) yet, but it is worth noting that Scikit-Learn's [`Bernoulli Restricted Boltzmann Machine (RBM)`](http://scikit-learn.org/stable/modules/generated/sklearn.neural_network.BernoulliRBM.html) is discussed in the [(unsupervised) neural network](http://scikit-learn.org/stable/modules/neural_networks_unsupervised.html) part of the User's Guide and is relevant here, as the data input must be either binary or values between 0 and 1, which is the case that we have here.
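A minimal sketch of what that would look like for the digits data (using `X` from the cells above; the scaling step is required, and the hyperparameters are arbitrary assumptions):
```python
from sklearn.neural_network import BernoulliRBM

X01 = X / X.max()  # scale the pixel values into [0, 1]
rbm = BernoulliRBM(n_components=64, learning_rate=0.05, n_iter=20, random_state=0)
rbm.fit(X01)
hidden = rbm.transform(X01)  # latent representation of each digit
print(hidden.shape)
```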
We could think about doing dimensionality reduction of the digits data set in another way. There are 64 pixels in each of our images. Presumably all of them aren't equally useful. Let's figure out exactly which pixels are the most relevant. We'll use Scikit-Learn's [`RandomForestRegressor`](http://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestRegressor.html). We won't get to regression until next week, but you don't need to understand the algorithm to do this; just look at the inputs and outputs. Which pixels are the most important? As a bonus, see if you can plot digit images with those pixels highlighted.
```python
from sklearn.ensemble import RandomForestRegressor
RFreg = RandomForestRegressor()# Complete or leave blank as you see fit
RFreg.fit(X,y)# Do Fitting
importances = RFreg.feature_importances_# Determine "importances"
pixelorder = np.argsort(importances)[::-1] #Rank importances (highest to lowest)
print(pixelorder)
plt.figure()
plt.imshow(np.reshape(importances,(8,8)),interpolation="nearest")
plt.show()
```
|
{-# OPTIONS --without-K #-}
open import HoTT.Base
open import HoTT.Identity
open import HoTT.Homotopy

module HoTT.Equivalence where

open variables
private variable C : 𝒰 i

module _ (f : A → B) where
  qinv = Σ[ g ∶ (B → A) ] (g ∘ f ~ id) × (f ∘ g ~ id)
  -- Bi-invertible map
  linv = Σ[ g ∶ (B → A) ] g ∘ f ~ id
  rinv = Σ[ g ∶ (B → A) ] f ∘ g ~ id
  -- The book uses a flipped version rinv × linv for the definition in §2.4.
  biinv = linv × rinv
  -- Half-adjoint equivalence
  ishae = Σ[ g ∶ (B → A) ] Σ[ η ∶ g ∘ f ~ id ] Σ[ ε ∶ f ∘ g ~ id ] ap f ∘ η ~ ε ∘ f

module _ {f : A → B} where
  module qinv (e : qinv f) where
    g = pr₁ e
    η = pr₁ (pr₂ e)
    ε = pr₂ (pr₂ e)

  qinv→linv : qinv f → linv f
  qinv→linv e = g , η
    where open qinv e

  qinv→rinv : qinv f → rinv f
  qinv→rinv e = g , ε
    where open qinv e

  module ishae (e : ishae f) where
    g = pr₁ e
    η = pr₁ (pr₂ e)
    ε = pr₁ (pr₂ (pr₂ e))
    τ = pr₂ (pr₂ (pr₂ e))

  ishae→qinv : ishae f → qinv f
  ishae→qinv e = g , η , ε
    where open ishae e

  qinv→ishae : qinv f → ishae f
  qinv→ishae e = g , η , ε' , τ
    where
    open qinv e
    ε' : f ∘ g ~ id
    ε' b = ε (f (g b)) ⁻¹ ∙ (ap f (η (g b)) ∙ ε b)
    τ : ap f ∘ η ~ ε' ∘ f
    τ a =
      ap f (η a)                          =⟨ unitₗ ⟩
      refl ∙ ap f (η a)                   =⟨ invₗ ⁻¹ ∙ᵣ _ ⟩
      _ ∙ ε (f (g (f a))) ∙ ap f (η a)    =⟨ assoc ⁻¹ ⟩
      _ ∙ (_ ∙ ap f (η a))                =⟨ _ ∙ₗ (_ ∙ₗ ap-id (ap f (η a)) ⁻¹) ⟩
      _ ∙ (ε (f (g (f a))) ∙ ap id _)     =⟨ _ ∙ₗ ~-natural ε (ap f (η a)) ⟩
      _ ∙ (ap (f ∘ g) (ap f (η a)) ∙ _)   =⟨ _ ∙ₗ (ap-∘ (f ∘ g) f (η a) ⁻¹ ∙ᵣ _) ⟩
      _ ∙ (ap (f ∘ g ∘ f) (η a) ∙ _)      =⟨ _ ∙ₗ (ap-∘ f (g ∘ f) (η a) ∙ᵣ _) ⟩
      _ ∙ (ap f (ap (g ∘ f) (η a)) ∙ _)   =⟨ _ ∙ₗ (ap (ap f) (~-natural-comm η a ⁻¹) ∙ᵣ _) ⟩
      ε' (f a) ∎
      where open =-Reasoning

  module biinv (e : biinv f) where
    h = pr₁ (pr₁ e)
    β = pr₂ (pr₁ e)
    g = pr₁ (pr₂ e)
    α = pr₂ (pr₂ e)

  biinv→qinv : biinv f → qinv f
  biinv→qinv e = g , β' , α
    where
    open biinv e
    γ : g ~ h
    γ x = β (g x) ⁻¹ ∙ ap h (α x)
    β' : g ∘ f ~ id
    β' x = γ (f x) ∙ β x

  qinv→biinv : qinv f → biinv f
  qinv→biinv e = (g , η) , (g , ε)
    where open qinv e

module _ {f₁ : B → C} {f₂ : A → B} where
  ishae-∘ : ishae f₁ → ishae f₂ → ishae (f₁ ∘ f₂)
  ishae-∘ e₁ e₂ = g , η , ε , τ
    where
    open ishae e₁ renaming (g to g₁ ; η to η₁ ; ε to ε₁ ; τ to τ₁)
    open ishae e₂ renaming (g to g₂ ; η to η₂ ; ε to ε₂ ; τ to τ₂)
    f = f₁ ∘ f₂
    g = g₂ ∘ g₁
    η : g ∘ f ~ id
    η x = ap g₂ (η₁ (f₂ x)) ∙ η₂ x
    ε : f ∘ g ~ id
    ε x = ap f₁ (ε₂ (g₁ x)) ∙ ε₁ x
    τ : ap f ∘ η ~ ε ∘ f
    τ x =
      ap f (η x)                                        =⟨ ap-∘ f₁ f₂ (ap g₂ (η₁ (f₂ x)) ∙ η₂ x) ⟩
      ap f₁ (ap f₂ (η x))                               =⟨ ap (ap f₁) (ap-∙ f₂ (ap g₂ (η₁ (f₂ x))) (η₂ x)) ⟩
      ap f₁ (ap f₂ (ap g₂ (η₁ (f₂ x))) ∙ ap f₂ (η₂ x))  =⟨ ap (ap f₁) (ap f₂ _ ∙ₗ τ₂ x) ⟩
      ap f₁ (ap f₂ (ap g₂ (η₁ (f₂ x))) ∙ ε₂ (f₂ x))     =⟨ ap (ap f₁) (ap-∘ f₂ g₂ _ ⁻¹ ∙ᵣ ε₂ (f₂ x)) ⟩
      ap f₁ (ap (f₂ ∘ g₂) (η₁ (f₂ x)) ∙ ε₂ (f₂ x))      =⟨ ap (ap f₁) (~-natural ε₂ (η₁ (f₂ x))) ⁻¹ ⟩
      ap f₁ (ε₂ (g₁ (f x)) ∙ ap id (η₁ (f₂ x)))         =⟨ ap (ap f₁) (ε₂ (g₁ (f x)) ∙ₗ ap-id (η₁ (f₂ x))) ⟩
      ap f₁ (ε₂ (g₁ (f x)) ∙ η₁ (f₂ x))                 =⟨ ap-∙ f₁ (ε₂ (g₁ (f x))) (η₁ (f₂ x)) ⟩
      ap f₁ (ε₂ (g₁ (f x))) ∙ ap f₁ (η₁ (f₂ x))         =⟨ _ ∙ₗ τ₁ (f₂ x) ⟩
      ε (f x) ∎
      where open =-Reasoning

  biinv-∘ : biinv f₁ → biinv f₂ → biinv (f₁ ∘ f₂)
  biinv-∘ e₁ e₂ = (h , β) , (g , α)
    where
    open biinv e₁ renaming (h to h₁ ; β to β₁ ; g to g₁ ; α to α₁)
    open biinv e₂ renaming (h to h₂ ; β to β₂ ; g to g₂ ; α to α₂)
    f = f₁ ∘ f₂
    h = h₂ ∘ h₁
    β : h ∘ f ~ id
    β x = ap h₂ (β₁ (f₂ x)) ∙ β₂ x
    g = g₂ ∘ g₁
    α : f ∘ g ~ id
    α x = ap f₁ (α₂ (g₁ x)) ∙ α₁ x

-- Choose isequiv :≡ biinv since it is quicker to compute.
isequiv = biinv
qinv→isequiv = qinv→biinv
isequiv→qinv = biinv→qinv
isequiv-∘ = biinv-∘

_≃_ : 𝒰 i → 𝒰 j → 𝒰 (i ⊔ j)
A ≃ B = Σ (A → B) isequiv
infixr 5 _≃_

record Iso (A : 𝒰 i) (B : 𝒰 j) : 𝒰 (i ⊔ j) where
  field
    f : A → B
    g : B → A
    η : g ∘ f ~ id
    ε : f ∘ g ~ id

iso→eqv : Iso A B → A ≃ B
iso→eqv iso = f , qinv→isequiv (g , η , ε)
  where open Iso iso

eqv→iso : A ≃ B → Iso A B
eqv→iso e = record { f = pr₁ e ; g = g ; η = η ; ε = ε }
  where open qinv (isequiv→qinv (pr₂ e))

module Eqv {i} {j} {A : 𝒰 i} {B : 𝒰 j} (e : A ≃ B) = Iso (eqv→iso e)

-- Lemma 2.4.12
-- (i)
refl≃ : A ≃ A
refl≃ = id , qinv→isequiv (id , (λ _ → refl) , (λ _ → refl))

-- (ii)
_⁻¹≃ : A ≃ B → B ≃ A
e ⁻¹≃ = g , qinv→isequiv (pr₁ e , ε , η)
  where
  open qinv (isequiv→qinv (pr₂ e))
infix 30 _⁻¹≃

-- (iii)
_∙≃_ : A ≃ B → B ≃ C → A ≃ C
e₁ ∙≃ e₂ = pr₁ e₂ ∘ pr₁ e₁ , isequiv-∘ (pr₂ e₂) (pr₂ e₁)
infixl 20 _∙≃_

idtoeqv : A == B → A ≃ B
idtoeqv p = transport id p , e
  where
  e : isequiv (transport id p)
  e rewrite p = pr₂ refl≃

module ≃-Reasoning
  where
  _≃⟨_⟩_ : (A : 𝒰 i) → A ≃ B → B ≃ C → A ≃ C
  x ≃⟨ e₁ ⟩ e₂ = e₁ ∙≃ e₂
  infixr 2 _≃⟨_⟩_

  _∎ : (A : 𝒰 i) → A ≃ A
  _ ∎ = refl≃
  infix 3 _∎
|
Require Import Crypto.Arithmetic.PrimeFieldTheorems.
Require Import Crypto.Specific.solinas32_2e206m5_9limbs.Synthesis.
(* TODO : change this to field once field isomorphism happens *)
Definition carry :
{ carry : feBW_loose -> feBW_tight
| forall a, phiBW_tight (carry a) = (phiBW_loose a) }.
Proof.
Set Ltac Profiling.
Time synthesize_carry ().
Show Ltac Profile.
Time Defined.
Print Assumptions carry.
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Unit tests for PyMVPA surface searchlight functions specific for
handling AFNI datasets"""
import numpy as np
import os
import tempfile
from mvpa2.testing import *
from mvpa2.support.nibabel import afni_niml, afni_niml_dset, afni_niml_roi, \
surf, afni_suma_spec
from mvpa2.datasets import niml
from mvpa2.datasets.base import Dataset
class SurfTests(unittest.TestCase):
"""Test for AFNI I/O together with surface-based stuff
NNO Aug 2012
'Ground truth' is whatever output is returned by the implementation
as of mid-Aug 2012"""
def _get_rng(self):
keys = [(17 * i ** 5 + 78234745 * i + 8934) % (2 ** 32 - 1)
for i in xrange(624)]
keys = np.asanyarray(keys, dtype=np.uint32)
rng = np.random.RandomState()
rng.set_state(('MT19937', keys, 0))
return rng
def test_afni_niml(self):
# just a bunch of tests
ps = afni_niml._partial_string
assert_equal(ps("", 0, 0), "")
assert_equal(ps("ab", 0, 0), "")
assert_equal(ps("abcdefghij", 0, 0), "")
assert_equal(ps("", 2, 0), "")
assert_equal(ps("ab", 2, 0), "")
assert_equal(ps("abcdefghij", 2, 0), "")
assert_equal(ps("", 0, 1), "")
assert_equal(ps("ab", 0, 1), " ... b")
assert_equal(ps("abcdefghij", 0, 1), " ... j")
assert_equal(ps("", 2, 1), "")
assert_equal(ps("ab", 2, 1), "")
assert_equal(ps("abcdefghij", 2, 1), " ... j")
assert_equal(ps("", 0, 100), "")
assert_equal(ps("ab", 0, 100), "ab")
assert_equal(ps("abcdefghij", 0, 100), "abcdefghij")
assert_equal(ps("", 2, 100), "")
assert_equal(ps("ab", 2, 100), "")
assert_equal(ps("abcdefghij", 2, 100), "cdefghij")
data = np.asarray([[1347506771, 1347506772],
[1347506773, 1347506774]],
dtype=np.int32)
fmt_data_reprs = dict(text='1347506771 1347506772\n1347506773 1347506774',
binary='SRQPTRQPURQPVRQP',
base64='U1JRUFRSUVBVUlFQVlJRUA==')
minimal_niml_struct = [{'dset_type': 'Node_Bucket',
'name': 'AFNI_dataset',
'ni_form': 'ni_group',
'nodes': [{'data': data,
'data_type': 'Node_Bucket_data',
'name': 'SPARSE_DATA',
'ni_dimen': '2',
'ni_type': '2*int32'},
{'atr_name': 'COLMS_LABS',
'data': 'col_0;col_1',
'name': 'AFNI_atr',
'ni_dimen': '1',
'ni_type': 'String'}]}]
def _eq(p, q):
# helper function: equality for both arrays and other things
return np.all(p == q) if type(p) is np.ndarray else p == q
for fmt, data_repr in fmt_data_reprs.iteritems():
s = afni_niml.rawniml2string(minimal_niml_struct, fmt)
d = afni_niml.string2rawniml(s)
# ensure data was converted properly
for k, v in minimal_niml_struct[0].iteritems():
if k == 'nodes':
# at least in one of the data
for node in v:
for kk, vv in node.iteritems():
# at least one of the data fields should have a value matching
# that from the expected converted value
dvals = [d[0]['nodes'][i].get(kk, None) for i in xrange(len(v))]
assert_true(any([_eq(vv, dval) for dval in dvals]))
elif k != 'name':
# check header was properly converted
assert_true(('%s="%s"' % (k, v)).encode() in s)
# check that if we remove some important information, then parsing fails
important_keys = ['ni_form', 'ni_dimen', 'ni_type']
for k in important_keys:
s_bad = s.replace(k.encode(), b'foo')
assert_raises((KeyError, ValueError), afni_niml.string2rawniml, s_bad)
# adding garbage at the beginning or end should fail the parse
garbage = "GARBAGE".encode()
assert_raises((KeyError, ValueError), afni_niml.string2rawniml, s + garbage)
assert_raises((KeyError, ValueError), afni_niml.string2rawniml, garbage + s)
@with_tempfile('.niml.dset', 'dset')
def test_afni_niml_dset_with_2d_strings(self, fn):
# test for 2D arrays with strings. These are possibly SUMA-incompatible
# but should still be handled properly for i/o.
# Addresses https://github.com/PyMVPA/PyMVPA/issues/163 (#163)
samples = np.asarray([[1, 2, 3], [4, 5, 6]])
labels = np.asarray(map(list, ['abcd', 'efgh']))
idxs = np.asarray([np.arange(10, 14), np.arange(20, 24)])
ds = Dataset(samples, sa=dict(labels=labels, idxs=idxs))
for fmt in ('binary', 'text', 'base64'):
niml.write(fn, ds, fmt)
ds_ = niml.read(fn)
assert_array_equal(ds.samples, ds_.samples)
for sa_key in ds.sa.keys():
v = ds.sa[sa_key].value
v_ = ds_.sa[sa_key].value
assert_array_equal(v, v_)
@with_tempfile('.niml.dset', 'dset')
def test_afni_niml_dset(self, fn):
sz = (100, 45) # dataset size
rng = self._get_rng() # generate random data
expected_vals = {(0, 0):-2.13856 , (sz[0] - 1, sz[1] - 1):-1.92434,
(sz[0], sz[1] - 1):None, (sz[0] - 1, sz[1]):None,
sz:None}
# test for different formats in which the data is stored
fmts = ['text', 'binary', 'base64']
# also test for different datatypes
tps = [np.int32, np.int64, np.float32, np.float64]
# generated random data
data = rng.normal(size=sz)
# set labels for samples, and set node indices
labels = ['lab_%d' % round(rng.uniform() * 1000)
for _ in xrange(sz[1])]
node_indices = np.argsort(rng.uniform(size=(sz[0],)))
node_indices = np.reshape(node_indices, (sz[0], 1))
eps = .00001
# test I/O
# depending on the mode we do different tests (but on the same data)
modes = ['normal', 'skipio', 'sparse2full']
for fmt in fmts:
for tp in tps:
for mode in modes:
# make a dataset
dset = dict(data=np.asarray(data, tp),
labels=labels,
node_indices=node_indices)
dset_keys = dset.keys()
if mode == 'skipio':
# try conversion to/from raw NIML
# do not write to disk
r = afni_niml_dset.dset2rawniml(dset)
s = afni_niml.rawniml2string(r)
r2 = afni_niml.string2rawniml(s)
dset2 = afni_niml_dset.rawniml2dset(r2)[0]
else:
# write and read from disk
afni_niml_dset.write(fn, dset, fmt)
dset2 = afni_niml_dset.read(fn)
os.remove(fn)
# data in dset and dset2 should be identical
for k in dset_keys:
# general idea is to test whether v is equal to v2
v = dset[k]
v2 = dset2[k]
if k == 'data':
if mode == 'sparse2full':
# test the sparse2full feature
# this changes the order of the data over columns
# so we skip testing whether dset2 is equal to dset
nfull = 2 * sz[0]
dset3 = afni_niml_dset.sparse2full(dset2,
pad_to_node=nfull)
assert_equal(dset3['data'].shape[0], nfull)
idxs = dset['node_indices'][:, 0]
idxs3 = dset3['node_indices'][:, 0]
vbig = np.zeros((nfull, sz[1]))
vbig[idxs, :] = v[np.arange(sz[0]), :]
v = vbig
v2 = dset3['data'][idxs3, :]
else:
# check that data is as expected
for pos, val in expected_vals.iteritems():
if val is None:
assert_raises(IndexError, lambda x:x[pos], v2)
else:
val2 = np.asarray(val, tp)
assert_true(abs(v2[pos] - val2) < eps)
if type(v) is list:
assert_equal(v, v2)
else:
eps_dec = 4
if mode != 'sparse2full' or k == 'data':
assert_array_almost_equal(v, v2, eps_dec)
@with_tempfile('.niml.dset', 'dset')
def test_niml(self, fn):
d = dict(data=np.random.normal(size=(10, 2)),
node_indices=np.arange(10),
stats=['none', 'Tstat(2)'],
labels=['foo', 'bar'])
a = niml.from_niml(d)
b = niml.to_niml(a)
afni_niml_dset.write(fn, b)
bb = afni_niml_dset.read(fn)
cc = niml.from_niml(bb)
os.remove(fn)
for dset in (a, cc):
assert_equal(list(dset.sa['labels']), d['labels'])
assert_equal(list(dset.sa['stats']), d['stats'])
assert_array_equal(np.asarray(dset.fa['node_indices']).ravel(),
d['node_indices'])
eps_dec = 4
assert_array_almost_equal(dset.samples, d['data'].transpose(),
eps_dec)
# some more tests to ensure that the order of elements is ok
# (row first or column first)
d = np.arange(10).reshape((5, -1)) + .5
ds = Dataset(d)
writers = [niml.write, afni_niml_dset.write]
for i, writer in enumerate(writers):
for form in ('text', 'binary', 'base64'):
if i == 0:
writer(fn, ds, form=form)
else:
writer(fn, dict(data=d.transpose()), form=form)
x = afni_niml_dset.read(fn)
assert_array_equal(x['data'], d.transpose())
@with_tempfile('.niml.dset', 'dset')
def test_niml_dset_voxsel(self, fn):
if not externals.exists('nibabel'):
return
# This is actually a bit of an integration test.
# It tests storing and retrieving searchlight results.
# Imports are inline here so that it does not mess up the header
# and makes the other unit tests more modular
# XXX put this in a separate file?
from mvpa2.misc.surfing import volgeom, surf_voxel_selection, queryengine
from mvpa2.measures.searchlight import Searchlight
from mvpa2.support.nibabel import surf
from mvpa2.measures.base import Measure
from mvpa2.datasets.mri import fmri_dataset
class _Voxel_Count_Measure(Measure):
# used to check voxel selection results
is_trained = True
def __init__(self, dtype, **kwargs):
Measure.__init__(self, **kwargs)
self.dtype = dtype
def _call(self, dset):
return self.dtype(dset.nfeatures)
sh = (20, 20, 20)
vg = volgeom.VolGeom(sh, np.identity(4))
density = 20
outer = surf.generate_sphere(density) * 10. + 5
inner = surf.generate_sphere(density) * 5. + 5
intermediate = outer * .5 + inner * .5
xyz = intermediate.vertices
radius = 50
sel = surf_voxel_selection.run_voxel_selection(radius, vg, inner, outer)
qe = queryengine.SurfaceVerticesQueryEngine(sel)
for dtype in (int, float):
sl = Searchlight(_Voxel_Count_Measure(dtype), queryengine=qe)
ds = fmri_dataset(vg.get_empty_nifti_image(1))
r = sl(ds)
niml.write(fn, r)
rr = niml.read(fn)
os.remove(fn)
assert_array_equal(r.samples, rr.samples)
def test_niml_dset_stack(self):
values = map(lambda x:np.random.normal(size=x), [(10, 3), (10, 4), (10, 5)])
indices = [[0, 1, 2], [3, 2, 1, 0], None]
dsets = []
for v, i in zip(values, indices):
dset = Dataset(v)
if not i is None:
dset.fa['node_indices'] = i
dsets.append(dset)
dset = niml.hstack(dsets)
assert_equal(dset.nfeatures, 12)
assert_equal(dset.nsamples, 10)
indices = np.asarray([ 0, 1, 2, 6, 5, 4, 3, 7, 8, 9, 10, 11])
assert_array_equal(dset.fa['node_indices'], indices)
dset = niml.hstack(dsets, 10)
dset = niml.hstack(dsets, 10) # twice to ensure not overwriting
assert_equal(dset.nfeatures, 30)
indices = np.asarray([0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
13, 12, 11, 10, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29])
assert_array_equal(dset.fa['node_indices'], indices)
assert_true(np.all(dset[:, 4].samples == 0))
assert_array_equal(dset[:, 10:14].samples, dsets[1].samples)
# If not enough space it should raise an error
stacker = (lambda x: niml.hstack(dsets, x))
assert_raises(ValueError, stacker, 2)
# If sparse then with no padding it should fail
dsets[0].fa.node_indices[0] = 3
assert_raises(ValueError, stacker, None)
# Using an illegal node index should raise an error
dsets[1].fa.node_indices[0] = 666
assert_raises(ValueError, stacker, 10)
@with_tempfile('.niml.roi', 'dset')
def test_afni_niml_roi(self, fn):
payload = """# <Node_ROI
# ni_type = "SUMA_NIML_ROI_DATUM"
# ni_dimen = "5"
# self_idcode = "XYZ_QlRYtdSyHmNr39qZWxD0wQ"
# domain_parent_idcode = "XYZ_V_Ug6er2LCNoLy_OzxPsZg"
# Parent_side = "no_side"
# Label = "myroi"
# iLabel = "12"
# Type = "2"
# ColPlaneName = "ROI.-.CoMminfl"
# FillColor = "0.525490 0.043137 0.231373 1.000000"
# EdgeColor = "0.000000 0.000000 1.000000 1.000000"
# EdgeThickness = "2"
# >
1 4 1 42946
1 4 10 42946 42947 43062 43176 43289 43401 43512 43513 43623 43732
1 4 8 43732 43623 43514 43404 43293 43181 43068 42954
3 4 9 42954 42953 42952 42951 42950 42949 42948 42947 42946
4 1 14 43063 43064 43065 43066 43067 43177 43178 43179 43180 43290 43291 43292 43402 43403
# </Node_ROI>"""
with open(fn, 'w') as f:
f.write(payload)
rois = afni_niml_roi.read(fn)
assert_equal(len(rois), 1)
roi = rois[0]
expected_keys = ['ni_type', 'ColPlaneName', 'iLabel', 'Parent_side',
'EdgeColor', 'Label', 'edges', 'ni_dimen',
'self_idcode', 'EdgeThickness', 'Type', 'areas',
'domain_parent_idcode', 'FillColor']
assert_equal(set(roi.keys()), set(expected_keys))
assert_equal(roi['Label'], 'myroi')
assert_equal(roi['iLabel'], 12)
# check edges
arr = np.asarray
expected_edges = [arr([42946]),
arr([42946, 42947, 43062, 43176, 43289, 43401,
43512, 43513, 43623, 43732]),
arr([43732, 43623, 43514, 43404, 43293, 43181,
43068, 42954]),
arr([42954, 42953, 42952, 42951, 42950, 42949,
42948, 42947, 42946])]
for i in xrange(4):
assert_array_equal(roi['edges'][i], expected_edges[i])
# check nodes
expected_nodes = [arr([43063, 43064, 43065, 43066, 43067, 43177, 43178,
43179, 43180, 43290, 43291, 43292, 43402, 43403])]
assert_equal(len(roi['areas']), 1)
assert_array_equal(roi['areas'][0], expected_nodes[0])
# check mapping
m = afni_niml_roi.read_mapping(rois)
assert_equal(m.keys(), ['myroi'])
unique_nodes = np.unique(expected_nodes[0])
assert_array_equal(m['myroi'], unique_nodes)
@with_tempfile()
def test_afni_suma_spec(self, temp_dir):
# XXX this function generates quite a few temporary files,
# which are removed at the end.
# the decorator @with_tempfile seems unsuitable as it only
# supports a single temporary file
# make temporary directory
os.mkdir(temp_dir)
# generate surfaces
inflated_surf = surf.generate_plane((0, 0, 0), (0, 1, 0), (0, 0, 1),
10, 10)
white_surf = inflated_surf + 1.
# helper function
_tmp = lambda x:os.path.join(temp_dir, x)
# filenames for surfaces and spec file
inflated_fn = _tmp('_lh_inflated.asc')
white_fn = _tmp('_lh_white.asc')
spec_fn = _tmp('lh.spec')
spec_dir = os.path.split(spec_fn)[0]
# generate SUMA-like spec dictionary
white = dict(SurfaceFormat='ASCII',
EmbedDimension='3',
SurfaceType='FreeSurfer',
SurfaceName=white_fn,
Anatomical='Y',
LocalCurvatureParent='SAME',
LocalDomainParent='SAME',
SurfaceState='smoothwm')
inflated = dict(SurfaceFormat='ASCII',
EmbedDimension='3',
SurfaceType='FreeSurfer',
SurfaceName=inflated_fn,
Anatomical='N',
LocalCurvatureParent=white_fn,
LocalDomainParent=white_fn,
SurfaceState='inflated')
# make SurfaceSpec object
spec = afni_suma_spec.SurfaceSpec([white], directory=spec_dir)
spec.add_surface(inflated)
# test __str__ and __repr__
assert_true('SurfaceSpec instance with 2 surfaces'
', 2 states ' in '%s' % spec)
assert_true(('%r' % spec).startswith('SurfaceSpec'))
# test finding surfaces
inflated_ = spec.find_surface_from_state('inflated')
assert_equal([(1, inflated)], inflated_)
empty = spec.find_surface_from_state('unknown')
assert_equal(empty, [])
# test .same_states
minimal = afni_suma_spec.SurfaceSpec([dict(SurfaceState=s)
for s in ('smoothwm', 'inflated')])
assert_true(spec.same_states(minimal))
assert_false(spec.same_states(afni_suma_spec.SurfaceSpec(dict())))
# test 'smart' surface file matching
assert_equal(spec.get_surface_file('smo'), white_fn)
assert_equal(spec.get_surface_file('inflated'), inflated_fn)
assert_equal(spec.get_surface_file('this should be None'), None)
# test i/o
spec.write(spec_fn)
spec_ = afni_suma_spec.from_any(spec_fn)
# prepare for another (right-hemisphere) spec file
lh_spec = spec
rh_spec_fn = spec_fn.replace('lh', 'rh')
rh_inflated_fn = _tmp(os.path.split(inflated_fn)[1].replace('_lh',
'_rh'))
rh_white_fn = _tmp(os.path.split(white_fn)[1].replace('_lh',
'_rh'))
rh_spec_fn = _tmp('rh.spec')
rh_white = dict(SurfaceFormat='ASCII',
EmbedDimension='3',
SurfaceType='FreeSurfer',
SurfaceName=rh_white_fn,
Anatomical='Y',
LocalCurvatureParent='SAME',
LocalDomainParent='SAME',
SurfaceState='smoothwm')
rh_inflated = dict(SurfaceFormat='ASCII',
EmbedDimension='3',
SurfaceType='FreeSurfer',
SurfaceName=rh_inflated_fn,
Anatomical='N',
LocalCurvatureParent=rh_white_fn,
LocalDomainParent=rh_white_fn,
SurfaceState='inflated')
rh_spec = afni_suma_spec.SurfaceSpec([rh_white], directory=spec_dir)
rh_spec.add_surface(rh_inflated)
# write files
all_temp_fns = [spec_fn, rh_spec_fn]
for fn, s in [(rh_inflated_fn, inflated_surf),
(rh_white_fn, white_surf),
(inflated_fn, inflated_surf),
(white_fn, white_surf)]:
surf.write(fn, s)
all_temp_fns.append(fn)
# test adding views
added_specs = afni_suma_spec.hemi_pairs_add_views((lh_spec, rh_spec),
'inflated', '.asc')
for hemi, added_spec in zip(('l', 'r'), added_specs):
states = ['smoothwm', 'inflated'] + ['CoM%sinflated' % i
for i in 'msiap']
assert_equal(states, [s['SurfaceState']
for s in added_specs[0].surfaces])
all_temp_fns.extend([s['SurfaceName']
for s in added_spec.surfaces])
# test combining specs (bh=both hemispheres)
bh_spec = afni_suma_spec.combine_left_right(added_specs)
# test merging specs (mh=merged hemispheres)
mh_spec, mh_surfs = afni_suma_spec.merge_left_right(bh_spec)
assert_equal([s['SurfaceState'] for s in mh_spec.surfaces],
['smoothwm'] + ['CoM%sinflated' % i for i in 'msiap'])
def suite(): # pragma: no cover
"""Create the suite"""
return unittest.makeSuite(SurfTests)
if __name__ == '__main__': # pragma: no cover
import runner
runner.run()
|
using ArgParse
using YAML
using Distributed
@everywhere include("evo/robo.jl")
s = ArgParse.ArgParseSettings()
ArgParse.@add_arg_table(
s,
"--seed", arg_type=Int, default=0,
"--id", arg_type=String, default="test",
"--log", arg_type=String, default="evolution.log",
"--cfg", arg_type=String, default="cfg/darwin.yaml",
)
args = ArgParse.parse_args(s)
cfg = YAML.load_file(args["cfg"])
cfg["seed"] = args["seed"]
cfg["n_fitness"] = 1
e = Evolution(NeurodevoInd, cfg; id=args["id"], logfile=args["log"])
e.mutation = uniform_mutation
e.evaluation = robo_eval
e.generation = generation
Darwin.run!(e)
|
```python
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
import numpy as np
import sympy as sym
```
```python
def f(X, n, q, b, c):
Z = np.zeros(len(X))
for i in range(len(X)):
for j in range(int(n)):
for k in range(int(n)):
Z[i] += q[j][k]*X[i][j]*X[i][k]
for j in range(int(n)):
Z[i] += b[j]*X[i][j]
return Z
```
```python
def f_mesh(X, Y, q, b, c):
Z = np.zeros(len(X))
Z = q[0][0]*X*X + q[0][1]*X*Y + q[1][0]*Y*X + q[1][1]*Y*Y + b[0]*X + b[1]*Y + c
return Z
```
```python
def f2(X, Y, n, q, b, c):
Z = q[0][0]*X*X + q[0][1]*X*Y + q[1][0]*Y*X + q[1][1]*Y*Y + b[0]*X + b[1]*Y + int(c[0])
return Z
```
```python
def grad_descent(X, X1, Y1, Y, n, q, b, c, eps=0.05, precision=0.0001, max_iter=200):
X_old = np.zeros((1, 2))
X_new = np.zeros((1, 2))
dfr = np.zeros((1, 2))
X_new[0][0] = 4.9
X_new[0][1] = 4.9
i = 0
Xs = np.zeros((max_iter,2))
Ys = np.zeros(max_iter)
x, y = sym.symbols('x y')
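    # build the gradient symbolically once, then evaluate it numerically at each step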
df1 = sym.diff(f2(x, y, n, q, b, c), x)
df2 = sym.diff(f2(x, y, n, q, b, c), y)
# print("df1: {}, df2: {}".format(df1, df2))
    while np.all(abs(X_new - X_old) > precision) and i < max_iter:  # stop once every coordinate has converged
Xs[i] = X_new
Ys[i] = f2 (X_new[0][0], X_new[0][1], n, q, b, c)
X_old = X_new
dfr[0][0] = df1.evalf(subs={x: X_old[0][0], y: X_old[0][1]})
dfr[0][1] = df2.evalf(subs={x: X_old[0][0], y: X_old[0][1]})
X_new = X_old - eps * dfr
# print("X_new: {}, X_old: {}, dfr: {}".format(X_new, X_old, dfr))
i += 1
eps *= 0.99
print("Finished with {} step".format(i))
if (i < max_iter):
Xs[i] = X_new
Ys[i] = f2(X_new[0][0], X_new[0][1], n, q, b, c)
for j in range(max_iter - 1, i, -1):
Xs = np.delete(Xs, j, axis=0)
Ys = np.delete(Ys, j, axis=0)
return Xs, Ys
```
```python
X1 = np.arange(-5, 5, 0.1)
Y1 = np.arange(-5, 5, 0.1)
Z1 = np.zeros(len(X1))
X_new = np.zeros((100,2))
for i in range(len(X1)):
X_new[i][0] = X1[i]
X_new[i][1] = Y1[i]
Z1 = f(X_new, n, q, b, c)
x_list, y_list = grad_descent(X_new, X1, Y1, Z1, n, q, b, c)
```
Finished after 200 steps
```python
n = input("Enter power of your function: ")
q = []
for i in range(int(n)):
q.append(input("Enter the function's coefficient matrix q's row q[{}]: ".format(i)).split())
b = input("Enter the function's coefficient b matrix: ").split()
c = input("Enter the function's constant c: ")
for i in range(int(n)):
q[i] = list(map(float, q[i]))
b = list(map(float, b))
c = list(map(float, c))
```
Enter power of your function: 2
Enter the function's coefficient matrix q's row q[0]: 1 0
Enter the function's coefficient matrix q's row q[1]: 0 2
Enter the function's coefficient b matrix: 0 0
Enter the function's constant c: 0
```python
X1, Y1 = np.meshgrid(X1, Y1)
Z1 = f_mesh(X1, Y1, q, b, c)
X, Y = zip(*x_list)
Z = y_list
ax = plt.subplots(nrows=1, ncols=1, figsize=(10,10))
cs = plt.contour(X1, Y1, Z1)
plt.clabel(cs, inline=1, fontsize=10)
colors = ['b', 'g', 'm', 'c', 'orange']
for j in range(1, len(X)):
ax[1].annotate('', xy=(X[j], Y[j]), xytext=(X[j-1], Y[j-1]),
arrowprops={'arrowstyle': '->', 'color': 'r', 'lw': 1},
va='center', ha='center')
ax[1].scatter(X, Y, s=40, lw=0)
ax[1].set_xlabel('X')
ax[1].set_ylabel('Y')
ax[1].set_title('Minimizing function')
```
```python
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.plot_surface(X1, Y1, Z1, rstride=1, cstride=1,
cmap='viridis', edgecolor='none')
ax.set_title('surface')
plt.show()
```
```python
plt.figure()
cs = plt.contour(X1, Y1, Z1)
plt.clabel(cs, inline=1, fontsize=10)
plt.title('Two-dimensional contour plot')
plt.show()
```
|
%kExpAny 'Exports khoros files into many different formats '
% This MatLab function was automatically generated by a converter (KhorosToMatLab) from the Khoros ExpAny.pane file
%
% Parameters:
% InputFile: i 'Input File', required: 'First Input data object'
% OutputFile: o 'Output File', required: 'Resulting output data object'
% Toggle: color 'RGB-Color ', default: 0: 'generate a color-image'
% Toggle: nonormalize 'Do not normalize', default: 0: 'if selected, no normalization will be performed. Range is usually 0..255'
%
% Example: o = kExpAny(i, {'i','';'o','';'color',0;'nonormalize',0})
%
% Khoros helpfile follows below:
%
% PROGRAM
% ExpAny - Exports khoros files into many different formats
%
% DESCRIPTION
%
%
%
% EXAMPLES
%
% "SEE ALSO"
%
% RESTRICTIONS
%
% REFERENCES
%
% COPYRIGHT
% Copyright (C) 1996-2003, Rainer Heintzmann, All rights reserved.
%
function varargout = kExpAny(varargin)
if nargin ==0
Inputs={};arglist={'',''};
elseif nargin ==1
Inputs=varargin{1};arglist={'',''};
elseif nargin ==2
Inputs=varargin{1}; arglist=varargin{2};
else error('Usage: [out1,..] = kExpAny(Inputs,arglist).');
end
if size(arglist,2)~=2
error('arglist must be of form {''ParameterTag1'',value1;''ParameterTag2'',value2}')
end
narglist={'i', '__input';'o', '__output';'color', 0;'nonormalize', 0};
maxval={0,0,0,0};
minval={0,0,0,0};
istoggle=[0,0,1,1];
was_set=istoggle * 0;
paramtype={'InputFile','OutputFile','Toggle','Toggle'};
% identify the input arrays and assign them to the arguments as stated by the user
if ~iscell(Inputs)
Inputs = {Inputs};
end
NumReqOutputs=1; nextinput=1; nextoutput=1;
for ii=1:size(arglist,1)
wasmatched=0;
for jj=1:size(narglist,1)
if strcmp(arglist{ii,1},narglist{jj,1}) % a given argument was matched to the possible arguments
wasmatched = 1;
was_set(jj) = 1;
if strcmp(narglist{jj,2}, '__input')
if (nextinput > length(Inputs))
error(['Input ' narglist{jj,1} ' has no corresponding input!']);
end
narglist{jj,2} = 'OK_in';
nextinput = nextinput + 1;
elseif strcmp(narglist{jj,2}, '__output')
if (nextoutput > nargout)
error(['Output nr. ' narglist{jj,1} ' is not present in the assignment list of outputs !']);
end
if (isempty(arglist{ii,2}))
narglist{jj,2} = 'OK_out';
else
narglist{jj,2} = arglist{ii,2};
end
nextoutput = nextoutput + 1;
if (minval{jj} == 0)
NumReqOutputs = NumReqOutputs - 1;
end
elseif isstr(arglist{ii,2})
narglist{jj,2} = arglist{ii,2};
else
if strcmp(paramtype{jj}, 'Integer') & (round(arglist{ii,2}) ~= arglist{ii,2})
error(['Argument ' arglist{ii,1} ' is of integer type but non-integer number ' arglist{ii,2} ' was supplied']);
end
if (minval{jj} ~= 0 | maxval{jj} ~= 0)
if (minval{jj} == 1 & maxval{jj} == 1 & arglist{ii,2} < 0)
error(['Argument ' arglist{ii,1} ' must be bigger or equal to zero!']);
elseif (minval{jj} == -1 & maxval{jj} == -1 & arglist{ii,2} > 0)
error(['Argument ' arglist{ii,1} ' must be smaller or equal to zero!']);
elseif (minval{jj} == 2 & maxval{jj} == 2 & arglist{ii,2} <= 0)
error(['Argument ' arglist{ii,1} ' must be bigger than zero!']);
elseif (minval{jj} == -2 & maxval{jj} == -2 & arglist{ii,2} >= 0)
error(['Argument ' arglist{ii,1} ' must be smaller than zero!']);
elseif (minval{jj} ~= maxval{jj} & arglist{ii,2} < minval{jj})
error(['Argument ' arglist{ii,1} ' must be bigger than ' num2str(minval{jj})]);
elseif (minval{jj} ~= maxval{jj} & arglist{ii,2} > maxval{jj})
error(['Argument ' arglist{ii,1} ' must be smaller than ' num2str(maxval{jj})]);
end
end
end
if ~strcmp(narglist{jj,2},'OK_out') & ~strcmp(narglist{jj,2},'OK_in')
narglist{jj,2} = arglist{ii,2};
end
end
end
if (wasmatched == 0 & ~strcmp(arglist{ii,1},''))
error(['Argument ' arglist{ii,1} ' is not a valid argument for this function']);
end
end
% match the remaining inputs/outputs to the unused arguments and test for missing required inputs
for jj=1:size(narglist,1)
if strcmp(paramtype{jj}, 'Toggle')
if (narglist{jj,2} ==0)
narglist{jj,1} = '';
end;
narglist{jj,2} = '';
end;
if ~strcmp(narglist{jj,2},'__input') && ~strcmp(narglist{jj,2},'__output') && istoggle(jj) && ~ was_set(jj)
narglist{jj,1} = '';
narglist{jj,2} = '';
end;
if strcmp(narglist{jj,2}, '__input')
if (minval{jj} == 0) % meaning this input is required
if (nextinput > size(Inputs))
error(['Required input ' narglist{jj,1} ' has no corresponding input in the list!']);
else
narglist{jj,2} = 'OK_in';
nextinput = nextinput + 1;
end
else % this is an optional input
if (nextinput <= length(Inputs))
narglist{jj,2} = 'OK_in';
nextinput = nextinput + 1;
else
narglist{jj,1} = '';
narglist{jj,2} = '';
end;
end;
else
if strcmp(narglist{jj,2}, '__output')
if (minval{jj} == 0) % this is a required output
if (nextoutput > nargout & nargout > 1)
error(['Required output ' narglist{jj,1} ' is not stated in the assignment list!']);
else
narglist{jj,2} = 'OK_out';
nextoutput = nextoutput + 1;
NumReqOutputs = NumReqOutputs-1;
end
else % this is an optional output
if (nargout - nextoutput >= NumReqOutputs)
narglist{jj,2} = 'OK_out';
nextoutput = nextoutput + 1;
else
narglist{jj,1} = '';
narglist{jj,2} = '';
end;
end
end
end
end
if nargout
varargout = cell(1,nargout);
else
varargout = cell(1,1);
end
global KhorosRoot
if exist('KhorosRoot') && ~isempty(KhorosRoot)
w=['"' KhorosRoot];
else
if ispc
w='"C:\Program Files\dip\khorosBin\';
else
[s,w] = system('which cantata');
w=['"' w(1:end-8)];
end
end
[varargout{:}]=callKhoros([w 'expany" '],Inputs,narglist);
|
(*
* Copyright 2014, General Dynamics C4 Systems
*
* SPDX-License-Identifier: GPL-2.0-only
*)
theory ArchVSpaceEntries_AI
imports "../VSpaceEntries_AI"
begin
context Arch begin global_naming ARM_HYP (*FIXME: arch_split*)
lemma a_type_pdD:
"a_type ko = AArch APageDirectory \<Longrightarrow> \<exists>pd. ko = ArchObj (PageDirectory pd)"
by (clarsimp)
primrec
pde_range_sz :: "pde \<Rightarrow> nat"
where
"pde_range_sz (InvalidPDE) = 0"
| "pde_range_sz (SectionPDE ptr x y) = 0"
| "pde_range_sz (SuperSectionPDE ptr x z) = 4"
| "pde_range_sz (PageTablePDE ptr) = 0"
primrec
pte_range_sz :: "pte \<Rightarrow> nat"
where
"pte_range_sz (InvalidPTE) = 0"
| "pte_range_sz (LargePagePTE ptr x y) = 4"
| "pte_range_sz (SmallPagePTE ptr x y) = 0"
primrec
pde_range :: "pde \<Rightarrow> 11 word \<Rightarrow> 11 word set"
where
"pde_range (InvalidPDE) p = {}"
| "pde_range (SectionPDE ptr x y) p = {p}"
| "pde_range (SuperSectionPDE ptr x z) p =
(if is_aligned p 4 then {x. x && ~~ mask 4 = p && ~~ mask 4} else {p})"
| "pde_range (PageTablePDE ptr) p = {p}"
primrec
pte_range :: "pte \<Rightarrow> 9 word \<Rightarrow> 9 word set"
where
"pte_range (InvalidPTE) p = {}"
| "pte_range (LargePagePTE ptr x y) p =
(if is_aligned p 4 then {x. x && ~~ mask 4 = p && ~~ mask 4} else {p})"
| "pte_range (SmallPagePTE ptr x y) p = {p}"
abbreviation "valid_pt_entries \<equiv> \<lambda>pt. valid_entries pte_range pt"
abbreviation "valid_pd_entries \<equiv> \<lambda>pd. valid_entries pde_range pd"
definition
obj_valid_pdpt :: "kernel_object \<Rightarrow> bool"
where
"obj_valid_pdpt obj \<equiv> case obj of
ArchObj (PageTable pt) \<Rightarrow> valid_pt_entries pt \<and> entries_align pte_range_sz pt
| ArchObj (PageDirectory pd) \<Rightarrow> valid_pd_entries pd \<and> entries_align pde_range_sz pd
| _ \<Rightarrow> True"
lemmas obj_valid_pdpt_simps[simp]
= obj_valid_pdpt_def
[split_simps Structures_A.kernel_object.split
arch_kernel_obj.split]
abbreviation
valid_pdpt_objs :: "'z state \<Rightarrow> bool"
where
"valid_pdpt_objs s \<equiv> \<forall>x \<in> ran (kheap s). obj_valid_pdpt x"
lemma valid_pdpt_init[iff]:
"valid_pdpt_objs init_A_st"
by (auto simp: init_A_st_def init_kheap_def valid_entries_def entries_align_def
elim!: ranE split: if_split_asm)
lemma set_object_valid_pdpt[wp]:
"\<lbrace>valid_pdpt_objs and K (obj_valid_pdpt obj)\<rbrace>
set_object ptr obj
\<lbrace>\<lambda>rv. valid_pdpt_objs\<rbrace>"
apply (simp add: set_object_def get_object_def, wp)
apply (auto simp: fun_upd_def[symmetric] del: ballI elim: ball_ran_updI)
done
crunch valid_pdpt_objs[wp]: cap_insert, cap_swap_for_delete,empty_slot "valid_pdpt_objs"
(wp: crunch_wps simp: crunch_simps ignore:set_object)
crunches
vcpu_save,vcpu_restore,vcpu_enable,get_vcpu,set_vcpu,vcpu_disable,vcpu_read_reg,
read_vcpu_register,write_vcpu_register
for valid_pdpt_objs[wp]: "valid_pdpt_objs"
(wp: crunch_wps simp: crunch_simps ignore: set_object do_machine_op)
lemma vcpu_switch_valid_pdpt_objs[wp]:
"\<lbrace>valid_pdpt_objs\<rbrace>
vcpu_switch v
\<lbrace>\<lambda>_. valid_pdpt_objs\<rbrace>"
apply (simp add: vcpu_switch_def)
apply (rule hoare_pre)
apply (wp | wpc | clarsimp)+
done
crunch valid_pdpt_objs[wp]: flush_page "valid_pdpt_objs"
(wp: crunch_wps simp: crunch_simps ignore: set_object)
lemma add_3_eq_Suc'[simp]: "n + 3 = Suc (Suc (Suc n))" by simp
lemma shift_0x3C_set:
"\<lbrakk> is_aligned p 7; 8 \<le> bits; bits < 32; len_of TYPE('a) = bits - 3 \<rbrakk> \<Longrightarrow>
(\<lambda>x. ucast (x + p && mask bits >> 3) :: ('a :: len) word) ` set [0 :: word32 , 8 .e. 0x78]
= {x. x && ~~ mask 4 = ucast (p && mask bits >> 3)}"
apply (clarsimp simp: upto_enum_step_def word_shift_by_3 image_image)
apply (subst image_cong[where N="{x. x < 2 ^ 4}"])
apply (safe, simp_all)[1]
apply (drule plus_one_helper2, simp_all)[1]
apply (drule word_le_minus_one_leq, simp_all)[1]
apply (rule_tac f="\<lambda>x. ucast (x && mask bits >> 3)" in arg_cong)
apply (rule trans[OF add.commute is_aligned_add_or], assumption)
apply (rule shiftl_less_t2n, simp_all)[1]
apply safe
apply (frule upper_bits_unset_is_l2p_32[THEN iffD2, rotated])
apply (simp add: word_bits_conv)
apply (rule word_eqI)
apply (simp add: word_ops_nth_size word_size nth_ucast nth_shiftr
nth_shiftl neg_mask_test_bit
word_bits_conv)
apply (safe, simp_all add: is_aligned_nth)[1]
apply (drule_tac x="Suc (Suc (Suc n))" in spec)
apply simp
apply (rule_tac x="ucast x && mask 4" in image_eqI)
apply (rule word_eqI[rule_format])
apply (drule_tac x=n in word_eqD)
apply (simp add: word_ops_nth_size word_size nth_ucast nth_shiftr
nth_shiftl)
apply (safe, simp_all)
apply (rule order_less_le_trans, rule and_mask_less_size)
apply (simp_all add: word_size)
done
lemma mapM_x_store_pte_updates:
"\<forall>x \<in> set xs. f x && ~~ mask pt_bits = p \<Longrightarrow>
\<lbrace>\<lambda>s. (\<not> page_table_at p s \<longrightarrow> Q s) \<and>
(\<forall>pt. ko_at (ArchObj (PageTable pt)) p s
\<longrightarrow> Q (s \<lparr> kheap := (kheap s) (p := Some (ArchObj (PageTable (\<lambda>y. if y \<in> (\<lambda>x.
ucast (f x && mask pt_bits >> 3)) ` set xs then pte else pt y)))) \<rparr>))\<rbrace>
mapM_x (\<lambda>x. store_pte (f x) pte) xs
\<lbrace>\<lambda>_. Q\<rbrace>"
apply (induct xs)
apply (simp add: mapM_x_Nil)
apply wp
apply (clarsimp simp: obj_at_def fun_upd_idem)
apply (simp add: mapM_x_Cons)
apply (rule hoare_seq_ext, assumption)
apply (thin_tac "valid P f Q" for P f Q)
apply (simp add: store_pte_def set_pt_def set_object_def)
apply (wp get_pt_wp get_object_wp)
apply (clarsimp simp: obj_at_def a_type_simps)
apply (erule rsubst[where P=Q])
apply (rule abstract_state.fold_congs[OF refl refl])
apply (rule ext, clarsimp simp add: vspace_bits_defs)
apply (rule ext, clarsimp simp add: vspace_bits_defs)
done
lemma valid_pt_entries_invalid[simp]:
"valid_pt_entries (\<lambda>x. InvalidPTE)"
by (simp add:valid_entries_def)
lemma valid_pd_entries_invalid[simp]:
"valid_pd_entries (\<lambda>x. InvalidPDE)"
by (simp add:valid_entries_def)
lemma entries_align_pte_update:
"\<lbrakk>entries_align pte_range_sz pt;
(\<forall>y. (P y) \<longrightarrow> is_aligned y (pte_range_sz pte))\<rbrakk>
\<Longrightarrow> entries_align pte_range_sz (\<lambda>y. if (P y) then pte else pt y)"
by (simp add:entries_align_def)
lemma entries_align_pde_update:
"\<lbrakk>entries_align pde_range_sz pd;
(\<forall>y. (P y) \<longrightarrow> is_aligned y (pde_range_sz pde))\<rbrakk>
\<Longrightarrow> entries_align pde_range_sz (\<lambda>y. if (P y) then pde else pd y)"
by (simp add:entries_align_def)
lemma valid_pdpt_objs_pdD:
"\<lbrakk>valid_pdpt_objs s;
kheap s ptr = Some (ArchObj (arch_kernel_obj.PageDirectory pd))\<rbrakk>
\<Longrightarrow> valid_pd_entries pd \<and> entries_align pde_range_sz pd"
by (fastforce simp:ran_def)
lemma valid_pdpt_objs_ptD:
"\<lbrakk>valid_pdpt_objs s;
kheap s ptr = Some (ArchObj (arch_kernel_obj.PageTable pt))\<rbrakk>
\<Longrightarrow> valid_pt_entries pt \<and> entries_align pte_range_sz pt"
by (fastforce simp:ran_def)
lemma mapM_x_store_invalid_pte_valid_pdpt:
"\<lbrace>valid_pdpt_objs and K (is_aligned p 7) \<rbrace>
mapM_x (\<lambda>x. store_pte (x + p) InvalidPTE) [0, 8 .e. 0x78]
\<lbrace>\<lambda>_. valid_pdpt_objs\<rbrace>"
apply (rule hoare_gen_asm)+
apply (rule hoare_pre, rule_tac p="p && ~~ mask pt_bits" in mapM_x_store_pte_updates)
apply clarsimp
apply (rule mask_out_first_mask_some[where n=7])
apply (drule_tac d=x in is_aligned_add_helper)
apply (drule subsetD[OF upto_enum_step_subset])
apply simp
apply (erule order_le_less_trans, simp)
apply (simp add: field_simps)
apply (simp add: vspace_bits_defs)
apply (clarsimp simp: ranI elim!: ranE split: if_split_asm)
apply (intro conjI)
apply (simp add: shift_0x3C_set vspace_bits_defs)
apply (rule valid_entries_overwrite_groups
[where S = "{x. x && ~~ mask 4 = ucast (p && mask 12 >> 3)}"])
apply (fastforce simp add: obj_at_def ran_def)
apply simp
apply clarsimp
apply (case_tac v)
apply (simp split:if_splits)+
apply (clarsimp)
apply (case_tac v, simp_all split:if_splits)
apply (intro conjI impI)
apply (rule disjointI)
apply (clarsimp)+
apply (rule entries_align_pte_update)
apply (clarsimp simp:obj_at_def)
apply (drule(1) valid_pdpt_objs_ptD)
apply simp
apply (simp)
done
lemma mapM_x_store_pde_updates:
"\<forall>x \<in> set xs. f x && ~~ mask pd_bits = p \<Longrightarrow>
\<lbrace>\<lambda>s. (\<not> page_directory_at p s \<longrightarrow> Q s) \<and>
(\<forall>pd. ko_at (ArchObj (PageDirectory pd)) p s
\<longrightarrow> Q (s \<lparr> kheap := (kheap s) (p := Some (ArchObj (PageDirectory (\<lambda>y. if y \<in> (\<lambda>x.
ucast (f x && mask pd_bits >> 3)) ` set xs then pde else pd y)))) \<rparr>))\<rbrace>
mapM_x (\<lambda>x. store_pde (f x) pde) xs
\<lbrace>\<lambda>_. Q\<rbrace>"
apply (induct xs)
apply (simp add: mapM_x_Nil)
apply wp
apply (clarsimp simp: obj_at_def fun_upd_idem)
apply (simp add: mapM_x_Cons)
apply (rule hoare_seq_ext, assumption)
apply (thin_tac "valid P f Q" for P f Q)
apply (simp add: store_pde_def set_pd_def set_object_def)
apply (wp get_pd_wp get_object_wp)
apply (clarsimp simp: obj_at_def a_type_simps)
apply (erule rsubst[where P=Q])
apply (rule abstract_state.fold_congs[OF refl refl])
apply (rule ext, clarsimp simp add: vspace_bits_defs)
apply (rule ext, clarsimp simp add: vspace_bits_defs)
done
lemma mapM_x_store_pde_valid_pdpt_objs:
"\<lbrace>valid_pdpt_objs and K (is_aligned p 7)\<rbrace>
mapM_x (\<lambda>x. store_pde (x + p) InvalidPDE) [0, 8 .e. 0x78]
\<lbrace>\<lambda>_. valid_pdpt_objs\<rbrace>"
apply (rule hoare_gen_asm)+
apply (rule hoare_pre, rule_tac p="p && ~~ mask pd_bits" in mapM_x_store_pde_updates)
apply clarsimp
apply (rule mask_out_first_mask_some[where n=7])
apply (drule_tac d=x in is_aligned_add_helper)
apply (drule subsetD[OF upto_enum_step_subset])
apply simp
apply (erule order_le_less_trans, simp)
apply (simp add: field_simps)
apply (simp add: vspace_bits_defs)
apply (clarsimp simp: ranI elim!: ranE split: if_split_asm)
apply (simp add: shift_0x3C_set vspace_bits_defs)
apply (rule conjI)
apply (rule_tac valid_entries_overwrite_groups
[where S = "{x. x && ~~ mask 4 = ucast (p && mask 14 >> 3)}"])
apply (fastforce simp add: obj_at_def ran_def)
apply fastforce
apply clarsimp
apply (case_tac v, simp_all split:if_splits)
apply clarsimp
apply (case_tac v, simp_all split:if_splits)
apply (intro conjI impI allI)
apply (rule disjointI)
apply clarsimp
apply (rule entries_align_pde_update)
apply (clarsimp simp:obj_at_def)
apply (drule valid_pdpt_objs_pdD)
apply (simp add:pd_bits_def pageBits_def)
apply simp
apply simp
done
lemma store_invalid_pde_valid_pdpt:
"\<lbrace>valid_pdpt_objs and
(\<lambda>s. \<forall>pd. ko_at (ArchObj (PageDirectory pd)) (p && ~~ mask pd_bits) s
\<longrightarrow> pde = InvalidPDE)\<rbrace>
store_pde p pde \<lbrace>\<lambda>rv. valid_pdpt_objs\<rbrace>"
apply (simp add: store_pde_def set_pd_def, wp get_object_wp)
apply (clarsimp simp: obj_at_def)
apply (intro conjI)
apply (rule valid_entries_overwrite_0, simp_all)
apply (fastforce simp: ran_def)
apply (simp add:fun_upd_def)
apply (rule entries_align_pde_update)
apply (drule(1) valid_pdpt_objs_pdD)
apply simp
apply simp
done
lemma store_pde_non_master_valid_pdpt:
"\<lbrace>valid_pdpt_objs and
(\<lambda>s. \<forall>pd. ko_at (ArchObj (PageDirectory pd)) (p && ~~ mask pd_bits) s
\<longrightarrow> (pde_range_sz (pd (ucast (p && mask pd_bits >> 3) && ~~ mask 4)) = 0
\<and> pde_range_sz pde = 0))\<rbrace>
store_pde p pde \<lbrace>\<lambda>rv. valid_pdpt_objs\<rbrace>"
apply (simp add: store_pde_def set_pd_def, wp get_object_wp)
apply (clarsimp simp: obj_at_def)
apply (intro conjI)
apply (rule valid_entries_overwrite_0)
apply (fastforce simp:ran_def)
apply (drule bspec)
apply fastforce
apply (case_tac "pd pa")
apply (simp_all add: vspace_bits_defs)
apply (case_tac pde,simp_all)
apply (case_tac pde,simp_all)
apply (case_tac pde,simp_all)
apply (clarsimp simp: is_aligned_neg_mask_eq)+
apply (simp add:fun_upd_def)
apply (rule entries_align_pde_update)
apply (drule(1) valid_pdpt_objs_pdD,simp)
apply simp
done
lemma store_invalid_pte_valid_pdpt:
"\<lbrace>valid_pdpt_objs and
(\<lambda>s. \<forall>pt. ko_at (ArchObj (PageTable pt)) (p && ~~ mask pt_bits) s
\<longrightarrow> pte = InvalidPTE)\<rbrace>
store_pte p pte \<lbrace>\<lambda>rv. valid_pdpt_objs\<rbrace>"
apply (simp add: store_pte_def set_pt_def, wp get_object_wp)
apply (clarsimp simp: obj_at_def)
apply (intro conjI)
apply (rule valid_entries_overwrite_0, simp_all)
apply (fastforce simp: ran_def)
apply (simp add:fun_upd_def)
apply (rule entries_align_pte_update)
apply (drule (1) valid_pdpt_objs_ptD,simp)
apply simp
done
lemma store_pte_non_master_valid_pdpt:
"\<lbrace>valid_pdpt_objs and
(\<lambda>s. \<forall>pt. ko_at (ArchObj (PageTable pt)) (p && ~~ mask pt_bits) s
\<longrightarrow> (pte_range_sz (pt (ucast (p && mask pt_bits >> 3) && ~~ mask 4)) = 0
\<and> pte_range_sz pte = 0))\<rbrace>
store_pte p pte \<lbrace>\<lambda>rv. valid_pdpt_objs\<rbrace>"
apply (simp add: store_pte_def set_pt_def, wp get_object_wp)
apply (clarsimp simp: obj_at_def)
apply (intro conjI)
apply (rule valid_entries_overwrite_0)
apply (fastforce simp:ran_def)
apply (drule bspec)
apply fastforce
apply (case_tac "pt pa")
apply simp
apply (case_tac pte,simp_all)
apply (clarsimp simp: is_aligned_neg_mask_eq vspace_bits_defs)
apply (case_tac pte,simp_all)
apply (simp add:fun_upd_def)
apply (rule entries_align_pte_update)
apply (drule (1) valid_pdpt_objs_ptD,simp)
apply simp
done
lemma unmap_page_valid_pdpt[wp]:
"\<lbrace>valid_pdpt_objs\<rbrace> unmap_page sz asid vptr pptr \<lbrace>\<lambda>rv. valid_pdpt_objs\<rbrace>"
apply (simp add: unmap_page_def mapM_discarded
cong: vmpage_size.case_cong)
including no_pre apply wp
prefer 2
apply (rule valid_validE[OF find_pd_for_asid_inv])
apply (rule hoare_pre)
apply (wp get_object_wp get_pte_wp get_pde_wp lookup_pt_slot_inv_any
store_invalid_pte_valid_pdpt
store_invalid_pde_valid_pdpt
mapM_x_store_invalid_pte_valid_pdpt mapM_x_store_pde_valid_pdpt_objs
| simp add: mapM_x_map vspace_bits_defs largePagePTE_offsets_def superSectionPDE_offsets_def
| wpc | simp add: check_mapping_pptr_def)+
apply (simp add: fun_upd_def[symmetric] is_aligned_mask[symmetric])
done
crunch valid_pdpt_objs[wp]: flush_table "valid_pdpt_objs"
(wp: crunch_wps simp: crunch_simps)
(*
NOTE: This isn't true, but is the main reason flush_table_kheap does not work now,
I guess it is possible to prove this for a P that does not care about VCPU
but let's wait and see where and how this lemma is used.
lemma vcpu_switch_kheap[wp]:"\<lbrace>\<lambda>s. P (kheap s)\<rbrace> vcpu_switch v \<lbrace>\<lambda>_ s. P (kheap s)\<rbrace>"
crunch kheap[wp]: flush_table "\<lambda>s. P (kheap s)"
(wp: crunch_wps simp: crunch_simps)
FIXME: Delete
*)
crunch kheap[wp]: get_cap "\<lambda>s. P (kheap s)"
(wp: crunch_wps simp: crunch_simps)
lemma unmap_page_table_valid_pdpt_objs[wp]:
"\<lbrace>valid_pdpt_objs\<rbrace> unmap_page_table asid vptr pt \<lbrace>\<lambda>rv. valid_pdpt_objs\<rbrace>"
apply (simp add: unmap_page_table_def)
including no_pre apply (wp get_object_wp store_invalid_pde_valid_pdpt | wpc)+
apply (simp add: obj_at_def)
apply (simp add: page_table_mapped_def)
apply (wp get_pde_wp | wpc)+
apply simp
apply (rule hoare_post_impErr, rule valid_validE,
rule find_pd_for_asid_inv, simp_all)
done
lemma set_simple_ko_valid_pdpt_objs[wp]:
"\<lbrace>\<lambda>s. \<forall>x\<in>ran (kheap s). obj_valid_pdpt x\<rbrace>
set_simple_ko param_a param_b param_c \<lbrace>\<lambda>_ s. \<forall>x\<in>ran (kheap s). obj_valid_pdpt x\<rbrace>"
unfolding set_simple_ko_def
apply (subst option.disc_eq_case(2))
apply (wpsimp wp: set_object_valid_pdpt[THEN hoare_set_object_weaken_pre]
get_object_wp
simp: a_type_simps obj_at_def)
apply (clarsimp simp: a_type_def
split: kernel_object.splits)
done
crunch valid_pdpt_objs[wp]: finalise_cap, cap_swap_for_delete, empty_slot "valid_pdpt_objs"
(wp: crunch_wps select_wp preemption_point_inv simp: crunch_simps unless_def ignore:set_object)
lemma preemption_point_valid_pdpt_objs[wp]:
"\<lbrace>valid_pdpt_objs\<rbrace> preemption_point \<lbrace>\<lambda>rv. valid_pdpt_objs\<rbrace>"
by (wp preemption_point_inv | simp)+
lemmas cap_revoke_preservation_valid_pdpt_objs = cap_revoke_preservation[OF _,
where E=valid_pdpt_objs,
simplified, THEN validE_valid]
lemmas rec_del_preservation_valid_pdpt_objs = rec_del_preservation[OF _ _ _ _,
where P=valid_pdpt_objs, simplified]
crunch valid_pdpt_objs[wp]: cap_delete, cap_revoke "valid_pdpt_objs"
(rule: cap_revoke_preservation_valid_pdpt_objs)
crunch valid_pdpt_objs[wp]: invalidate_tlb_by_asid, page_table_mapped
"valid_pdpt_objs"
lemma mapM_x_copy_pde_updates:
"\<lbrakk> \<forall>x \<in> set xs. f x && ~~ mask pd_bits = 0; is_aligned p pd_bits;
is_aligned p' pd_bits \<rbrakk> \<Longrightarrow>
\<lbrace>\<lambda>s. (\<not> page_directory_at p s \<longrightarrow> Q s) \<and> (\<not> page_directory_at p' s \<longrightarrow> Q s) \<and>
(\<forall>pd pd'. ko_at (ArchObj (PageDirectory pd)) p s
\<and> ko_at (ArchObj (PageDirectory pd')) p' s
\<longrightarrow> Q (s \<lparr> kheap := (kheap s) (p' := Some (ArchObj (PageDirectory (\<lambda>y. if y \<in> (\<lambda>x.
ucast (f x && mask pd_bits >> 3)) ` set xs then pd y else pd' y)))) \<rparr>))\<rbrace>
mapM_x (\<lambda>x. get_pde (p + f x) >>= store_pde (p' + f x)) xs
\<lbrace>\<lambda>_. Q\<rbrace>"
including no_pre
apply (induct xs)
apply (simp add: mapM_x_Nil)
apply wp
apply (clarsimp simp: obj_at_def fun_upd_idem dest!: a_type_pdD)
apply (simp add: mapM_x_Cons)
apply wp
apply (thin_tac "valid P f Q" for P f Q)
apply (simp add: store_pde_def set_pd_def set_object_def
cong: bind_cong split del: if_split)
apply (wp get_object_wp get_pde_wp)
apply (clarsimp simp: obj_at_def a_type_simps mask_out_add_aligned[symmetric]
split del: if_split)
apply (simp add: a_type_simps, safe)
apply (erule rsubst[where P=Q])
apply (rule abstract_state.fold_congs[OF refl refl])
apply (rule ext, clarsimp)
apply (rule ext, simp)
apply (erule rsubst[where P=Q])
apply (rule abstract_state.fold_congs[OF refl refl])
apply (rule ext, clarsimp simp add: vspace_bits_defs)
apply (rule ext, simp add: mask_add_aligned vspace_bits_defs)
done
lemma copy_global_mappings_valid_pdpt_objs[wp]:
"\<lbrace>valid_pdpt_objs and valid_arch_state and pspace_aligned
and K (is_aligned p pd_bits)\<rbrace>
copy_global_mappings p \<lbrace>\<lambda>rv. valid_pdpt_objs\<rbrace>"
apply (rule hoare_gen_asm)
apply (simp add: copy_global_mappings_def)
apply wp
apply auto
done
lemma in_pte_rangeD:
"x \<in> pte_range v y \<Longrightarrow> x && ~~ mask 4 = y && ~~ mask 4"
by (case_tac v,simp_all split:if_splits)
lemma in_pde_rangeD:
"x \<in> pde_range v y \<Longrightarrow> x && ~~ mask 4 = y && ~~ mask 4"
by (case_tac v,simp_all split:if_splits)
lemma mapM_x_store_pte_valid_pdpt2:
"\<lbrace>valid_pdpt_objs and K (is_aligned ptr pt_bits)\<rbrace>
mapM_x (\<lambda>x. store_pte x InvalidPTE) [ptr, ptr + 8 .e. ptr + 2 ^ pt_bits - 1]
\<lbrace>\<lambda>_. valid_pdpt_objs\<rbrace>"
apply (rule hoare_gen_asm)+
apply (rule mapM_x_wp')
apply (simp add:store_pte_def set_pt_def)
apply (wp get_pt_wp get_object_wp)
apply (clarsimp simp: mask_in_range
split:Structures_A.kernel_object.splits
arch_kernel_obj.splits)
apply (rule conjI)
apply (rule valid_entries_overwrite_0)
apply (fastforce simp:ran_def obj_at_def)
apply simp
apply (simp add:fun_upd_def obj_at_def)
apply (rule entries_align_pte_update)
apply (drule (1) valid_pdpt_objs_ptD,simp)
apply simp
done
lemma mapM_x_store_pde_valid_pdpt2:
"\<lbrace>valid_pdpt_objs and K (is_aligned pd pd_bits)\<rbrace>
mapM_x (\<lambda>x. store_pde ((x << 3) + pd) pde.InvalidPDE)
[0.e.(kernel_base >> 20) - 1]
\<lbrace>\<lambda>rv. valid_pdpt_objs\<rbrace>"
apply (rule hoare_gen_asm)
apply (rule mapM_x_wp')
apply (simp add:store_pde_def set_pd_def)
apply (wp get_pd_wp get_object_wp)
apply (clarsimp simp: mask_in_range
split:Structures_A.kernel_object.splits
arch_kernel_obj.splits)
apply (rule conjI)
apply (rule valid_entries_overwrite_0)
apply (fastforce simp:ran_def obj_at_def)
apply simp
apply (simp add:fun_upd_def obj_at_def)
apply (rule entries_align_pde_update)
apply (drule (1) valid_pdpt_objs_pdD,simp)
apply simp
done
lemma non_invalid_in_pde_range:
"pde \<noteq> InvalidPDE
\<Longrightarrow> x \<in> pde_range pde x"
by (case_tac pde,simp_all)
lemma non_invalid_in_pte_range:
"pte \<noteq> InvalidPTE
\<Longrightarrow> x \<in> pte_range pte x"
by (case_tac pte,simp_all)
crunch valid_pdpt_objs[wp]: cancel_badged_sends "valid_pdpt_objs"
(simp: crunch_simps filterM_mapM wp: crunch_wps ignore: filterM)
crunch valid_pdpt_objs[wp]: cap_move, cap_insert "valid_pdpt_objs"
lemma invoke_cnode_valid_pdpt_objs[wp]:
"\<lbrace>valid_pdpt_objs and invs and valid_cnode_inv i\<rbrace> invoke_cnode i \<lbrace>\<lambda>rv. valid_pdpt_objs\<rbrace>"
apply (simp add: invoke_cnode_def)
apply (rule hoare_pre)
apply (wp get_cap_wp | wpc | simp split del: if_split)+
done
crunch valid_pdpt_objs[wp]: invoke_tcb "valid_pdpt_objs"
(wp: check_cap_inv crunch_wps simp: crunch_simps
ignore: check_cap_at)
lemma invoke_domain_valid_pdpt_objs[wp]:
"\<lbrace>valid_pdpt_objs\<rbrace> invoke_domain t d \<lbrace>\<lambda>rv. valid_pdpt_objs\<rbrace>"
by (simp add: invoke_domain_def | wp)+
crunch valid_pdpt_objs[wp]: set_extra_badge, transfer_caps_loop "valid_pdpt_objs"
(rule: transfer_caps_loop_pres)
crunch valid_pdpt_objs[wp]: send_ipc, send_signal,
do_reply_transfer, invoke_irq_control, invoke_irq_handler "valid_pdpt_objs"
(wp: crunch_wps simp: crunch_simps
ignore: clearMemory const_on_failure set_object)
lemma valid_pdpt_objs_trans_state[simp]: "valid_pdpt_objs (trans_state f s) = valid_pdpt_objs s"
apply (simp add: obj_valid_pdpt_def)
done
lemma retype_region_valid_pdpt[wp]:
"\<lbrace>valid_pdpt_objs\<rbrace> retype_region ptr bits o_bits type dev \<lbrace>\<lambda>rv. valid_pdpt_objs\<rbrace>"
apply (simp add: retype_region_def split del: if_split)
apply (wp | simp only: valid_pdpt_objs_trans_state trans_state_update[symmetric])+
apply (clarsimp simp: retype_addrs_fold foldr_upd_app_if ranI
elim!: ranE split: if_split_asm simp del:fun_upd_apply)
apply (simp add: default_object_def default_arch_object_def
split: Structures_A.kernel_object.splits
Structures_A.apiobject_type.split aobject_type.split)+
apply (simp add:entries_align_def)
done
lemma detype_valid_pdpt[elim!]:
"valid_pdpt_objs s \<Longrightarrow> valid_pdpt_objs (detype S s)"
by (auto simp add: detype_def ran_def)
crunch valid_pdpt_objs[wp]: create_cap "valid_pdpt_objs"
(ignore: clearMemory simp: crunch_simps unless_def)
lemma init_arch_objects_valid_pdpt:
"\<lbrace>valid_pdpt_objs and pspace_aligned and valid_arch_state
and K (\<exists>us sz. orefs = retype_addrs ptr type n us
\<and> range_cover ptr sz (obj_bits_api type us) n)\<rbrace>
init_arch_objects type ptr n obj_sz orefs
\<lbrace>\<lambda>rv. valid_pdpt_objs\<rbrace>"
apply (rule hoare_gen_asm)+
apply (clarsimp simp: init_arch_objects_def
split del: if_split)
apply (rule hoare_pre)
apply (wp | wpc)+
apply (rule_tac Q="\<lambda>rv. valid_pdpt_objs and pspace_aligned and valid_arch_state"
in hoare_post_imp, simp)
apply (rule mapM_x_wp')
apply (rule hoare_pre, wp copy_global_mappings_valid_pdpt_objs)
apply clarsimp
apply (drule_tac sz=sz in retype_addrs_aligned)
apply (simp add:range_cover_def)
apply (drule range_cover.sz,simp add:word_bits_def)
apply (simp add:range_cover_def)
apply (clarsimp simp:obj_bits_api_def pd_bits_def pageBits_def
arch_kobj_size_def default_arch_object_def range_cover_def)+
apply wp
apply simp
done
lemma delete_objects_valid_pdpt:
"\<lbrace>valid_pdpt_objs\<rbrace> delete_objects ptr bits \<lbrace>\<lambda>rv. valid_pdpt_objs\<rbrace>"
by (rule delete_objects_reduct) (wp detype_valid_pdpt)
crunch valid_pdpt[wp]: reset_untyped_cap "valid_pdpt_objs"
(wp: mapME_x_inv_wp crunch_wps simp: crunch_simps unless_def)
lemma invoke_untyped_valid_pdpt[wp]:
"\<lbrace>valid_pdpt_objs and invs and ct_active
and valid_untyped_inv ui\<rbrace>
invoke_untyped ui
\<lbrace>\<lambda>rv. valid_pdpt_objs\<rbrace>"
apply (rule hoare_pre, rule invoke_untyped_Q)
apply (wp init_arch_objects_valid_pdpt | simp)+
apply (auto simp: post_retype_invs_def split: if_split_asm)[1]
apply (wp | simp)+
done
crunch valid_pdpt_objs[wp]: perform_asid_pool_invocation,
perform_asid_control_invocation "valid_pdpt_objs"
(ignore: delete_objects wp: delete_objects_valid_pdpt static_imp_wp)
abbreviation (input)
"safe_pt_range \<equiv> \<lambda>slots s. obj_at (\<lambda>ko. \<exists>pt. ko = ArchObj (PageTable pt)
\<and> (\<forall>x\<in>set (tl slots). pt (ucast (x && mask pt_bits >> 3))
= pte.InvalidPTE))
(hd slots && ~~ mask pt_bits) s"
abbreviation (input)
"safe_pd_range \<equiv> \<lambda>slots s. obj_at (\<lambda>ko. \<exists>pd. ko = ArchObj (PageDirectory pd)
\<and> (\<forall>x\<in>set (tl slots). pd (ucast (x && mask pd_bits >> 3))
= pde.InvalidPDE))
(hd slots && ~~ mask pd_bits) s"
definition
"page_inv_entries_pre entries \<equiv>
let slots = (case entries of Inl (pte, slots) \<Rightarrow> slots | Inr (pde, slots) \<Rightarrow> slots)
in (if \<exists>sl. slots = [sl]
then case entries of
Inl (pte, _) \<Rightarrow> obj_at (\<lambda>ko. \<exists>pt pte. ko = ArchObj (PageTable pt)
\<and> pt (ucast (hd slots && mask pt_bits >> 3) && ~~ mask 4) = pte
\<and> pte_range_sz pte = 0)
(hd slots && ~~ mask pt_bits)
and K (pte_range_sz pte = 0)
| Inr (pde, _) \<Rightarrow> obj_at (\<lambda>ko. \<exists>pd pde. ko = ArchObj (PageDirectory pd)
\<and> pd (ucast (head slots && mask pd_bits >> 3) && ~~ mask 4)
= pde \<and> pde_range_sz pde = 0)
(hd slots && ~~ mask pd_bits)
and K (pde_range_sz pde = 0)
else (\<lambda>s. (\<exists>p. is_aligned p 7 \<and> slots = map (\<lambda>x. x + p) [0, 8 .e. 0x78])))
and K (case entries of Inl (pte,slots) \<Rightarrow> pte \<noteq> InvalidPTE
| Inr (pde,slots) \<Rightarrow> pde \<noteq> InvalidPDE)"
definition
"page_inv_entries_safe entries \<equiv>
let slots = (case entries of Inl (pte, slots) \<Rightarrow> slots | Inr (pde, slots) \<Rightarrow> slots)
in if \<exists>sl. slots = [sl]
then case entries of
Inl (pte, _) \<Rightarrow> obj_at (\<lambda>ko. \<exists>pt pte. ko = ArchObj (PageTable pt)
\<and> pt (ucast (hd slots && mask pt_bits >> 3) && ~~ mask 4) = pte
\<and> pte_range_sz pte = 0)
(hd slots && ~~ mask pt_bits)
and K (pte_range_sz pte = 0)
| Inr (pde, _) \<Rightarrow> obj_at (\<lambda>ko. \<exists>pd pde. ko = ArchObj (PageDirectory pd)
\<and> pd (ucast (head slots && mask pd_bits >> 3) && ~~ mask 4)
= pde \<and> pde_range_sz pde = 0)
(hd slots && ~~ mask pd_bits)
and K (pde_range_sz pde = 0)
else (\<lambda>s. (\<exists>p. is_aligned p 7 \<and> slots = map (\<lambda>x. x + p) [0, 8 .e. 0x78]
\<and> (case entries of
Inl (pte, _) \<Rightarrow> safe_pt_range slots s
| Inr (pde, _) \<Rightarrow> safe_pd_range slots s
)))"
definition
"page_inv_duplicates_valid iv \<equiv> case iv of
PageMap asid cap ct_slot entries \<Rightarrow>
page_inv_entries_safe entries
| _ \<Rightarrow> \<top>"
lemma pte_range_interD:
"pte_range pte p \<inter> pte_range pte' p' \<noteq> {}
\<Longrightarrow> pte \<noteq> InvalidPTE \<and> pte' \<noteq> InvalidPTE
\<and> p && ~~ mask 4 = p' && ~~ mask 4"
apply (drule int_not_emptyD)
apply (case_tac pte,simp_all split:if_splits)
apply (case_tac pte',simp_all split:if_splits)
apply clarsimp
apply (case_tac pte',simp_all split:if_splits)
apply (case_tac pte', simp_all split:if_splits)
done
lemma pde_range_interD:
"pde_range pde p \<inter> pde_range pde' p' \<noteq> {}
\<Longrightarrow> pde \<noteq> InvalidPDE \<and> pde' \<noteq> InvalidPDE
\<and> p && ~~ mask 4 = p' && ~~ mask 4"
apply (drule int_not_emptyD)
apply (case_tac pde,simp_all split:if_splits)
apply (case_tac pde',simp_all split:if_splits)
apply (case_tac pde',simp_all split:if_splits)
apply clarsimp
apply (case_tac pde', simp_all split:if_splits)
apply (case_tac pde', simp_all split:if_splits)
done
lemma pte_range_sz_le:
"(pte_range_sz pte) \<le> 4"
by (case_tac pte,simp_all)
lemma pde_range_sz_le:
"(pde_range_sz pde) \<le> 4"
by (case_tac pde,simp_all)
(* BUG: revisit the following lemmas, moved from ArchAcc_R.thy *)
lemma mask_pd_bits_shift_ucast_align[simp]:
"is_aligned (ucast (p && mask pd_bits >> 3)::11 word) 4 =
is_aligned ((p::word32) >> 3) 4"
by (clarsimp simp: is_aligned_mask mask_def vspace_bits_defs) word_bitwise
lemma mask_pt_bits_shift_ucast_align[simp]:
"is_aligned (ucast (p && mask pt_bits >> 3)::9 word) 4 =
is_aligned ((p::word32) >> 3) 4"
by (clarsimp simp: is_aligned_mask mask_def vspace_bits_defs)
word_bitwise
lemma ucast_pt_index:
"\<lbrakk>is_aligned (p::word32) (4 + pte_bits)\<rbrakk>
\<Longrightarrow> ucast ((pa && mask 4) + (ucast (p && mask pt_bits >> pte_bits)::9 word))
= ucast (pa && mask 4) + (p && mask pt_bits >> pte_bits)"
apply (simp add:is_aligned_mask mask_def vspace_bits_defs)
apply word_bitwise
apply (auto simp: carry_def)
done
lemma unat_ucast_9_32:
fixes x :: "9 word"
shows "unat (ucast x :: word32) = unat x"
unfolding ucast_def unat_def
apply (subst int_word_uint)
apply (subst mod_pos_pos_trivial)
apply simp
apply (rule lt2p_lem)
apply simp
apply simp
done
lemma store_pte_valid_pdpt:
"\<lbrace>valid_pdpt_objs and page_inv_entries_safe (Inl (pte, slots))\<rbrace>
store_pte (hd slots) pte \<lbrace>\<lambda>rv. valid_pdpt_objs\<rbrace>"
apply (rule hoare_name_pre_state)
apply (clarsimp simp:page_inv_entries_safe_def split:if_splits)
apply (clarsimp simp:store_pte_def set_pt_def)
apply (wp get_pt_wp get_object_wp)
apply (clarsimp simp:obj_at_def pte_bits_def
split:pte.splits arch_kernel_obj.splits)
apply (rule conjI)
apply (drule(1) valid_pdpt_objs_ptD)
apply (rule valid_entries_overwrite_0)
apply simp
apply (case_tac pte)
apply simp+
apply (case_tac "pta p",simp_all)
apply (clarsimp simp: is_aligned_neg_mask_eq)
apply (simp add:fun_upd_def)
apply (rule entries_align_pte_update)
apply (drule (1) valid_pdpt_objs_ptD,simp)
apply simp
apply (simp add:hd_map_simp upto_enum_def upto_enum_step_def)
apply (clarsimp simp:store_pte_def set_pt_def)
apply (wp get_pt_wp get_object_wp)
apply (clarsimp simp:obj_at_def pte_bits_def
split:pte.splits arch_kernel_obj.splits)
apply (drule(1) valid_pdpt_objs_ptD)
apply (rule conjI)
apply (rule valid_entries_overwrite_0)
apply simp
apply (rule ccontr)
apply (drule pte_range_interD)
apply clarsimp
apply (simp add:ucast_neg_mask)
apply (subst (asm) is_aligned_neg_mask_eq[where n = 4])
apply (rule is_aligned_shiftr[OF is_aligned_andI1])
apply simp
apply (drule_tac x = "((p && ~~ mask pt_bits) + ((ucast pa) << 3))" in bspec)
apply (clarsimp simp: tl_map_simp upto_0_to_n2 image_def)
apply (rule_tac x = "unat (((ucast pa)::word32) - (p && mask pt_bits >> 3))" in bexI)
apply (simp add:ucast_nat_def shiftl_t2n mask_out_sub_mask)
apply (subst shiftl_t2n[where n = 3,simplified field_simps,simplified,symmetric])
apply (subst shiftr_shiftl1)
apply simp+
apply (subst is_aligned_neg_mask_eq)
apply (erule is_aligned_andI1[OF is_aligned_weaken])
apply simp
apply simp
apply simp
apply (drule_tac s = "ucast (p && mask pt_bits >> 3)" in sym)
apply (simp add:mask_out_sub_mask field_simps)
apply (drule_tac f = "ucast::(9 word\<Rightarrow>word32)" in arg_cong)
apply (simp add: ucast_pt_index[simplified pte_bits_def])
apply (simp add:unat_ucast_9_32)
apply (rule conjI)
apply (subgoal_tac "unat (pa && mask 4)\<noteq> 0")
apply simp
apply (simp add:unat_gt_0)
apply (rule unat_less_helper)
apply (rule le_less_trans[OF word_and_le1])
apply (simp add:mask_def)
apply (simp add:field_simps neg_mask_add_mask)
apply (thin_tac "ucast y = x" for y x)
apply (subst (asm) less_mask_eq[where n = pt_bits])
apply (rule shiftl_less_t2n)
apply (simp add:vspace_bits_defs)
apply word_bitwise
apply (simp add:vspace_bits_defs)
apply (subst (asm) shiftl_shiftr_id)
apply simp
apply (simp,word_bitwise)
apply (simp add:ucast_ucast_id)
apply (simp add:fun_upd_def entries_align_def)
apply (rule is_aligned_weaken[OF _ pte_range_sz_le])
apply (simp add:is_aligned_shiftr)
done
lemma ucast_pd_index:
"\<lbrakk>is_aligned (p::word32) (4 + pde_bits)\<rbrakk>
\<Longrightarrow> ucast ((pa && mask 4) + (ucast (p && mask pd_bits >> pde_bits)::11 word))
= ucast (pa && mask 4) + (p && mask pd_bits >> pde_bits)"
apply (simp add:is_aligned_mask mask_def vspace_bits_defs)
apply word_bitwise
apply (auto simp:carry_def)
done
lemma unat_ucast_11_32:
"unat (ucast (x::(11 word))::word32) = unat x"
apply (subst unat_ucast)
apply (rule mod_less)
apply (rule less_le_trans[OF unat_lt2p])
apply simp
done
lemma ucast_pd_index11:
"\<lbrakk>is_aligned (p::word32) 7\<rbrakk>
\<Longrightarrow> ucast ((pa && mask 4) + (ucast (p && mask 14 >> 3)::11 word))
= ucast (pa && mask 4) + (p && mask 14 >> 3)"
apply (simp add:is_aligned_mask mask_def)
apply word_bitwise
apply (auto simp:carry_def)
done
lemma store_pde_valid_pdpt:
"\<lbrace>valid_pdpt_objs and page_inv_entries_safe (Inr (pde, slots))\<rbrace>
store_pde (hd slots) pde \<lbrace>\<lambda>rv. valid_pdpt_objs\<rbrace>"
apply (rule hoare_name_pre_state)
apply (clarsimp simp:page_inv_entries_safe_def split:if_splits)
apply (clarsimp simp:store_pde_def set_pd_def)
apply (wp get_pd_wp get_object_wp)
apply (clarsimp simp:obj_at_def pde_bits_def
split:pde.splits arch_kernel_obj.splits)
apply (drule(1) valid_pdpt_objs_pdD)
apply (rule conjI)
apply (rule valid_entries_overwrite_0)
apply simp
apply (case_tac pde,simp_all)
apply (case_tac "pda p",simp_all)
apply (clarsimp simp: is_aligned_neg_mask_eq)
apply (case_tac "pda p",simp_all)
apply (clarsimp simp: is_aligned_neg_mask_eq)
apply (simp add:fun_upd_def)
apply (rule entries_align_pde_update)
apply simp+
apply (simp add:hd_map_simp upto_enum_def upto_enum_step_def)
apply (clarsimp simp:store_pde_def set_pd_def)
apply (wp get_pd_wp get_object_wp)
apply (clarsimp simp:obj_at_def pde_bits_def
split:pde.splits arch_kernel_obj.splits)
apply (drule(1) valid_pdpt_objs_pdD)
apply (rule conjI)
apply (rule valid_entries_overwrite_0)
apply simp
apply (rule ccontr)
apply (drule pde_range_interD)
apply clarsimp
apply (simp add:ucast_neg_mask)
apply (subst (asm) is_aligned_neg_mask_eq[where n = 4])
apply (rule is_aligned_shiftr[OF is_aligned_andI1])
apply simp
apply (drule_tac x = "((p && ~~ mask pd_bits) + ((ucast pa) << 3))" in bspec)
apply (clarsimp simp: tl_map_simp upto_0_to_n2 image_def)
apply (rule_tac x = "unat (((ucast pa)::word32) - (p && mask pd_bits >> 3))" in bexI)
apply (simp add:ucast_nat_def shiftl_t2n mask_out_sub_mask)
apply (subst shiftl_t2n[where n = 3,simplified field_simps,simplified,symmetric])
apply (subst shiftr_shiftl1)
apply simp+
apply (subst is_aligned_neg_mask_eq)
apply (erule is_aligned_andI1[OF is_aligned_weaken])
apply simp
apply simp
apply simp
apply (drule_tac s = "ucast (p && mask pd_bits >> 3)" in sym)
apply (simp add:mask_out_sub_mask field_simps)
apply (drule_tac f = "ucast::(11 word\<Rightarrow>word32)" in arg_cong)
apply (simp add:ucast_pd_index[simplified pde_bits_def])
apply (simp add:unat_ucast_11_32)
apply (rule conjI)
apply (subgoal_tac "unat (pa && mask 4)\<noteq> 0")
apply simp
apply (simp add:unat_gt_0)
apply (rule unat_less_helper)
apply (rule le_less_trans[OF word_and_le1])
apply (simp add:mask_def)
apply (simp add:field_simps neg_mask_add_mask)
apply (thin_tac "ucast y = x" for y x)
apply (subst (asm) less_mask_eq[where n = pd_bits])
apply (rule shiftl_less_t2n)
apply (simp add:vspace_bits_defs)
apply word_bitwise
apply (simp add:vspace_bits_defs)
apply (subst (asm) shiftl_shiftr_id)
apply simp
apply (simp,word_bitwise)
apply (simp add:ucast_ucast_id)
apply (simp add:entries_align_def)
apply (rule is_aligned_weaken[OF _ pde_range_sz_le])
apply (simp add:is_aligned_shiftr)
done
lemma set_cap_page_inv_entries_safe:
"\<lbrace>page_inv_entries_safe x\<rbrace> set_cap y z \<lbrace>\<lambda>_. page_inv_entries_safe x\<rbrace>"
apply (simp add:page_inv_entries_safe_def set_cap_def split_def
get_object_def set_object_def)
apply (wp | wpc)+
apply (case_tac x)
apply (auto simp:obj_at_def
Let_def split:if_splits option.splits)
done
crunch inv[wp]: pte_check_if_mapped, pde_check_if_mapped "\<lambda>s. P s"
lemma perform_page_valid_pdpt[wp]:
"\<lbrace>valid_pdpt_objs and valid_page_inv pinv and page_inv_duplicates_valid pinv\<rbrace>
perform_page_invocation pinv \<lbrace>\<lambda>rv. valid_pdpt_objs\<rbrace>"
apply (simp add: perform_page_invocation_def page_inv_duplicates_valid_def)
apply (cases pinv,
simp_all add: mapM_discarded page_inv_entries_safe_def
split: sum.split arch_cap.split option.split,
safe intro!: hoare_gen_asm hoare_gen_asm[unfolded K_def],
simp_all add: mapM_x_Nil mapM_x_Cons mapM_x_map)
apply (wp store_pte_valid_pdpt store_pde_valid_pdpt get_master_pte_wp get_master_pde_wp
store_pte_non_master_valid_pdpt store_pde_non_master_valid_pdpt
mapM_x_wp'[OF store_invalid_pte_valid_pdpt
[where pte=pte.InvalidPTE, simplified]]
mapM_x_wp'[OF store_invalid_pde_valid_pdpt
[where pde=pde.InvalidPDE, simplified]]
set_cap_page_inv_entries_safe
hoare_vcg_imp_lift[OF set_cap_arch_obj_neg] hoare_vcg_all_lift
| clarsimp simp: cte_wp_at_weakenE[OF _ TrueI] obj_at_def
pte_range_sz_def pde_range_sz_def swp_def valid_page_inv_def
valid_slots_def page_inv_entries_safe_def pte_check_if_mapped_def
pde_check_if_mapped_def
split: pte.splits pde.splits
| wp (once) hoare_drop_imps)+
done
definition
"pti_duplicates_valid iv \<equiv>
case iv of PageTableMap cap ct_slot pde pd_slot
\<Rightarrow> obj_at (\<lambda>ko. \<exists>pd pde. ko = ArchObj (PageDirectory pd)
\<and> pd (ucast (pd_slot && mask pd_bits >> 3) && ~~ mask 4)
= pde \<and> pde_range_sz pde = 0)
(pd_slot && ~~ mask pd_bits)
and K (pde_range_sz pde = 0)
| _ \<Rightarrow> \<top>"
definition
"invocation_duplicates_valid i \<equiv>
case i of
InvokeArchObject (InvokePage pinv) \<Rightarrow> page_inv_duplicates_valid pinv
| InvokeArchObject (InvokePageTable pti) \<Rightarrow> pti_duplicates_valid pti
| _ \<Rightarrow> \<top>"
lemma perform_page_table_valid_pdpt[wp]:
"\<lbrace>valid_pdpt_objs and valid_pti pinv and pti_duplicates_valid pinv\<rbrace>
perform_page_table_invocation pinv \<lbrace>\<lambda>rv. valid_pdpt_objs\<rbrace>"
apply (simp add: perform_page_table_invocation_def split_def vspace_bits_defs
cong: page_table_invocation.case_cong
option.case_cong cap.case_cong arch_cap.case_cong)
apply (rule hoare_pre)
apply (wp store_pde_non_master_valid_pdpt hoare_vcg_ex_lift
set_cap_arch_obj mapM_x_store_pte_valid_pdpt2[simplified vspace_bits_defs, simplified]
| wpc
| simp add: swp_def
| strengthen all_imp_ko_at_from_ex_strg)+
apply (clarsimp simp: pti_duplicates_valid_def valid_pti_def)
apply (auto simp: obj_at_def cte_wp_at_caps_of_state valid_cap_simps
cap_aligned_def vspace_bits_defs
intro!: inj_onI)
done
lemma perform_page_directory_valid_pdpt[wp]:
"\<lbrace>valid_pdpt_objs and valid_pdi pinv\<rbrace>
perform_page_directory_invocation pinv \<lbrace>\<lambda>rv. valid_pdpt_objs\<rbrace>"
apply (simp add: perform_page_directory_invocation_def split_def)
apply (rule hoare_pre)
apply (wp | wpc | simp)+
done
crunch valid_pdpt_objs[wp]: perform_vcpu_invocation "valid_pdpt_objs"
(ignore: delete_objects wp: delete_objects_valid_pdpt static_imp_wp)
lemma perform_invocation_valid_pdpt[wp]:
"\<lbrace>invs and ct_active and valid_invocation i and valid_pdpt_objs
and invocation_duplicates_valid i\<rbrace>
perform_invocation blocking call i
\<lbrace>\<lambda>rv. valid_pdpt_objs\<rbrace>"
apply (cases i, simp_all)
apply (wp send_signal_interrupt_states | simp)+
apply (clarsimp simp: invocation_duplicates_valid_def)
apply (wp | wpc | simp)+
apply (simp add: arch_perform_invocation_def)
apply (rule hoare_pre)
apply (wp | wpc | simp)+
apply (auto simp: valid_arch_inv_def invocation_duplicates_valid_def)
done
lemma neg_mask_pt_7_4:
"(ptr && mask pt_bits >> 3) && ~~ mask 4 =
(ptr::word32) && ~~ mask 7 && mask pt_bits >> 3"
apply (simp add:vspace_bits_defs)
apply word_bitwise
apply (simp add:word_size)
done
lemma neg_mask_pd_7_4:
"(ptr && mask pd_bits >> 3) && ~~ mask 4 =
(ptr::word32) && ~~ mask 7 && mask pd_bits >> 3"
apply (simp add:pd_bits_def pageBits_def)
apply word_bitwise
apply (simp add:word_size)
done
lemma mask_out_same_pt:
"\<lbrakk>is_aligned p 7; x < 2 ^ 7 \<rbrakk> \<Longrightarrow> p + x && ~~ mask pt_bits = p && ~~ mask pt_bits"
apply (subst mask_lower_twice[symmetric,where n = 7])
apply (simp add:vspace_bits_defs)
apply (simp add:is_aligned_add_helper)
done
lemma mask_out_same_pd:
"\<lbrakk>is_aligned p 7; x < 2 ^ 7 \<rbrakk> \<Longrightarrow> p + x && ~~ mask pd_bits = p && ~~ mask pd_bits"
apply (subst mask_lower_twice[symmetric,where n = 7])
apply (simp add:pd_bits_def pageBits_def)
apply (simp add:is_aligned_add_helper)
done
lemma ensure_safe_mapping_ensures[wp]:
"\<lbrace>valid_pdpt_objs and (case entries of (Inl (SmallPagePTE _ _ _, [_])) \<Rightarrow> \<top>
| (Inl (SmallPagePTE _ _ _, _)) \<Rightarrow> \<bottom>
| (Inl (LargePagePTE _ _ _, [])) \<Rightarrow> \<bottom>
| (Inr (SectionPDE _ _ _, [_])) \<Rightarrow> \<top>
| (Inr (SuperSectionPDE _ _ _, [])) \<Rightarrow> \<bottom>
| (Inr (SectionPDE _ _ _, _)) \<Rightarrow> \<bottom>
| _ \<Rightarrow> page_inv_entries_pre entries)\<rbrace>
ensure_safe_mapping entries
\<lbrace>\<lambda>rv. page_inv_entries_safe entries\<rbrace>,-"
proof -
have [simp]:
"\<And>s a. page_inv_entries_pre (Inl (pte.InvalidPTE, a)) s \<Longrightarrow>
page_inv_entries_safe (Inl (pte.InvalidPTE, a)) s"
apply (clarsimp simp:page_inv_entries_pre_def page_inv_entries_safe_def
split:if_splits)
done
have name_pre:
"\<And>F P Q. (\<And>s. P s \<Longrightarrow> \<lbrace>(=) s \<rbrace> F \<lbrace>Q\<rbrace>, -) \<Longrightarrow> \<lbrace>P\<rbrace> F \<lbrace>Q\<rbrace>,-"
apply (simp add:validE_R_def validE_def)
apply (rule hoare_name_pre_state)
apply assumption
done
have mask_neg_mask_order[simp]:
"\<And>a m n. a && ~~ mask m && mask n = a && mask n && ~~ mask m"
by (simp add:word_bw_comms word_bw_lcs)
have align_entry_ptD:
"\<And>pt m x xb xc. \<lbrakk>pt m = pte.LargePagePTE x xb xc; entries_align pte_range_sz pt\<rbrakk>
\<Longrightarrow> is_aligned m 4"
apply (simp add:entries_align_def)
apply (drule_tac x = m in spec,simp)
done
have align_entry_pdD:
"\<And>pd m x xb xc. \<lbrakk>pd m = pde.SuperSectionPDE x xb xc; entries_align pde_range_sz pd\<rbrakk>
\<Longrightarrow> is_aligned m 4"
apply (simp add:entries_align_def)
apply (drule_tac x = m in spec,simp)
done
have pt_offset_bitwise[simp]:"\<And>a. (ucast ((a::word32) && mask pt_bits && ~~ mask 7 >> 3)::9 word)
= (ucast (a && mask pt_bits >> 3)::9 word) && ~~ mask 4"
apply (simp add: vspace_bits_defs mask_def)
apply word_bitwise
done
have pt_offset_bitwise_pt_bits[simp]:"\<And>a. (ucast ((a::word32) && mask pt_bits && ~~ mask 7 >> pte_bits)::9 word)
= (ucast (a && mask pt_bits >> 3)::9 word) && ~~ mask 4"
by (simp add: pte_bits_def)
have pd_offset_bitwise[simp]:"\<And>a. (ucast ((a::word32) && mask pd_bits && ~~ mask 7 >> 3)::11 word)
= (ucast (a && mask pd_bits >> 3)::11 word) && ~~ mask 4"
apply (simp add: vspace_bits_defs mask_def)
apply word_bitwise
done
have pd_offset_bitwise_pt_bits[simp]:"\<And>a. (ucast ((a::word32) && mask pd_bits && ~~ mask 7 >> pde_bits)::11 word)
= (ucast (a && mask pd_bits >> 3)::11 word) && ~~ mask 4"
by (simp add: pde_bits_def)
have mask_neq_0:
"\<And>z zs xa p g. \<lbrakk>[0 :: word32, 8 .e. 0x78] = z # zs; xa \<in> set zs; is_aligned p 7; 7 \<le> g\<rbrakk>
\<Longrightarrow> (p + xa && mask g >> 3) && mask 4 \<noteq> 0"
apply (rule ccontr)
apply (simp add:is_aligned_mask[symmetric])
apply (drule is_aligned_shiftl[where n = 7 and m = 3,simplified])
apply (subst (asm) shiftr_shiftl1)
apply simp+
apply (subst (asm) is_aligned_neg_mask_eq)
apply (rule is_aligned_andI1)
apply (erule aligned_add_aligned)
apply (clarsimp simp :upto_enum_def upto_enum_step_def
Fun.comp_def upto_0_to_n2 is_aligned_mult_triv2[where n = 3,simplified])
apply simp
apply (simp add:is_aligned_mask mask_twice
pt_bits_def pageBits_def min_def)
apply (subst (asm) is_aligned_mask[symmetric])
apply (subst (asm) is_aligned_add_helper)
apply simp
apply (clarsimp simp :upto_enum_def upto_enum_step_def
Fun.comp_def upto_0_to_n2)
apply (subst shiftl_t2n
[where n = 3,simplified field_simps,simplified,symmetric])+
apply (rule shiftl_less_t2n[where m = 7,simplified])
apply (rule word_of_nat_less)
apply simp
apply simp
apply (clarsimp simp :upto_enum_def upto_enum_step_def
Fun.comp_def upto_0_to_n2)
apply (cut_tac x = "of_nat x" and n = 3 in word_power_nonzero_32)
apply (simp add:word_of_nat_less word_bits_def)+
apply (simp add: of_nat_neq_0)
apply simp
done
have neq_pt_offset: "\<And>z zs xa (p::word32). \<lbrakk>[0 , 8 .e. 0x78] = z # zs;
xa \<in> set zs;is_aligned p 7 \<rbrakk> \<Longrightarrow>
ucast (p + xa && mask pt_bits >> 3) && ~~ mask 4 \<noteq> ((ucast (p + xa && mask pt_bits >> 3))::9 word)"
apply (rule ccontr)
apply (simp add:mask_out_sub_mask ucast_and_mask[symmetric])
apply (drule arg_cong[where f = unat])
apply (simp add:unat_ucast)
apply (subst (asm) mod_less)
apply (rule unat_less_helper)
apply (rule le_less_trans[OF word_and_le1])
apply (simp add:mask_def)
apply (simp add:unat_eq_0)
apply (drule(2) mask_neq_0[of _ _ _ _ pt_bits])
apply (simp add:pt_bits_def pageBits_def)+
done
have neq_pd_offset: "\<And>z zs xa (p::word32). \<lbrakk>[0 , 8 .e. 0x78] = z # zs;
xa \<in> set zs;is_aligned p 7 \<rbrakk> \<Longrightarrow>
ucast (p + xa && mask pd_bits >> 3) && ~~ mask 4 \<noteq> ((ucast (p + xa && mask pd_bits >> 3)) :: 11 word)"
apply (simp add:mask_out_sub_mask)
apply (rule ccontr)
apply (simp add:mask_out_sub_mask ucast_and_mask[symmetric])
apply (drule arg_cong[where f = unat])
apply (simp add:unat_ucast)
apply (subst (asm) mod_less)
apply (rule unat_less_helper)
apply (rule le_less_trans[OF word_and_le1])
apply (simp add:mask_def)
apply (simp add:unat_eq_0)
apply (drule(2) mask_neq_0[of _ _ _ _ pd_bits])
apply (simp add:pd_bits_def pageBits_def)+
done
have invalid_pteI:
"\<And>a pt x y z. \<lbrakk>valid_pt_entries pt; (a && ~~ mask 4) \<noteq> a;
pt (a && ~~ mask 4) = pte.LargePagePTE x y z \<rbrakk>
\<Longrightarrow> pt a = pte.InvalidPTE"
apply (drule(1) valid_entriesD[rotated])
apply (case_tac "pt a"; simp add:mask_lower_twice is_aligned_neg_mask split:if_splits)
done
have invalid_pdeI:
"\<And>a pd x y z. \<lbrakk>valid_pd_entries pd; (a && ~~ mask 4) \<noteq> a;
pd (a && ~~ mask 4) = pde.SuperSectionPDE x y z \<rbrakk>
\<Longrightarrow> pd a = pde.InvalidPDE"
apply (drule(1) valid_entriesD[rotated])
apply (case_tac "pd a",
simp_all add:mask_lower_twice is_aligned_neg_mask
split:if_splits)
done
have inj[simp]:
"\<And>p. is_aligned (p::word32) 7 \<Longrightarrow> inj_on (\<lambda>x. toEnum x * 8 + p) {Suc 0..<16}"
apply (clarsimp simp:inj_on_def)
apply (subst (asm) shiftl_t2n[where n = 3,simplified field_simps,simplified,symmetric])+
apply (drule arg_cong[where f = "\<lambda>x. x >> 3"])
apply (simp add:shiftl_shiftr_id word_of_nat_less)
apply (simp add:of_nat_inj)
done
show ?thesis
apply (rule name_pre)
apply (case_tac entries)
apply (case_tac a, case_tac aa)
apply (simp add:page_inv_entries_pre_def page_inv_entries_safe_def
| wp | intro conjI impI)+
apply (simp split:list.splits add:page_inv_entries_pre_def)+
apply (rename_tac obj_ref vm_attributes cap_rights slot slots)
apply (elim conjE exE)
apply (subst mapME_x_Cons)
apply simp
apply wp
apply (rule_tac Q' = "\<lambda>r s. \<forall>x \<in> set slots. obj_at
(\<lambda>ko. \<exists>pt. ko = ArchObj (PageTable pt) \<and>
pt (ucast (x && mask pt_bits >> 3)) = pte.InvalidPTE)
(hd (slot # slots) && ~~ mask pt_bits) s" in hoare_post_imp_R)
apply (wp mapME_x_accumulate_checks[where Q = "\<lambda>s. valid_pdpt_objs s"] )
apply (wp get_master_pte_wp| wpc | simp)+
apply clarsimp
apply (frule_tac x = xa in mask_out_same_pt)
apply (clarsimp simp:upto_enum_def upto_enum_step_def upto_0_to_n2)
apply (erule notE)
apply (subst shiftl_t2n[where n = 3,simplified field_simps,simplified,symmetric])
apply (rule shiftl_less_t2n[where m = 7,simplified])
apply (simp add:word_of_nat_less)
apply simp
apply (frule_tac x = z in mask_out_same_pt)
apply (clarsimp simp:upto_enum_def upto_enum_step_def upto_0_to_n2)
apply (clarsimp simp:field_simps obj_at_def
split:pte.splits)
apply (intro conjI impI)
apply (clarsimp simp: pte_bits_def)
apply (drule(1) valid_pdpt_objs_ptD)
apply (clarsimp simp: word_bool_alg.conj_assoc)
apply (frule align_entry_ptD,simp)
apply (clarsimp simp: is_aligned_neg_mask_eq[of _ 4] pte_bits_def)
apply clarsimp
apply (drule(1) valid_pdpt_objs_ptD,clarify)
apply (erule(4) invalid_pteI[OF _ neq_pt_offset])
apply (clarsimp simp: pte_bits_def)
apply (clarsimp simp: pte_bits_def)
apply (drule(1) valid_pdpt_objs_ptD)
apply (frule align_entry_ptD,simp)
apply (simp add: is_aligned_neg_mask_eq)
apply (wp hoare_drop_imps |wpc|simp)+
apply (clarsimp simp:upto_enum_def upto_enum_step_def
upto_0_to_n2 Fun.comp_def distinct_map)
apply (intro exI conjI,fastforce+)
apply (simp add:obj_at_def hd_map_simp
upto_0_to_n2 upto_enum_def upto_enum_step_def)
apply (frule_tac x = 1 in bspec,fastforce+)
apply ((wp hoare_drop_imps |wpc|simp)+)[1]
apply (simp add:page_inv_entries_pre_def page_inv_entries_safe_def
| wp | intro conjI impI)+
apply (simp split:list.splits add:page_inv_entries_pre_def mapME_singleton)
apply (wp get_master_pte_wp |wpc | simp)+
apply (clarsimp simp:obj_at_def split:pte.splits)
apply (clarsimp simp:page_inv_entries_safe_def split:list.splits)
apply (simp split:list.splits add:page_inv_entries_pre_def mapME_singleton)
apply (case_tac b,case_tac a)
apply ((simp add:page_inv_entries_pre_def page_inv_entries_safe_def
| wp | intro conjI impI)+)[1]
apply simp
apply wp[1]
apply (simp split:list.splits add:page_inv_entries_pre_def mapME_singleton)
apply (wp get_master_pde_wp | wpc | simp)+
apply (clarsimp simp:obj_at_def page_inv_entries_safe_def pde_bits_def
split:pde.splits)
apply (simp split:list.splits if_splits
add:page_inv_entries_pre_def Let_def page_inv_entries_safe_def)
apply (elim conjE exE)
apply (subst mapME_x_Cons)
apply simp
apply wp
apply (rule_tac Q' = "\<lambda>r s. \<forall>x \<in> set x22. obj_at
(\<lambda>ko. \<exists>pd. ko = ArchObj (PageDirectory pd) \<and>
pd (ucast (x && mask pd_bits >> 3)) = pde.InvalidPDE)
(hd (x21 # x22) && ~~ mask pd_bits) s" in hoare_post_imp_R)
apply (wp mapME_x_accumulate_checks[where Q = "\<lambda>s. valid_pdpt_objs s"] )
apply (wp get_master_pde_wp| wpc | simp)+
apply clarsimp
apply (frule_tac x = xa in mask_out_same_pd)
apply (clarsimp simp:upto_enum_def upto_enum_step_def upto_0_to_n2)
apply (erule notE)
apply (subst shiftl_t2n[where n = 3,simplified field_simps,simplified,symmetric])
apply (rule shiftl_less_t2n[where m = 7,simplified])
apply (simp add:word_of_nat_less)
apply simp
apply (frule_tac x = z in mask_out_same_pd)
apply (clarsimp simp:upto_enum_def upto_enum_step_def upto_0_to_n2)
apply (clarsimp simp:field_simps obj_at_def
split:pde.splits)
apply (drule(1) valid_pdpt_objs_pdD)
apply (intro conjI impI; clarsimp simp: pde_bits_def)
apply (frule align_entry_pdD,simp)
apply (clarsimp simp: is_aligned_neg_mask_eq pde_bits_def)
apply (frule(1) align_entry_pdD)
apply (simp add:is_aligned_neg_mask_eq)
apply (frule(1) align_entry_pdD)
apply (simp add:is_aligned_neg_mask_eq)
apply (frule(1) align_entry_pdD)
apply (erule(4) invalid_pdeI[OF _ neq_pd_offset])
apply (wp hoare_drop_imps |wpc|simp)+
apply (clarsimp simp:upto_enum_def upto_enum_step_def
upto_0_to_n2 Fun.comp_def distinct_map)
apply (intro exI conjI,fastforce+)
apply (simp add:obj_at_def hd_map_simp
upto_0_to_n2 upto_enum_def upto_enum_step_def)
apply (frule_tac x = 1 in bspec,fastforce+)
apply (wp get_master_pde_wp | simp | wpc)+
done
qed
lemma create_mapping_entries_safe[wp]:
"\<lbrace>\<exists>\<rhd>pd and K (vmsz_aligned vptr sz) and K (is_aligned pd pd_bits)
and K (vptr < kernel_base)
and valid_vspace_objs and pspace_aligned and
(\<exists>\<rhd> (lookup_pd_slot pd vptr && ~~ mask pd_bits))\<rbrace>
create_mapping_entries ptr vptr sz rights attrib pd
\<lbrace>\<lambda>entries. case entries of (Inl (SmallPagePTE _ _ _, [_])) \<Rightarrow> \<top>
| (Inl (SmallPagePTE _ _ _, _)) \<Rightarrow> \<bottom>
| (Inl (LargePagePTE _ _ _, [])) \<Rightarrow> \<bottom>
| (Inr (SectionPDE _ _ _, [_])) \<Rightarrow> \<top>
| (Inr (SectionPDE _ _ _, _)) \<Rightarrow> \<bottom>
| (Inr (SuperSectionPDE _ _ _, [])) \<Rightarrow> \<bottom>
| _ \<Rightarrow> page_inv_entries_pre entries\<rbrace>,-"
apply (cases sz, simp_all add: largePagePTE_offsets_def superSectionPDE_offsets_def)
defer 2
apply (wp | simp)+
apply (simp split:list.split)
apply (subgoal_tac "lookup_pd_slot pd vptr \<le> lookup_pd_slot pd vptr + 0x78")
apply (clarsimp simp:upto_enum_def not_less upto_enum_step_def vspace_bits_defs
page_inv_entries_pre_def Let_def)
apply (clarsimp simp:upto_enum_step_def upto_enum_def
map_eq_Cons_conv upt_eq_Cons_conv)
apply (drule_tac x = "lookup_pd_slot pd vptr" in spec)
apply (subst (asm) upto_0_to_n2)
apply simp
apply clarsimp
apply (drule lookup_pd_slot_aligned_6)
apply (simp add: vspace_bits_defs)
apply simp
apply clarsimp
apply (erule is_aligned_no_wrap'[OF lookup_pd_slot_aligned_6])
apply (simp add: vspace_bits_defs)
apply simp
apply (wp get_pde_wp | simp add:lookup_pt_slot_def | wpc)+
apply (clarsimp simp:upto_enum_def upto_enum_step_def
page_inv_entries_pre_def Let_def )
apply (drule_tac ref = refa in valid_vspace_objsD)
apply (simp add:obj_at_def)
apply simp
apply (simp add: vspace_bits_defs)
apply (drule_tac x = "ucast (lookup_pd_slot pd vptr && mask pd_bits >> 3)"
in spec)
apply (simp add: vspace_bits_defs)
apply (clarsimp simp:not_less[symmetric] split:list.splits)
apply (clarsimp simp:page_inv_entries_pre_def
Let_def upto_enum_step_def upto_enum_def)
apply (subst (asm) upto_0_to_n2)
apply simp
apply (clarsimp simp:not_less[symmetric])
apply (subgoal_tac
"(\<exists>xa xb. pda (ucast (lookup_pd_slot pd vptr && mask pd_bits >> 3))
= pde.PageTablePDE x)
\<longrightarrow> is_aligned (ptrFromPAddr x + ((vptr >> 12) && 0x1FF << 3)) 7")
apply (clarsimp simp: vspace_bits_defs)
apply (rule_tac x="ptrFromPAddr x + ((vptr >> 12) && 0x1FF << 3)" in exI)
apply (subst map_upt_append[where x=15 and y=16]; simp add: mask_def)
apply clarsimp
apply (rule aligned_add_aligned)
apply (erule(1) pt_aligned)
apply (rule is_aligned_shiftl[OF is_aligned_andI1])
apply (rule is_aligned_shiftr)
apply (simp add:vmsz_aligned_def)
apply simp
done
lemma decode_mmu_invocation_valid_pdpt[wp]:
"\<lbrace>invs and valid_cap (cap.ArchObjectCap cap) and valid_pdpt_objs \<rbrace>
decode_mmu_invocation label args cap_index slot cap excaps
\<lbrace>invocation_duplicates_valid o Invocations_A.InvokeArchObject\<rbrace>, -"
proof -
have bitwise:"\<And>a. (ucast (((a::word32) && ~~ mask 7) && mask 14 >> 3)::11 word)
= (ucast (a && mask 14 >> 3)::11 word) && ~~ mask 4"
apply (simp add:mask_def)
apply word_bitwise
done
have sz:
"\<And>vmpage_size. \<lbrakk>args ! 0 + 2 ^ pageBitsForSize vmpage_size - 1 < kernel_base;
vmsz_aligned (args ! 0) vmpage_size\<rbrakk>
\<Longrightarrow> args ! 0 < kernel_base"
apply (rule le_less_trans[OF is_aligned_no_overflow])
apply (simp add:vmsz_aligned_def)
apply simp
done
show ?thesis
supply if_split[split del]
apply (simp add: decode_mmu_invocation_def)
\<comment> \<open>Handle the easy cases first (trivial because of the post-condition invocation_duplicates_valid)\<close>
apply (cases "invocation_type label \<notin> {ArchInvocationLabel ARMPageTableMap,
ArchInvocationLabel ARMPageMap}")
apply (wpsimp simp: invocation_duplicates_valid_def page_inv_duplicates_valid_def
pti_duplicates_valid_def Let_def
cong: if_cong)
\<comment> \<open>Handle the two interesting cases now\<close>
apply (clarsimp; erule disjE; cases cap;
simp add: isPDFlushLabel_def isPageFlushLabel_def throwError_R')
\<comment> \<open>PageTableMap\<close>
apply (wpsimp simp: Let_def get_master_pde_def
wp: get_pde_wp hoare_drop_imps hoare_vcg_if_lift_ER)
apply (clarsimp simp: invocation_duplicates_valid_def pti_duplicates_valid_def
mask_lower_twice bitwise obj_at_def vspace_bits_defs if_apply_def2
split: if_splits)
apply wp
\<comment> \<open>PageMap\<close>
apply (rename_tac dev pg_ptr rights sz pg_map)
apply (wpsimp simp: Let_def invocation_duplicates_valid_def page_inv_duplicates_valid_def
wp: ensure_safe_mapping_ensures[THEN hoare_post_imp_R]
check_vp_wpR hoare_vcg_if_lift_ER find_pd_for_asid_lookup_pd_wp)
apply (fastforce simp: invs_psp_aligned page_directory_at_aligned_pd_bits word_not_le sz
valid_cap_def valid_arch_cap_def lookup_pd_slot_eq
split: if_splits)
apply wp
done
qed
lemma returnOk_lift :
assumes P': "\<forall>s. P rv s"
shows "\<lbrace>Q\<rbrace> (doE y \<leftarrow> f ; returnOk rv odE) \<lbrace>P\<rbrace>, -"
by (wp,auto simp: returnOk_def return_def validE_R_def validE_def valid_def P')
lemma decode_vcpu_invocation_valid_pdpt[wp]:
"\<lbrace>Q\<rbrace>
decode_vcpu_invocation label args vcap excaps
\<lbrace>invocation_duplicates_valid o Invocations_A.InvokeArchObject\<rbrace>, -"
apply (simp add: decode_vcpu_invocation_def)
apply (wpsimp simp: decode_vcpu_set_tcb_def
decode_vcpu_inject_irq_def decode_vcpu_read_register_def
decode_vcpu_write_register_def decode_vcpu_ack_vppi_def
if_apply_def2
| simp add: invocation_duplicates_valid_def)+
done
lemma arch_decode_invocation_valid_pdpt[wp]:
notes find_pd_for_asid_inv[wp del]
shows
"\<lbrace>invs and valid_cap (cap.ArchObjectCap cap) and valid_pdpt_objs \<rbrace>
arch_decode_invocation label args cap_index slot cap excaps
\<lbrace>invocation_duplicates_valid o Invocations_A.InvokeArchObject\<rbrace>,-"
proof -
show ?thesis
apply (simp add: arch_decode_invocation_def)
apply (rule hoare_pre)
apply (wp | wpc)+
apply auto
done
qed
lemma decode_invocation_valid_pdpt[wp]:
"\<lbrace>invs and valid_cap cap and valid_pdpt_objs\<rbrace>
decode_invocation label args cap_index slot cap excaps
\<lbrace>invocation_duplicates_valid\<rbrace>,-"
apply (simp add: decode_invocation_def split del: if_split)
apply (rule hoare_pre)
apply (wp | wpc
| simp only: invocation_duplicates_valid_def o_def uncurry_def split_def
Invocations_A.invocation.simps)+
apply clarsimp
done
crunch valid_pdpt_objs[wp]: handle_fault, reply_from_kernel "valid_pdpt_objs"
(simp: crunch_simps wp: crunch_wps)
lemma invocation_duplicates_valid_exst_update[simp]:
"invocation_duplicates_valid i (trans_state f s) = invocation_duplicates_valid i s"
  apply (clarsimp simp add: invocation_duplicates_valid_def pti_duplicates_valid_def
                            page_inv_duplicates_valid_def page_inv_entries_safe_def
                  split: sum.splits invocation.splits arch_invocation.splits
                         kernel_object.splits page_table_invocation.splits
                         page_invocation.splits)+
done
lemma set_thread_state_duplicates_valid[wp]:
"\<lbrace>invocation_duplicates_valid i\<rbrace> set_thread_state t st \<lbrace>\<lambda>rv. invocation_duplicates_valid i\<rbrace>"
apply (simp add: set_thread_state_def set_object_def get_object_def)
apply (wp|simp)+
apply (clarsimp simp: invocation_duplicates_valid_def pti_duplicates_valid_def
page_inv_duplicates_valid_def page_inv_entries_safe_def
Let_def
dest!: get_tcb_SomeD
split: Invocations_A.invocation.split arch_invocation.split_asm
page_table_invocation.split
page_invocation.split sum.split
)
apply (auto simp add: obj_at_def page_inv_entries_safe_def)
done
lemma handle_invocation_valid_pdpt[wp]:
"\<lbrace>valid_pdpt_objs and invs and ct_active\<rbrace>
handle_invocation calling blocking \<lbrace>\<lambda>rv. valid_pdpt_objs\<rbrace>"
apply (simp add: handle_invocation_def)
apply (wp syscall_valid set_thread_state_ct_st
| simp add: split_def | wpc
| wp (once) hoare_drop_imps)+
apply (auto simp: ct_in_state_def elim: st_tcb_ex_cap)
done
crunch valid_pdpt[wp]: handle_event, activate_thread,switch_to_thread,
switch_to_idle_thread "valid_pdpt_objs"
(simp: crunch_simps wp: crunch_wps alternative_valid select_wp OR_choice_weak_wp select_ext_weak_wp
ignore: without_preemption getActiveIRQ resetTimer ackInterrupt
getFAR getDFSR getIFSR OR_choice set_scheduler_action
clearExMonitor)
lemma schedule_valid_pdpt[wp]: "\<lbrace>valid_pdpt_objs\<rbrace> schedule :: (unit,unit) s_monad \<lbrace>\<lambda>_. valid_pdpt_objs\<rbrace>"
apply (simp add: schedule_def allActiveTCBs_def)
apply (wp alternative_wp select_wp)
apply simp
done
lemma call_kernel_valid_pdpt[wp]:
"\<lbrace>invs and (\<lambda>s. e \<noteq> Interrupt \<longrightarrow> ct_running s) and valid_pdpt_objs\<rbrace>
(call_kernel e) :: (unit,unit) s_monad
\<lbrace>\<lambda>_. valid_pdpt_objs\<rbrace>"
apply (cases e, simp_all add: call_kernel_def)
apply (rule hoare_pre)
apply (wp | simp | wpc
| rule conjI | clarsimp simp: ct_in_state_def
| erule pred_tcb_weakenE
| wp (once) hoare_drop_imps)+
done
end
end
|
State Before: α : Type u_1
inst✝ : LinearOrderedAddCommGroup α
hα : Archimedean α
p : α
hp : 0 < p
a✝ b✝ c : α
n : ℤ
a b : α
m : ℤ
⊢ toIcoMod hp a (b - m • p) = toIcoMod hp a b State After: no goals Tactic: rw [sub_eq_add_neg, ← neg_smul, toIcoMod_add_zsmul] |
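For reference, here is how the record above reads as a standalone lemma (a sketch in Mathlib-4 style; the theorem name and the surrounding variable setup are inferred from the goal state, not given by the source):

theorem toIcoMod_sub_zsmul' {α : Type*} [LinearOrderedAddCommGroup α]
    [Archimedean α] {p : α} (hp : 0 < p) (a b : α) (m : ℤ) :
    toIcoMod hp a (b - m • p) = toIcoMod hp a b := by
  rw [sub_eq_add_neg, ← neg_smul, toIcoMod_add_zsmul]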
[STATEMENT]
lemma subset_eq_diff_conv:
"A - C \<subseteq># B \<longleftrightarrow> A \<subseteq># B + C"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (A - C \<subseteq># B) = (A \<subseteq># B + C)
[PROOF STEP]
by (simp add: subseteq_mset_def le_diff_conv) |
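A concrete instance as a sanity check (illustrative only): take A = {#a, a, b#}, C = {#a#}, B = {#a, b#}. Then A - C = {#a, b#} \<subseteq># B, and correspondingly A = {#a, a, b#} \<subseteq># B + C = {#a, a, b#}; both sides of the equivalence hold, since the count of each element on the left is bounded by its count on the right.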
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 15, 2015
@author: Wenchang Yang
"""
from __future__ import print_function
import numpy as np
from scipy.signal import butter, filtfilt
# ---- Butterworth filter
def _lowpass_ba(lowcut=0.25,fs=1.0,order=2):
nyq = 0.5 * fs
low = lowcut / nyq
b,a = butter(order,low,btype='lowpass')
return b,a
def lowpass(X,lowcut=0.25,fs=1.0,order=2,axis=-1):
b,a = _lowpass_ba(lowcut,fs=fs,order=order)
Y = filtfilt(b,a,X,axis=axis)
return Y
def _highpass_ba(highcut=0.25,fs=1.0,order=2):
nyq = 0.5 * fs
high = highcut / nyq
b,a = butter(order,high,btype='highpass')
return b,a
def highpass(X,highcut=0.25,fs=1.0,order=2,axis=-1):
b,a = _highpass_ba(highcut,fs,order=order)
Y = filtfilt(b,a,X,axis=axis)
return Y
def _bandpass_ba(lowcut=0.125,highcut=0.375,fs=1.0,order=2):
nyq = 0.5 * fs
low = lowcut /nyq
high = highcut / nyq
b,a = butter(order,[low,high],btype='bandpass')
return b,a
def bandpass(X,lowcut=0.125,highcut=0.375,fs=1.0,order=2,axis=-1):
b,a = _bandpass_ba(lowcut,highcut,fs,order=order)
Y = filtfilt(b,a,X,axis=axis)
return Y
#
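# Usage sketch for the Butterworth helpers above. The synthetic two-tone
# signal, sampling rate and cutoffs below are illustrative assumptions, not
# part of the original module.
def _demo_butterworth():
    fs = 100.0                                    # sampling rate [Hz]
    t = np.arange(0, 10, 1.0/fs)
    x = np.sin(2*np.pi*1.0*t) + 0.5*np.sin(2*np.pi*10.0*t)  # 1 Hz + 10 Hz tones
    x_low = lowpass(x, lowcut=5.0, fs=fs, order=4)      # keeps the 1 Hz tone
    x_high = highpass(x, highcut=5.0, fs=fs, order=4)   # keeps the 10 Hz tone
    x_band = bandpass(x, lowcut=8.0, highcut=12.0, fs=fs, order=4)  # isolates 10 Hz
    return x_low, x_high, x_band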
# ---- Lanczos filter
def _lowpass_ba_lanczos(lowcut=0.25,fs=1.0,M=10):
'''estimate the Lanczos lowpass filter coefficients.
b_k = [\frac{\sin(2\pi f_{cut}k)}{\pi k}][\frac{\sin(\pi k/(M+1))}{\pi k/(M+1)}]
https://books.google.com/books?id=p7YMOPuu8ugC&pg=PA612&lpg=PA612&dq=Lanczos+filter+coefficients&source=bl&ots=Zx1bDJvGh6&sig=WQqdwryB8SU5d-ygDwAwbpx8IsQ&hl=en&sa=X&ei=cGS5VOqKDNivoQSG0IHoAw&ved=0CD0Q6AEwBA#v=onepage&q=Lanczos%20filter%20coefficients&f=false
http://www.unl.edu.ar/ceneha/uploads/LanczosFiltering(1979).pdf'''
low = lowcut / fs
M = float(M)
k = np.arange(-M,M+1)
# b = ( np.sin(2*np.pi*low*k) / (np.pi*k) )*( np.sin(np.pi*k/(M+1)) / (np.pi*k/(M+1)) )
# b[M] = 2*low
b = 2*low*np.sinc(2*low*k) * np.sinc(k/(M+1))
b = b / b.sum()
a = 1.0
return b,a
def lowpass_lanczos(X,lowcut=0.25,fs=1.,M=10,axis=-1):
b,a = _lowpass_ba_lanczos(lowcut,fs=fs,M=M)
Y = filtfilt(b,a,X,axis=axis)
return Y
def _highpass_ba_lanczos(highcut=0.25,fs=1.0,M=10):
'''estimate the Lanczos highpass filter coefficients'''
b,a = _lowpass_ba_lanczos(lowcut=highcut,fs=fs,M=M)
b = -b
b[M] = 1+b[M]
return b,a
def highpass_lanczos(X,highcut=0.25,fs=1.,M=10,axis=-1):
b,a = _highpass_ba_lanczos(highcut,fs=fs,M=M)
Y = filtfilt(b,a,X,axis=axis)
return Y
def bandpass_lanczos(X,lowcut=0.125,highcut=0.375,fs=1.,M=10,axis=-1):
# Ylow = lowpass_lanczos(X,lowcut=lowcut,fs=fs,M=M,axis=axis)
# Yhigh = highpass_lanczos(X,highcut=highcut,fs=fs,M=M,axis=axis)
# Y = X - Ylow - Yhigh
Y = lowpass_lanczos(X,lowcut=highcut,fs=fs,M=M,axis=axis)
Y = highpass_lanczos(Y,highcut=lowcut,fs=fs,M=M,axis=axis)
return Y
#
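# ---- Example usage ----
# Minimal usage sketch (not part of the original module); the test signal
# below is invented for illustration.
if __name__ == '__main__':
    fs = 1.0
    t = np.arange(512) / fs
    # slow component at 0.05 cycles/sample plus a fast one at 0.4 cycles/sample
    x = np.sin(2*np.pi*0.05*t) + 0.5*np.sin(2*np.pi*0.4*t)
    x_butter = lowpass(x, lowcut=0.1, fs=fs, order=2)
    x_lanczos = lowpass_lanczos(x, lowcut=0.1, fs=fs, M=10)
    plt.plot(t, x, label='raw')
    plt.plot(t, x_butter, label='Butterworth lowpass')
    plt.plot(t, x_lanczos, label='Lanczos lowpass')
    plt.legend()
    plt.show()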
|
# Copyright (c) 2021 zfit
from typing import List, Optional
import iminuit
import numpy as np
import tensorflow as tf
from .baseminimizer import BaseMinimizer, ZfitStrategy, print_params, print_gradients
from .fitresult import FitResult
from ..core.interfaces import ZfitLoss
from ..core.parameter import Parameter
from ..util.cache import GraphCachable
class Minuit(BaseMinimizer, GraphCachable):
_DEFAULT_name = "Minuit"
def __init__(self, strategy: ZfitStrategy = None, minimize_strategy: int = 1, tolerance: float = None,
verbosity: int = 5, name: str = None,
ncall: Optional[int] = None, use_minuit_grad: bool = None, **minimizer_options):
"""
Args:
strategy: A :py:class:`~zfit.minimizer.baseminimizer.ZfitStrategy` object that defines the behavior of
the minimizer in certain situations.
minimize_strategy: A number used by minuit to define the strategy, either 0, 1 or 2.
            tolerance: Stopping criterion: the Estimated Distance to Minimum (EDM) has to be lower than `tolerance`
verbosity: Regulates how much will be printed during minimization. Values between 0 and 10 are valid.
name: Name of the minimizer
ncall: Maximum number of minimization steps.
            use_minuit_grad: If True, iminuit uses its internal numerical gradient calculation instead of the
(analytic/numerical) gradient provided by TensorFlow/zfit.
"""
minimizer_options['ncall'] = 0 if ncall is None else ncall
if minimize_strategy not in range(3):
raise ValueError(f"minimize_strategy has to be 0, 1 or 2, not {minimize_strategy}.")
minimizer_options['strategy'] = minimize_strategy
super().__init__(name=name, strategy=strategy, tolerance=tolerance, verbosity=verbosity,
minimizer_options=minimizer_options)
use_minuit_grad = True if use_minuit_grad is None else use_minuit_grad
self._minuit_minimizer = None
self._use_tfgrad = not use_minuit_grad
def _minimize(self, loss: ZfitLoss, params: List[Parameter]):
# create options
minimizer_options = self.minimizer_options.copy()
minimize_options = {}
precision = minimizer_options.pop('precision', None)
minimize_options['ncall'] = minimizer_options.pop('ncall')
minimizer_init = {}
if 'errordef' in minimizer_options:
raise ValueError("errordef cannot be specified for Minuit as this is already defined in the Loss.")
loss_errordef = loss.errordef
if not isinstance(loss_errordef, (float, int)):
loss_errordef = 1.0 # default of minuit
minimizer_init['errordef'] = loss_errordef
minimizer_init['pedantic'] = minimizer_options.pop('pedantic', False)
minimizer_setter = {}
minimizer_setter['strategy'] = minimizer_options.pop('strategy')
if self.verbosity > 6:
minuit_verbosity = 3
elif self.verbosity > 2:
minuit_verbosity = 1
else:
minuit_verbosity = 0
if minimizer_options:
raise ValueError("The following options are not (yet) supported: {}".format(minimizer_options))
# create Minuit compatible names
limits = tuple(tuple((param.lower, param.upper)) for param in params)
errors = tuple(param.step_size for param in params)
start_values = [p.numpy() for p in params]
limits = np.array([(low.numpy(), up.numpy()) for low, up in limits])
errors = np.array([err.numpy() for err in errors])
multiparam = isinstance(start_values[0], np.ndarray) and len(start_values[0]) > 1 and len(params) == 1
if multiparam:
# TODO(Mayou36): multiparameter
params_name = None # autogenerate for the moment
start_values = start_values[0]
# errors = errors[0]
limits = limits[0]
            # gradients = gradients[0]  # removed: 'gradients' is undefined at this point and would raise a NameError
else:
params_name = [param.name for param in params]
current_loss = None
nan_counter = 0
def func(values):
nonlocal current_loss, nan_counter
self._update_params(params=params, values=values)
do_print = self.verbosity > 8
is_nan = False
try:
loss_evaluated = loss.value().numpy()
except tf.errors.InvalidArgumentError:
is_nan = True
                loss_evaluated = "invalid, error occurred"
except:
                loss_evaluated = "invalid, error occurred"
raise
finally:
if do_print:
print_params(params, values, loss_evaluated)
is_nan = is_nan or np.isnan(loss_evaluated)
if is_nan:
nan_counter += 1
info_values = {}
info_values['loss'] = loss_evaluated
info_values['old_loss'] = current_loss
info_values['nan_counter'] = nan_counter
loss_evaluated = self.strategy.minimize_nan(loss=loss, params=params, minimizer=minimizer,
values=info_values)
else:
nan_counter = 0
current_loss = loss_evaluated
return loss_evaluated
def grad_func(values):
nonlocal current_loss, nan_counter
self._update_params(params=params, values=values)
do_print = self.verbosity > 8
is_nan = False
try:
loss_value, gradients = loss.value_gradients(params=params)
loss_value = loss_value.numpy()
gradients_values = [float(g.numpy()) for g in gradients]
except tf.errors.InvalidArgumentError:
is_nan = True
                loss_value = "invalid, error occurred"
gradients_values = ["invalid"] * len(params)
except:
gradients_values = ["invalid"] * len(params)
raise
finally:
if do_print:
print_gradients(params, values, gradients_values, loss=loss_value)
is_nan = is_nan or any(np.isnan(gradients_values))
if is_nan:
nan_counter += 1
info_values = {}
info_values['loss'] = loss_value
info_values['old_loss'] = current_loss
info_values['nan_counter'] = nan_counter
# but loss value not needed here
_ = self.strategy.minimize_nan(loss=loss, params=params, minimizer=minimizer,
values=info_values)
else:
nan_counter = 0
current_loss = loss_value
return gradients_values
grad_func = grad_func if self._use_tfgrad else None
minimizer = iminuit.Minuit(func, start_values,
grad=grad_func,
name=params_name,
)
minimizer.precision = precision
for param in params:
if param.has_step_size:
minimizer.errors[param.name] = param.step_size
if param.has_limits:
minimizer.limits[param.name] = (param.lower, param.upper)
if loss.errordef is None:
raise ValueError("Errordef must not be None to be run with iminuit.")
minimizer.errordef = loss.errordef
minimizer.print_level = minuit_verbosity
strategy = minimizer_setter.pop('strategy')
minimizer.strategy = strategy
minimizer.tol = self.tolerance / 1e-3 # iminuit 1e-3 and tolerance 0.1
assert not minimizer_setter, "minimizer_setter is not empty, bug. Please report. minimizer_setter: {}".format(
minimizer_setter)
self._minuit_minimizer = minimizer
result = minimizer.migrad(**minimize_options)
fitresult = FitResult.from_minuit(loss=loss, params=params, result=result, minimizer=self.copy())
return fitresult
def copy(self):
tmp_minimizer = self._minuit_minimizer
new_minimizer = super().copy()
new_minimizer._minuit_minimizer = tmp_minimizer
return new_minimizer
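# ---- Usage sketch (illustrative only; not part of the original file) ----
# Assumes the public zfit API of this release; the observable, model, data
# and parameter values below are invented placeholders.
#
#   import zfit
#   obs = zfit.Space('x', limits=(-5, 5))
#   mu = zfit.Parameter('mu', 0.3, -1, 1)
#   sigma = zfit.Parameter('sigma', 1.1, 0.1, 10)
#   gauss = zfit.pdf.Gauss(obs=obs, mu=mu, sigma=sigma)
#   data = zfit.Data.from_numpy(obs=obs, array=np.random.normal(0.2, 1., 5000))
#   nll = zfit.loss.UnbinnedNLL(model=gauss, data=data)
#
#   minimizer = Minuit(use_minuit_grad=True)
#   result = minimizer.minimize(nll)  # runs migrad and wraps it in a FitResult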
|
Require Import Crypto.Arithmetic.PrimeFieldTheorems.
Require Import Crypto.Specific.solinas32_2e322m2e161m1_14limbs.Synthesis.
(* TODO : change this to field once field isomorphism happens *)
Definition freeze :
{ freeze : feBW_tight -> feBW_limbwidths
| forall a, phiBW_limbwidths (freeze a) = phiBW_tight a }.
Proof.
Set Ltac Profiling.
Time synthesize_freeze ().
Show Ltac Profile.
Time Defined.
Print Assumptions freeze.
|
lemma cis_cnj: "cnj (cis t) = cis (-t)" |
## This is the function to combine the likelihood values from different measurements.
Combination<-function(LikelihoodMatrix)
{
#print(LikelihoodMatrix);
Dimension<-dim(LikelihoodMatrix);
#print(Dimension);
if (Dimension[2]==2)
{
CombinedLikelihood<-LikelihoodMatrix[2];
} else
{
CombinedLikelihood<-LikelihoodMatrix[2];
for (i in 3:Dimension[2])
{
CombinedLikelihood<-CombinedLikelihood*LikelihoodMatrix[i];
}
}
return(CombinedLikelihood);
}
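## Example usage (illustrative; toy likelihoods invented for demonstration).
## Column 1 holds the parameter grid, columns 2..n the per-measurement likelihoods.
theta <- seq(0, 1, by = 0.1)
ToyLikelihoods <- data.frame(
  theta = theta,
  meas1 = dnorm(theta, mean = 0.5, sd = 0.2),
  meas2 = dnorm(theta, mean = 0.6, sd = 0.3)
)
CombinedToy <- Combination(ToyLikelihoods)
#print(CombinedToy);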
|
[STATEMENT]
lemma val_p_int_pow:
"val (\<pp>[^](k::int)) = k"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. val (\<pp> [^] k) = eint k
[PROOF STEP]
using ord_p_pow_int p_intpow_closed(2) val_ord
[PROOF STATE]
proof (prove)
using this:
ord (\<pp> [^] ?n) = ?n
\<pp> [^] ?n \<in> nonzero Q\<^sub>p
?a \<in> nonzero Q\<^sub>p \<Longrightarrow> val ?a = eint (ord ?a)
goal (1 subgoal):
1. val (\<pp> [^] k) = eint k
[PROOF STEP]
by presburger |
lemma LIMSEQ_iff: fixes L :: "'a::real_normed_vector" shows "(X \<longlonglongrightarrow> L) = (\<forall>r>0. \<exists>no. \<forall>n \<ge> no. norm (X n - L) < r)" |
(* Use of this source code is governed by the license described *
* in the LICENSE file at the root of the source tree. *)
Set Implicit Arguments.
Require Import fcf.Crypto.
Require Import fcf.Encryption.
Require Import fcf.CompFold.
Require Import fcf.FCF.
Section EncryptClassify.
Variable Plaintext Ciphertext Key : Set.
Variable KeyGen : Comp Key.
Variable Encrypt : Key -> Plaintext -> Comp Ciphertext.
Variable Decrypt : Key -> Ciphertext -> Plaintext.
Hypothesis Ciphertext_EqDec : EqDec Ciphertext.
Variable A_State : Set.
Variable A1 : (Plaintext -> Comp Ciphertext) -> Comp (list (Plaintext * Plaintext) * (Plaintext * Plaintext) * A_State).
Variable A2 : A_State -> (Plaintext -> Comp Ciphertext) -> list (Ciphertext * bool) -> Ciphertext -> Comp bool.
Definition IND_CPA_SecretKey_Class_G :=
key <-$ KeyGen ;
[lsP, p, s_A] <-$3 A1 (Encrypt key);
[p0, p1] <-2 p;
lsC <-$ compMap _ (fun p => b <-$ {0, 1}; p_b <- if b then (snd p) else (fst p); c_b <-$ Encrypt key p_b; ret (c_b, b)) lsP;
b <-$ {0, 1};
pb <- if b then p1 else p0;
c <-$ Encrypt key pb;
b' <-$ A2 s_A (Encrypt key) lsC c;
ret (eqb b b').
End EncryptClassify.
|
# Data frame
continents_df <- data.frame(
continents = c("Africa", "Antarctica", "Asia", "Europe", "North America", "South America", "Oceania"),
area = c(30.3, 14, 44.5, 10.1, 24.7, 17.8, 8.6),
population = c(1287.9, 0.004, 4545.1, 742.6, 587.6, 428.2, 41.2),
shares_border = c(TRUE, FALSE, TRUE, TRUE, TRUE, TRUE, FALSE)
)
# Check structure of data frame
str(continents_df)
# Comparing data frame selections
africa_area <- continents_df[1, "area"]
asia_area <- continents_df[3, "area"]
africa_area > asia_area
# Only show continents where shares_border is TRUE
continents_df[continents_df$shares_border, ]
# Ordering data frame
pop_order <- order(continents_df$population, decreasing = TRUE)
continents_df[pop_order, ] |
N-88 was unofficially designated around 1937, connecting from N-29, to N-86 and N-19 in Bridgeport. The route remained relatively the same as the state highway system was officially designated. Before 1955, Nebraska did not have an adequate legal instrument to define the state highway system. By 1960, N-19 was renumbered to US 385, and US 26 was rerouted north near Bridgeport. The old alignment became part of N-92. Two years later, N-29 was renumbered to N-71. Between 1981-82, a road appeared on the official state map, extending from WYO 151 to N-71. That road became part of N-88 by 1986. No significant changes have been made since.
|
State Before: 𝕜 : Type u_1
inst✝⁸ : NontriviallyNormedField 𝕜
E : Type u_2
inst✝⁷ : NormedAddCommGroup E
inst✝⁶ : NormedSpace 𝕜 E
F : Type u_3
inst✝⁵ : NormedAddCommGroup F
inst✝⁴ : NormedSpace 𝕜 F
G : Type ?u.441700
inst✝³ : NormedAddCommGroup G
inst✝² : NormedSpace 𝕜 G
G' : Type ?u.441795
inst✝¹ : NormedAddCommGroup G'
inst✝ : NormedSpace 𝕜 G'
f f₀ f₁ g : E → F
f' f₀' f₁' g' e : E →L[𝕜] F
x : E
s t : Set E
L L₁ L₂ : Filter E
hf : HasStrictFDerivAt f f' x
hg : HasStrictFDerivAt g g' x
⊢ HasStrictFDerivAt (fun x => f x - g x) (f' - g') x State After: no goals Tactic: simpa only [sub_eq_add_neg] using hf.add hg.neg
…the verdict comes at a time when both governments are facing crises of legitimacy. Hun Sen has reason to celebrate the ruling. It may well boost his standing after he won re-election in July by a surprisingly thin margin. As ever, it is Thailand's domestic politics that remains the main source of tension between Thailand and Cambodia, and it may prove so again.
The current Thai prime minister, Yingluck Shinawatra, and her Pheu Thai party government, have suffered badly as a result of a brazen and aborted attempt to ram through an amnesty bill. The motion would have paved the way for the return of her brother, Thaksin Shinawatra, a former Thai prime minister who was deposed in a 2006 coup and as polarising a figure as is known in this part of the world. The mood among the ultra-royalists and -nationalists who oppose him, along with the Democrat Party camped in the streets of Bangkok, is already upbeat. They have thwarted the return of the man they most loathe (though they still have to figure out how to beat the Shinawatra clan in elections).
(* This code is copyrighted by its authors; it is distributed under *)
(* the terms of the LGPL license (see LICENSE and description files) *)
From Coq Require Export List.
From Coq Require Import Arith Inclusion Inverse_Image Wf_nat Relation_Definitions.
From Coq Require Import Relation_Operators Lexicographic_Product.
From Buchberger Require Import Relation_Operators_compat LetP.
From Buchberger Require Export WfR0.
Set Default Proof Using "Type".
Section Buch.
Load hCoefStructure.
Load hOrderStructure.
Load hWfRO.
Inductive stable :
list (poly A0 eqA ltM) -> list (poly A0 eqA ltM) -> Prop :=
stable0 :
forall P Q : list (poly A0 eqA ltM),
(forall a : poly A0 eqA ltM,
Cb A A0 eqA plusA multA eqA_dec n ltM ltM_dec a P ->
Cb A A0 eqA plusA multA eqA_dec n ltM ltM_dec a Q) ->
(forall a : poly A0 eqA ltM,
Cb A A0 eqA plusA multA eqA_dec n ltM ltM_dec a Q ->
Cb A A0 eqA plusA multA eqA_dec n ltM ltM_dec a P) ->
stable P Q.
Local Hint Resolve stable0 : core.
Theorem stable_refl : forall Q : list (poly A0 eqA ltM), stable Q Q.
Proof.
auto.
Qed.
Theorem stable_trans :
forall Q y R : list (poly A0 eqA ltM),
stable Q y -> stable y R -> stable Q R.
Proof.
intros Q y R H' H'0; inversion H'; inversion H'0; auto.
Qed.
Theorem stable_sym :
forall Q R : list (poly A0 eqA ltM), stable R Q -> stable Q R.
Proof.
intros Q R H'; elim H'; auto.
Qed.
Local Hint Resolve (Cb_in _ _ _ _ _ _ _ _ _ cs eqA_dec _ _ ltM_dec os) : core.
Theorem Cb_stable :
forall (a : poly A0 eqA ltM) (Q : list (poly A0 eqA ltM)),
Cb A A0 eqA plusA multA eqA_dec n ltM ltM_dec a Q ->
stable Q (addEnd A A0 eqA n ltM a Q).
Proof using os minusA invA divA cs A1.
intros a Q H'0; apply stable0; auto.
intros a0 H'1.
apply Cb_trans with (b := a) (1 := cs); auto.
Qed.
Theorem in_incl :
forall (A : Type) (p q : list A) (a b : A), incl p q -> In a p -> In a q.
Proof.
auto.
Qed.
Inductive reds :
poly A0 eqA ltM -> poly A0 eqA ltM -> list (poly A0 eqA ltM) -> Prop :=
| reds0 :
forall (P : list (poly A0 eqA ltM)) (a b : poly A0 eqA ltM),
red A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec
(spolyp A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM
ltM_dec os a b) P -> reds a b P
| reds1 :
forall (P : list (poly A0 eqA ltM)) (a b c : poly A0 eqA ltM),
In c P ->
reds a c P ->
reds c b P ->
divp A A0 eqA multA divA n ltM
(ppcp A A0 A1 eqA plusA invA minusA multA divA cs n ltM a b) c ->
reds a b P.
Theorem reds_com :
forall (P : list (poly A0 eqA ltM)) (a b : poly A0 eqA ltM),
reds a b P -> reds b a P.
Proof.
intros P a b H'; elim H'; simpl in |- *; auto.
intros P0 a0 b0 H'0.
apply reds0; auto.
apply red_com; auto.
intros P0 a0 b0 c H'0 H'1 H'2 H'3 H'4 H'5.
apply reds1 with (c := c); auto.
apply divp_ppc; auto.
Qed.
(* Now we are ready!! We start with the definition of genCpC *)
Inductive cpRes : Type :=
| Keep : forall P : list (poly A0 eqA ltM), cpRes
| DontKeep : forall P : list (poly A0 eqA ltM), cpRes.
Definition getRes : cpRes -> list (poly A0 eqA ltM).
intros H'; case H'; auto.
Defined.
Definition addRes : poly A0 eqA ltM -> cpRes -> cpRes.
intros i H'; case H'.
intros H'0; exact (Keep (i :: H'0)).
intros H'0; exact (DontKeep (i :: H'0)).
Defined.
Definition slice :
poly A0 eqA ltM -> poly A0 eqA ltM -> list (poly A0 eqA ltM) -> cpRes.
intros i a q; elim q; clear q.
case (foreigner_dec A A0 A1 eqA multA n ltM i a).
intros H; exact (DontKeep nil).
intros H; exact (Keep nil).
intros b q1 Rec.
case
(divp_dec _ _ _ _ _ _ _ _ _ cs n ltM
(ppcp A A0 A1 eqA plusA invA minusA multA divA cs n ltM i a) b).
intros divp10; exact (DontKeep (b :: q1)).
intros divp10.
case
(divp_dec _ _ _ _ _ _ _ _ _ cs n ltM
(ppcp A A0 A1 eqA plusA invA minusA multA divA cs n ltM i b) a).
intros divp11; exact Rec.
intros divp11; exact (addRes b Rec).
Defined.
Definition slicef :
poly A0 eqA ltM ->
poly A0 eqA ltM -> list (poly A0 eqA ltM) -> list (poly A0 eqA ltM).
intros i a q; case (slice i a q); auto.
Defined.
Theorem slicef_incl :
forall (a b : poly A0 eqA ltM) (P : list (poly A0 eqA ltM)),
incl (slicef a b P) P.
Proof.
intros a b P; elim P; simpl in |- *; auto.
unfold slicef in |- *; simpl in |- *; auto.
case (foreigner_dec A A0 A1 eqA multA n ltM a b); intros H; apply incl_refl;
auto with datatypes.
intros c; unfold slicef in |- *; simpl in |- *.
case
(divp_dec _ _ _ _ _ _ _ _ _ cs n ltM
(ppcp A A0 A1 eqA plusA invA minusA multA divA cs n ltM a b) c);
auto with datatypes.
case
(divp_dec _ _ _ _ _ _ _ _ _ cs n ltM
(ppcp A A0 A1 eqA plusA invA minusA multA divA cs n ltM a c) b);
auto with datatypes.
intros H' H'0 l; case (slice a b l); simpl in |- *; auto with datatypes.
Qed.
Theorem slice_inv :
forall (a b : poly A0 eqA ltM) (P : list (poly A0 eqA ltM))
(c : poly A0 eqA ltM),
In c P ->
In c (getRes (slice a b P)) \/
divp A A0 eqA multA divA n ltM
(ppcp A A0 A1 eqA plusA invA minusA multA divA cs n ltM a c) b.
Proof.
intros a b P; elim P; simpl in |- *; auto.
intros c H'; elim H'.
intros p aP1;
case
(divp_dec _ _ _ _ _ _ _ _ _ cs n ltM
(ppcp A A0 A1 eqA plusA invA minusA multA divA cs n ltM a b) p);
simpl in |- *; auto.
case
(divp_dec _ _ _ _ _ _ _ _ _ cs n ltM
(ppcp A A0 A1 eqA plusA invA minusA multA divA cs n ltM a p) b);
auto.
intros H' H'0 H'1 c H'2; elim H'2;
[ intros H'3; rewrite <- H'3; clear H'2 | intros H'3; clear H'2 ];
auto.
case (slice a b aP1); simpl in |- *; auto.
intros P0 H' H'0 H'1 c H'2; elim H'2;
[ intros H'3; rewrite <- H'3; clear H'2 | intros H'3; clear H'2 ];
auto.
elim (H'1 c); [ intros H'5; try exact H'5 | intros H'5 | idtac ]; auto.
intros P0 H' H'0 H'1 c H'2; elim H'2;
[ intros H'3; rewrite <- H'3; clear H'2 | intros H'3; clear H'2 ];
auto.
elim (H'1 c); [ intros H'5; try exact H'5 | intros H'5 | idtac ]; auto.
Qed.
Theorem slice_cons :
forall (i a : poly A0 eqA ltM) (aP Q : list (poly A0 eqA ltM)),
slice i a aP = DontKeep Q ->
(exists c : poly A0 eqA ltM,
In c Q /\
divp A A0 eqA multA divA n ltM
(ppcp A A0 A1 eqA plusA invA minusA multA divA cs n ltM i a) c) \/
foreigner A A0 A1 eqA multA n ltM i a.
Proof.
intros i a aP; elim aP.
simpl in |- *; case (foreigner_dec A A0 A1 eqA multA n ltM i a); auto.
intros H' Q H'0; inversion H'0.
intros a0 l H' Q; simpl in |- *.
case
(divp_dec _ _ _ _ _ _ _ _ _ cs n ltM
(ppcp A A0 A1 eqA plusA invA minusA multA divA cs n ltM i a) a0);
auto.
intros H'0 H'1; inversion H'1; auto.
left; exists a0; split; simpl in |- *; auto.
case
(divp_dec _ _ _ _ _ _ _ _ _ cs n ltM
(ppcp A A0 A1 eqA plusA invA minusA multA divA cs n ltM i a0) a);
auto.
generalize H'; clear H'; case (slice i a l); simpl in |- *; auto.
intros P H' H'0 H'1 H'2; inversion H'2.
intros P H' H'0 H'1 H'2; inversion H'2.
elim (H' P);
[ intros H'5; elim H'5; intros c E; elim E; intros H'6 H'7; clear E H'5
| intros H'5
| idtac ]; auto.
left; exists c; split; simpl in |- *; auto.
Qed.
Definition Tl : list (poly A0 eqA ltM) -> list (poly A0 eqA ltM) -> Prop.
exact (fun x y : list (poly A0 eqA ltM) => length x < length y).
Defined.
Theorem wf_Tl : well_founded Tl.
Proof.
apply (wf_inverse_image _ _ lt (length (A:=poly A0 eqA ltM))); auto.
generalize lt_wf; auto.
Qed.
Scheme Sdep := Induction for prod Sort Prop.
Theorem slice_Tl :
forall (a ia : poly A0 eqA ltM) (L : list (poly A0 eqA ltM)),
Tl (slicef a ia L) (a :: L).
Proof.
intros a b P; elim P; simpl in |- *; auto.
unfold slicef in |- *; simpl in |- *; auto.
case (foreigner_dec A A0 A1 eqA multA n ltM a b); unfold Tl in |- *;
simpl in |- *; auto.
intros c l.
unfold slicef in |- *; simpl in |- *; auto.
case
(divp_dec _ _ _ _ _ _ _ _ _ cs n ltM
(ppcp A A0 A1 eqA plusA invA minusA multA divA cs n ltM a b) c);
auto.
unfold Tl in |- *; simpl in |- *; auto.
case
(divp_dec _ _ _ _ _ _ _ _ _ cs n ltM
(ppcp A A0 A1 eqA plusA invA minusA multA divA cs n ltM a c) b);
auto.
intros H' H'0; case (slice a b l); simpl in |- *; auto.
unfold Tl in |- *; simpl in |- *; auto.
unfold Tl in |- *; simpl in |- *; auto.
unfold Tl in |- *; intros H' H'0; case (slice a b l); simpl in |- *;
auto with arith.
Qed.
Inductive genPcP :
poly A0 eqA ltM ->
list (poly A0 eqA ltM) ->
list (poly A0 eqA ltM) -> list (poly A0 eqA ltM) -> Prop :=
| genPcP0 :
forall (i : poly A0 eqA ltM) (L : list (poly A0 eqA ltM)),
genPcP i nil L L
| genPcP1 :
forall (L L1 L2 L3 : list _) (a i : poly A0 eqA ltM),
slice i a L1 = Keep L2 ->
genPcP i L2 L L3 ->
genPcP i (a :: L1) L
(addEnd A A0 eqA n ltM
(spolyp A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM
ltM_dec os i a) L3)
| genPcP2 :
forall (L L1 L2 L3 : list _) (a i : poly A0 eqA ltM),
slice i a L1 = DontKeep L2 ->
genPcP i L2 L L3 -> genPcP i (a :: L1) L L3.
Local Hint Resolve genPcP0 : core.
Theorem genPcP_spolyp1 :
forall (i : poly A0 eqA ltM) (L L1 L2 : list _),
genPcP i L1 L L2 ->
forall a : poly A0 eqA ltM,
In a L2 ->
(exists b : poly A0 eqA ltM,
In b L1 /\
a =
spolyp A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM ltM_dec
os i b) \/ In a L.
Proof.
intros i L L1 L2 H'; elim H'; clear H'; simpl in |- *; auto.
intros L0 L3 L4 L5 a i0 H' H'0 H'1 a0 H'2.
case (addEnd_cons A A0 eqA n ltM) with (1 := H'2); auto; intros H'7.
rewrite H'7; auto.
left; exists a; split; simpl in |- *; auto.
elim (H'1 a0); auto.
intros H'3; case H'3; intros b E; case E; intros H'4 H'5; rewrite H'5;
clear E H'3.
left; exists b; split; auto.
right; try assumption.
generalize (slicef_incl i0 a L3); unfold slicef in |- *; rewrite H'; auto.
intros L0 L3 L4 L5 a i0 H' H'0 H'1 a0 H'2.
elim (H'1 a0);
[ intros H'5; elim H'5; intros b E; elim E; intros H'6 H'7; rewrite H'7;
clear E H'5
| intros H'5
| idtac ]; auto.
left; exists b; split; [ right | idtac ]; auto.
generalize (slicef_incl i0 a L3); unfold slicef in |- *; rewrite H'; auto.
Qed.
Local Hint Resolve (addEnd_id2 A A0 eqA n ltM) : core.
Local Hint Resolve (addEnd_id1 A A0 eqA n ltM) : core.
Theorem genPcP_incl :
forall (i : poly A0 eqA ltM) (L L1 L2 : list _),
genPcP i L1 L L2 -> incl L L2.
Proof.
intros i L L1 L2 H'; elim H'; simpl in |- *; auto with datatypes.
intros L0 L3 L4 L5 a i0 H'0 H'1 H'2.
unfold incl in |- *; simpl in |- *; auto.
Qed.
Lemma spolyp_cons_genPcP0 :
forall (aP R Q : list _) (i : poly A0 eqA ltM),
genPcP i aP R Q ->
~ BuchAux.zerop A A0 eqA n ltM i ->
forall b : poly A0 eqA ltM,
In b aP ->
~ BuchAux.zerop A A0 eqA n ltM b ->
exists c : poly A0 eqA ltM,
In c aP /\
(In
(spolyp A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM
ltM_dec os i c) Q \/ foreigner A A0 A1 eqA multA n ltM i c) /\
divp A A0 eqA multA divA n ltM
(ppcp A A0 A1 eqA plusA invA minusA multA divA cs n ltM i b) c.
Proof.
intros aP R Q i H'; elim H'; clear H' i aP R Q; simpl in |- *; auto.
intros i L H' b H'0; elim H'0.
intros L L1 L2 L3 a i H' H'0 H'1 H'2 b H'3 H'4.
cut (incl L2 L1);
[ intros incl0
| generalize (slicef_incl i a L1); unfold slicef in |- *; rewrite H' ];
auto.
elim H'3; [ intros H'5; rewrite <- H'5; clear H'3 | intros H'5; clear H'3 ];
auto.
exists a; split; [ idtac | split; [ left | idtac ] ]; auto.
rewrite H'5; auto.
apply zerop_ddivp_ppc; auto.
elim (slice_inv i a L1 b); [ intros H'10 | intros H'10 | idtac ]; auto.
rewrite H' in H'10; simpl in H'10; auto.
lapply H'1;
[ intros H'3; elim (H'3 b);
[ intros c E; elim E; intros H'9 H'11; elim H'11; intros H'12 H'13;
elim H'12;
[ intros H'14; clear H'12 H'11 E H'1
| intros H'14; clear H'12 H'11 E H'1 ]
| clear H'1
| clear H'1 ]
| clear H'1 ]; auto.
exists c; split; [ right | idtac ]; auto.
exists c; split; [ idtac | split; [ right | idtac ] ]; auto.
exists a; split; [ idtac | split ]; auto.
intros L L1 L2 L3 a i H' H'0 H'1 H'2 b H'3 H'4.
cut (incl L2 L1);
[ intros incl0
| generalize (slicef_incl i a L1); unfold slicef in |- *; rewrite H' ];
auto.
elim H'3; [ intros H'5; rewrite <- H'5; clear H'3 | intros H'5; clear H'3 ];
auto.
elim (slice_cons i a L1 L2);
[ intros H'8; elim H'8; intros c E; elim E; intros H'9 H'10; clear E H'8
| intros H'8
| idtac ]; auto.
lapply H'1;
[ intros H'3; elim (H'3 c);
[ intros c0 E; elim E; intros H'11 H'12; elim H'12; intros H'13 H'14;
elim H'13;
[ intros H'15; clear H'13 H'12 E H'1
| intros H'15; clear H'13 H'12 E H'1 ]
| clear H'1
| clear H'1 ]
| clear H'1 ]; auto.
exists c0; split; [ idtac | split; [ left | idtac ] ]; auto.
apply
(divp_trans _ _ _ _ _ _ _ _ _ cs n ltM)
with (y := ppcp A A0 A1 eqA plusA invA minusA multA divA cs n ltM i c);
auto.
apply divP_ppc; auto.
apply divp_ppc; auto.
apply zerop_ddivp_ppc; auto.
rewrite H'5; auto.
exists c0; split; [ idtac | split; [ right | idtac ] ]; auto.
apply
(divp_trans _ _ _ _ _ _ _ _ _ cs n ltM)
with (y := ppcp A A0 A1 eqA plusA invA minusA multA divA cs n ltM i c);
auto.
rewrite H'5; auto.
apply divP_ppc; auto.
apply divp_ppc; auto.
apply zerop_ddivp_ppc; auto.
rewrite <- H'5; auto.
apply divp_nzeropr with (1 := H'10); auto.
exists a; split; [ idtac | split; [ right | idtac ] ]; auto.
rewrite H'5; auto.
apply zerop_ddivp_ppc; auto.
elim (slice_inv i a L1 b); [ intros H'10 | intros H'10 | idtac ]; auto.
rewrite H' in H'10; simpl in H'10; auto.
lapply H'1;
[ intros H'3; elim (H'3 b);
[ intros c E; elim E; intros H'9 H'11; elim H'11; intros H'12 H'13;
elim H'12;
[ intros H'14; clear H'12 H'11 E H'1
| intros H'14; clear H'12 H'11 E H'1 ]
| clear H'1
| clear H'1 ]
| clear H'1 ]; auto.
exists c; split; [ right | idtac ]; auto.
exists c; split; [ idtac | split; [ right | idtac ] ]; auto.
elim (slice_cons i a L1 L2);
[ intros H'8; elim H'8; intros c E; elim E; intros H'9 H'11; clear E H'8
| intros H'8
| idtac ]; auto.
lapply H'1;
[ intros H'3; elim (H'3 c);
[ intros c0 E; elim E; intros H'12 H'13; elim H'13; intros H'14 H'15;
elim H'14;
[ intros H'16; clear H'14 H'13 E H'1
| intros H'16; clear H'14 H'13 E H'1 ]
| clear H'1
| clear H'1 ]
| clear H'1 ]; auto.
exists c0; split; [ idtac | split; [ left | idtac ] ]; auto.
apply
(divp_trans _ _ _ _ _ _ _ _ _ cs n ltM)
with (y := ppcp A A0 A1 eqA plusA invA minusA multA divA cs n ltM i c);
auto.
apply
(divp_trans _ _ _ _ _ _ _ _ _ cs n ltM)
with (y := ppcp A A0 A1 eqA plusA invA minusA multA divA cs n ltM i a);
auto.
apply divP_ppc; auto.
apply divp_ppc; auto.
apply zerop_ddivp_ppc; auto.
apply divP_ppc; auto.
apply divp_ppc; auto.
apply zerop_ddivp_ppc; auto.
apply divp_nzeropr with (1 := H'10); auto.
exists c0; split; [ idtac | split; [ right | idtac ] ]; auto.
apply
(divp_trans _ _ _ _ _ _ _ _ _ cs n ltM)
with (y := ppcp A A0 A1 eqA plusA invA minusA multA divA cs n ltM i a);
auto.
apply divP_ppc; auto.
apply divp_ppc; auto.
apply zerop_ddivp_ppc; auto.
apply
(divp_trans _ _ _ _ _ _ _ _ _ cs n ltM)
with (y := ppcp A A0 A1 eqA plusA invA minusA multA divA cs n ltM i c);
auto.
apply divP_ppc; auto.
apply divp_ppc; auto.
apply zerop_ddivp_ppc; auto.
apply divp_nzeropr with (1 := H'10); auto.
apply divp_nzeropr with (1 := H'11); auto.
exists a; split; [ idtac | split; [ right | idtac ] ]; auto.
Qed.
Lemma spolyp_cons_genPcP :
forall (aP R Q : list _) (i : poly A0 eqA ltM),
genPcP i aP R Q ->
~ BuchAux.zerop A A0 eqA n ltM i ->
forall b : poly A0 eqA ltM,
In b aP ->
~ BuchAux.zerop A A0 eqA n ltM b ->
exists c : poly A0 eqA ltM,
In c aP /\
(In
(spolyp A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM
ltM_dec os i c) Q \/
red A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec
(spolyp A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM
ltM_dec os i c) (addEnd A A0 eqA n ltM i aP)) /\
divp A A0 eqA multA divA n ltM
(ppcp A A0 A1 eqA plusA invA minusA multA divA cs n ltM i b) c.
Proof.
intros aP R Q i H' H'0 b H'1 H'2.
lapply (spolyp_cons_genPcP0 aP R Q i);
[ intros H'7; lapply H'7;
[ intros H'8; elim (H'8 b);
[ intros c E; elim E; intros H'12 H'13; elim H'13; intros H'14 H'15;
elim H'14;
[ intros H'16; clear H'14 H'13 E H'7
| intros H'16; clear H'14 H'13 E H'7 ]
| clear H'7
| clear H'7 ]
| clear H'7 ]
| idtac ]; auto.
exists c; split; [ idtac | split; [ left | idtac ] ]; auto.
exists c; split; [ idtac | split; [ right | idtac ] ]; auto.
apply foreigner_red; auto.
Qed.
Theorem Cb_genPcP :
forall (i : poly A0 eqA ltM) (P Q R S : list (poly A0 eqA ltM)),
genPcP i P R Q ->
Cb A A0 eqA plusA multA eqA_dec n ltM ltM_dec i S ->
(forall a : poly A0 eqA ltM,
In a P -> Cb A A0 eqA plusA multA eqA_dec n ltM ltM_dec a S) ->
(forall a : poly A0 eqA ltM,
In a R -> Cb A A0 eqA plusA multA eqA_dec n ltM ltM_dec a S) ->
forall a : poly A0 eqA ltM,
In a Q -> Cb A A0 eqA plusA multA eqA_dec n ltM ltM_dec a S.
Proof.
intros i P Q R S H'; elim H'; simpl in |- *; auto.
intros L L1 L2 L3 a i0 H'0 H'1 H'2 H'3 H'4 H'5 a0 H'6.
case (addEnd_cons A A0 eqA n ltM) with (1 := H'6); auto; intros H'7.
rewrite H'7; auto.
apply Cb_sp; auto.
apply H'2; auto.
intros a1 H'8.
apply H'4; auto.
right.
generalize (slicef_incl i0 a L1); unfold slicef in |- *; rewrite H'0; auto.
intros L L1 L2 L3 a i0 H'0 H'1 H'2 H'3 H'4 H'5 a0 H'6; auto.
apply H'2; auto.
intros a1 H'7.
apply H'4; auto.
generalize (slicef_incl i0 a L1); unfold slicef in |- *; rewrite H'0; auto.
Qed.
Definition genPcPf0 :
forall (i : poly A0 eqA ltM) (aP R : list (poly A0 eqA ltM)),
{Q : list (poly A0 eqA ltM) | genPcP i aP R Q}.
intros i aP; pattern aP in |- *.
apply well_founded_induction_type with (A := list (poly A0 eqA ltM)) (R := Tl);
clear aP; auto.
try exact wf_Tl.
intros aP; case aP.
intros H' R; exists R; auto.
intros a L1 Rec L; generalize (@refl_equal _ (slice i a L1));
pattern (slice i a L1) at 2 in |- *; case (slice i a L1).
intros L2 H'.
lapply (Rec L2); [ intros H'1; elim (H'1 L); intros L3 E | idtac ]; auto.
exists
(addEnd A A0 eqA n ltM
(spolyp A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM ltM_dec
os i a) L3); auto.
apply genPcP1 with (L2 := L2); auto.
generalize (slice_Tl i a L1); unfold slicef in |- *; rewrite H';
simpl in |- *; auto.
intros L2 H'.
lapply (Rec L2); [ intros H'1; elim (H'1 L); intros L3 E | idtac ]; auto.
exists L3; auto.
apply genPcP2 with (L2 := L2); auto.
generalize (slice_Tl i a L1); unfold slicef in |- *; rewrite H';
simpl in |- *; auto.
Defined.
Definition genPcPf :
poly A0 eqA ltM ->
list (poly A0 eqA ltM) -> list (poly A0 eqA ltM) -> list (poly A0 eqA ltM).
intros i aP Q; case (genPcPf0 i aP Q).
intros x H'; exact x.
Defined.
(* The proof will carry on if we have the following 3 properties for
the function genPcPf *)
Theorem Cb_genPcPf :
forall (b : poly A0 eqA ltM) (P Q R : list (poly A0 eqA ltM)),
Cb A A0 eqA plusA multA eqA_dec n ltM ltM_dec b R ->
(forall a : poly A0 eqA ltM,
In a P -> Cb A A0 eqA plusA multA eqA_dec n ltM ltM_dec a R) ->
(forall a : poly A0 eqA ltM,
In a Q -> Cb A A0 eqA plusA multA eqA_dec n ltM ltM_dec a R) ->
forall a : poly A0 eqA ltM,
In a (genPcPf b P Q) -> Cb A A0 eqA plusA multA eqA_dec n ltM ltM_dec a R.
Proof.
intros b P Q R; unfold genPcPf in |- *; case (genPcPf0 b P Q).
intros x H' H'0 H'1 H'2 a H'3.
apply Cb_genPcP with (i := b) (P := P) (Q := x) (R := Q); auto.
Qed.
Theorem genPcPf_incl :
forall (a : poly A0 eqA ltM) (aL Q : list (poly A0 eqA ltM)),
incl Q (genPcPf a aL Q).
Proof.
intros a aL Q; unfold genPcPf in |- *; case (genPcPf0 a aL Q).
intros x H'.
apply genPcP_incl with (i := a) (L1 := aL); auto.
Qed.
Local Hint Resolve genPcPf_incl : core.
Theorem spolyp_addEnd_genPcPf :
forall (aP R Q : list (poly A0 eqA ltM)) (a b : poly A0 eqA ltM),
~ BuchAux.zerop A A0 eqA n ltM a ->
~ BuchAux.zerop A A0 eqA n ltM b ->
In b aP ->
exists c : poly A0 eqA ltM,
In c aP /\
(In
(spolyp A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM
ltM_dec os a c) (genPcPf a aP Q) \/
red A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec
(spolyp A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM
ltM_dec os a c) (addEnd A A0 eqA n ltM a aP)) /\
divp A A0 eqA multA divA n ltM
(ppcp A A0 A1 eqA plusA invA minusA multA divA cs n ltM a b) c.
Proof.
intros aP H' Q a b H'0 H'1 H'2.
unfold genPcPf in |- *.
case (genPcPf0 a aP Q).
intros x H'3.
apply spolyp_cons_genPcP with (R := Q); auto.
Qed.
(* Now we can define the optimized version of Buchberger *)
Definition genOCPf : list (poly A0 eqA ltM) -> list (poly A0 eqA ltM).
intros H'; elim H'.
exact (nil (A:=poly A0 eqA ltM)).
intros a l rec; exact (genPcPf a l rec).
Defined.
Theorem genOCPf_stable :
forall (a : poly A0 eqA ltM) (P : list (poly A0 eqA ltM)),
In a (genOCPf P) -> Cb A A0 eqA plusA multA eqA_dec n ltM ltM_dec a P.
Proof.
intros a P; generalize a; elim P; clear a P; simpl in |- *; auto.
intros a H; elim H.
intros a l H' a0 H'0.
apply Cb_genPcPf with (b := a) (P := l) (Q := genOCPf l); auto with datatypes.
apply Cb_id with (1 := cs); auto with datatypes.
intros; apply Cb_in1 with (1 := cs); auto.
apply Cb_id with (1 := cs); auto with datatypes.
intros; apply Cb_in1 with (1 := cs); auto.
Qed.
Inductive OBuch :
list (poly A0 eqA ltM) ->
list (poly A0 eqA ltM) -> list (poly A0 eqA ltM) -> Prop :=
| OBuch0 : forall aL : list (poly A0 eqA ltM), OBuch aL nil aL
| OBuch1 :
forall (a : poly A0 eqA ltM) (aP Q R : list (poly A0 eqA ltM)),
OBuch
(addEnd A A0 eqA n ltM
(nf A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM
ltM_dec os a aP) aP)
(genPcPf
(nf A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM
ltM_dec os a aP) aP Q) R ->
~
BuchAux.zerop A A0 eqA n ltM
(nf A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM ltM_dec
os a aP) -> OBuch aP (a :: Q) R
| OBuch2 :
forall (a : poly A0 eqA ltM) (aP Q R : list (poly A0 eqA ltM)),
OBuch aP Q R ->
BuchAux.zerop A A0 eqA n ltM
(nf A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM ltM_dec
os a aP) -> OBuch aP (a :: Q) R.
Local Hint Resolve OBuch0 OBuch2 : core.
Local Hint Resolve incl_refl incl_tl : core.
Theorem incl_addEnd1 :
forall (a : poly A0 eqA ltM) (L1 L2 : list (poly A0 eqA ltM)),
incl (addEnd A A0 eqA n ltM a L1) L2 -> incl (a :: L1) L2.
Proof.
unfold incl in |- *; simpl in |- *; auto.
intros a L1 L2 H' a0 H'0; case H'0;
[ intros H'1; rewrite <- H'1; clear H'0 | intros H'1; clear H'0 ];
auto.
Qed.
Theorem ObuchPincl :
forall aP R Q : list (poly A0 eqA ltM), OBuch aP Q R -> incl aP R.
Proof.
intros aP R Q H'; elim H'; simpl in |- *; auto.
intros a aP0 Q0 R0 H'0 H'1 H'2; try assumption.
apply
incl_tran
with
(m := nf A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM
ltM_dec os a aP0 :: aP0); simpl in |- *;
auto.
apply incl_addEnd1; auto.
Qed.
Theorem ObuchPred :
forall aP R Q : list (poly A0 eqA ltM),
OBuch aP Q R ->
forall a : poly A0 eqA ltM,
In a aP -> red A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec a R.
Proof.
intros aP R Q H'; elim H'; simpl in |- *; auto.
intros; apply red_cons with (1 := cs); auto.
Qed.
Theorem ObuchQred :
forall aP R Q : list (poly A0 eqA ltM),
OBuch aP Q R ->
forall a : poly A0 eqA ltM,
In a Q -> red A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec a R.
Proof.
intros aP R Q H'; elim H'; simpl in |- *; auto.
intros aL a H'0; elim H'0.
intros a aP0 Q0 R0 H'0 H'1 H'2 a0 H'3; elim H'3;
[ intros H'4; rewrite <- H'4; clear H'3 | intros H'4; clear H'3 ];
auto.
apply
red_incl
with
(1 := cs)
(p := addEnd A A0 eqA n ltM
(nf A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM
ltM_dec os a aP0) aP0); auto.
apply
ObuchPincl
with
(Q := genPcPf
(nf A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM
ltM_dec os a aP0) aP0 Q0); auto.
apply nf_red with (cs := cs) (os := os) (aP := aP0); simpl in |- *; auto.
unfold incl in |- *; auto.
apply red_cons with (1 := cs); auto.
apply H'1; auto.
apply
(genPcPf_incl
(nf A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM ltM_dec os
a aP0) aP0 Q0); auto.
intros a aP0 Q0 R0 H'0 H'1 H'2 a0 H'3; elim H'3;
[ intros H'4; rewrite <- H'4; clear H'3 | intros H'4; clear H'3 ];
auto.
apply red_incl with (1 := cs) (p := aP0); auto.
apply ObuchPincl with (Q := Q0); auto.
apply zerop_red with (cs := cs) (os := os); auto.
Qed.
Theorem OBuch_Stable :
forall P Q R : list (poly A0 eqA ltM),
OBuch P Q R ->
(forall a : poly A0 eqA ltM,
In a Q -> Cb A A0 eqA plusA multA eqA_dec n ltM ltM_dec a P) ->
stable P R.
Proof.
intros P Q R H'; elim H'; simpl in |- *; auto.
intros a aP Q0 R0 H'0 H'1 H'2 H'3.
apply
stable_trans
with
(y := addEnd A A0 eqA n ltM
(nf A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM
ltM_dec os a aP) aP); auto.
apply stable0; auto.
intros a0 H'4.
apply
Cb_trans
with
(1 := cs)
(b := nf A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM
ltM_dec os a aP); auto.
apply nf_Cb; auto.
apply H'1; auto.
intros a0 H'4.
apply
Cb_genPcPf
with
(b := nf A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM
ltM_dec os a aP)
(P := aP)
(Q := Q0); auto.
apply Cb_id with (1 := cs); auto.
intros; apply Cb_in with (1 := cs); auto.
apply Cb_id with (1 := cs); auto.
Qed.
Inductive redIn :
poly A0 eqA ltM ->
poly A0 eqA ltM ->
list (poly A0 eqA ltM) ->
list (poly A0 eqA ltM) -> list (poly A0 eqA ltM) -> Prop :=
| redIn0b :
forall (P Q R : list (poly A0 eqA ltM)) (a b : poly A0 eqA ltM),
redIn b a P Q R -> redIn a b P Q R
| redIn0 :
forall (P Q R : list (poly A0 eqA ltM)) (a b : poly A0 eqA ltM),
In
(spolyp A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM
ltM_dec os a b) Q -> redIn a b P Q R
| redIn1 :
forall (P Q R : list (poly A0 eqA ltM)) (a b : poly A0 eqA ltM),
red A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec
(spolyp A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM
ltM_dec os a b) R -> redIn a b P Q R
| redIn2 :
forall (P Q R : list (poly A0 eqA ltM)) (a b c : poly A0 eqA ltM),
In c P ->
redIn a c P Q R ->
redIn b c P Q R ->
divp A A0 eqA multA divA n ltM
(ppcp A A0 A1 eqA plusA invA minusA multA divA cs n ltM a b) c ->
redIn a b P Q R.
Local Hint Resolve redIn1 redIn0 : core.
Remark lem_redIn_nil :
forall (aP Q R : list (poly A0 eqA ltM)) (a b : poly A0 eqA ltM),
In a R -> In b R -> redIn a b aP Q R -> Q = nil -> aP = R -> reds a b R.
Proof.
intros aP Q R a b H' H'0 H'1; elim H'1; auto.
intros P Q0 R0 a0 b0 H'2 H'3 H'4 H'5.
apply reds_com; auto.
intros P Q0 R0 a0 b0 H'2 H'3 H'4.
rewrite H'3 in H'2; elim H'2.
intros P Q0 R0 a0 b0 H'2 H'3 H'4; rewrite <- H'4.
rewrite H'4.
apply reds0; auto.
intros P Q0 R0 a0 b0 c H'2 H'3 H'4 H'5 H'6 H'7 H'8 H'9.
apply reds1 with (c := c); auto.
rewrite <- H'9; auto.
apply reds_com; auto.
Qed.
Theorem redIn_nil :
forall (R : list (poly A0 eqA ltM)) (a b : poly A0 eqA ltM),
In a R -> In b R -> redIn a b R nil R -> reds a b R.
Proof.
intros R a b H' H'0 H'1.
apply lem_redIn_nil with (aP := R) (Q := nil (A:=poly A0 eqA ltM)); auto.
Qed.
Remark lem_redln_cons :
forall (aP R Q : list (poly A0 eqA ltM)) (a b : poly A0 eqA ltM),
In a aP ->
In b aP ->
redIn a b aP Q R ->
forall (c : poly A0 eqA ltM) (Q1 : list (poly A0 eqA ltM)),
Q = c :: Q1 ->
red A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec c R ->
redIn a b aP Q1 R.
Proof.
intros aP R Q a b H' H'0 H'1; elim H'1; auto.
intros P Q0 R0 a0 b0 H'2 H'3 c Q1 H'4 H'5.
apply redIn0b; auto.
apply H'3 with (c := c); auto.
intros P Q0 R0 a0 b0 H'2 c Q1 H'3 H'4.
rewrite H'3 in H'2; elim H'2; auto.
intros H'5; rewrite H'5 in H'4; auto.
intros P Q0 R0 a0 b0 c H'2 H'3 H'4 H'5 H'6 H'7 c0 Q1 H'8 H'9.
apply redIn2 with (c := c); auto.
apply (H'4 c0); auto.
apply (H'6 c0); auto.
Qed.
Theorem redln_cons :
forall (aP R Q : list (poly A0 eqA ltM)) (a b c : poly A0 eqA ltM),
In a aP ->
In b aP ->
redIn a b aP (c :: Q) R ->
red A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec c R ->
redIn a b aP Q R.
Proof.
intros aP R Q a b c H' H'0 H'1 H'2; try assumption.
apply lem_redln_cons with (Q := c :: Q) (c := c); auto.
Qed.
Theorem redInclP :
forall (P Q R : list (poly A0 eqA ltM)) (a b : poly A0 eqA ltM),
redIn a b P Q R ->
forall P1 : list (poly A0 eqA ltM), incl P P1 -> redIn a b P1 Q R.
Proof.
intros P Q R a b H'; elim H'; auto.
intros P0 Q0 R0 a0 b0 H'0 H'1 P1 H'2.
apply redIn0b; auto.
intros P0 Q0 R0 a0 b0 c H'0 H'1 H'2 H'3 H'4 H'5 Q1 H'6.
apply redIn2 with (c := c); auto.
Qed.
Theorem redInInclQ :
forall (P Q R : list (poly A0 eqA ltM)) (a b : poly A0 eqA ltM),
redIn a b P Q R ->
forall Q1 : list (poly A0 eqA ltM), incl Q Q1 -> redIn a b P Q1 R.
Proof.
intros P Q R a b H'; elim H'; auto.
intros P0 Q0 R0 a0 b0 H'0 H'1 Q1 H'2; try assumption.
apply redIn0b; auto.
intros P0 Q0 R0 a0 b0 c H'0 H'1 H'2 H'3 H'4 H'5 Q1 H'6; try assumption.
apply redIn2 with (c := c); auto.
Qed.
Theorem redInclR :
forall (P Q R : list (poly A0 eqA ltM)) (a b : poly A0 eqA ltM),
redIn a b P Q R ->
forall R1 : list (poly A0 eqA ltM), incl R R1 -> redIn a b P Q R1.
Proof.
intros P Q R a b H'; elim H'; simpl in |- *; auto.
intros P0 Q0 R0 a0 b0 H'0 H'1 R1 H'2; try assumption.
apply redIn0b; auto.
intros P0 Q0 R0 a0 b0 H'0 R1 H'1; try assumption.
apply redIn1; auto.
apply red_incl with (1 := cs) (p := R0); auto.
intros P0 Q0 R0 a0 b0 c H'0 H'1 H'2 H'3 H'4 H'5 R1 H'6.
apply redIn2 with (c := c); auto.
Qed.
Remark lem_redln_cons_gen :
forall (aP R Q : list (poly A0 eqA ltM)) (a b : poly A0 eqA ltM),
In a aP ->
In b aP ->
redIn a b aP Q R ->
forall (c : poly A0 eqA ltM) (Q1 : list (poly A0 eqA ltM)),
incl
(addEnd A A0 eqA n ltM
(nf A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM ltM_dec
os c aP) aP) R ->
Q = c :: Q1 ->
redIn a b
(addEnd A A0 eqA n ltM
(nf A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM ltM_dec
os c aP) aP) Q1 R.
Proof.
intros aP R Q a b H' H'0 H'1; elim H'1; auto.
intros P Q0 R0 a0 b0 H'2 H'3 c Q1 H'4 H'5.
apply redIn0b; auto.
intros P Q0 R0 a0 b0 H'2 c Q1 H'3 H'4.
rewrite H'4 in H'2; elim H'2; auto.
intros H'5; rewrite H'5.
apply redIn1; auto.
apply nf_red with (aP := P) (cs := cs) (os := os); auto.
apply
incl_tran
with
(m := nf A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM
ltM_dec os c P :: P); simpl in |- *; auto.
apply incl_addEnd1; auto.
apply red_cons with (1 := cs); auto.
apply
in_incl
with
(p := nf A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM
ltM_dec os c P :: P); auto.
apply incl_addEnd1; auto.
rewrite H'5; simpl in |- *; auto.
intros P Q0 R0 a0 b0 c H'2 H'3 H'4 H'5 H'6 H'7 c0 Q1 H'8 H'9.
apply redIn2 with (c := c); auto.
Qed.
Theorem redln_cons_gen :
forall (aP R Q : list (poly A0 eqA ltM)) (a b c : poly A0 eqA ltM),
In a aP ->
In b aP ->
redIn a b aP (c :: Q) R ->
incl
(addEnd A A0 eqA n ltM
(nf A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM ltM_dec
os c aP) aP) R ->
redIn a b
(addEnd A A0 eqA n ltM
(nf A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM ltM_dec
os c aP) aP) Q R.
Proof.
intros aP R Q a b c H' H'0 H'1 H'2.
apply lem_redln_cons_gen with (Q := c :: Q); auto.
Qed.
Local Hint Resolve redln_cons_gen : core.
Theorem red_gen_in :
forall (a : poly A0 eqA ltM) (aP R Q : list (poly A0 eqA ltM)),
~
BuchAux.zerop A A0 eqA n ltM
(nf A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM ltM_dec os a
aP) ->
OBuch
(addEnd A A0 eqA n ltM
(nf A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM ltM_dec
os a aP) aP)
(genPcPf
(nf A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM ltM_dec
os a aP) aP Q) R ->
(forall b c : poly A0 eqA ltM, In b aP -> In c aP -> redIn b c aP (a :: Q) R) ->
forall b : poly A0 eqA ltM,
In b aP ->
~ BuchAux.zerop A A0 eqA n ltM b ->
redIn
(nf A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM ltM_dec os a
aP) b
(addEnd A A0 eqA n ltM
(nf A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM ltM_dec
os a aP) aP)
(genPcPf
(nf A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM ltM_dec
os a aP) aP Q) R.
Proof.
intros a aP R Q H' H'0 H'1 b H'2 H'3.
lapply (spolyp_addEnd_genPcPf aP);
[ intros H'5;
elim
(H'5 Q
(nf A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM ltM_dec
os a aP) b);
[ intros c E; elim E; intros H'12 H'13; elim H'13; intros H'14 H'15;
elim H'14;
[ intros H'16; clear H'14 H'13 E | intros H'16; clear H'14 H'13 E ]
| idtac
| idtac
| idtac ]
| idtac ]; auto.
apply redIn2 with (c := c); simpl in |- *; auto.
apply redln_cons_gen; auto.
apply redInInclQ with (Q := a :: Q); auto with datatypes.
apply
ObuchPincl
with
(Q := genPcPf
(nf A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM
ltM_dec os a aP) aP Q); auto.
apply redIn2 with (c := c); simpl in |- *; auto.
apply redIn1.
apply
red_incl
with
(p := addEnd A A0 eqA n ltM
(nf A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM
ltM_dec os a aP) aP)
(1 := cs); auto.
apply
ObuchPincl
with
(Q := genPcPf
(nf A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM
ltM_dec os a aP) aP Q); auto.
apply redln_cons_gen; auto.
apply redInInclQ with (Q := a :: Q); auto with datatypes.
apply
ObuchPincl
with
(Q := genPcPf
(nf A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM
ltM_dec os a aP) aP Q); auto.
Qed.
Theorem OBuch_Inv :
forall aP R Q : list (poly A0 eqA ltM),
OBuch aP Q R ->
(forall a b : poly A0 eqA ltM, In a aP -> In b aP -> redIn a b aP Q R) ->
forall a b : poly A0 eqA ltM, In a R -> In b R -> reds a b R.
Proof.
intros aP R Q H'; elim H'; simpl in |- *; auto.
intros aL H'0 a b H'1 H'2; try assumption.
apply redIn_nil; auto.
intros a aP0 Q0 R0 H'0 H'1 H'2 H'3 a0 b H'4 H'5.
apply H'1; auto.
intros a1 b0 H'6.
case (addEnd_cons A A0 eqA n ltM) with (1 := H'6); auto.
intros H'7; rewrite <- H'7; auto.
intros H'8.
case (addEnd_cons A A0 eqA n ltM) with (1 := H'8); auto.
intros H'9; rewrite <- H'9; auto.
apply redIn1; auto.
apply red_id; auto.
intros H'9.
case (zerop_dec A A0 eqA n ltM b0); intros Z; auto.
apply redIn1; auto.
apply zerop_red_spoly_r; auto.
rewrite H'7; auto.
apply red_gen_in; auto.
intros H'7 H'8.
case (addEnd_cons A A0 eqA n ltM) with (1 := H'8); auto.
intros H'9; rewrite <- H'9; auto.
apply redIn0b.
case (zerop_dec A A0 eqA n ltM a1); intros Z.
apply redIn1; auto.
apply zerop_red_spoly_r; auto.
rewrite H'9.
apply red_gen_in; auto.
intros H'9.
apply redln_cons with (c := a); simpl in |- *; auto.
apply redInclP with (P := aP0); auto.
apply redInInclQ with (Q := a :: Q0); auto with datatypes.
unfold incl in |- *; auto.
apply nf_red with (aP := aP0) (cs := cs) (os := os); auto.
apply
incl_tran
with
(m := addEnd A A0 eqA n ltM
(nf A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM
ltM_dec os a aP0) aP0); auto.
unfold incl in |- *; auto.
apply
ObuchPincl
with
(Q := genPcPf
(nf A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM
ltM_dec os a aP0) aP0 Q0); auto.
apply red_cons with (1 := cs); auto.
apply
in_incl
with
(p := addEnd A A0 eqA n ltM
(nf A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM
ltM_dec os a aP0) aP0); simpl in |- *;
auto.
apply
ObuchPincl
with
(Q := genPcPf
(nf A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM
ltM_dec os a aP0) aP0 Q0); auto.
intros a aP0 Q0 R0 H'0 H'1 H'2 H'3 a0 b H'4 H'5.
apply H'1; auto.
intros a1 b0 H'6 H'7.
apply redln_cons with (c := a); auto.
apply red_incl with (p := aP0) (1 := cs); auto.
apply ObuchPincl with (Q := Q0); auto.
apply zerop_red with (cs := cs) (os := os); auto.
Qed.
Theorem addEnd_incl :
forall (a : poly A0 eqA ltM) (L1 L2 : list (poly A0 eqA ltM)),
incl (a :: L1) L2 -> incl (addEnd A A0 eqA n ltM a L1) L2.
Proof.
unfold incl in |- *; simpl in |- *; auto.
intros a L1 L2 H' a0 H'0.
case (addEnd_cons A A0 eqA n ltM) with (1 := H'0); auto.
Qed.
Theorem genOCp_redln :
forall aL1 R : list (poly A0 eqA ltM),
incl aL1 R ->
forall a b : poly A0 eqA ltM,
In a aL1 -> In b aL1 -> redIn a b aL1 (genOCPf aL1) R.
Proof.
intros aL1; elim aL1; simpl in |- *; auto.
intros a l H' R H'0 a0 b H'1 H'2.
elim H'2; [ intros H'3; rewrite <- H'3; clear H'2 | intros H'3; clear H'2 ];
auto.
elim H'1; [ intros H'2; rewrite <- H'2; clear H'1 | intros H'2; clear H'1 ];
auto.
apply redIn1; auto.
apply red_id; auto.
apply redIn0b.
case (zerop_dec A A0 eqA n ltM a); intros Z; auto.
apply redIn1; auto.
apply zerop_red_spoly_l; auto.
case (zerop_dec A A0 eqA n ltM a0); intros Z1; auto.
apply redIn1; auto.
apply zerop_red_spoly_r; auto.
lapply (spolyp_addEnd_genPcPf l);
[ intros H'4; elim (H'4 (genOCPf l) a a0);
[ intros c E; elim E; intros H'11 H'12; elim H'12; intros H'13 H'14;
elim H'13;
[ intros H'15; clear H'13 H'12 E | intros H'15; clear H'13 H'12 E ]
| idtac
| idtac
| idtac ]
| idtac ]; auto.
apply redIn2 with (c := c); auto.
simpl in |- *; auto.
apply redInInclQ with (Q := genOCPf l); auto.
apply redInclP with (P := l); auto.
apply H'; auto.
apply incl_tran with (m := a :: l); simpl in |- *; auto.
apply redIn2 with (c := c); auto.
simpl in |- *; auto.
apply redIn1; auto.
apply red_incl with (p := addEnd A A0 eqA n ltM a l) (1 := cs); auto.
apply addEnd_incl; auto.
apply redInclP with (P := l); auto.
apply redInInclQ with (Q := genOCPf l); auto.
apply H'; auto.
apply incl_tran with (m := a :: l); auto.
elim H'1; [ intros H'2; rewrite <- H'2; clear H'1 | intros H'2; clear H'1 ];
auto.
case (zerop_dec A A0 eqA n ltM a); intros Z; auto.
apply redIn1; auto.
apply zerop_red_spoly_l; auto.
case (zerop_dec A A0 eqA n ltM b); intros Z1; auto.
apply redIn1; auto.
apply zerop_red_spoly_r; auto.
lapply (spolyp_addEnd_genPcPf l);
[ intros H'4; elim (H'4 (genOCPf l) a b);
[ intros c E; elim E; intros H'11 H'12; elim H'12; intros H'13 H'14;
elim H'13;
[ intros H'15; clear H'13 H'12 E | intros H'15; clear H'13 H'12 E ]
| idtac
| idtac
| idtac ]
| idtac ]; auto.
apply redIn2 with (c := c); simpl in |- *; auto.
apply redInInclQ with (Q := genOCPf l); auto.
apply redInclP with (P := l); auto.
apply H'; auto.
apply incl_tran with (m := a :: l); simpl in |- *; auto.
apply redIn2 with (c := c); simpl in |- *; auto.
apply redIn1; auto.
apply red_incl with (1 := cs) (p := addEnd A A0 eqA n ltM a l); auto.
apply addEnd_incl; auto.
apply redInclP with (P := l); auto.
apply redInInclQ with (Q := genOCPf l); auto.
apply H'; auto.
apply incl_tran with (m := a :: l); auto.
apply redInclP with (P := l); auto.
apply redInInclQ with (Q := genOCPf l); auto.
apply H'; auto.
apply incl_tran with (m := a :: l); auto.
Qed.
Theorem OBuch_Stable_f :
forall P Q : list (poly A0 eqA ltM), OBuch P (genOCPf P) Q -> stable P Q.
Proof.
intros P Q H'; try assumption.
apply OBuch_Stable with (Q := genOCPf P); auto.
intros a H'0; try assumption.
apply genOCPf_stable; auto.
Qed.
Theorem OBuch_Inv_f :
forall P Q : list (poly A0 eqA ltM),
OBuch P (genOCPf P) Q ->
forall a b : poly A0 eqA ltM, In a Q -> In b Q -> reds a b Q.
Proof.
intros P Q H' a b H'0 H'1; try assumption.
apply OBuch_Inv with (aP := P) (Q := genOCPf P); auto.
intros a0 b0 H'3 H'4.
apply genOCp_redln; auto.
apply ObuchPincl with (Q := genOCPf P); auto.
Qed.
Let FPset (A : list (poly A0 eqA ltM)) := list (poly A0 eqA ltM).
Definition Fl : forall x : list (poly A0 eqA ltM), FPset x -> FPset x -> Prop.
unfold FPset in |- *; simpl in |- *.
intros H' P1 P2.
exact (Tl P1 P2).
Defined.
Theorem wf_Fl : forall x : list (poly A0 eqA ltM), well_founded (Fl x).
Proof.
unfold FPset in |- *; simpl in |- *.
intros x; generalize wf_Tl; auto.
Qed.
Let Co :=
@lexprod (list (poly A0 eqA ltM)) FPset
(RO A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM ltM_dec os)
Fl.
Theorem wf_Co : well_founded Co.
Proof.
unfold Co in |- *; apply wf_lexprod.
apply wf_incl.
exact wf_Fl.
Qed.
Definition PtoS :
list (poly A0 eqA ltM) * list (poly A0 eqA ltM) -> sigT FPset.
intros H'; case H'.
intros P1 P2.
exact (existT FPset P1 P2).
Defined.
Definition RL (x y : list (poly A0 eqA ltM) * list (poly A0 eqA ltM)) :
Prop := Co (PtoS x) (PtoS y).
Theorem wf_RL : well_founded RL.
Proof.
apply (wf_inverse_image _ _ Co PtoS); auto.
try exact wf_Co.
Qed.
Definition pbuchf :
forall PQ : list (poly A0 eqA ltM) * list (poly A0 eqA ltM),
{R : list (poly A0 eqA ltM) | OBuch (fst PQ) (snd PQ) R}.
intros pq; pattern pq in |- *.
apply
well_founded_induction_type
with
(A := (list (poly A0 eqA ltM) * list (poly A0 eqA ltM))%type)
(R := RL).
try exact wf_RL.
intros x; elim x.
intros P Q; case Q; simpl in |- *.
intros H'; exists P; auto.
intros a Q2 Rec.
apply
LetP
with
(A := poly A0 eqA ltM)
(h := nf A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM
ltM_dec os a P).
intros a0 H'.
case (zerop_dec A A0 eqA n ltM a0); intros red10.
elim (Rec (P, Q2)); simpl in |- *; [ intros R E | idtac ]; auto.
exists R; auto.
apply OBuch2; auto.
rewrite <- H'; auto.
red in |- *; unfold Co in |- *; unfold PtoS in |- *.
apply
(right_lex _ _
(RO A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM ltM_dec os)
Fl); auto.
red in |- *; red in |- *; simpl in |- *; auto.
elim (Rec (addEnd A A0 eqA n ltM a0 P, genPcPf a0 P Q2)); simpl in |- *;
[ intros R E0; try exact E0 | idtac ].
exists R; auto.
apply OBuch1; auto.
rewrite <- H'; auto.
rewrite <- H'; auto.
rewrite H'.
red in |- *; unfold Co in |- *; unfold PtoS in |- *.
apply
(left_lex _ _
(RO A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM ltM_dec os)
Fl); auto.
apply RO_lem; auto.
rewrite <- H'; auto.
Defined.
Definition strip :
forall P : list (poly A0 eqA ltM) -> Prop, sig P -> list (poly A0 eqA ltM).
intros P H'; case H'.
intros x H'0; try assumption.
Defined.
Theorem pbuchf_Stable :
forall P R : list (poly A0 eqA ltM),
R = strip _ (pbuchf (P, genOCPf P)) -> stable P R.
Proof.
simpl in |- *.
intros P R H'; try assumption.
apply OBuch_Stable_f; auto.
rewrite H'.
case (pbuchf (pair P (genOCPf P))); simpl in |- *; auto.
Qed.
Theorem pbuchf_Inv :
forall P R : list (poly A0 eqA ltM),
R = strip _ (pbuchf (P, genOCPf P)) ->
forall a b : poly A0 eqA ltM, In a R -> In b R -> reds a b R.
Proof.
intros P R H' a b H'0 H'1; simpl in |- *.
apply OBuch_Inv_f with (P := P); auto.
rewrite H'; simpl in |- *; auto.
case (pbuchf (P, genOCPf P)); simpl in |- *; auto.
Qed.
Definition buch : list (poly A0 eqA ltM) -> list (poly A0 eqA ltM).
intros P; exact (strip _ (pbuchf (P, genOCPf P))).
Defined.
Theorem buch_Stable : forall P : list (poly A0 eqA ltM), stable P (buch P).
Proof.
intros P; apply pbuchf_Stable; auto.
Qed.
Theorem buch_reds :
forall (P : list (poly A0 eqA ltM)) (a b : poly A0 eqA ltM),
In a (buch P) -> In b (buch P) -> reds a b (buch P).
Proof.
intros P a b H' H'0.
apply pbuchf_Inv with (P := P); auto.
Qed.
Theorem reds_SpolyQ :
forall (P : list (poly A0 eqA ltM)) (a b : poly A0 eqA ltM),
reds a b P ->
Spoly_1 A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec P
(s2p A A0 eqA n ltM a) (s2p A A0 eqA n ltM b).
Proof.
intros P a b H'; elim H'; auto.
intros P0 a0 b0 H'0;
cut
(red A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec
(spolyp A A0 A1 eqA plusA invA minusA multA divA cs eqA_dec n ltM
ltM_dec os b0 a0) P0); auto.
case a0; case b0; unfold red in |- *; simpl in |- *; auto.
intros x H'1 x0 H'2 H'3; inversion H'3.
apply Spoly_10 with (Cp := H'2) (Cq := H'1); auto.
apply red_com; auto.
intros P0 a0 b0 c.
case c; case b0; case a0; simpl in |- *.
intros x; case x; simpl in |- *; auto.
intros c0 x0 c1 x1 c2 H'0 H'1 H'2 H'3 H'4 H'5; elim H'5.
intros a1 l c0 x0; case x0; simpl in |- *.
intros c1 x1 c2 H'0 H'1 H'2 H'3 H'4 H'5; elim H'5.
intros a2 l0 c1 x1; case x1; simpl in |- *.
intros c2 H'0 H'1 H'2 H'3 H'4 H'5; elim H'5.
intros a3 l1 c2 H'0 H'1 H'2 H'3 H'4 H'5.
change
(Spoly_1 A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec P0
(pX a1 l) (pX a2 l0)) in |- *.
apply Spoly_11 with (d := a3) (t := l1); auto.
change
(inPolySet A A0 eqA n ltM
(s2p A A0 eqA n ltM (mks A A0 eqA n ltM (pX a3 l1) c2)) P0)
in |- *.
apply in_inPolySet; simpl in |- *; auto.
red in |- *; intros H; inversion H.
Qed.
Theorem imp_in :
forall (P : list (poly A0 eqA ltM)) (a : list (Term A n)),
inPolySet A A0 eqA n ltM a P ->
exists b : poly A0 eqA ltM, In b P /\ a = s2p A A0 eqA n ltM b.
Proof.
intros P a H'; elim H'; auto.
intros a0 p H P0;
exists
(exist (fun l0 : list (Term A n) => canonical A0 eqA ltM l0) (pX a0 p) H);
split; auto.
simpl in |- *; auto.
intros a0 p P0 H'0 H'1; elim H'1; intros b E; elim E; intros H'2 H'3;
clear E H'1; auto.
exists b; split; auto with datatypes.
Qed.
Theorem reds_SpolyQ1 :
forall P : list (poly A0 eqA ltM),
(forall a b : poly A0 eqA ltM, In a P -> In b P -> reds a b P) ->
SpolyQ A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec P.
Proof.
intros P H'.
apply SpolyQ0; auto.
intros p q H'0 H'1 H'2 H'3.
elim (imp_in P p); [ intros b E; elim E; intros H'7 H'8; clear E | idtac ];
auto.
rewrite H'8.
elim (imp_in P q); [ intros b0 E; elim E; intros H'9 H'10; clear E | idtac ];
auto.
rewrite H'10.
apply reds_SpolyQ; auto.
Qed.
Theorem buch_spolyQ :
forall P : list (poly A0 eqA ltM),
SpolyQ A A0 A1 eqA invA minusA multA divA eqA_dec n ltM ltM_dec (buch P).
Proof.
intros P.
apply reds_SpolyQ1; auto.
intros; apply buch_reds; auto.
Qed.
Theorem buch_Grobner :
forall P : list (poly A0 eqA ltM),
Grobner A A0 A1 eqA plusA invA minusA multA divA eqA_dec n ltM ltM_dec
(buch P).
Proof.
intros P.
apply ConfluentReduce_imp_Grobner; auto.
apply SpolyQ_imp_ConfluentReduce with (1 := cs); auto.
apply buch_spolyQ; auto.
Qed.
End Buch.
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% UMB-CS240-2016S: Programming in C
% Copyright 2016 Pejman Ghorbanzade <[email protected]>
% Creative Commons Attribution-ShareAlike 4.0 International License
% More info: https://github.com/ghorbanzade/UMB-CS240-2016S
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section*{Question 2}
Edgar Linton, a CS240 student, was asked to write a program that prompts the user for an integer and prints it on the screen.
A sample run of the program was expected to be as given below.
\begin{terminal}
$ gcc prompt-number.c -o prompt-number
$ ./prompt-number
enter number: 125
number: 125
\end{terminal}
To solve this problem, he wrote the following program.
\lstset{language=c,tabsize=4}
\begin{lstlisting}
// Edgar Linton
// <[email protected]>
#include <stdio.h>
#define MAX_LENGTH 32
int main(void)
{
int num = getNum();
printf("%d\n", num);
}
int getNum(void)
{
int res;
char ch;
int i = 0;
char array[MAX_LENGTH];
while ((ch = getchar()) != '\n')
array[i] = ch;
i = 0;
while (array[i] != '\0')
res = res * 10 + array[i++] - '0';
return res;
}
\end{lstlisting}
His program produces the following output.
\begin{terminal}
$ gcc prompt-number.c -o prompt-number
$ ./prompt-number
enter number: 125
number: 971397989
\end{terminal}
Clearly, something is wrong with Edgar's code.
He is therefore asking for your help to debug the program and fix all of its bugs.
Rewrite the program \texttt{prompt-number.c} such that its output matches the expected output provided previously.
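For reference, one possible corrected implementation is sketched below.
This is only a sketch and other fixes are equally acceptable: it prints the expected prompt and output label, declares \texttt{getNum} before it is used, stores the result of \texttt{getchar} in an \texttt{int}, advances the array index while reading, null-terminates the buffer, and initializes \texttt{res}.
\lstset{language=c,tabsize=4}
\begin{lstlisting}
// one possible fix (sketch)
#include <stdio.h>
#define MAX_LENGTH 32

int getNum(void);

int main(void)
{
	printf("enter number: ");
	int num = getNum();
	printf("number: %d\n", num);
	return 0;
}

int getNum(void)
{
	int res = 0;
	int ch;
	int i = 0;
	char array[MAX_LENGTH];
	while ((ch = getchar()) != '\n' && i < MAX_LENGTH - 1)
		array[i++] = ch;  /* advance the index; the original never did */
	array[i] = '\0';      /* terminate the string before parsing */
	i = 0;
	while (array[i] != '\0')
		res = res * 10 + array[i++] - '0';
	return res;
}
\end{lstlisting}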
|
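# Constructor for a PIMediaMetadata object.
# Validates the types of the supplied arguments, drops NULL fields via
# rmNullObs, and tags the result with the "PIMediaMetadata" class attribute.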
PIMediaMetadata <- function(author = NULL, changeDate = NULL, description = NULL, name = NULL, size = NULL, links = NULL, webException = NULL) {
if (is.null(author) == FALSE) {
if (is.character(author) == FALSE) {
return (print(paste0("Error: author must be a string.")))
}
}
if (is.null(changeDate) == FALSE) {
if (is.character(changeDate) == FALSE) {
return (print(paste0("Error: changeDate must be a string.")))
}
}
if (is.null(description) == FALSE) {
if (is.character(description) == FALSE) {
return (print(paste0("Error: description must be a string.")))
}
}
if (is.null(name) == FALSE) {
if (is.character(name) == FALSE) {
return (print(paste0("Error: name must be a string.")))
}
}
  # Note: 'size' is not type-checked; any non-NULL value is accepted as-is.
if (is.null(links) == FALSE) {
className <- attr(links, "className")
if ((is.null(className)) || (className != "PIMediaMetadataLinks")) {
return (print(paste0("Error: the class from the parameter links should be PIMediaMetadataLinks.")))
}
}
if (is.null(webException) == FALSE) {
className <- attr(webException, "className")
if ((is.null(className)) || (className != "PIWebException")) {
return (print(paste0("Error: the class from the parameter webException should be PIWebException.")))
}
}
value <- list(
Author = author,
ChangeDate = changeDate,
Description = description,
Name = name,
Size = size,
Links = links,
WebException = webException)
valueCleaned <- rmNullObs(value)
attr(valueCleaned, "className") <- "PIMediaMetadata"
return(valueCleaned)
}
|
#############################################################################
##
#W loewy.gi GAP 4 package SingerAlg Thomas Breuer
##
## This file contains implementations of GAP functions for studying
## the Loewy structure of Singer algebras A[q,z].
##
## for those who worked with an earlier version of the package ...
LoewyLayersData:= function( arg )
Print( "use LoewyStructureInfo not LoewyLayersData\n" );
return CallFuncList( LoewyStructureInfoGAP, arg );
end;
if IsPackageMarkedForLoading( "JuliaInterface", "" ) then
#############################################################################
##
#M LoewyStructureInfoJulia( <A> )
##
## Call a Julia function, convert its result to a record,
## but keep the record components in Julia.
##
InstallMethod( LoewyStructureInfoJulia,
[ "IsSingerAlgebra" ],
A -> CallFuncList( LoewyStructureInfoJulia,
ParametersOfSingerAlgebra( A ) ) );
InstallMethod( LoewyStructureInfoJulia,
[ "IsPosInt", "IsPosInt" ],
{ q, z } -> LoewyStructureInfoJulia( q, OrderMod( q, z ), z ) );
InstallMethod( LoewyStructureInfoJulia,
[ "IsPosInt", "IsPosInt", "IsPosInt" ],
{ q, n, z } -> CallFuncList( Julia.SingerAlg.LoewyStructureInfo,
List( [ q, n, z ], GAPToJulia ) ) );
fi;
if IsPackageMarkedForLoading( "JuliaInterface", "" ) then
#############################################################################
##
#M LoewyStructureInfo( <A> )
##
## We prefer 'LoewyStructureInfoJulia' to 'LoewyStructureInfoGAP'.
## For that, we convert the Julia dictionary into a GAP record.
##
InstallMethod( LoewyStructureInfo,
[ "IsSingerAlgebra" ],
A -> JuliaToGAP( IsRecord, LoewyStructureInfoJulia( A ), true ) );
InstallMethod( LoewyStructureInfo,
[ "IsPosInt", "IsPosInt" ],
{ q, z } -> LoewyStructureInfo( q, OrderMod( q, z ), z ) );
InstallMethod( LoewyStructureInfo,
[ "IsPosInt", "IsPosInt", "IsPosInt" ],
{ q, n, z } -> JuliaToGAP( IsRecord, LoewyStructureInfoJulia( q, n, z ),
true ) );
fi;
#############################################################################
##
#F LoewyStructureInfoGAP( <A> )
#F LoewyStructureInfoGAP( <q>[, <n>], <z> )
##
InstallMethod( LoewyStructureInfoGAP,
[ "IsSingerAlgebra" ],
A -> CallFuncList( LoewyStructureInfoGAP,
ParametersOfSingerAlgebra( A ) ) );
InstallMethod( LoewyStructureInfoGAP,
[ "IsPosInt", "IsPosInt" ],
{ q, z } -> LoewyStructureInfoGAP( q, OrderMod( q, z ), z ) );
InstallMethod( LoewyStructureInfoGAP,
[ "IsPosInt", "IsPosInt", "IsPosInt" ],
function( q, n, z )
local islessorequal, monomials, layers, degrees, predecessors, m, i,
mon, lambda, pred, j, mm;
if n mod OrderMod( q, z ) <> 0 then
Error( "<z> must divide <q>^<n> - 1" );
fi;
islessorequal:= function( mon1, mon2 )
local i;
for i in [ 1 .. n ] do
if mon2[i] < mon1[i] then
return false;
fi;
od;
return true;
end;
monomials:= [ 0 * [ 1 .. n ] ];
layers:= [ 0 ];
degrees:= [ 0 ]; # just for speedup: avoid comparisons
predecessors:= [ 0 ];
m:= n * (q-1);
for i in [ 1 .. z ] do
mon:= CoefficientsQadicReversed( i, z, q, n );
lambda:= 0;
pred:= 1;
mm:= Sum( mon );
for j in [ 2 .. i ] do
if lambda < layers[j] and degrees[j] < mm
and islessorequal( monomials[j], mon ) then
lambda:= layers[j];
pred:= j;
fi;
od;
monomials[i+1]:= mon;
layers[i+1]:= lambda + 1;
degrees[i+1]:= mm;
predecessors[i+1]:= pred;
if lambda = 0 then
if mm < m then
m:= mm;
fi;
fi;
od;
# Extract information about one longest chain.
i:= Length( monomials );
pred:= [];
while i > 0 do
Add( pred, i );
i:= predecessors[i];
od;
return rec( monomials:= monomials,
layers:= layers,
chain:= pred,
m:= m,
LL:= lambda + 2,
parameters:= [ q, n, z ] );
end );
#############################################################################
##
#F DimensionsLoewyFactorsGAP( <A> )
#F DimensionsLoewyFactorsJulia( <A> )
##
## Return the GAP object or the Julia object, respectively.
##
BindGlobal( "DimensionsLoewyFactorsGAP", function( A )
local data, v, i;
data:= LoewyStructureInfoGAP( A );
v:= ListWithIdenticalEntries( data.LL, 0 );
for i in data.layers do
v[ i+1 ]:= v[ i+1 ] + 1;
od;
return v;
end );
if IsPackageMarkedForLoading( "JuliaInterface", "" ) then
BindGlobal( "DimensionsLoewyFactorsJulia",
A -> Julia.SingerAlg.LoewyVector( LoewyStructureInfoJulia( A ) ) );
fi;
#############################################################################
##
#M DimensionsLoewyFactors( <A> )
##
## If Julia is available then use the Julia functionality,
## otherwise use the GAP code.
##
InstallMethod( DimensionsLoewyFactors,
[ "IsSingerAlgebra" ],
A -> DimensionsLoewyFactorsGAP( A ) );
if IsPackageMarkedForLoading( "JuliaInterface", "" ) then
InstallMethod( DimensionsLoewyFactors,
[ "IsSingerAlgebra" ],
A -> JuliaToGAP( IsList, DimensionsLoewyFactorsJulia( A ), true ) );
fi;
#############################################################################
##
#F LoewyVectorAbbreviated( <v> )
#F LoewyVectorExpanded( <v> )
##
InstallGlobalFunction( LoewyVectorAbbreviated, function( v )
local result, x, m, i;
if not IsDenseList( v ) then
Error( "<v> must be a dense list" );
elif Length( v ) = 0 then
return [];
fi;
result:= [];
x:= v[1];
m:= 1;
for i in [ 2 .. Length( v ) ] do
if v[i] = x then
m:= m + 1;
elif m = 1 then
Add( result, x );
x:= v[i];
else
Add( result, [ x, m ] );
x:= v[i];
m:= 1;
fi;
od;
if m = 1 then
Add( result, x );
else
Add( result, [ x, m ] );
fi;
return result;
end );
InstallGlobalFunction( LoewyVectorExpanded, function( v )
local result, x, i;
if not IsDenseList( v ) then
Error( "<v> must be a dense list" );
fi;
result:= [];
for x in v do
if IsList( x ) then
for i in [ 1 .. x[2] ] do
Add( result, x[1] );
od;
else
Add( result, x );
fi;
od;
return result;
end );
#############################################################################
##
#F SingerAlgebra( <q>[, <n>], <z>[, <R>] )
#F SingerAlgebra( <arec>[, <R>] )
##
## The algebra returned by this function claims to be a structure constants
## algebra (via the filter 'IsSCAlgebraObjCollection'),
## but the structure constants table is not created until one asks for
## something that needs it.
## Currently a special method for 'CanonicalBasis' makes sure that the
## relevant data get stored.
##
InstallGlobalFunction( SingerAlgebra, function( arg )
local arec, paras, q, n, e, R, z, zero, filter, Fam, A;
if Length( arg ) = 1 and IsRecord( arg[1] ) then
# The record is assumed to belong to the database of Singer algebras.
arec:= arg[1];
q:= arec.q;
n:= arec.n;
if IsBound( arec.z ) then
z:= arec.z;
elif IsBound( arec.e ) then
z:= ( q^n - 1 ) / arec.e;
else
Error( "<arec> must contain one of the components z or e" );
fi;
R:= Rationals;
elif Length( arg ) = 2 and IsRecord( arg[1] ) and IsRing( arg[2] ) then
# The record is assumed to belong to the database of Singer algebras.
arec:= arg[1];
q:= arec.q;
n:= arec.n;
if IsBound( arec.z ) then
z:= arec.z;
elif IsBound( arec.e ) then
z:= ( q^n - 1 ) / arec.e;
else
Error( "<arec> must contain one of the components z or e" );
fi;
R:= arg[2];
elif Length( arg ) = 1 and IsList( arg[1] ) then
# The parameters must be q, n, e[, R].
paras:= arg[1];
if Length( paras ) = 3 then
q:= paras[1];
n:= paras[2];
e:= paras[3];
R:= Rationals;
elif Length( paras ) = 4 then
q:= paras[1];
n:= paras[2];
e:= paras[3];
R:= paras[4];
else
Error( "usage: SingerAlgebra( <list> ) with list of length 3 or 4" );
fi;
z:= ( q^n - 1 ) / e;
elif Length( arg ) = 2 then
# The parameters must be q, z.
q:= arg[1];
z:= arg[2];
n:= OrderMod( q, z );
R:= Rationals;
elif Length( arg ) = 3 and IsInt( arg[3] ) then
# The parameters must be q, n, z.
q:= arg[1];
n:= arg[2];
z:= arg[3];
R:= Rationals;
elif Length( arg ) = 3 and IsRing( arg[3] ) then
# The parameters must be q, z, R.
q:= arg[1];
z:= arg[2];
R:= arg[3];
n:= OrderMod( q, z );
elif Length( arg ) = 4 then
# The parameters must be q, n, z, R.
q:= arg[1];
n:= arg[2];
z:= arg[3];
R:= arg[4];
else
Error( "usage: SingerAlgebra( <arec>[, <R>] ) or ",
"SingerAlgebra( <q>[, <n>], <z>[, <R>] ) or ",
"SingerAlgebra( <list> )" );
fi;
if not ( IsPosInt( q ) and IsPosInt( n ) and IsPosInt( z ) ) then
Error( "<q>, <n>, <z> must be positive integers" );
elif z <> 1 and PowerMod( q, n, z ) <> 1 then
Error( "<z> must divide <q>^<n> - 1" );
elif q = 1 then
Error( "<q> must be an integer > 1" );
fi;
# Create the algebra as far as necessary.
# (Do not set a 'Name' value, in order to use the different methods
# installed for 'ViewObj' and 'PrintObj'.)
zero := Zero( R );
filter:= IsSCAlgebraObj;
if IsAdditivelyCommutativeElementFamily( FamilyObj( zero ) ) then
filter:= filter and IsAdditivelyCommutativeElement;
fi;
Fam:= NewFamily( "SCAlgebraObjFamily", filter );
if Zero( ElementsFamily( FamilyObj( R ) ) ) <> fail then
SetFilterObj( Fam, IsFamilyOverFullCoefficientsFamily );
else
Fam!.coefficientsDomain:= R;
fi;
Fam!.zerocoeff := zero;
SetCharacteristic( Fam, Characteristic( R ) );
SetCoefficientsFamily( Fam, ElementsFamily( FamilyObj( R ) ) );
A:= Objectify( NewType( CollectionsFamily( Fam ),
IsSingerAlgebra and IsAttributeStoringRep ),
rec() );
SetLeftActingDomain( A, R );
SetParametersOfSingerAlgebra( A, [ q, n, z ] );
SetGeneratingSubsetOfCanonicalBasisOfSingerAlgebra( A, [ 1 .. z+1 ] );
SetDimension( A, z+1 );
Fam!.fullSCAlgebra:= A;
SetIsFullSCAlgebra( A, true );
return A;
end );
#############################################################################
##
#M Display( <A> )
#M ViewObj( <A> )
#M PrintObj( <A> )
#M DisplayString( <A> )
#M ViewString( <A> )
#M PrintString( <A> )
#M String( <A> )
##
## According to the Section "View and Print" in the GAP Reference Manual,
## we need methods (only) for
## 'String' (which then covers 'PrintString' and 'PrintObj') and
## 'ViewString' (which then covers 'ViewObj').
## (We do not need 'DisplayString', which would cover 'Display'.)
##
## We want to *view* Singer algebras as 'A[q,n,z[,R]]',
## and to *print* them as calls to 'SingerAlgebra'.
##
InstallMethod( ViewString,
[ "IsSingerAlgebra" ],
function( A )
local paras;
paras:= ParametersOfSingerAlgebra( A );
if LeftActingDomain( A ) = Rationals then
return Concatenation( "A[",
JoinStringsWithSeparator( List( paras, String ), "," ),
"]" );
else
return Concatenation( "A[",
JoinStringsWithSeparator( List( paras, String ), "," ),
",", String( LeftActingDomain( A ) ), "]" );
fi;
end );
InstallMethod( String,
[ "IsSingerAlgebra" ],
function( A )
local paras;
paras:= ParametersOfSingerAlgebra( A );
if LeftActingDomain( A ) = Rationals then
return Concatenation( "SingerAlgebra( ",
JoinStringsWithSeparator( List( paras, String ), ", " ),
" )" );
else
return Concatenation( "SingerAlgebra( ",
JoinStringsWithSeparator( List( paras, String ), ", " ),
", ", String( LeftActingDomain( A ) ), " )" );
fi;
end );
#T Currently the above installations are unfortunately *not* enough,
#T since some 'ViewObj' and 'PrintObj' methods for algebras interfere,
#T as well as a 'PrintString' method for magmas.
InstallMethod( ViewObj,
[ "IsSingerAlgebra" ],
function( A ) Print( ViewString( A ) ); end );
InstallMethod( PrintObj,
[ "IsSingerAlgebra" ],
function( A ) Print( PrintString( A ) ); end );
InstallMethod( PrintString,
[ "IsSingerAlgebra" ],
String );
#############################################################################
##
#M CanonicalBasis( <A> )
##
## This method provides the internal data for treating the Singer algebra
## <A> as a structure constants algebra in GAP.
##
## Formally, we require those filters that are required also by the
## GAP library method for full s.c. algebras,
## in order to guarantee a higher rank for our method;
## these filters are set in algebras returned by 'SingerAlgebra'.
##
InstallMethod( CanonicalBasis,
[ Concatenation( "IsSingerAlgebra and IsFreeLeftModule and ",
"IsSCAlgebraObjCollection and IsFullSCAlgebra" ) ],
function( A )
local paras, q, n, z, dim, coeffs, T, R, zero, one, empty, nonempty,
i, j, Fam, gens;
# Create the structure constants table.
paras:= ParametersOfSingerAlgebra( A );
q:= paras[1];
n:= paras[2];
z:= paras[3];
dim:= Dimension( A );
coeffs:= List( [ 0 .. z ],
k -> CoefficientsQadicReversed( k, z, q, n ) );
T:= [];
R:= LeftActingDomain( A );
zero:= Zero( R );
one:= One( R );
empty:= MakeImmutable( [ [], [] ] );
nonempty:= List( [ 1 .. dim ],
i -> MakeImmutable( [ [ i ], [ one ] ] ) );
for i in [ 1 .. dim ] do
T[i]:= [];
for j in [ 1 .. i-1 ] do
T[i][j]:= T[j][i];
od;
for j in [ i .. dim-i+1 ] do
if q <= MaximumList( coeffs[i] + coeffs[j], 0 ) then
T[i][j]:= empty;
else
T[i][j]:= nonempty[ i+j-1 ];
fi;
od;
for j in [ dim-i+2 .. dim ] do
T[i][j]:= empty;
od;
od;
T[ dim+1 ]:= 1; # commutativity flag
T[ dim+2 ]:= zero;
# Set the necessary entries in the family.
Fam:= ElementsFamily( FamilyObj( A ) );
Fam!.sctable:= T;
Fam!.names:= MakeImmutable( List( [ 0 .. dim-1 ],
i -> Concatenation( "b", String( i ) ) ) );
Fam!.zerocoeff:= zero;
Fam!.defaultTypeDenseCoeffVectorRep :=
NewType( Fam, IsSCAlgebraObj and IsDenseCoeffVectorRep );
SetZero( Fam, ObjByExtRep( Fam, ListWithIdenticalEntries( dim, zero ) ) );
# Set the algebra generators.
gens:= MakeImmutable( List( IdentityMat( dim, R ),
x -> ObjByExtRep( Fam, x ) ) );
SetGeneratorsOfAlgebra( A, gens );
SetGeneratorsOfAlgebraWithOne( A, gens );
SetOne( A, gens[1] );
Fam!.basisVectors:= gens;
# Delegate to the library method for full s.c. algebras,
# which has a lower rank.
TryNextMethod();
end );
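##  An illustrative session (not part of the original file; the dimension
##  of A[q,n,z] is z+1, hence 8 basis vectors for A[2,3,7]):
##
##      gap> A:= SingerAlgebra( 2, 3, 7 );;
##      gap> Length( BasisVectors( CanonicalBasis( A ) ) );
##      8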
#############################################################################
##
#M Representative( <A> )
#M GeneratorsOfAlgebra( <A> )
#M GeneratorsOfAlgebraWithOne( <A> )
##
## Note that we cannot use 'RedispatchOnCondition' here,
## because we have only the attribute tester 'HasCanonicalBasis'
## that may be missing,
## whereas 'RedispatchOnCondition' checks for missing property values
## plus their testers.
##
InstallMethod( Representative,
[ "IsSingerAlgebra" ],
function( A )
if HasCanonicalBasis( A ) then
TryNextMethod();
fi;
# Set the necessary data and redispatch.
CanonicalBasis( A );
return Representative( A );
end );
InstallMethod( GeneratorsOfAlgebra,
[ "IsSingerAlgebra" ],
function( A )
if HasCanonicalBasis( A ) then
TryNextMethod();
fi;
# Set the necessary data and redispatch.
CanonicalBasis( A );
return GeneratorsOfAlgebra( A );
end );
InstallMethod( GeneratorsOfAlgebraWithOne,
[ "IsSingerAlgebra" ],
function( A )
if HasCanonicalBasis( A ) then
TryNextMethod();
fi;
# Set the necessary data and redispatch.
CanonicalBasis( A );
return GeneratorsOfAlgebraWithOne( A );
end );
#############################################################################
##
#M GeneratingSubsetOfCanonicalBasisOfSingerAlgebra( <triv> )
##
## Provide a method for the trivial subspace/subalgebra of a Singer algebra.
## (We cannot be sure that this attribute gets set on creation.)
##
InstallMethod( GeneratingSubsetOfCanonicalBasisOfSingerAlgebra,
[ "IsVectorSpace and HasParent" ],
function( V )
if IsSingerAlgebra( Parent( V ) ) and IsTrivial( V ) then
return [];
fi;
TryNextMethod();
end );
#############################################################################
##
#M LoewyLengthGAP( <A> )
#M LoewyLengthGAP( <q>, <z> )
#M LoewyLengthGAP( <q>, <n>, <z>, <m> )
##
## Use stored 'LoewyStructureInfoGAP' values,
## use the cheap criteria when the upper bound is attained (using <m>),
## compute 'LoewyStructureInfoGAP' only if necessary.
##
InstallMethod( LoewyLengthGAP,
[ "IsSingerAlgebra and HasLoewyStructureInfoGAP" ],
A -> LoewyStructureInfoGAP( A ).LL );
InstallMethod( LoewyLengthGAP,
[ "IsSingerAlgebra" ],
function( A )
local m, paras;
# We need m(q,e) anyhow in order to compute the upper bound
# to which the cheap criteria refer.
m:= MinimalDegreeOfSingerAlgebraGAP( A );
if HasLoewyStructureInfoGAP( A ) then
# Perhaps the computation of 'm' has triggered this.
return LoewyStructureInfoGAP( A ).LL;
else
paras:= ParametersOfSingerAlgebra( A );
return LoewyLengthGAP( paras[1], paras[2], paras[3], m );
fi;
end );
InstallMethod( LoewyLengthGAP,
[ "IsPosInt", "IsPosInt" ],
function( q, z )
local n;
n:= OrderMod( q, z );
return LoewyLengthGAP( q, n, z,
MinimalDegreeOfSingerAlgebraGAP( q, SingerAlgE( q, n, z ) ) );
end );
InstallMethod( LoewyLengthGAP,
[ "IsPosInt", "IsPosInt", "IsPosInt", "IsPosInt" ],
function( q, n, z, m )
local l;
l:= LoewyLengthCheapGAP( q, n, z, m );
if l = 0 then
l:= LoewyLengthHardGAP( q, n, z, m );
fi;
return l;
end );
#############################################################################
##
#F OrderModExt( <n>, <m>[, <bound>] )
##
## The optional argument <bound> is not yet supported for 'OrderMod'
## in GAP 4.11.0, but it is useful in the context of this package.
##
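##  For illustration (an example call, not from the original comments;
##  2 has multiplicative order 100 modulo the prime 101, and 100 is a
##  known multiple of that order):
##
##      gap> OrderModExt( 2, 101, 100 );
##      100
##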
InstallGlobalFunction( OrderModExt, function( n, m, bound... )
local x, o, d;
# check the arguments and reduce $n$ into the range $0..m-1$
if m <= 0 then Error("<m> must be positive"); fi;
if n < 0 then n := n mod m + m; fi;
if m <= n then n := n mod m; fi;
# return 0 if $m$ is not coprime to $n$
if GcdInt(m,n) <> 1 then
o := 0;
# compute the order simply by iterated multiplying, $x= n^o$ mod $m$
elif m < 100 then
x := n; o := 1;
while x > 1 do
x := x * n mod m; o := o + 1;
od;
# otherwise try the divisors of $\lambda(m)$ and their divisors, etc.
else
if Length( bound ) = 1 then
# We know a multiple of the desired order.
o := bound[1];
else
# The default a priori known multiple is 'Lambda( m )'.
o := Lambda( m );
fi;
for d in PrimeDivisors( o ) do
while o mod d = 0 and PowerModInt(n,o/d,m) = 1 do
o := o / d;
od;
od;
fi;
return o;
end );
#############################################################################
##
#F SufficientCriterionForLoewyBoundAttained( <q>, <n>, <z>, <m> )
##
InstallGlobalFunction( SufficientCriterionForLoewyBoundAttained,
function( q, n, z, m )
local qm, e, r, p, sum, d, phi6;
if n <= 3 then
return "Cor. I.7.1 (n <= 3)";
elif z < 70 then
# The bound is attained, by the classif. of small dim. Singer alg.
return "z < 70";
elif m = 2 then
return "La. I.6.3";
elif ( q - 1 ) mod m = 0 then
return "Thm. I.7.1";
elif n * (q-1) < 3 * m then
return "La. I.7.1 (iii)";
fi;
qm:= q mod m;
if 1 < qm and Int( n * ( qm-1 ) / m ) = 1 then
return "Prop. II.3.15";
fi;
e:= ( q^n - 1 ) / z;
if e <= 32 then
return "Prop. II.6.1 (e <= 32)";
elif e <= 3 * m then
return "Prop. II.6.4";
elif n <= 5 and ((q^n-1)/(q-1)) mod e = 0 then
return "Prop. II.5.3, II.5.6 (e | (q^n-1)/(q-1), n <= 5)";
elif n mod ( m / Gcd( m, q-1 ) * OrderModExt( q, e, n ) ) = 0 then
return "La. II.5.2 (ii)";
fi;
if ( (q^n-1)/(q-1) ) mod e = 0 then
# we know m <= n
if m >= n-1 then
return "Prop. II.5.1 (ii)";
elif m = n-2 and ( q mod m ) in [ 1 .. Int( (m+1)/2 ) ] then
return "Prop. II.5.1 (iii)";
elif Int( (n-m) * (q-1) / m ) mod ( n-m ) = 0 then
return "Prop. II.5.1 (i)";
fi;
fi;
if ( n mod m = 0 ) then
if Sum( List( [ 0 .. m-1 ], i -> q^(i*n/m) ) ) mod e = 0 then
return "La. II.4.1 for r = 1";
fi;
for r in DivisorsInt( n ) do
if (n/r) mod m = 0 and
Sum( [ 0 .. m-1 ], i -> q^(n*i/(m*r)) ) mod e = 0 then
return "La. II.4.1";
fi;
od;
fi;
p:= SmallestRootInt( e );
if p = 2 then
return "Thm. II.4.3 (iii)";
elif IsPrimeInt( p ) then
# (Here we know that 'q mod p <> 1' holds.)
# Check whether 'p' is a Pierpont prime.
p:= p - 1;
while p mod 2 = 0 do
p:= p/2;
od;
while p mod 3 = 0 do
p:= p/3;
od;
if p = 1 then
return "Thm. II.4.3 (ii)";
fi;
fi;
# Concerning Remark II.5.4,
# we need not check \Phi_2 (since m = e if e divides q-1,
# and we have checked whether m divides q-1),
# \Phi_4, \Phi_8 (because then m = 2).
phi6:= q^2 - q + 1; # this is \Phi_6(q)
if phi6 mod e = 0 or
( q^3*(q^3+1) + 1 ) mod e = 0 or # this is \Phi_9(q)
( q^3*(q-1) + phi6 ) mod e = 0 then # this is \Phi_{10}(q)
return "Rem. II.5.4";
fi;
# We give up.
return "";
end );
InstallGlobalFunction( LoewyLengthCheapGAP, function( q, n, z, m )
if SufficientCriterionForLoewyBoundAttained( q, n, z, m ) <> "" then
return Int( n * ( q - 1 ) / m ) + 1;
else
return 0;
fi;
end );
InstallGlobalFunction( LoewyLengthHardGAP,
{ q, n, z, m } -> LoewyStructureInfoGAP( q, n, z ).LL );
#############################################################################
##
#M LoewyLengthJulia( <A> )
#M LoewyLengthJulia( <q>, <z> )
#M LoewyLengthJulia( <q>, <n>, <z>, <m> )
##
## Use the same criteria as for the &GAP; variant.
## These methods are available only if &Julia; is available.
##
if IsPackageMarkedForLoading( "JuliaInterface", "" ) then
InstallMethod( LoewyLengthJulia,
[ "IsSingerAlgebra and HasLoewyStructureInfoJulia" ],
A -> JuliaToGAP( IsInt,
Julia.Base.get( LoewyStructureInfoJulia( A ),
JuliaSymbol( "LL" ), 0 ) ));
InstallMethod( LoewyLengthJulia,
[ "IsSingerAlgebra" ],
function( A )
local paras;
paras:= ParametersOfSingerAlgebra( A );
# Leave the computation of m(q,e) and of the Loewy length to Julia,
# let Julia set also the 'MinimalDegreeOfSingerAlgebraJulia' value in A.
return JuliaToGAP( IsInt,
Julia.SingerAlg.LoewyLength( paras[1], paras[2], paras[3],
A ) );
end );
InstallMethod( LoewyLengthJulia,
[ "IsPosInt", "IsPosInt" ],
function( q, z )
return JuliaToGAP( IsInt, Julia.SingerAlg.LoewyLength( q, z ) );
end );
InstallMethod( LoewyLengthJulia,
[ "IsPosInt", "IsPosInt", "IsPosInt", "IsPosInt" ],
function( q, n, z, m )
return JuliaToGAP( IsInt,
Julia.SingerAlg.LoewyLength( q, n, z, m ) );
end );
fi;
#############################################################################
##
#M RadicalOfAlgebra( <A> )
##
## The Jacobson radical of a Singer algebra of dimension <M>z+1</M>
## has the basis <M>[ b_1, b_2, \ldots, b_z ]</M>.
##
InstallMethod( RadicalOfAlgebra,
[ "IsSingerAlgebra" ],
function( A )
local B, range, S;
B:= BasisVectors( CanonicalBasis( A ) );
range:= [ 2 .. Length( B ) ];
S:= SubalgebraNC( A, B{ range }, "basis" );
SetGeneratingSubsetOfCanonicalBasisOfSingerAlgebra( S, range );
return S;
end );
#############################################################################
##
#M RadicalSeriesOfAlgebra( <A> )
##
InstallMethod( RadicalSeriesOfAlgebra,
[ "IsSingerAlgebra" ],
function( A )
local result, data, B, i, S;
result:= [ A, RadicalOfAlgebra( A ) ];
data:= LoewyStructureInfoGAP( A );
data:= SingerAlg.BasesOfRadicalSeries( data );
B:= BasisVectors( CanonicalBasis( A ) );
for i in [ 2 .. Length( data ) ] do
S:= SubalgebraNC( A, B{ data[i] }, "basis" );
SetGeneratingSubsetOfCanonicalBasisOfSingerAlgebra( S, data[i] );
result[i+1]:= S;
od;
Add( result, TrivialSubalgebra( A ) );
return result;
end );
#############################################################################
##
#M SocleSeriesOfAlgebra( <A> )
##
InstallMethod( SocleSeriesOfAlgebra,
[ "IsSingerAlgebra" ],
function( A )
local result, data, B, i, S;
result:= [ TrivialSubalgebra( A ) ];
data:= LoewyStructureInfoGAP( A );
data:= SingerAlg.BasesOfSocleSeries( data );
B:= BasisVectors( CanonicalBasis( A ) );
for i in [ 1 .. Length( data ) ] do
S:= SubalgebraNC( A, B{ data[i] }, "basis" );
SetGeneratingSubsetOfCanonicalBasisOfSingerAlgebra( S, data[i] );
result[i+1]:= S;
od;
Add( result, A );
return result;
end );
#############################################################################
##
#M Intersection2( <A>, <B> )
##
InstallMethod( Intersection2,
IsIdenticalObj,
[ "IsVectorSpace and HasGeneratingSubsetOfCanonicalBasisOfSingerAlgebra",
"IsVectorSpace and HasGeneratingSubsetOfCanonicalBasisOfSingerAlgebra" ],
100, # beat generic methods for two 'IsFLMLORWithOne'
function( A, B )
local P, basis, data, S;
P:= Parent( A );
if not IsIdenticalObj( P, Parent( B ) ) then
TryNextMethod();
fi;
basis:= BasisVectors( CanonicalBasis( P ) );
data:= Intersection2(
GeneratingSubsetOfCanonicalBasisOfSingerAlgebra( A ),
GeneratingSubsetOfCanonicalBasisOfSingerAlgebra( B ) );
S:= SubspaceNC( P, basis{ data }, "basis" );
SetGeneratingSubsetOfCanonicalBasisOfSingerAlgebra( S, data );
return S;
end );
#############################################################################
##
#M \+( <A>, <B> )
##
InstallOtherMethod( \+,
IsIdenticalObj,
[ "IsVectorSpace and HasGeneratingSubsetOfCanonicalBasisOfSingerAlgebra",
"IsVectorSpace and HasGeneratingSubsetOfCanonicalBasisOfSingerAlgebra" ],
100, # beat generic methods for two rings, two left modules
function( A, B )
local P, basis, data, S;
P:= Parent( A );
if not IsIdenticalObj( P, Parent( B ) ) then
TryNextMethod();
fi;
basis:= BasisVectors( CanonicalBasis( P ) );
data:= Union(
GeneratingSubsetOfCanonicalBasisOfSingerAlgebra( A ),
GeneratingSubsetOfCanonicalBasisOfSingerAlgebra( B ) );
S:= SubspaceNC( P, basis{ data }, "basis" );
SetGeneratingSubsetOfCanonicalBasisOfSingerAlgebra( S, data );
return S;
end );
#############################################################################
##
#M ProductSpace( <A>, <B> )
##
InstallMethod( ProductSpace,
IsIdenticalObj,
[ "IsVectorSpace and HasGeneratingSubsetOfCanonicalBasisOfSingerAlgebra",
"IsVectorSpace and HasGeneratingSubsetOfCanonicalBasisOfSingerAlgebra" ],
100, # beat generic methods for two algebras, two left modules
function( A, B )
local P, basis, data, S;
P:= Parent( A );
if not IsIdenticalObj( P, Parent( B ) ) then
TryNextMethod();
fi;
basis:= BasisVectors( CanonicalBasis( P ) );
data:= LoewyStructureInfoGAP( P );
data:= SingerAlg.BasisOfProductSpace( data,
GeneratingSubsetOfCanonicalBasisOfSingerAlgebra( A ),
GeneratingSubsetOfCanonicalBasisOfSingerAlgebra( B ) );
S:= SubspaceNC( P, basis{ data }, "basis" );
SetGeneratingSubsetOfCanonicalBasisOfSingerAlgebra( S, data );
return S;
end );
#############################################################################
##
#F CoefficientsQadicReversed( <k>, <z>, <q>, <n> )
##
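##  For illustration (an example call, not part of the original comments):
##  with q = 2, n = 3, z = 7, the reversed 2-adic expansion of k = 3 is
##
##      gap> CoefficientsQadicReversed( 3, 7, 2, 3 );
##      [ 0, 1, 1 ]
##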
InstallGlobalFunction( CoefficientsQadicReversed, function( k, z, q, n )
local v, i, r, rq, d;
v:= [];
if k = z then
for i in [ 1 .. n ] do
v[i]:= q-1;
od;
else
r:= k;
for i in [ 1 .. n ] do
rq:= r * q;
d:= QuoInt( rq, z );
r:= rq - d * z;
v[i]:= d;
od;
fi;
return v;
end );
#############################################################################
##
#F SingerAlg.MultTable( <data> )
##
## <#GAPDoc Label="SingerAlg.MultTable">
## <ManSection>
## <Func Name="SingerAlg.MultTable" Arg='data'/>
##
## <Description>
## Let <A>data</A> be the record returned by
## <Ref Oper="LoewyStructureInfoGAP" Label="for parameters"/>
## that describes a Singer algebra <M>A[q,z]</M>,
## and let <M>B = B(A)</M>.
## <Ref Func="SingerAlg.MultTable"/> returns the
## <M>(z+1) \times (z+1)</M> matrix that contains at the position
## <M>(i,j)</M> the value <M>i+j-1</M> if the product
## <M>B_i \cdot B_j</M> is nonzero
## (hence equal to <M>B_{{i+j-1}}</M>, the <M>i+j-1</M>-th basis vector),
## and <M>0</M> otherwise.
## <P/>
## <Example><![CDATA[
## gap> data:= LoewyStructureInfoGAP( 2, 3, 7 );;
## gap> Display( SingerAlg.MultTable( data ) );
## [ [ 1, 2, 3, 4, 5, 6, 7, 8 ],
## [ 2, 0, 4, 0, 6, 0, 8, 0 ],
## [ 3, 4, 0, 0, 7, 8, 0, 0 ],
## [ 4, 0, 0, 0, 8, 0, 0, 0 ],
## [ 5, 6, 7, 8, 0, 0, 0, 0 ],
## [ 6, 0, 8, 0, 0, 0, 0, 0 ],
## [ 7, 8, 0, 0, 0, 0, 0, 0 ],
## [ 8, 0, 0, 0, 0, 0, 0, 0 ] ]
## ]]></Example>
## <P/>
## The multiplication table of a Singer algebra of dimension <M>N</M>
## has the value <M>N</M> on the antidiagonal (<M>i+j = N+1</M>),
## is zero below the antidiagonal,
##  and contains only <M>N-i</M> and zero on the <M>i</M>-th parallel above
##  the antidiagonal.
## </Description>
## </ManSection>
## <#/GAPDoc>
##
SingerAlg.MultTable:= function( data )
local monoms, n, nn, mat, q, qhalf, i, j;
if not IsBound( data.multtable ) then
monoms:= data.monomials;
n:= Length( monoms ); # z+1
nn:= QuoInt( n+1, 2 );
mat:= NullMat( n, n );
q:= data.parameters[1];
if q mod 2 = 1 then
qhalf:= QuoInt( q, 2 ) + 1;
else
qhalf:= QuoInt( q, 2 );
fi;
for i in [ 1 .. nn ] do
for j in [ 1 .. i-1 ] do
if ForAll( monoms[i] + monoms[j], x -> x < q ) then
mat[i,j]:= i+j-1;
mat[j,i]:= i+j-1;
fi;
od;
if ForAll( monoms[i], x -> x < qhalf ) then
mat[i,i]:= i+i-1;
fi;
od;
for i in [ nn+1 .. n ] do
# The [i,j] entry can be nonzero only if i+j <= z+2 holds.
for j in [ 1 .. n+1-i ] do
if ForAll( monoms[i] + monoms[j], x -> x < q ) then
mat[i,j]:= i+j-1;
mat[j,i]:= i+j-1;
fi;
od;
od;
data.multtable:= mat;
fi;
return data.multtable;
end;
#############################################################################
##
#F SingerAlg.BasisOfSum( <data>, <I>, <J> )
#F SingerAlg.BasisOfIntersection( <data>, <I>, <J> )
##
## <#GAPDoc Label="SingerAlg.BasisOfSum">
## <ManSection>
## <Heading>SingerAlg.BasisOfSum and SingerAlg.BasisOfIntersection</Heading>
## <Func Name="SingerAlg.BasisOfSum" Arg='data, I, J'/>
## <Func Name="SingerAlg.BasisOfIntersection" Arg='data, I, J'/>
##
## <Description>
## For two subsets <A>I</A>, <A>J</A> of <M>\{ 1, 2, \ldots, z+1 \}</M>,
## these functions just return the union and the intersection,
## respectively, of <A>I</A> and <A>J</A>.
## <P/>
## <A>I</A> and <A>J</A> describe subsets of a basis, which generate the
## spaces <M>U</M> and <M>V</M>, say, then the result describes the subset
## of this basis that generates the sum and the intersection, respectively,
## of <M>U</M> and <M>V</M>.
## <P/>
## <Example><![CDATA[
## gap> SingerAlg.BasisOfSum( data, [ 1, 2, 3 ], [ 2, 4, 6 ] );
## [ 1, 2, 3, 4, 6 ]
## gap> SingerAlg.BasisOfIntersection( data, [ 1, 2, 3 ], [ 2, 4, 6 ] );
## [ 2 ]
## ]]></Example>
## </Description>
## </ManSection>
## <#/GAPDoc>
##
SingerAlg.BasisOfSum:= { data, I, J } -> Union( I, J );
SingerAlg.BasisOfIntersection:= { data, I, J } -> Intersection( I, J );
#############################################################################
##
#F SingerAlg.BasisOfProductSpace( <data>, <I>, <J> )
##
## <#GAPDoc Label="SingerAlg.BasisOfProductSpace">
## <ManSection>
## <Func Name="SingerAlg.BasisOfProductSpace" Arg='data, I, J'/>
##
## <Description>
## Let <A>data</A> be the record returned by
## <Ref Oper="LoewyStructureInfoGAP" Label="for parameters"/>
## that describes a Singer algebra <M>A = A[q,z]</M>,
## let <M>B = B(A)</M>,
## and let <A>I</A>, <A>J</A> be subsets of
## <M>\{ 1, 2, \ldots, z+1 \}</M>,
## describing subspaces <M>U</M>, <M>V</M> of <M>A</M> with bases
## <M>( B_i; i \in I )</M> and <M>( B_i; i \in J )</M>, respectively.
## <Ref Func="SingerAlg.BasisOfProductSpace"/> returns the subset <M>K</M>
## of <M>\{ 1, 2, \ldots, z+1 \}</M> such that
## <M>( B_i; i \in K )</M> is a basis of the product space <M>U \cdot V</M>.
## <P/>
## <Example><![CDATA[
## gap> data:= LoewyStructureInfoGAP( 2, 7 );;
## gap> radser:= SingerAlg.BasesOfRadicalSeries( data );
## [ [ 2 .. 8 ], [ 4, 6, 7, 8 ], [ 8 ] ]
## gap> SingerAlg.BasisOfProductSpace( data, radser[1], radser[1] );
## [ 4, 6, 7, 8 ]
## gap> SingerAlg.BasisOfProductSpace( data, radser[1], radser[2] );
## [ 8 ]
## gap> SingerAlg.BasisOfProductSpace( data, radser[2], radser[2] );
## [ ]
## ]]></Example>
## </Description>
## </ManSection>
## <#/GAPDoc>
##
SingerAlg.BasisOfProductSpace:= function( data, I, J )
local mat, basis, i, j;
mat:= SingerAlg.MultTable( data );
basis:= [];
for i in I do
for j in J do
if mat[i,j] <> 0 then
AddSet( basis, i+j-1 );
fi;
od;
od;
return basis;
end;
#############################################################################
##
#F SingerAlg.BasisOfIdeal( <data>, <I> )
##
## <#GAPDoc Label="SingerAlg.BasisOfIdeal">
## <ManSection>
## <Func Name="SingerAlg.BasisOfIdeal" Arg='data, I'/>
##
## <Description>
## Let <A>data</A> be the record returned by
## <Ref Oper="LoewyStructureInfoGAP" Label="for parameters"/>
## that describes a Singer algebra <M>A = A[q,z]</M>,
## let <M>B = B(A)</M>,
## and let <A>I</A> be a subset of <M>\{ 1, 2, \ldots, z+1 \}</M>,
## describing a subspace <M>U</M> of <M>A</M> with basis
## <M>( B_i; i \in I )</M>.
## <Ref Func="SingerAlg.BasisOfIdeal"/> returns the subset <M>J</M>
## of <M>\{ 1, 2, \ldots, z+1 \}</M> such that
## <M>( B_i; i \in J )</M> is a basis of the ideal <M>U \cdot A</M>.
## <P/>
## <Example><![CDATA[
## gap> data:= LoewyStructureInfoGAP( 2, 7 );;
## gap> SingerAlg.BasisOfIdeal( data, [ 4 ] );
## [ 4, 8 ]
## ]]></Example>
## </Description>
## </ManSection>
## <#/GAPDoc>
##
SingerAlg.BasisOfIdeal:= { data, I } -> SingerAlg.BasisOfProductSpace( data,
I, [ 1 .. Length( data.monomials ) ] );
#############################################################################
##
#F SingerAlg.BasisOfAnnihilator( <data>, <I> )
##
## <#GAPDoc Label="SingerAlg.BasisOfAnnihilator">
## <ManSection>
## <Func Name="SingerAlg.BasisOfAnnihilator" Arg='data, I'/>
##
## <Description>
## Let <A>data</A> be the record returned by
## <Ref Oper="LoewyStructureInfoGAP" Label="for parameters"/>
## that describes a Singer algebra <M>A = A[q,z]</M>,
## let <M>B = B(A)</M>,
## and let <A>I</A> be a subset of <M>\{ 1, 2, \ldots, z+1 \}</M>,
## describing a subspace <M>U</M> of <M>A</M> with basis
## <M>( B_i; i \in I )</M>.
## <Ref Func="SingerAlg.BasisOfAnnihilator"/> returns the subset <M>J</M>
## of <M>\{ 1, 2, \ldots, z+1 \}</M> such that
## <M>( B_i; i \in J )</M> is a basis of the annihilator
## <M>\{ x \in A; x \cdot U = 0 \}</M> of <M>U</M> in <M>A</M>.
## <P/>
## <Example><![CDATA[
## gap> data:= LoewyStructureInfoGAP( 2, 7 );;
## gap> radser:= SingerAlg.BasesOfRadicalSeries( data );
## [ [ 2 .. 8 ], [ 4, 6, 7, 8 ], [ 8 ] ]
## gap> List( radser, I -> SingerAlg.BasisOfAnnihilator( data, I ) );
## [ [ 8 ], [ 4, 6, 7, 8 ], [ 2, 3, 4, 5, 6, 7, 8 ] ]
## ]]></Example>
## </Description>
## </ManSection>
## <#/GAPDoc>
##
SingerAlg.BasisOfAnnihilator:= function( data, list )
local mat;
mat:= SingerAlg.MultTable( data );
return Filtered( [ 1 .. Length( mat ) ],
i -> ForAll( list, j -> mat[i,j] = 0 ) );
end;
#############################################################################
##
#F SingerAlg.BasesOfRadicalSeries( <data> )
##
## <#GAPDoc Label="SingerAlg.BasesOfRadicalSeries">
## <ManSection>
## <Func Name="SingerAlg.BasesOfRadicalSeries" Arg='data'/>
##
## <Description>
## Let <A>data</A> be the record returned by
## <Ref Oper="LoewyStructureInfoGAP" Label="for parameters"/>
## that describes a Singer algebra <M>A = A[q,z]</M>,
## and let <M>B = B(A)</M>.
## <Ref Func="SingerAlg.BasesOfRadicalSeries"/> returns the list
## <M>[ I_1, I_2, \ldots, I_l ]</M> of subsets of
## <M>\{ 1, 2, \ldots, z+1 \}</M> such that
## <M>( B_i; i \in I_j )</M> is a basis of the <M>j</M>-th power of the
## Jacobson radical <M>J</M> of <M>A</M>,
## and such that <M>J^l</M> is nonzero and <M>J^{{l+1}}</M> is zero.
## <P/>
## <Example><![CDATA[
## gap> data:= LoewyStructureInfoGAP( 2, 7 );;
## gap> radser:= SingerAlg.BasesOfRadicalSeries( data );
## [ [ 2 .. 8 ], [ 4, 6, 7, 8 ], [ 8 ] ]
## ]]></Example>
## </Description>
## </ManSection>
## <#/GAPDoc>
##
SingerAlg.BasesOfRadicalSeries:= function( data )
local lay, ll, jbylayer, J, i;
lay:= data.layers;
ll:= data.LL;
jbylayer:= List( [ 1 .. ll-1 ], i -> Positions( lay, i ) );
J:= [ jbylayer[ ll-1 ] ];
for i in [ 2 .. ll-1 ] do
J[i]:= Union( J[ i-1 ], jbylayer[ ll - i ] );
od;
return Reversed( J );
end;
#############################################################################
##
#F SingerAlg.BasesOfSocleSeries( <data> )
##
## <#GAPDoc Label="SingerAlg.BasesOfSocleSeries">
## <ManSection>
## <Func Name="SingerAlg.BasesOfSocleSeries" Arg='data'/>
##
## <Description>
## Let <A>data</A> be the record returned by
## <Ref Oper="LoewyStructureInfoGAP" Label="for parameters"/>
## that describes a Singer algebra <M>A = A[q,z]</M>,
## and let <M>B = B(A)</M>.
## <Ref Func="SingerAlg.BasesOfSocleSeries"/> returns the list
## <M>[ I_1, I_2, \ldots, I_l ]</M> of subsets of
## <M>\{ 1, 2, \ldots, z+1 \}</M> such that
## <M>( B_i; i \in I_j )</M> is a basis of <M>S_j</M>,
## where <M>S_1</M> is the socle of <M>A</M>,
## <M>S_{{j+1}}/S_j</M> is the socle of <A>A</A><M>/S_j</M>,
## and <A>A</A><M>/S_l</M> is nonzero and its own socle.
## <P/>
## <Example><![CDATA[
## gap> socser:= SingerAlg.BasesOfSocleSeries( data );
## [ [ 8 ], [ 4, 6, 7, 8 ], [ 2 .. 8 ] ]
## ]]></Example>
## </Description>
## </ManSection>
## <#/GAPDoc>
##
SingerAlg.BasesOfSocleSeries:= function( data )
local lay, ll, sbylayer, S, i;
lay:= Reversed( data.layers );
ll:= data.LL;
sbylayer:= List( [ 0 .. ll-2 ], i -> Positions( lay, i ) );
S:= [ sbylayer[ 1 ] ];
for i in [ 2 .. ll-1 ] do
S[i]:= Union( S[ i-1 ], sbylayer[i] );
od;
return S;
end;
#############################################################################
##
#F SingerAlg.BasisOfPowers( <data>, <I>, <p>, <m> )
##
## <#GAPDoc Label="SingerAlg.BasisOfPowers">
## <ManSection>
## <Func Name="SingerAlg.BasisOfPowers" Arg='data, I, p, m'/>
##
## <Description>
## Let <A>p</A> be a prime integer,
## let <A>data</A> be the record returned by
## <Ref Oper="LoewyStructureInfoGAP" Label="for parameters"/>
## that describes a Singer algebra <M>A = A[q,z]</M>,
## let <M>B = B(A_p)</M>,
## let <A>I</A> be a subset of <M>\{ 1, 2, \ldots, z+1 \}</M>,
## describing a subspace <M>U</M> of <M>A_p</M> with basis
## <M>( B_i; i \in I )</M>,
## and let <A>m</A> be a positive integer.
## <Ref Func="SingerAlg.BasisOfPowers"/> returns the subset <M>J</M>
## of <M>\{ 1, 2, \ldots, z+1 \}</M> such that
## <M>( B_i; i \in J )</M> is a basis of the subspace
## <M>\{ x^{{p^m}}; x \in U \}</M> of <M>A_p</M>.
## <P/>
## <Example><![CDATA[
## gap> data:= LoewyStructureInfoGAP( 3, 8 );;
## gap> SingerAlg.BasisOfPowers( data, [ 1 .. 9 ], 2, 1 );
## [ 1, 3, 7, 9 ]
## gap> SingerAlg.BasisOfPowers( data, [ 1 .. 9 ], 2, 2 );
## [ 1 ]
## gap> SingerAlg.BasisOfPowers( data, [ 1 .. 9 ], 3, 1 );
## [ 1 ]
## ]]></Example>
## </Description>
## </ManSection>
## <#/GAPDoc>
##
SingerAlg.BasisOfPowers:= function( data, I, p, m )
local result, q, pm, quo, k;
result:= [];
q:= data.parameters[1];
pm:= p^m;
quo:= QuoInt( q-1, pm );
for k in I do
if Maximum( data.monomials[k] ) <= quo then
Add( result, pm * ( k-1 ) + 1 );
fi;
od;
return result;
end;
#############################################################################
##
#F SingerAlg.BasisOfPMRoots( <data>, <I>, <p>, <m> )
##
## <#GAPDoc Label="SingerAlg.BasisOfPMRoots">
## <ManSection>
## <Func Name="SingerAlg.BasisOfPMRoots" Arg='data, I, p, m'/>
##
## <Description>
## Let <A>p</A> be a prime integer,
## let <A>data</A> be the record returned by
## <Ref Oper="LoewyStructureInfoGAP" Label="for parameters"/>
## that describes a Singer algebra <M>A = A[q,z]</M>,
## let <M>B = B(A_p)</M>,
## let <A>I</A> be a subset of <M>\{ 1, 2, \ldots, z+1 \}</M>,
## describing a subspace <M>U</M> of <M>A_p</M> with basis
## <M>( B_i; i \in I )</M>,
## and let <A>m</A> be a positive integer.
## (See Section <Ref Sect="sect:Singer algebras"/> for the definition of
## <M>A_p</M>.)
## <P/>
## <Ref Func="SingerAlg.BasisOfPMRoots"/> returns the subset <M>J</M>
## of <M>\{ 1, 2, \ldots, z+1 \}</M> such that
## <M>( B_i; i \in J )</M> is a basis of the subspace
## <M>\{ x \in A_p; x^{{p^m}} \in U \}</M> of <M>A_p</M>.
## <P/>
## <Example><![CDATA[
## gap> data:= LoewyStructureInfoGAP( 3, 8 );;
## gap> SingerAlg.BasisOfPMRoots( data, [], 2, 1 );
## [ 3, 6, 7, 8, 9 ]
## gap> SingerAlg.BasisOfPMRoots( data, [], 2, 2 );
## [ 2, 3, 4, 5, 6, 7, 8, 9 ]
## gap> SingerAlg.BasisOfPMRoots( data, [ 3 ], 2, 1 );
## [ 2, 3, 6, 7, 8, 9 ]
## ]]></Example>
## </Description>
## </ManSection>
## <#/GAPDoc>
##
SingerAlg.BasisOfPMRoots:= function( data, I, p, m )
local q, pm, quo;
q:= data.parameters[1];
pm:= p^m;
quo:= QuoInt( q-1, pm );
# The first condition means that B_k^{p^m} is zero
# and hence inside the given subspace.
# For the second condition, we may assume that B_k^{p^m}
# is nonzero and hence equal to B_{p^m (k-1)+1}.
return Filtered( [ 1 .. Length( data.monomials ) ],
k -> Maximum( data.monomials[k] ) > quo
or pm * (k-1) + 1 in I );
end;
#############################################################################
##
#F SingerAlg.BasisOfPC( <data>, <I>, <J> )
##
## <#GAPDoc Label="SingerAlg.BasisOfPC">
## <ManSection>
## <Func Name="SingerAlg.BasisOfPC" Arg='data, I, J'/>
##
## <Description>
## Let <A>data</A> be the record returned by
## <Ref Oper="LoewyStructureInfoGAP" Label="for parameters"/>
## that describes a Singer algebra <M>A = A[q,z]</M>,
## let <M>B = B(A)</M>,
## let <A>I</A> and <M>J</M> be subsets of <M>\{ 1, 2, \ldots, z+1 \}</M>,
## describing subspaces <M>U</M> and <M>V</M> of <M>A</M> with bases
## <M>( B_i; i \in I )</M> and <M>( B_i; i \in J )</M>,
## respectively.
## <Ref Func="SingerAlg.BasisOfPC"/> returns the subset <M>K</M>
## of <M>\{ 2, 3, \ldots, z+1 \}</M> such that
## <M>( B_i; i \in K )</M> is a basis of the subspace
## <M>\{ x \in J(A); x \cdot U \subseteq V \}</M> of <M>J(A)</M>.
## <P/>
## (The perhaps strange name <Q>BasisOfPC</Q> was chosen because the
## result contains the indices of those basis vectors such that the
## <E>P</E>roduct with the space <M>U</M> is <E>C</E>ontained in the
## space <M>V</M>.)
## <P/>
## <Example><![CDATA[
## gap> data:= LoewyStructureInfoGAP( 23, 585 );;
## gap> soc:= SingerAlg.BasesOfSocleSeries( data );;
## gap> rad:= SingerAlg.BasesOfRadicalSeries( data );;
## gap> I1:= SingerAlg.BasisOfPC( data, soc[3], rad[3] );;
## gap> Length( I1 );
## 581
## gap> data:= LoewyStructureInfoGAP( 212, 585 );;
## gap> soc:= SingerAlg.BasesOfSocleSeries( data );;
## gap> rad:= SingerAlg.BasesOfRadicalSeries( data );;
## gap> I2:= SingerAlg.BasisOfPC( data, soc[3], rad[3] );;
## gap> Length( I2 );
## 545
## ]]></Example>
## </Description>
## </ManSection>
## <#/GAPDoc>
##
SingerAlg.BasisOfPC:= { data, I, J } -> Filtered(
# Note that we are interested in subspaces of the radical.
[ 2 .. Length( data.monomials ) ],
k -> IsSubset( J, SingerAlg.BasisOfProductSpace( data, [ k ], I ) ) );
#############################################################################
##
#E
|
#ifndef _DSGLD_LDA_MODEL_H__
#define _DSGLD_LDA_MODEL_H__
#include <El.hpp>
#include <gsl/gsl_rng.h>
#include "sgld_model.h"
using std::string;
using std::vector;
namespace dsgld {
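// LDA (latent Dirichlet allocation) model for the dsgld framework.
// The stochastic gradient estimate in sgldEstimate() relies on
// per-document Gibbs sampling of topic assignments (gibbsSample);
// held-out perplexities collected along the way can be written out
// with writePerplexities().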
class LDAModel : public SGLDModel<double, int> {
public:
LDAModel(
const El::Matrix<int>& X,
const int K,
const double alpha,
const double beta);
~LDAModel() {};
El::Matrix<double> sgldEstimate(const El::Matrix<double>& theta) override;
El::Matrix<double> nablaLogPrior(const El::Matrix<double>& theta) const override;
void writePerplexities(const string& filename);
int NumGibbsSteps() const;
LDAModel* NumGibbsSteps(const int);
protected:
void gibbsSample(
const El::Matrix<int>& doc,
const El::Matrix<double>& theta,
El::Matrix<int>& index_to_topic,
El::Matrix<int>& topic_counts) const;
double estimatePerplexity(
const El::Matrix<double>& theta,
const El::Matrix<double>& theta_sum_over_w,
const int num_words_in_doc,
const El::Matrix<int>& topic_counts) const;
private:
const double alpha_;
const double beta_;
const int W;
const int K;
int numGibbsSteps_;
vector<double> perplexities_;
const gsl_rng* rng;
};
} // namespace dsgld
#endif // _DSGLD_LDA_MODEL_H__
|
#' Get Impression Level Revenue Data from Ad Revenue Measurement API
#' @description Get Impression Level Revenue Data from Ad Revenue Measurement API <https://developers.is.com/ironsource-mobile/air/ad-revenue-measurements>
#' @importFrom httr add_headers content GET
#'
#' @param date Date of report in UTC timezone
#' @param app_key Application key
#' @param bearer_token Bearer API Authentication token
#'
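#' @return A character string containing the download URL for the requested report
#'
#' @examples
#' \dontrun{
#' # hypothetical values, for illustration only
#' link <- is_revenue_impression("2024-01-01", "your_app_key", "Bearer your_token")
#' }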
#' @export
is_revenue_impression <- function(date, app_key, bearer_token) {
download_link <- GET("https://platform.ironsrc.com/partners/adRevenueMeasurements/v1?",
query = list(date = date,
appKey = app_key),
add_headers("Authorization" = bearer_token))
if (download_link$status_code != 200) {
stop(paste0("Error code ", download_link$status_code, ": ", content(download_link)$error))
}
download_link <- content(download_link)$urls[[1]]
return(download_link)
}
|
#include <math.h>
#include <gsl/gsl_ntuple.h>
#include <gsl/gsl_histogram.h>
struct data
{
double x;
double y;
double z;
};
int sel_func (void *ntuple_data, void *params);
double val_func (void *ntuple_data, void *params);
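/* Project the ntuple stored in "test.dat" (written beforehand by a
   companion program using gsl_ntuple_create with the same struct
   layout) onto a histogram of E2 = x^2 + y^2 + z^2, keeping only
   rows with E2 > 1.5. */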
int
main (void)
{
struct data ntuple_row;
gsl_ntuple *ntuple
= gsl_ntuple_open ("test.dat", &ntuple_row,
sizeof (ntuple_row));
double lower = 1.5;
gsl_ntuple_select_fn S;
gsl_ntuple_value_fn V;
gsl_histogram *h = gsl_histogram_alloc (100);
gsl_histogram_set_ranges_uniform(h, 0.0, 10.0);
S.function = &sel_func;
S.params = &lower;
V.function = &val_func;
V.params = 0;
gsl_ntuple_project (h, ntuple, &V, &S);
gsl_histogram_fprintf (stdout, h, "%f", "%f");
gsl_histogram_free (h);
gsl_ntuple_close (ntuple);
return 0;
}
int
sel_func (void *ntuple_data, void *params)
{
struct data * data = (struct data *) ntuple_data;
double x, y, z, E2, scale;
scale = *(double *) params;
x = data->x;
y = data->y;
z = data->z;
E2 = x * x + y * y + z * z;
return E2 > scale;
}
double
val_func (void *ntuple_data, void *params)
{
struct data * data = (struct data *) ntuple_data;
double x, y, z;
x = data->x;
y = data->y;
z = data->z;
return x * x + y * y + z * z;
}
|
##### Copyright 2020 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Quantum data
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/quantum/tutorials/quantum_data">View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/quantum/blob/master/docs/tutorials/quantum_data.ipynb">Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/quantum/blob/master/docs/tutorials/quantum_data.ipynb">View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/quantum/docs/tutorials/quantum_data.ipynb">Download notebook</a>
</td>
</table>
Building off of the comparisons made in the [MNIST](https://www.tensorflow.org/quantum/tutorials/mnist) tutorial, this tutorial explores the recent work of [Huang et al.](https://arxiv.org/abs/2011.01938) that shows how different datasets affect performance comparisons. In the work, the authors seek to understand how and when classical machine learning models can learn as well as (or better than) quantum models. The work also showcases an empirical performance separation between classical and quantum machine learning models via a carefully crafted dataset. You will:
1. Prepare a reduced dimension Fashion-MNIST dataset.
2. Use quantum circuits to re-label the dataset and compute Projected Quantum Kernel features (PQK).
3. Train a classical neural network on the re-labeled dataset and compare the performance with a model that has access to the PQK features.
## Setup
```
!pip -q install tensorflow==2.3.1 tensorflow-quantum
```
```
import cirq
import sympy
import numpy as np
import tensorflow as tf
import tensorflow_quantum as tfq
# visualization tools
%matplotlib inline
import matplotlib.pyplot as plt
from cirq.contrib.svg import SVGCircuit
np.random.seed(1234)
```
## 1. Data preparation
You will begin by preparing the fashion-MNIST dataset for running on a quantum computer.
### 1.1 Download fashion-MNIST
The first step is to get the traditional fashion-mnist dataset. This can be done using the `tf.keras.datasets` module.
```
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
# Rescale the images from [0,255] to the [0.0,1.0] range.
x_train, x_test = x_train[..., np.newaxis]/255.0, x_test[..., np.newaxis]/255.0
print("Number of original training examples:", len(x_train))
print("Number of original test examples:", len(x_test))
```
Filter the dataset to keep just the shirts and dresses, removing the other classes. At the same time, convert the label, `y`, to boolean: `True` for 0 and `False` for 3.
```
def filter_03(x, y):
keep = (y == 0) | (y == 3)
x, y = x[keep], y[keep]
y = y == 0
return x,y
```
```
x_train, y_train = filter_03(x_train, y_train)
x_test, y_test = filter_03(x_test, y_test)
print("Number of filtered training examples:", len(x_train))
print("Number of filtered test examples:", len(x_test))
```
```
print(y_train[0])
plt.imshow(x_train[0, :, :, 0])
plt.colorbar()
```
### 1.2 Downscale the images
Just like in the MNIST example, you will need to downscale these images in order to be within the boundaries for current quantum computers. This time, however, you will use a PCA transformation to reduce the dimensions instead of a `tf.image.resize` operation.
```
def truncate_x(x_train, x_test, n_components=10):
"""Perform PCA on image dataset keeping the top `n_components` components."""
n_points_train = tf.gather(tf.shape(x_train), 0)
n_points_test = tf.gather(tf.shape(x_test), 0)
# Flatten to 1D
x_train = tf.reshape(x_train, [n_points_train, -1])
x_test = tf.reshape(x_test, [n_points_test, -1])
# Normalize.
feature_mean = tf.reduce_mean(x_train, axis=0)
x_train_normalized = x_train - feature_mean
x_test_normalized = x_test - feature_mean
# Truncate.
e_values, e_vectors = tf.linalg.eigh(
tf.einsum('ji,jk->ik', x_train_normalized, x_train_normalized))
return tf.einsum('ij,jk->ik', x_train_normalized, e_vectors[:,-n_components:]), \
tf.einsum('ij,jk->ik', x_test_normalized, e_vectors[:, -n_components:])
```
```
DATASET_DIM = 10
x_train, x_test = truncate_x(x_train, x_test, n_components=DATASET_DIM)
print('New datapoint dimension:', len(x_train[0]))
```
The last step is to reduce the size of the dataset to just 1000 training datapoints and 200 testing datapoints.
```
N_TRAIN = 1000
N_TEST = 200
x_train, x_test = x_train[:N_TRAIN], x_test[:N_TEST]
y_train, y_test = y_train[:N_TRAIN], y_test[:N_TEST]
```
```
print("New number of training examples:", len(x_train))
print("New number of test examples:", len(x_test))
```
## 2. Relabeling and computing PQK features
You will now prepare a "stilted" quantum dataset by incorporating quantum components and re-labeling the truncated fashion-MNIST dataset you've created above. In order to get the most separation between quantum and classical methods, you will first prepare the PQK features and then relabel outputs based on their values.
### 2.1 Quantum encoding and PQK features
You will create a new set of features, based on `x_train`, `y_train`, `x_test` and `y_test`, that is defined to be the 1-RDM on all qubits of:
$V(x_{\text{train}} / n_{\text{trotter}}) ^ {n_{\text{trotter}}} U_{\text{1qb}} | 0 \rangle$
Where $U_\text{1qb}$ is a wall of single qubit rotations and $V(\hat{\theta}) = e^{-i\sum_i \hat{\theta_i} (X_i X_{i+1} + Y_i Y_{i+1} + Z_i Z_{i+1})}$
First, you can generate the wall of single qubit rotations:
```
def single_qubit_wall(qubits, rotations):
"""Prepare a single qubit X,Y,Z rotation wall on `qubits`."""
wall_circuit = cirq.Circuit()
for i, qubit in enumerate(qubits):
for j, gate in enumerate([cirq.X, cirq.Y, cirq.Z]):
wall_circuit.append(gate(qubit) ** rotations[i][j])
return wall_circuit
```
You can quickly verify this works by looking at the circuit:
```
SVGCircuit(single_qubit_wall(
cirq.GridQubit.rect(1,4), np.random.uniform(size=(4, 3))))
```
Next you can prepare $V(\hat{\theta})$ with the help of `tfq.util.exponential` which can exponentiate any commuting `cirq.PauliSum` objects:
```
def v_theta(qubits):
"""Prepares a circuit that generates V(\theta)."""
ref_paulis = [
cirq.X(q0) * cirq.X(q1) + \
cirq.Y(q0) * cirq.Y(q1) + \
cirq.Z(q0) * cirq.Z(q1) for q0, q1 in zip(qubits, qubits[1:])
]
exp_symbols = list(sympy.symbols('ref_0:'+str(len(ref_paulis))))
return tfq.util.exponential(ref_paulis, exp_symbols), exp_symbols
```
This circuit might be a little bit harder to verify by looking at, but you can still examine a two qubit case to see what is happening:
```
test_circuit, test_symbols = v_theta(cirq.GridQubit.rect(1, 2))
print(f'Symbols found in circuit:{test_symbols}')
SVGCircuit(test_circuit)
```
Now you have all the building blocks you need to put your full encoding circuits together:
```
def prepare_pqk_circuits(qubits, classical_source, n_trotter=10):
  """Prepare the pqk feature circuits around a dataset."""
  n_qubits = len(qubits)
  n_points = len(classical_source)

  # Prepare random single qubit rotation wall.
  random_rots = np.random.uniform(-2, 2, size=(n_qubits, 3))
  initial_U = single_qubit_wall(qubits, random_rots)

  # Prepare parametrized V
  V_circuit, symbols = v_theta(qubits)
  exp_circuit = cirq.Circuit(V_circuit for t in range(n_trotter))

  # Convert to `tf.Tensor`
  initial_U_tensor = tfq.convert_to_tensor([initial_U])
  initial_U_splat = tf.tile(initial_U_tensor, [n_points])

  full_circuits = tfq.layers.AddCircuit()(
      initial_U_splat, append=exp_circuit)
  # Replace placeholders in circuits with values from `classical_source`.
  return tfq.resolve_parameters(
      full_circuits, tf.convert_to_tensor([str(x) for x in symbols]),
      tf.convert_to_tensor(classical_source*(n_qubits/3)/n_trotter))
```
Choose some qubits and prepare the data encoding circuits:
```
qubits = cirq.GridQubit.rect(1, DATASET_DIM + 1)
q_x_train_circuits = prepare_pqk_circuits(qubits, x_train)
q_x_test_circuits = prepare_pqk_circuits(qubits, x_test)
```
Next, compute the PQK features based on the 1-RDM of the dataset circuits above and store the results in `rdm`, a `tf.Tensor` with shape `[n_points, n_qubits, 3]`. The entries in `rdm[i][j][k]` = $\langle \psi_i | OP^k_j | \psi_i \rangle$ where `i` indexes over datapoints, `j` indexes over qubits and `k` indexes over $\lbrace \hat{X}, \hat{Y}, \hat{Z} \rbrace$ .
```
def get_pqk_features(qubits, data_batch):
  """Get PQK features based on above construction."""
  ops = [[cirq.X(q), cirq.Y(q), cirq.Z(q)] for q in qubits]
  ops_tensor = tf.expand_dims(tf.reshape(tfq.convert_to_tensor(ops), -1), 0)
  batch_dim = tf.gather(tf.shape(data_batch), 0)
  ops_splat = tf.tile(ops_tensor, [batch_dim, 1])
  exp_vals = tfq.layers.Expectation()(data_batch, operators=ops_splat)
  rdm = tf.reshape(exp_vals, [batch_dim, len(qubits), -1])
  return rdm
```
```
x_train_pqk = get_pqk_features(qubits, q_x_train_circuits)
x_test_pqk = get_pqk_features(qubits, q_x_test_circuits)
print('New PQK training dataset has shape:', x_train_pqk.shape)
print('New PQK testing dataset has shape:', x_test_pqk.shape)
```
### 2.2 Re-labeling based on PQK features
Now that you have these quantum generated features in `x_train_pqk` and `x_test_pqk`, it is time to re-label the dataset. To achieve maximum separation between quantum and classical performance you can re-label the dataset based on the spectrum information found in `x_train_pqk` and `x_test_pqk`.
Note: This preparation of your dataset to explicitly maximize the separation in performance between the classical and quantum models might feel like cheating, but it provides a **very** important proof of existence for datasets that are hard for classical computers and easy for quantum computers to model. There would be no point in searching for quantum advantage in QML if you couldn't first create something like this to demonstrate advantage.
```
def compute_kernel_matrix(vecs, gamma):
  """Computes d[i][j] = scaled_gamma * sum_k (vecs[i][k] - vecs[j][k]) ** 2."""
  scaled_gamma = gamma / (
      tf.cast(tf.gather(tf.shape(vecs), 1), tf.float32) * tf.math.reduce_std(vecs))
  return scaled_gamma * tf.einsum('ijk->ij', (vecs[:, None, :] - vecs) ** 2)

def get_spectrum(datapoints, gamma=1.0):
  """Compute the eigenvalues and eigenvectors of the kernel of datapoints."""
  KC_qs = compute_kernel_matrix(datapoints, gamma)
  S, V = tf.linalg.eigh(KC_qs)
  S = tf.math.abs(S)
  return S, V
```
```
S_pqk, V_pqk = get_spectrum(
    tf.reshape(tf.concat([x_train_pqk, x_test_pqk], 0), [-1, len(qubits) * 3]))
S_original, V_original = get_spectrum(
    tf.cast(tf.concat([x_train, x_test], 0), tf.float32), gamma=0.005)
print('Eigenvectors of pqk kernel matrix:', V_pqk)
print('Eigenvectors of original kernel matrix:', V_original)
```
Now you have everything you need to re-label the dataset. You can consult the flowchart to better understand how to maximize the performance separation when re-labeling.
In order to maximize the separation between quantum and classical models, you will attempt to maximize the geometric difference between the original dataset and the PQK feature kernel matrices, $g(K_1 || K_2) = \sqrt{ || \sqrt{K_2} K_1^{-1} \sqrt{K_2} ||_\infty}$, using `S_pqk, V_pqk` and `S_original, V_original`. A large value of $g$ ensures that you initially move to the right in the flowchart, down towards a prediction advantage in the quantum case.
Note: Computing the quantities $s$ and $d$ is also very useful when looking to better understand performance separations. In this case, ensuring a large $g$ value is enough to see a performance separation.
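As a reference, here is a small sketch of how you could estimate $g$ directly from the two spectra. This helper is not part of the tutorial pipeline; its name and the small regularizer `eps` (which keeps $K_1^{-1}$ well conditioned) are illustrative assumptions:
```
def geometric_difference(S_1, V_1, S_2, V_2, eps=1e-6):
  """Sketch: estimate g(K_1 || K_2) = sqrt(||sqrt(K_2) K_1^{-1} sqrt(K_2)||_inf)."""
  # Rebuild K_1^{-1} and sqrt(K_2) from their eigendecompositions.
  k_1_inv = V_1 @ tf.linalg.diag(1.0 / (S_1 + eps)) @ tf.transpose(V_1)
  sqrt_k_2 = V_2 @ tf.linalg.diag(S_2 ** 0.5) @ tf.transpose(V_2)
  middle = sqrt_k_2 @ k_1_inv @ sqrt_k_2
  # For a symmetric matrix, the operator norm is its largest absolute eigenvalue.
  return tf.sqrt(tf.reduce_max(tf.abs(tf.linalg.eigvalsh(middle))))

# For example: geometric_difference(S_original, V_original, S_pqk, V_pqk)
```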
```
def get_stilted_dataset(S, V, S_2, V_2, lambdav=1.1):
  """Prepare new labels that maximize geometric distance between kernels."""
  S_diag = tf.linalg.diag(S ** 0.5)
  S_2_diag = tf.linalg.diag(S_2 / (S_2 + lambdav) ** 2)
  scaling = S_diag @ tf.transpose(V) @ \
            V_2 @ S_2_diag @ tf.transpose(V_2) @ \
            V @ S_diag

  # Generate new labels using the largest eigenvector.
  _, vecs = tf.linalg.eig(scaling)
  new_labels = tf.math.real(
      tf.einsum('ij,j->i', tf.cast(V @ S_diag, tf.complex64), vecs[-1])).numpy()
  # Create new labels and add some small amount of noise.
  final_y = new_labels > np.median(new_labels)
  noisy_y = (final_y ^ (np.random.uniform(size=final_y.shape) > 0.95))
  return noisy_y
```
```
y_relabel = get_stilted_dataset(S_pqk, V_pqk, S_original, V_original)
y_train_new, y_test_new = y_relabel[:N_TRAIN], y_relabel[N_TRAIN:]
```
## 3. Comparing models
Now that you have prepared your dataset it is time to compare model performance. You will create two small feedforward neural networks and compare the performance of the one that is given access to the PQK features found in `x_train_pqk` against one that is not.
### 3.1 Create PQK enhanced model
Using standard `tf.keras` library features you can now create and train a model on the `x_train_pqk` and `y_train_new` datapoints:
```
def create_pqk_model():
  model = tf.keras.Sequential()
  model.add(tf.keras.layers.Dense(32, activation='sigmoid', input_shape=[len(qubits) * 3,]))
  model.add(tf.keras.layers.Dense(16, activation='sigmoid'))
  model.add(tf.keras.layers.Dense(1))
  return model

pqk_model = create_pqk_model()
pqk_model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
                  optimizer=tf.keras.optimizers.Adam(learning_rate=0.003),
                  metrics=['accuracy'])
pqk_model.summary()
```
```
pqk_history = pqk_model.fit(tf.reshape(x_train_pqk, [N_TRAIN, -1]),
                            y_train_new,
                            batch_size=32,
                            epochs=1000,
                            verbose=0,
                            validation_data=(tf.reshape(x_test_pqk, [N_TEST, -1]), y_test_new))
```
### 3.2 Create a classical model
Similar to the code above, you can now also create a classical model that doesn't have access to the PQK features in your stilted dataset. This model can be trained using `x_train` and `y_train_new`.
```
def create_fair_classical_model():
  model = tf.keras.Sequential()
  model.add(tf.keras.layers.Dense(32, activation='sigmoid', input_shape=[DATASET_DIM,]))
  model.add(tf.keras.layers.Dense(16, activation='sigmoid'))
  model.add(tf.keras.layers.Dense(1))
  return model

model = create_fair_classical_model()
model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
              optimizer=tf.keras.optimizers.Adam(learning_rate=0.03),
              metrics=['accuracy'])
model.summary()
```
```
classical_history = model.fit(x_train,
                              y_train_new,
                              batch_size=32,
                              epochs=1000,
                              verbose=0,
                              validation_data=(x_test, y_test_new))
```
### 3.3 Compare performance
Now that you have trained the two models you can quickly plot the performance gaps in the validation data between the two. Typically both models will achieve > 0.9 accuracy on the training data. However on the validation data it becomes clear that only the information found in the PQK features is enough to make the model generalize well to unseen instances.
```
plt.figure(figsize=(10,5))
plt.plot(classical_history.history['accuracy'], label='accuracy_classical')
plt.plot(classical_history.history['val_accuracy'], label='val_accuracy_classical')
plt.plot(pqk_history.history['accuracy'], label='accuracy_quantum')
plt.plot(pqk_history.history['val_accuracy'], label='val_accuracy_quantum')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
```
Success: You have engineered a stilted quantum dataset that can intentionally defeat classical models in a fair (but contrived) setting. Try comparing results using other types of classical models. The next step is to try and see if you can find new and interesting datasets that can defeat classical models without needing to engineer them yourself!
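For example, a quick kernel-method baseline might look like the following sketch (not part of the tutorial; it reuses the variables defined above and assumes scikit-learn is installed):
```
from sklearn.svm import SVC

# Fit an RBF-kernel SVM on the same truncated features and stilted labels.
svm = SVC(kernel='rbf', gamma='scale')
svm.fit(np.array(x_train), y_train_new)
print('SVM test accuracy:', svm.score(np.array(x_test), y_test_new))
```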
## 4. Important conclusions
There are several important conclusions you can draw from this and the [MNIST](https://www.tensorflow.org/quantum/tutorials/mnist) experiments:
1. It's very unlikely that the quantum models of today will beat classical model performance on classical data, especially on today's classical datasets that can have upwards of a million datapoints.
2. Just because the data might come from a quantum circuit that is hard to simulate classically doesn't necessarily make the data hard for a classical model to learn.
3. Datasets (ultimately quantum in nature) that are easy for quantum models to learn and hard for classical models to learn do exist, regardless of model architecture or training algorithms used.
|
function [name mfcc] = readKaldiFeature(fileName)
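% readKaldiFeature  Read utterance names and feature matrices from a Kaldi
% binary archive (.ark) file. Each record is parsed as an utterance name,
% a binary header (format string, frame count, feature dimension) and a
% little-endian float32 matrix that is reshaped to dim-by-nframe.
% (Descriptive comment added for illustration, based on the code below.)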
endian = 'l';
% FID = fopen('D:\Data\MagorBN\MFCC_LDA_MLLT_MVN\Ark\cml_mfcc_train.10.ark', 'r');
FID = fopen(fileName, 'r');
name = {};
mfcc = {};
byte_cnt = 1;
while 1
[tmp,byte_cnt] = readUttName(FID, byte_cnt);
if length(tmp)==0
break;
end
name{end+1} = tmp;
% fprintf('name{%d} = %s\n', length(name), name{end});
header = readHeader(FID, byte_cnt, endian);
data = fread(FID, header.nframe * header.dim, 'float32', 0, endian)';
mfcc{end+1} = reshape(data, header.dim, header.nframe);
if 0
imagesc(mfcc{end});
title(regexprep(name{end}, '_', '\\_'));
colorbar;
pause
end
end
fclose(FID);
%%
function [header,byte_cnt] = readHeader(FID,byte_cnt, endian)
header.format = [];
byte_cnt = skipGap(FID, byte_cnt);
while 1
tmp = fread(FID, 1, 'char');
byte_cnt = byte_cnt + 1;
if tmp==' '
break;
else
header.format(end+1) = tmp;
end
end
header.format = char(header.format);
byte_cnt = skipGap(FID,byte_cnt);
header.nframe = fread(FID, 1, 'int32', 0, endian); byte_cnt = byte_cnt + 4;
byte_cnt = skipGap(FID,byte_cnt);
header.dim = fread(FID, 1, 'int32', 0, endian); byte_cnt = byte_cnt + 4;
function byte_cnt = skipGap(FID, byte_cnt)
while 1
tmp = fread(FID, 1, 'int8');
byte_cnt = byte_cnt + 1;
if tmp==0 || tmp==4
break;
end
end
function [uttName,byte_cnt] = readUttName(FID,byte_cnt)
uttName = [];
while 1
tmp = fread(FID, 1, 'char');
if length(tmp) == 0 % read end of the file
break;
elseif tmp==' '
break;
else
uttName(end+1) = tmp;
end
byte_cnt = byte_cnt + 1;
end
uttName = char(uttName);
|
#! python
# -*- coding: utf-8 -*-
import wx
import cv2
import numpy as np
from mwx.controls import LParam
from mwx.graphman import Layer
import editor as edi
class Plugin(Layer):
    """Gaussian Blur and Threshold --adaptive
    """
    menu = "Plugins/&Basic Tools"
    category = "Basic Tools"

    def Init(self):
        self.params = (
            LParam("ksize", (1,99,2), 15),
            LParam("sigma", (0,100,1), 0),
            LParam("block", (1,255*3,2), 3),
            LParam("C", (0,255,1), 0),
        )
        self.cutoff_params = (
            LParam("hi", (0, 1, 0.001), 0.01),
            LParam("lo", (0, 1, 0.001), 0.01)
        )
        btn = wx.Button(self, label="Execute", size=(66,22))
        btn.Bind(wx.EVT_BUTTON, lambda v: self.calc())
        self.layout(
            self.params, title="blur-threshold",
            type='vspin', cw=0, lw=48, tw=48
        )
        ## self.layout(
        ##     self.cutoff_params, title="cutoff [%]",
        ##     type='vspin', cw=-1, lw=16, tw=44
        ## )
        self.layout((btn,))

    def calc(self, frame=None):
        if not frame:
            frame = self.selected_view.frame
        k, s, block, C = np.int32(self.params)
        hi, lo = np.float32(self.cutoff_params)
        src = frame.buffer
        buf = edi.imconv(src, hi, lo)
        if k > 1:
            buf = cv2.GaussianBlur(buf, (k,k), s)
            self.output.load(buf, "*Gauss*", localunit=frame.unit)
        dst = cv2.adaptiveThreshold(buf, 255,
                cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, block, C)
        self.output.load(dst, "*threshold*", localunit=frame.unit)
        return dst
|
CY001954 843 C T resistance to the neuraminidase inhibitors
CY001953 793 T C resistance to the adamantanes
CY001953 805 G A resistance to the adamantanes
|
-- Andreas, 2015-05-01
-- With clauses for functions with flexible arity.
-- {-# OPTIONS -v tc.with:40 #-}
open import Common.Prelude
open import Common.Equality
mutual
  even : Nat → Bool
  even 0 = true
  even (suc n) = odd n

  odd : Nat → Bool
  odd 0 = false
  odd (suc n) = even n

NPred : Nat → Set
NPred 0 = Bool
NPred (suc n) = Nat → NPred n

const : Bool → ∀{n} → NPred n
const b {0} = b
const b {suc n} m = const b {n}

allOdd : ∀ n → NPred n
allOdd 0 = true
allOdd (suc n) m with even m
... | true = const false
... | false = allOdd n

test : allOdd 4 1 3 5 7 ≡ true
test = refl
|
Formal statement is: lemma LIMSEQ_iff_nz: "X \<longlonglongrightarrow> L \<longleftrightarrow> (\<forall>r>0. \<exists>no>0. \<forall>n\<ge>no. dist (X n) L < r)" for L :: "'a::metric_space" Informal statement is: A sequence $X$ converges to $L$ if and only if for every $\epsilon > 0$, there exists $N$ such that for all $n \geq N$, we have $|X_n - L| < \epsilon$. |
Hellblazer (also known as John Constantine, Hellblazer) is an American contemporary horror comic book series, originally published by DC Comics, and subsequently by the Vertigo imprint since March 1993 when the imprint was introduced. Its central character is the streetwise magician John Constantine, who was created by Alan Moore and Stephen R. Bissette, and first appeared as a supporting character in The Saga of the Swamp Thing #37 (June 1985), during that creative team's run on that title. Hellblazer had been published continuously since January 1988, and was Vertigo's longest running title, the only remaining publication from the imprint's launch. In 2013, the series concluded with issue 300, and has been replaced by a DC Universe title, Constantine. Well known for its political and social commentary, the series has spawned a film adaptation, television show, novels, multiple spin-offs and crossovers.
|
[STATEMENT]
lemma pathfinish_translation: "pathfinish((\<lambda>x. a + x) \<circ> g) = a + pathfinish g"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. pathfinish ((+) a \<circ> g) = a + pathfinish g
[PROOF STEP]
by (simp add: pathfinish_def) |
During the reintroduction of corn crakes to England in the 2003 breeding season, enteritis and ill health in pre-release birds were due to bacteria of a pathogenic Campylobacter species. Subsequently, microbiology tests were done to detect infected individuals and to find the source of the bacteria in their environment.
|
lemma powser_0_nonzero: fixes a :: "nat \<Rightarrow> 'a::{real_normed_field,banach}" assumes r: "0 < r" and sm: "\<And>x. norm (x-\<xi>) < r \<Longrightarrow> (\<lambda>n. a n * (x-\<xi>) ^ n) sums (f x)" and [simp]: "f \<xi> = 0" and m0: "a m \<noteq> 0" and "m>0" obtains s where "0 < s" and "\<And>z. z \<in> cball \<xi> s - {\<xi>} \<Longrightarrow> f z \<noteq> 0" |
open import Common.Equality
open import Common.Prelude
NaN : Float
NaN = primFloatDiv 0.0 0.0
NaN≢-NaN : primFloatEquality NaN (primFloatNegate NaN) ≡ false
NaN≢-NaN = refl
|
\documentclass[a4paper]{article}
\usepackage[pdftex]{graphicx}
\usepackage{xspace}
\def\G#1{\texttt{G#1}\xspace}
\def\P#1{\texttt{P#1}\xspace}
\def\D#1{\texttt{D#1}\xspace}
\parindent 0pt
\parskip 0pt
\begin{document}
\title{How Pulley Produces Incremental Output}
\author{Rick van Rein}
\maketitle
\begin{abstract}\noindent\em
The Pulley script language provides a manner of specifying, almost in a SQL style, how variables from various places in LDAP should be related to produce output. The implementation is incremental, making Pulley a very pragmatic component to pass through changes rather than complete new configurations. But how is this done?
\end{abstract}
First, during analysis of the Pulley script, the variables mentioned in each line are noted. Conditions can only be computed when all their variables are known, and this results in a partitioning of variables. For each partition, there is a set of conditions that can be computed over it. Variables that are not subjected to conditions form partitions of their own.
\section{Deriving impacted drivers}
Generators produce tuples containing variables, and drivers output variables. This leads to a notion of overlap between these entities. The figure below uses \G{$i$}, \P{$j$} and \D{$k$} to encode generator $i$, variable partition $j$ and driver $k$, respectively.
\centerline{\includegraphics[scale=0.5]{img/network0-varoverlap.pdf}}
Now let's assume that an update arriving on generator \G2 forks a tuple addition (or removal, which is similar except for its impact on the driver). We use the red colour to indicate downward reasoning and draw:
\centerline{\includegraphics[scale=0.5]{img/network1-generate.pdf}}
This generator produces variables that are constrained by conditions, represented by variable partitions \P1 and \P2,
\centerline{\includegraphics[scale=0.5]{img/network2-varparts.pdf}}
These partitions may impact a number of drivers, in this case \D1, \D2 and \D3:
\centerline{\includegraphics[scale=0.5]{img/network3-drivers.pdf}}
The drivers shown in red are impacted by the forked tuple from our starting point, \G2.
\section{Deriving co-generators}
Whereas a generator, such as \G2, produces a \textbf{fork}, that is, a tuple addition or removal, there is a need to incorporate the values of other, related generators, the so-called co-generators. The original generator produces a single value, so we will need to iterate over all values that have already been generated by the co-generators to be able to produce all the newly initiated output to send to drivers (after applying conditions, that is).
\subsection{Co-generators for complete production of \G2}
To provide the impacted drivers with input, we need to be able to produce variable values from a number of partitions, of course constrained by the conditions that apply to them. The partitions that apply here contain at least the partitions discovered during the downward movement (shown in red), so we draw the upward addition of \P3 in green:
\centerline{\includegraphics[scale=0.5]{img/network4-varsneeded.pdf}}
To be able to produce these variable partitions we need at least the original generator \G2, but there may be additional ones shown again in green; in this case, we also need \G1 and \G3:
\centerline{\includegraphics[scale=0.5]{img/network5-gensneeded.pdf}}
The additional generators in green, so \G1 and \G3, are called the \textbf{co-generators} needed for the production of the drivers impacted by our starting point, \G2 or, briefly put, they are the co-generators for the \textbf{complete production} of \G2.
Thanks to the two colours we can create an overview image, where green are the upward additions,
\centerline{\includegraphics[scale=0.5]{img/network6-combined.pdf}}
\subsection{Co-generators of \G2 for production of \D1}
Implementations may vary in their ability to produce all impacted drivers at once; for our implementation on top of SQLite3 we produce only one driver at a time. We will now look into the production of \D1 alone.
\centerline{\includegraphics[scale=0.5]{img/network3-drivers-w1.pdf}}
This requires only \P1,
\centerline{\includegraphics[scale=0.5]{img/network4-varsneeded-w1.pdf}}
For which we need to run \G1 and \G2,
\centerline{\includegraphics[scale=0.5]{img/network5-gensneeded-w1.pdf}}
In this case, \G1 is a co-generator to \G2 for the production of \D1. This is much simpler than the general case.
\subsection{Co-generators of \G2 for production of \D2}
We now turn to another production, namely for output \D2. As this will demonstrate, we may not always find simpler outcomes by looking at single drivers at a time.
\centerline{\includegraphics[scale=0.5]{img/network3-drivers-w2.pdf}}
This requires \P1, \P2 and \P3,
\centerline{\includegraphics[scale=0.5]{img/network4-varsneeded-w2.pdf}}
For which we need to run \G1, \G2 and \G3, just as in the general case,
\centerline{\includegraphics[scale=0.5]{img/network5-gensneeded-w2.pdf}}
In this case, \G1 and \G3 are the co-generators to \G2 for the production of \D2.
\section{Determining guards for drivers}
A driver outputs variables, but not necessarily the precise amount that is generated by a production from the Pulley script. Even when only the co-generators for its specific output are produced, then the variable partitions involved may still supply extra variables that are not passed on to the driver as output.
Variables that are produced alongside the variables for a driver's output are called \textbf{guards}. When they are not explicitly declared by a user, they count as \textbf{implicit guards}. They are important because productions create unique combinations of guards plus output, but output on its own may not be unique and require the guard as add-on information. Drivers may take guards into account to make this possible, or the Pulley could count how many guards were produced for a given output. To that end, it could store the output (or a hash of the output) together with a counter.
When co-generators are derived for the production of output for multiple drivers at once, the guards may be larger than when generating them for a single driver. The principle remains the same, although optimisations may exist.
\section{Producing output from a fork}
Given the addition or removal of a fork by a generator, we need to produce output, either adding or removing it through the various drivers. We first derive the impacted drivers, and the co-generators for either one driver or all at once; the following applies to both, but requires repeating when multiple drivers are considered separately.
\begin{enumerate}
\item Bind the forked tuple to its variables.
\item Run all co-generators in a nested fashion to produce their cartesian product.
\item Bind the elements of the cartesian product to their variables.
\item Skip further processing of variable bindings that fail on one of the conditions defined for the used variable partitions.
\item For each driver, hash the output values and increment its use count.
\item When the output values are new on addition, send an addition to the driver.
\item When the output values are removed for the last time, send a removal to the driver.
\end{enumerate}
This procedure ensures that output is generated by a driver for as long as at least one of its entries occurs in the LDAP system. Some drivers may however accept the addition of guards, which makes them responsible of managing the multiplicity from these extra variables. There is no reason why this could be problematic, as the guards will be as consistent on reproduction as are the normal output variables.
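As a minimal illustration of the procedure above, consider the following Python sketch. All names in it, such as \texttt{stored\_tuples}, \texttt{output\_vars} and the drivers' \texttt{add}/\texttt{remove} methods, are illustrative assumptions rather than actual Pulley interfaces; each stored tuple is assumed to be a dictionary of variable bindings.
\begin{verbatim}
import hashlib
import itertools

def process_fork(fork_binding, addition, cogenerators, conditions,
                 drivers, use_counts):
    # Steps 2-3: nested run over the co-generators, i.e. the cartesian
    # product of their stored tuples, merged with the fork's own binding.
    for combo in itertools.product(*(g.stored_tuples for g in cogenerators)):
        env = dict(fork_binding)
        for tup in combo:
            env.update(tup)
        # Step 4: skip variable bindings that fail one of the conditions.
        if not all(cond(env) for cond in conditions):
            continue
        for drv in drivers:
            out = tuple(env[v] for v in drv.output_vars)
            key = hashlib.sha256(repr(out).encode()).digest()
            # Step 5: maintain the use-count per hashed output.
            use_counts[key] = use_counts.get(key, 0) + (1 if addition else -1)
            # Steps 6-7: only 0 -> 1 and 1 -> 0 transitions reach the driver.
            if addition and use_counts[key] == 1:
                drv.add(out)
            elif not addition and use_counts[key] == 0:
                drv.remove(out)
\end{verbatim}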
\section{Compiling the Pulley script language}
The compiler built into Pulley performs a fair amount of semantic analysis to be able to produce the incremental output for drivers. The result of the analysis is a series of SQLite3 queries, aimed at producing one driver at a time (and running multiple of these as a result of a single generator fork).
\subsection{Storage model}
The SQLite3 database holds the following information (a minimal schema sketch in Python follows the list):
\begin{itemize}
\item\textbf{Generator tuples}, holding each of the variable values as a separate blob. These tables are extended when a fork is added, and reduced when a fork is removed. They are used to produce the cartesian product when the generator serves as a co-generator. Generators that never serve as co-generators do not need this table.
\item\textbf{Driver output use-counts}, stored as a hash over the output variables (in their normal order) together with the number of fork additions minus the number of fork removals. When not in the table, the use-count is considered 0. Transitions from 0 to 1 cause the driver to be supplied with an output addition, and transitions from 1 to 0 cause the driver to be supplied with an output removal. This is not required when an output has no guards, or when it handles the guards internally.
\end{itemize}
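A minimal sketch of these two kinds of tables, with purely illustrative table and column names (the schema actually emitted by the compiler may differ):
\begin{verbatim}
import sqlite3

conn = sqlite3.connect("pulley.db")
# One table per generator that can serve as a co-generator;
# one blob column per variable that the generator produces.
conn.execute("CREATE TABLE gen_g2 (var_x BLOB, var_y BLOB)")
# Driver output use-counts, keyed by a hash over the output
# variables in their normal order.
conn.execute("""CREATE TABLE drv_d1_counts (
                    out_hash BLOB PRIMARY KEY,
                    use_count INTEGER NOT NULL)""")
conn.commit()
\end{verbatim}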
\subsection{Global process of the Pulley script Compiler}
The following phases define the broad processing of the Pulley compiler:
\begin{enumerate}
\item\textbf{Download} of the Pulley script from LDAP and/or files into the re-entrant parser is possible, because it consists of lines without structure, and processing does not depend on the order of occurrence of the lines.
\item\textbf{Hashing} of individual lines, and their combination to an overall script hash in an order-independent manner support a quick check whether the Pulley script has changed. This is instrumental for fast restarts; complete reset of the output is only required when the Pulley script changes and when SyncRepl updates over LDAP fails.
\item\textbf{Parsing} introduces new variables in generators, each time using a unique name. Initially, each variable is assigned to a unique partition.
\item\textbf{Analysis} produces knowledge about variable partitions and co-generators, initially targeted at a single driver at a time.
\item\textbf{Production} produces the computation rules for an engine, initially SQLite3 with generation of queries that process generator forks and produce output through drivers.
\end{enumerate}
\subsection{Analysis of a Pulley script}
The analysis phase is the most complex phase during compilation, and is summarised by the images presented before. The following steps are taken during the analysis phase:
\begin{enumerate}
\item\textbf{Structural analysis} derives variable partitions and the things a driver needs for its work, and the things a generator needs for its work.
\item\textbf{Structural consistency} validates that every variable is bound by precisely one generator, that every condition contains at least one variable and that driver output always contains at least one variable.
\item\textbf{Cost analysis} is not needed with the SQLite3 engine; it analyses the cheapest generators through estimated costs; this is overtaken by the analysis done by SQLite3 during query optimisation.
\item\textbf{Generate paths} is not needed with the SQLite3 engine; it produces what is thought to be the cheapest path, known as the \textbf{path of least resistance}, for production of output for drivers.
\end{enumerate}
\end{document}
|
[STATEMENT]
lemma in_obs_valid:
assumes "n' \<in> obs n S" shows "valid_node n" and "valid_node n'"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. valid_node n &&& valid_node n'
[PROOF STEP]
using \<open>n' \<in> obs n S\<close>
[PROOF STATE]
proof (prove)
using this:
n' \<in> obs n S
goal (1 subgoal):
1. valid_node n &&& valid_node n'
[PROOF STEP]
by(auto elim:obsE intro:path_valid_node) |
Formal statement is: lemma GPicard2: assumes "S \<subseteq> T" "connected T" "S \<noteq> {}" "open S" "\<And>x. \<lbrakk>x islimpt S; x \<in> T\<rbrakk> \<Longrightarrow> x \<in> S" shows "S = T" Informal statement is: If $S$ is a nonempty open connected subset of a topological space $T$, and if every point of $T$ that is a limit point of $S$ is in $S$, then $S = T$. |
State Before: α : Type u_1
inst✝ : DecidableEq α
s✝ s' s : Cycle α
h : Nodup s
x : α
hx : x ∈ s
⊢ ↑(formPerm s h) x = next s h x hx State After: case h
α : Type u_1
inst✝ : DecidableEq α
s s' : Cycle α
x : α
a✝ : List α
h : Nodup (Quot.mk Setoid.r a✝)
hx : x ∈ Quot.mk Setoid.r a✝
⊢ ↑(formPerm (Quot.mk Setoid.r a✝) h) x = next (Quot.mk Setoid.r a✝) h x hx Tactic: induction s using Quot.inductionOn State Before: case h
α : Type u_1
inst✝ : DecidableEq α
s s' : Cycle α
x : α
a✝ : List α
h : Nodup (Quot.mk Setoid.r a✝)
hx : x ∈ Quot.mk Setoid.r a✝
⊢ ↑(formPerm (Quot.mk Setoid.r a✝) h) x = next (Quot.mk Setoid.r a✝) h x hx State After: no goals Tactic: simpa using List.formPerm_apply_mem_eq_next h _ (by simp_all) State Before: α : Type u_1
inst✝ : DecidableEq α
s s' : Cycle α
x : α
a✝ : List α
h : Nodup (Quot.mk Setoid.r a✝)
hx : x ∈ Quot.mk Setoid.r a✝
⊢ x ∈ a✝ State After: no goals Tactic: simp_all
function masks = char2img( strings, h, pad )
% Convert ascii text to a binary image using pre-computed templates.
%
% Input strings can only contain standard characters (ascii character
% 32-126). First time char2img() is ever called with a given height h,
% txt2img() is used to create a template for each ascii character. All
% subsequent calls to to char2img() with the given height are very fast as
% the pre-computed templates are used and no display/screen capture is
% needed.
%
% USAGE
% masks = char2img( strings, h, [pad] )
%
% INPUTS
% strings - {1xn} text string or cell array of text strings to convert
% h - font height in pixels
% pad - [0] amount of extra padding between chars
%
% OUTPUTS
% masks - {1xn} binary image masks of height h for each string
%
% EXAMPLE
% mask=char2img('hello world',50); im(mask{1})
% mask=char2img(num2str(pi),35); im(mask{1})
%
% See also TXT2IMG
%
% Piotr's Image&Video Toolbox Version 2.65
% Copyright 2012 Piotr Dollar. [pdollar-at-caltech.edu]
% Please email me if you find bugs, or have suggestions or questions!
% Licensed under the Simplified BSD License [see external/bsd.txt]
% load or create character templates (or simply store persistently)
persistent chars h0
if(isempty(h0) || h0~=h)
pth=fileparts(mfilename('fullpath'));
fName=sprintf('%s/private/char2img_h%03i.mat',pth,h); h0=h;
if(exist(fName,'file')), load(fName); else
chars=char(32:126); chars=num2cell(chars);
chars=txt2img(chars,h,{'Interpreter','none'});
save(fName,'chars');
end
end
% add padding to chars
if(nargin<3 || isempty(pad)), pad=0; end
charsPad=chars; if(pad), pad=ones(h,pad,'uint8');
for i=1:length(chars), charsPad{i}=[pad chars{i} pad]; end; end
% create actual string using templates
if(~iscell(strings)), strings={strings}; end
n=length(strings); masks=cell(1,n);
for s=1:n, str=strings{s};
str(str<32 | str>126)=32; str=uint8(str-31);
M=[charsPad{str}]; masks{s}=M;
end
|
# coding: utf-8
import sys
import jieba
import numpy
from sklearn import metrics
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.naive_bayes import MultinomialNB
def input_data(train_file, test_file):
    train_words = []
    train_tags = []
    test_words = []
    test_tags = []
    with open(train_file, 'r') as f1:
        for line in f1:
            tks = line.split('\t', 1)
            train_words.append(tks[1])
            train_tags.append(tks[0])
    with open(test_file, 'r') as f1:
        for line in f1:
            tks = line.split('\t', 1)
            test_words.append(tks[1])
            test_tags.append(tks[0])
    return train_words, train_tags, test_words, test_tags

with open('stopwords.txt', 'r') as f:
    stopwords = set([w.strip() for w in f])

comma_tokenizer = lambda x: jieba.cut(x, cut_all=True)

def vectorize(train_words, test_words):
    # HashingVectorizer is stateless, so calling fit_transform on train and
    # test separately still maps identical tokens to identical feature indices.
    v = HashingVectorizer(tokenizer=comma_tokenizer, n_features=30000, non_negative=True)
    train_data = v.fit_transform(train_words)
    test_data = v.fit_transform(test_words)
    return train_data, test_data

def evaluate(actual, pred):
    m_precision = metrics.precision_score(actual, pred)
    m_recall = metrics.recall_score(actual, pred)
    print 'precision:{0:.3f}'.format(m_precision)
    print 'recall:{0:0.3f}'.format(m_recall)

def train_clf(train_data, train_tags):
    clf = MultinomialNB(alpha=0.01)
    clf.fit(train_data, numpy.asarray(train_tags))
    return clf

def main():
    if len(sys.argv) < 3:
        print '[Usage]: python classifier.py train_file test_file'
        sys.exit(0)
    train_file = sys.argv[1]
    test_file = sys.argv[2]
    train_words, train_tags, test_words, test_tags = input_data(train_file, test_file)
    train_data, test_data = vectorize(train_words, test_words)
    clf = train_clf(train_data, train_tags)
    pred = clf.predict(test_data)
    evaluate(numpy.asarray(test_tags), pred)

if __name__ == '__main__':
    main()
|
(* Title: L3_Lib.thy
Original author: Anthony Fox, University of Cambridge
Contributions by: Kyndylan Nienhuis <[email protected]>
L3 operations.
*)
theory L3_Lib
imports "HOL-Word.Word"
"HOL-Library.Code_Target_Numeral"
"HOL-Library.Monad_Syntax"
begin
translations
"_do_block (_do_cons (_do_bind p t) (_do_final e))"
<= "CONST bind t (\<lambda>p. e)"
(* avoid syntax clash with shift-right operation *)
no_syntax
"_thenM" :: "['a, 'b] \<Rightarrow> 'c" (infixr ">>" 54)
(* basic state Monad *)
definition "return = Pair"
definition bind :: "('state \<Rightarrow> ('a \<times> 'state)) \<Rightarrow>
('a \<Rightarrow> 'state \<Rightarrow> ('b \<times> 'state)) \<Rightarrow>
('state \<Rightarrow> ('b \<times> 'state))" where
"bind f g = (\<lambda>s. let (a, s') = f s in g a s')"
(* use do syntax for this state monad *)
adhoc_overloading
Monad_Syntax.bind L3_Lib.bind
definition read_state :: "('state \<Rightarrow> 'a) \<Rightarrow> 'state \<Rightarrow> 'a \<times> 'state" where
"read_state f = (\<lambda>s. (f s, s))"
definition update_state :: "('state \<Rightarrow> 'state) \<Rightarrow> 'state \<Rightarrow> unit \<times> 'state" where
"update_state f = (\<lambda>s. ((), f s))"
definition extend_state :: "'b \<Rightarrow> ('b \<times> 'state \<Rightarrow> 'a \<times> 'b \<times> 'state) \<Rightarrow> 'state \<Rightarrow> 'a \<times> 'state" where
"extend_state v f = (\<lambda>s. let (a, s') = f (v, s) in (a, snd s'))"
definition trim_state :: "('state \<Rightarrow> 'a \<times> 'state) \<Rightarrow> 'b \<times> 'state \<Rightarrow> 'a \<times> 'b \<times> 'state" where
"trim_state f = (\<lambda>(s1, s2). let (a, s') = f s2 in (a, s1, s'))"
fun foreach_loop :: "'a list \<times> ('a \<Rightarrow> 'state \<Rightarrow> unit \<times> 'state) \<Rightarrow> 'state \<Rightarrow> unit \<times> 'state" where
"foreach_loop ([], _) = return ()" |
"foreach_loop (h # t, a) = bind (a h) (\<lambda>u. foreach_loop (t, a))"
function for_loop :: "nat \<times> nat \<times> (nat \<Rightarrow> 'state \<Rightarrow> unit \<times> 'state) \<Rightarrow> 'state \<Rightarrow> unit \<times> 'state" where
"for_loop (i, j, a) =
(if i = j then
a i
else
bind (a i) (\<lambda>u. for_loop ((if i < j then i + 1 else i - 1), j, a)))"
by auto
termination by (relation "measure (\<lambda>(i, j, _). if i < j then j - i else i - j)") auto
(* Because there are no constraints on i, j and a on the left-hand side of the definition, every
occurrence of for_loop can be simplified by for_loop.simps, and since the definition is recursive
the simplifier might diverge. For this reason we remove for_loop.simps from the simp set. *)
declare for_loop.simps [simp del]
(* Monad laws *)
lemma bind_left_identity [simp]:
shows "bind (return a) f = f a"
unfolding return_def bind_def
by auto
lemma bind_right_identity [simp]:
shows "bind m return = m"
unfolding return_def bind_def
by auto
lemma bind_associativity:
shows "bind (bind m f) g = bind m (\<lambda>a. bind (f a) g)"
(is "?l = ?r")
proof
fix s
show "?l s = ?r s"
unfolding return_def bind_def
by (cases "m s") auto
qed
(* Projections *)
lemma project_return [simp]:
shows "fst (return a s) = a"
and "snd (return a s) = s"
unfolding return_def
by auto
lemma project_read_state [simp]:
shows "fst (read_state f s) = f s"
and "snd (read_state f s) = s"
unfolding read_state_def
by auto
lemma project_update_state [simp]:
shows "fst (update_state f s) = ()"
and "snd (update_state f s) = f s"
unfolding update_state_def
by auto
(* Other monad simplifications *)
lemma read_state_constant [simp]:
shows "read_state (\<lambda>s. a) = return a"
unfolding read_state_def return_def
..
lemma update_state_id [simp]:
shows "update_state (\<lambda>s. s) = return ()"
unfolding update_state_def return_def
..
lemma foreach_loop_return [simp]:
shows "foreach_loop (l, \<lambda>_. return a) = return ()"
by (induct l) simp_all
lemma extend_state_return [simp]:
shows "extend_state v (return a) = return a"
unfolding extend_state_def return_def
by simp
lemma extend_state_trim_state [simp]:
shows "extend_state v (trim_state m) = m"
(is "?l = ?r")
proof
fix s
show "?l s = ?r s"
unfolding extend_state_def trim_state_def
by (cases "m s") auto
qed
(* extra character operations *)
definition Ord :: "char \<Rightarrow> nat" where
"Ord = nat_of_char"
definition Chr :: "nat \<Rightarrow> char" where
"Chr = char_of_nat"
definition is_lower :: "char \<Rightarrow> bool" where
"is_lower c = (Ord (CHR ''a'') \<le> Ord c \<and> Ord c \<le> Ord (CHR ''z''))"
definition is_upper :: "char \<Rightarrow> bool" where
"is_upper c = (Ord (CHR ''A'') \<le> Ord c \<and> Ord c \<le> Ord (CHR ''Z''))"
definition is_space :: "char \<Rightarrow> bool" where
"is_space c = (Ord (CHR '' '') = Ord c \<or> 9 \<le> Ord c \<and> Ord c \<le> 13)"
definition is_digit :: "char \<Rightarrow> bool" where
"is_digit c = (Ord (CHR ''0'') \<le> Ord c \<and> Ord c \<le> Ord (CHR ''9''))"
definition is_hex_digit :: "char \<Rightarrow> bool" where
"is_hex_digit c = (is_digit c \<or> Ord (CHR ''a'') \<le> Ord c \<and> Ord c \<le> Ord (CHR ''f'') \<or>
Ord (CHR ''A'') \<le> Ord c \<and> Ord c \<le> Ord (CHR ''F''))"
definition is_alpha :: "char \<Rightarrow> bool" where
"is_alpha c = (is_lower c \<or> is_upper c)"
definition is_alpha_num :: "char \<Rightarrow> bool" where
"is_alpha_num c = (is_alpha c \<or> is_digit c)"
definition to_lower :: "char \<Rightarrow> char" where
"to_lower c = (if is_upper c then Chr (Ord c + 32) else c)"
definition to_upper :: "char \<Rightarrow> char" where
"to_upper c = (if is_lower c then Chr (Ord c - 32) else c)"
(* numeric strings *)
fun list_to_nat :: "nat \<Rightarrow> nat list \<Rightarrow> nat" where
"list_to_nat _ [] = 0" |
"list_to_nat base (h # t) = h mod base + base * list_to_nat base t"
fun nat_to_list :: "nat \<Rightarrow> nat \<Rightarrow> nat list" where
"nat_to_list base n =
(if n < base \<or> base < 2 then [n mod base] else n mod base # nat_to_list base (n div base))"
(* Because there are no constraints on n on the left-hand side of the definition, every occurrence
of nat_to_list can be simplified by nat_to_list.simps, and since the definition is recursive the
simplifier might diverge. For this reason we remove nat_to_list.simps from the simp set. *)
declare nat_to_list.simps [simp del]
definition hex :: "nat \<Rightarrow> char" where
"hex n = (if n = 0 then CHR ''0''
else if n = 1 then CHR ''1''
else if n = 2 then CHR ''2''
else if n = 3 then CHR ''3''
else if n = 4 then CHR ''4''
else if n = 5 then CHR ''5''
else if n = 6 then CHR ''6''
else if n = 7 then CHR ''7''
else if n = 8 then CHR ''8''
else if n = 9 then CHR ''9''
else if n = 10 then CHR ''A''
else if n = 11 then CHR ''B''
else if n = 12 then CHR ''C''
else if n = 13 then CHR ''D''
else if n = 14 then CHR ''E''
else if n = 15 then CHR ''F''
else undefined)"
definition unhex :: "char \<Rightarrow> nat" where
"unhex c = (if c = CHR ''0'' then 0
else if c = CHR ''1'' then 1
else if c = CHR ''2'' then 2
else if c = CHR ''3'' then 3
else if c = CHR ''4'' then 4
else if c = CHR ''5'' then 5
else if c = CHR ''6'' then 6
else if c = CHR ''7'' then 7
else if c = CHR ''8'' then 8
else if c = CHR ''9'' then 9
else if c = CHR ''a'' \<or> c = CHR ''A'' then 10
else if c = CHR ''b'' \<or> c = CHR ''B'' then 11
else if c = CHR ''c'' \<or> c = CHR ''C'' then 12
else if c = CHR ''d'' \<or> c = CHR ''D'' then 13
else if c = CHR ''e'' \<or> c = CHR ''E'' then 14
else if c = CHR ''f'' \<or> c = CHR ''F'' then 15
else undefined)"
definition string_to_nat :: "nat \<Rightarrow> string \<Rightarrow> nat" where
"string_to_nat base s = list_to_nat base (map unhex (rev s))"
definition nat_to_string :: "nat \<Rightarrow> nat \<Rightarrow> string" where
"nat_to_string base n = rev (map hex (nat_to_list base n))"
definition "bin_string_to_nat \<equiv> string_to_nat 2"
definition "nat_to_bin_string \<equiv> nat_to_string 2"
definition "dec_string_to_nat \<equiv> string_to_nat 10"
definition "nat_to_dec_string \<equiv> nat_to_string 10"
definition "hex_string_to_nat \<equiv> string_to_nat 16"
definition "nat_to_hex_string \<equiv> nat_to_string 16"
definition nat_from_bin_string :: "string \<Rightarrow> nat option" where
"nat_from_bin_string s =
(if s \<noteq> '''' \<and> list_all (\<lambda>c. c = CHR ''0'' \<or> c = CHR ''1'') s then
Some (bin_string_to_nat s)
else None)"
definition nat_from_dec_string :: "string \<Rightarrow> nat option" where
"nat_from_dec_string s =
(if s \<noteq> '''' \<and> list_all is_digit s then Some (dec_string_to_nat s) else None)"
definition nat_from_hex_string :: "string \<Rightarrow> nat option" where
"nat_from_hex_string s =
(if s \<noteq> '''' \<and> list_all is_hex_digit s then Some (hex_string_to_nat s) else None)"
definition dec_string_to_int :: "string \<Rightarrow> int" where
"dec_string_to_int r =
(case r of [] \<Rightarrow> 0 |
h # t \<Rightarrow> (if h = CHR ''-'' \<or> h = CHR ''~''
then -int (dec_string_to_nat t)
else int (dec_string_to_nat r)))"
definition int_to_dec_string :: "int \<Rightarrow> string" where
"int_to_dec_string i =
(if i < 0 then CHR ''~'' # nat_to_dec_string (nat (-i)) else nat_to_dec_string (nat i))"
definition string_to_bool :: "string \<Rightarrow> bool" where
"string_to_bool s = (if s = ''true'' then True
else if s = ''false'' then False
else undefined)"
definition string_to_char :: "string \<Rightarrow> char" where
"string_to_char s = (case s of [c] \<Rightarrow> c | _ \<Rightarrow> undefined)"
(* extra Nat operation *)
fun log2 :: "nat \<Rightarrow> nat" where
"log2 n = (if n = 0 then undefined else if n = 1 then 0 else Suc (log2 (n div 2)))"
(* Because there are no constraints on n on the left-hand side of the definition, every occurrence
of log2 can be simplified by log2.simps, and since the definition is recursive the simplifier might
diverge. For this reason we remove log2.simps from the simp set. *)
declare log2.simps [simp del]
lemma log2_bounds:
assumes "n \<noteq> 0"
shows "2 ^ (log2 n) \<le> n"
and "n < 2 ^ (Suc (log2 n))"
proof -
(* "The induction works better if we prove one goal instead of two goals" *)
have "2 ^ (log2 n) \<le> n \<and> n < 2 ^ (Suc (log2 n))"
using assms
proof (induct "log2 n" arbitrary: n)
case 0
hence "n = 1"
by (simp add: log2.simps) (meson nat.simps(3))
thus ?case by (simp add: log2.simps)
next
case (Suc k)
show ?case
proof (cases "n = 1")
case True
thus ?thesis by (simp add: log2.simps)
next
case False
hence "1 < n" using Suc(3) by simp
hence "(n div 2) \<noteq> 0" by auto
have log2: "log2 n = Suc (log2 (n div 2))"
using `1 < n` by (simp add: log2.simps)
hence "k = log2 (n div 2)" using Suc(2) by simp
note Suc(1)[OF this `(n div 2) \<noteq> 0`]
thus ?thesis
using log2 by auto
qed
qed
thus "2 ^ (log2 n) \<le> n" "n < 2 ^ (Suc (log2 n))" by auto
qed
lemma log2_unat_bounds:
fixes x :: "('a :: len) word"
assumes "x \<noteq> 0"
shows "log2 (unat x) < len_of TYPE('a)"
proof -
have "unat x \<noteq> 0"
using assms
by (simp add: unat_eq_zero)
have "unat x < 2 ^ len_of TYPE('a)"
by simp
note le_less_trans[OF log2_bounds(1)[OF `unat x \<noteq> 0`] this]
thus ?thesis by auto
qed
(* extra int operations *)
definition quot :: "int \<Rightarrow> int \<Rightarrow> int" (infixl "quot" 70) where
"i quot j = (if j = 0 then undefined
else if 0 < j then if 0 \<le> i then i div j else -(-i div j)
else if 0 \<le> i then -(i div -j)
else -i div -j)"
definition rem :: "int \<Rightarrow> int \<Rightarrow> int" (infixl "rem" 70) where
"i rem j = (if j = 0 then undefined else i - i quot j * j)"
definition quot_rem :: "int * int \<Rightarrow> int * int" where
"quot_rem p = (case p of (i, j) \<Rightarrow> (i div j, i rem j))"
(* extra option operations *)
definition is_some :: "'a option \<Rightarrow> bool" where
"is_some x = (case x of Some _ \<Rightarrow> True | _ \<Rightarrow> False)"
lemma is_some_alt:
shows "is_some x = (x \<noteq> None)"
unfolding is_some_def
using option.disc_eq_case(2)
by auto
lemma is_some_simps [simp]:
shows "\<not> (is_some None)"
and "is_some (Some x)"
unfolding is_some_def by simp_all
(* extra list operations *)
fun splitl :: "('a \<Rightarrow> bool) \<Rightarrow> 'a list \<Rightarrow> 'a list \<times> 'a list" where
"splitl _ [] = ([], [])" |
"splitl P (h # t) = (if P h then let (l, r) = splitl P t in (h # l, r) else ([], h # t))"
definition splitr :: "('a \<Rightarrow> bool) \<Rightarrow> 'a list \<Rightarrow> 'a list \<times> 'a list" where
"splitr P x = (let (l, r) = splitl P (rev x) in (rev r, rev l))"
definition pad_left :: "'a \<Rightarrow> nat \<Rightarrow> 'a list \<Rightarrow> 'a list" where
"pad_left c n s = replicate (n - length s) c @ s"
definition pad_right :: "'a \<Rightarrow> nat \<Rightarrow> 'a list \<Rightarrow> 'a list" where
"pad_right c n s = s @ replicate (n - length s) c"
fun index_find :: "nat \<Rightarrow> 'a \<times> 'a list \<Rightarrow> nat option" where
"index_find _ (_, []) = None" |
"index_find i (v, h # t) = (if v = h then Some i else index_find (Suc i) (v, t))"
definition "index_of = index_find 0"
definition remove :: "'a list * 'a list \<Rightarrow> 'a list" where
"remove p = (case p of (l1, l2) \<Rightarrow> filter (\<lambda>x. x \<notin> set l1) l2)"
definition remove_except :: "'a list * 'a list \<Rightarrow> 'a list" where
"remove_except p = (case p of (l1, l2) \<Rightarrow> filter (\<lambda>x. x \<in> set l1) l2)"
fun remove_duplicates :: "'a list \<Rightarrow> 'a list" where
"remove_duplicates [] = []" |
"remove_duplicates (h # t) = (if h \<in> set t then remove_duplicates t else h # remove_duplicates t)"
lemma splitl_length:
shows "length (fst (splitl P l)) + length (snd (splitl P l)) = length l"
by (induct l, auto simp add: case_prod_beta)
lemma splitl_fst_length [simp]:
shows "length (fst (splitl P x)) \<le> length x"
using splitl_length
by (metis order_refl trans_le_add1)
lemma splitl_snd_length [simp]:
shows "length (snd (splitl P x)) \<le> length x"
using splitl_length
by (metis order_refl trans_le_add2)
lemma pad_left_length [simp]:
shows "length (pad_left e n l) = max (length l) n"
unfolding pad_left_def
by auto
lemma pad_right_length [simp]:
shows "length (pad_right e n l) = max (length l) n"
unfolding pad_right_def
by auto
lemma pad_left_nth:
shows "pad_left e n l ! m =
(if m < n - List.length l
then e
else l ! (m - (n - List.length l)))"
unfolding pad_left_def nth_append
by simp
(* extra string operations *)
lemma fields_termination_lem [simp]:
assumes "a \<noteq> []" and "length a \<le> length c"
shows "length a - b < Suc (length c)"
by (simp add: assms(2) le_imp_less_Suc less_imp_diff_less)
function (sequential) tokens :: "(char \<Rightarrow> bool) \<Rightarrow> string \<Rightarrow> string list" where
"tokens _ '''' = []" |
"tokens P x =
(let (l, r) = splitl (\<lambda>e. ~P e) x in if l = [] then tokens P (tl r) else l # tokens P r)"
by pat_completeness auto
termination tokens
apply (relation "measure (length o snd)")
apply auto
apply (case_tac "~ P v", auto simp add: case_prod_beta le_imp_less_Suc)
apply (case_tac "~ P v", auto simp add: case_prod_beta le_imp_less_Suc)
done
function (sequential) fields :: "(char \<Rightarrow> bool) \<Rightarrow> string \<Rightarrow> string list" where
"fields _ '''' = [[]]" |
"fields P x =
(let (l, r) = splitl (\<lambda>e. ~P e) x in if l = [] then [] # fields P (tl r)
else if r = [] then [l]
else l # fields P (tl r))"
by pat_completeness auto
termination fields
apply (relation "measure (length o snd)")
apply auto
apply (case_tac "~ P v", auto simp add: case_prod_beta le_imp_less_Suc)
apply (case_tac "~ P v", auto simp add: case_prod_beta)
done
(* bit-string operations - extends Bool_List_Representation.thy *)
definition nat_to_bitstring :: "nat \<Rightarrow> bool list" where
"nat_to_bitstring n =
(if n = 0 then [False] else bin_to_bl (log2 n + 1) (int n))"
definition "bitstring_to_nat = nat o bl_to_bin"
definition fixwidth :: "nat \<Rightarrow> bool list \<Rightarrow> bool list" where
"fixwidth n v = (let l = length v in if l < n then pad_left False n v else drop (l - n) v)"
definition bitwise :: "(bool \<Rightarrow> bool \<Rightarrow> bool) \<Rightarrow> bool list \<Rightarrow> bool list \<Rightarrow> bool list" where
"bitwise f v1 v2 =
(let m = max (length v1) (length v2) in map (case_prod f) (zip (fixwidth m v1) (fixwidth m v2)))"
definition "bor = bitwise (\<or>)"
definition "band = bitwise (\<and>)"
definition "bxor = bitwise (\<noteq>)"
definition bitstring_shiftl :: "bool list \<Rightarrow> nat \<Rightarrow> bool list" where
"bitstring_shiftl v m = pad_right False (length v + m) v"
definition bitstring_shiftr :: "bool list \<Rightarrow> nat \<Rightarrow> bool list" where
"bitstring_shiftr v m = take (length v - m) v"
definition bitstring_field :: "nat \<Rightarrow> nat \<Rightarrow> bool list \<Rightarrow> bool list" where
"bitstring_field h l v = fixwidth (Suc h - l) (bitstring_shiftr v l)"
definition bitstring_rotate :: "bool list \<Rightarrow> nat \<Rightarrow> bool list" where
"bitstring_rotate v m =
(let l = length v in
let x = m mod l in
if l = 0 \<or> x = 0 then v else bitstring_field (x - 1) 0 v @ bitstring_field (l - 1) x v)"
definition bitstring_test_bit :: "bool list \<Rightarrow> nat \<Rightarrow> bool" where
"bitstring_test_bit v n = (bitstring_field n n v = [True])"
definition bitstring_modify :: "(nat \<times> bool \<Rightarrow> bool) \<times> bool list \<Rightarrow> bool list" where
"bitstring_modify p = (case p of (f, l) \<Rightarrow> map f (zip (rev (upt 0 (length l))) l))"
definition bitstring_field_insert :: "nat \<Rightarrow> nat \<Rightarrow> bool list \<Rightarrow> bool list \<Rightarrow> bool list" where
"bitstring_field_insert h l v1 v2 =
bitstring_modify (\<lambda>(i, b). if l \<le> i \<and> i \<le> h then bitstring_test_bit v1 (i - l) else b, v2)"
lemma nat_to_bitstring_zero [simp]:
shows "nat_to_bitstring 0 = [False]"
unfolding nat_to_bitstring_def by simp
(* We do not add the following rule to the simp set, because n occurs twice at the right hand side,
and therefore the state might not become simpler when applying this rule. *)
lemma nat_to_bitstring_length:
shows "length (nat_to_bitstring n) = (if n = 0 then 1 else log2 n + 1)"
unfolding nat_to_bitstring_def
by (simp del: bin_to_bl_def)
lemma fixwidth_length [simp]:
shows "length (fixwidth n l) = n"
unfolding fixwidth_def Let_def
by auto
lemma bitwise_length [simp]:
shows "length (bitwise f v1 v2) = max (length v1) (length v2)"
unfolding bitwise_def Let_def
by auto
(* extra word operations *)
definition unsigned_min :: "'a::len word \<times> 'a::len word \<Rightarrow> 'a::len word" where
"unsigned_min p = (case p of (w1, w2) \<Rightarrow> (if w1 \<le> w2 then w1 else w2))"
definition unsigned_max :: "'a::len word \<times> 'a::len word \<Rightarrow> 'a::len word" where
"unsigned_max p = (case p of (w1, w2) \<Rightarrow> (if w1 \<le> w2 then w2 else w1))"
definition word_log2 :: "'a::len word \<Rightarrow> 'a::len word" where
"word_log2 w = of_nat (log2 (unat w))"
definition word_quot :: "'a::len word \<Rightarrow> 'a::len word \<Rightarrow> 'a::len word" where
"word_quot i j = of_int (sint i quot sint j)"
definition word_rem :: "'a::len word \<Rightarrow> 'a::len word \<Rightarrow> 'a::len word" where
"word_rem i j = of_int (sint i rem sint j)"
definition word_sdiv :: "'a::len word \<Rightarrow> 'a::len word \<Rightarrow> 'a::len word" where
"word_sdiv i j = of_int (sint i div sint j)"
definition word_smod :: "'a::len word \<Rightarrow> 'a::len word \<Rightarrow> 'a::len word" where
"word_smod i j = of_int (sint i mod sint j)"
definition word_modify :: "(nat \<times> bool \<Rightarrow> bool) \<times> 'a::len word \<Rightarrow> 'a::len word" where
"word_modify p = (case p of (f, w) \<Rightarrow> of_bl (bitstring_modify (f, to_bl w)))"
definition word_bit_field_insert :: "nat \<Rightarrow> nat \<Rightarrow> 'a::len word \<Rightarrow> 'b::len word \<Rightarrow> 'b::len word" where
"word_bit_field_insert h l w1 w2 =
word_modify (\<lambda>(i, b). if l \<le> i \<and> i \<le> h then test_bit w1 (i - l) else b, w2)"
definition word_bits :: "nat \<Rightarrow> nat \<Rightarrow> 'a::len word \<Rightarrow> 'a::len word" where
"word_bits h l w = (w >> l) AND mask (Suc h - l)"
definition word_extract :: "nat \<Rightarrow> nat \<Rightarrow> 'a::len word \<Rightarrow> 'b::len word" where
"word_extract h l w = ucast (word_bits h l w)"
definition word_replicate :: "nat \<Rightarrow> 'a::len word \<Rightarrow> 'b::len word" where
"word_replicate n a = word_rcat (replicate n a)"
(* floating-point stubs *)
datatype ieee_rounding =
roundTiesToEven | roundTowardPositive | roundTowardNegative | roundTowardZero
datatype ieee_compare = LT | EQ | GT | UN
record ieee_flags =
DivideByZero :: bool
InvalidOp :: bool
Overflow :: bool
Precision :: bool
Underflow :: bool
consts
fp32_abs :: "32 word \<Rightarrow> 32 word"
fp32_add :: "ieee_rounding \<Rightarrow> 32 word \<Rightarrow> 32 word \<Rightarrow> 32 word"
fp32_add_with_flags :: "ieee_rounding \<Rightarrow> 32 word \<Rightarrow> 32 word \<Rightarrow> ieee_flags \<times> 32 word"
fp32_compare :: "32 word \<Rightarrow> 32 word \<Rightarrow> ieee_compare"
fp32_div :: "ieee_rounding \<Rightarrow> 32 word \<Rightarrow> 32 word \<Rightarrow> 32 word"
fp32_div_with_flags :: "ieee_rounding \<Rightarrow> 32 word \<Rightarrow> 32 word \<Rightarrow> ieee_flags \<times> 32 word"
fp32_equal :: "32 word \<Rightarrow> 32 word \<Rightarrow> bool"
fp32_from_int :: "ieee_rounding \<Rightarrow> int \<Rightarrow> 32 word"
fp32_greater :: "32 word \<Rightarrow> 32 word \<Rightarrow> bool"
fp32_greater_equal :: "32 word \<Rightarrow> 32 word \<Rightarrow> bool"
fp32_is_integral :: "32 word \<Rightarrow> bool"
fp32_is_finite :: "32 word \<Rightarrow> bool"
fp32_is_nan :: "32 word \<Rightarrow> bool"
fp32_is_normal :: "32 word \<Rightarrow> bool"
fp32_is_signalling_nan :: "32 word \<Rightarrow> bool"
fp32_is_subnormal :: "32 word \<Rightarrow> bool"
fp32_less :: "32 word \<Rightarrow> 32 word \<Rightarrow> bool"
fp32_less_equal :: "32 word \<Rightarrow> 32 word \<Rightarrow> bool"
fp32_mul :: "ieee_rounding \<Rightarrow> 32 word \<Rightarrow> 32 word \<Rightarrow> 32 word"
fp32_mul_with_flags :: "ieee_rounding \<Rightarrow> 32 word \<Rightarrow> 32 word \<Rightarrow> ieee_flags \<times> 32 word"
fp32_mul_add :: "ieee_rounding \<Rightarrow> 32 word \<Rightarrow> 32 word \<Rightarrow> 32 word \<Rightarrow> 32 word"
fp32_mul_add_with_flags :: "ieee_rounding \<Rightarrow> 32 word \<Rightarrow> 32 word \<Rightarrow> 32 word \<Rightarrow> ieee_flags \<times> 32 word"
fp32_mul_sub :: "ieee_rounding \<Rightarrow> 32 word \<Rightarrow> 32 word \<Rightarrow> 32 word \<Rightarrow> 32 word"
fp32_mul_sub_with_flags :: "ieee_rounding \<Rightarrow> 32 word \<Rightarrow> 32 word \<Rightarrow> 32 word \<Rightarrow> ieee_flags \<times> 32 word"
fp32_neg_inf :: "32 word"
fp32_neg_max :: "32 word"
fp32_neg_min :: "32 word"
fp32_neg_zero :: "32 word"
fp32_negate :: "32 word \<Rightarrow> 32 word"
fp32_pos_inf :: "32 word"
fp32_pos_max :: "32 word"
fp32_pos_min :: "32 word"
fp32_pos_zero :: "32 word"
fp32_round_to_integral :: "ieee_rounding \<Rightarrow> 32 word \<Rightarrow> 32 word"
fp32_snan :: "32 word"
fp32_sqrt :: "ieee_rounding \<Rightarrow> 32 word \<Rightarrow> 32 word"
fp32_sqrt_with_flags :: "ieee_rounding \<Rightarrow> 32 word \<Rightarrow> ieee_flags \<times> 32 word"
fp32_sub :: "ieee_rounding \<Rightarrow> 32 word \<Rightarrow> 32 word \<Rightarrow> 32 word"
fp32_sub_with_flags :: "ieee_rounding \<Rightarrow> 32 word \<Rightarrow> 32 word \<Rightarrow> ieee_flags \<times> 32 word"
fp32_qnan :: "32 word"
fp32_to_int :: "ieee_rounding \<Rightarrow> 32 word \<Rightarrow> int option"
consts
fp64_abs :: "64 word \<Rightarrow> 64 word"
fp64_add :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> 64 word \<Rightarrow> 64 word"
fp64_add_with_flags :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> 64 word \<Rightarrow> ieee_flags \<times> 64 word"
fp64_compare :: "64 word \<Rightarrow> 64 word \<Rightarrow> ieee_compare"
fp64_div :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> 64 word \<Rightarrow> 64 word"
fp64_div_with_flags :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> 64 word \<Rightarrow> ieee_flags \<times> 64 word"
fp64_equal :: "64 word \<Rightarrow> 64 word \<Rightarrow> bool"
fp64_from_int :: "ieee_rounding \<Rightarrow> int \<Rightarrow> 64 word"
fp64_greater :: "64 word \<Rightarrow> 64 word \<Rightarrow> bool"
fp64_greater_equal :: "64 word \<Rightarrow> 64 word \<Rightarrow> bool"
fp64_is_integral :: "64 word \<Rightarrow> bool"
fp64_is_finite :: "64 word \<Rightarrow> bool"
fp64_is_nan :: "64 word \<Rightarrow> bool"
fp64_is_normal :: "64 word \<Rightarrow> bool"
fp64_is_signalling_nan :: "64 word \<Rightarrow> bool"
fp64_is_subnormal :: "64 word \<Rightarrow> bool"
fp64_less :: "64 word \<Rightarrow> 64 word \<Rightarrow> bool"
fp64_less_equal :: "64 word \<Rightarrow> 64 word \<Rightarrow> bool"
fp64_mul :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> 64 word \<Rightarrow> 64 word"
fp64_mul_with_flags :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> 64 word \<Rightarrow> ieee_flags \<times> 64 word"
fp64_mul_add :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> 64 word \<Rightarrow> 64 word \<Rightarrow> 64 word"
fp64_mul_add_with_flags :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> 64 word \<Rightarrow> 64 word \<Rightarrow> ieee_flags \<times> 64 word"
fp64_mul_sub :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> 64 word \<Rightarrow> 64 word \<Rightarrow> 64 word"
fp64_mul_sub_with_flags :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> 64 word \<Rightarrow> 64 word \<Rightarrow> ieee_flags \<times> 64 word"
fp64_neg_inf :: "64 word"
fp64_neg_min :: "64 word"
fp64_neg_max :: "64 word"
fp64_neg_zero :: "64 word"
fp64_negate :: "64 word \<Rightarrow> 64 word"
fp64_pos_inf :: "64 word"
fp64_pos_min :: "64 word"
fp64_pos_max :: "64 word"
fp64_pos_zero :: "64 word"
fp64_round_to_integral :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> 64 word"
fp64_snan :: "64 word"
fp64_sqrt :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> 64 word"
fp64_sqrt_with_flags :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> ieee_flags \<times> 64 word"
fp64_sub :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> 64 word \<Rightarrow> 64 word"
fp64_sub_with_flags :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> 64 word \<Rightarrow> ieee_flags \<times> 64 word"
fp64_qnan :: "64 word"
fp64_to_int :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> int option"
consts
fp32_to_fp64 :: "32 word \<Rightarrow> 64 word"
fp64_to_fp32 :: "ieee_rounding \<Rightarrow> 64 word \<Rightarrow> 32 word"
code_printing
constant "fp32_abs" \<rightharpoonup>
(SML) "!(fn '_ => raise Fail \"fp32'_abs\")"
and (OCaml) "!(fun '_ -> failwith \"fp32'_abs\")"
and (Haskell) "!(\\ '_ -> error \"fp32'_abs\")"
| constant "fp32_add" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp32'_add\")"
and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp32'_add\")"
and (Haskell) "!(\\ '_ '_ '_ -> error \"fp32'_add\")"
| constant "fp32_add_with_flags" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp32'_add'_with'_flags\")"
and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp32'_add'_with'_flags\")"
and (Haskell) "!(\\ '_ '_ '_ -> error \"fp32'_add'_with'_flags\")"
| constant "fp32_compare" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => raise Fail \"fp32'_compare\")"
and (OCaml) "!(fun '_ '_ -> failwith \"fp32'_compare\")"
and (Haskell) "!(\\ '_ '_ -> error \"fp32'_compare\")"
| constant "fp32_div" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp32'_div\")"
and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp32'_div\")"
and (Haskell) "!(\\ '_ '_ '_ -> error \"fp32'_div\")"
| constant "fp32_div_with_flags" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp32'_div'_with'_flags\")"
and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp32'_div'_with'_flags\")"
and (Haskell) "!(\\ '_ '_ '_ -> error \"fp32'_div'_with'_flags\")"
| constant "fp32_equal" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => raise Fail \"fp32'_equal\")"
and (OCaml) "!(fun '_ '_ -> failwith \"fp32'_equal\")"
and (Haskell) "!(\\ '_ '_ -> error \"fp32'_equal\")"
| constant "fp32_from_int" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => raise Fail \"fp32'_from'_int\")"
and (OCaml) "!(fun '_ '_ -> failwith \"fp32'_from'_int\")"
and (Haskell) "!(\\ '_ '_ -> error \"fp32'_from'_int\")"
| constant "fp32_greater" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => raise Fail \"fp32'_greater\")"
and (OCaml) "!(fun '_ '_ -> failwith \"fp32'_greater\")"
and (Haskell) "!(\\ '_ '_ -> error \"fp32'_greater\")"
| constant "fp32_greater_equal" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => raise Fail \"fp32'_greater'_equal\")"
and (OCaml) "!(fun '_ '_ -> failwith \"fp32'_greater'_equal\")"
and (Haskell) "!(\\ '_ '_ -> error \"fp32'_greater'_equal\")"
| constant "fp32_is_integral" \<rightharpoonup>
(SML) "!(fn '_ => raise Fail \"fp32'_is'_integral\")"
and (OCaml) "!(fun '_ -> failwith \"fp32'_is'_integral\")"
and (Haskell) "!(\\ '_ -> error \"fp32'_is'_integral\")"
| constant "fp32_is_finite" \<rightharpoonup>
(SML) "!(fn '_ => raise Fail \"fp32'_is'_finite\")"
and (OCaml) "!(fun '_ -> failwith \"fp32'_is'_finite\")"
and (Haskell) "!(\\ '_ -> error \"fp32'_is'_finite\")"
| constant "fp32_is_nan" \<rightharpoonup>
(SML) "!(fn '_ => raise Fail \"fp32'_is'_nan\")"
and (OCaml) "!(fun '_ -> failwith \"fp32'_is'_nan\")"
and (Haskell) "!(\\ '_ -> error \"fp32'_is'_nan\")"
| constant "fp32_is_normal" \<rightharpoonup>
(SML) "!(fn '_ => raise Fail \"fp32'_is'_normal\")"
and (OCaml) "!(fun '_ -> failwith \"fp32'_is'_normal\")"
and (Haskell) "!(\\ '_ -> error \"fp32'_is'_normal\")"
| constant "fp32_is_signalling_nan" \<rightharpoonup>
(SML) "!(fn '_ => raise Fail \"fp32'_is'_signalling'_nan\")"
and (OCaml) "!(fun '_ -> failwith \"fp32'_is'_signalling'_nan\")"
and (Haskell) "!(\\ '_ -> error \"fp32'_is'_signalling'_nan\")"
| constant "fp32_is_subnormal" \<rightharpoonup>
(SML) "!(fn '_ => raise Fail \"fp32'_is'_subnormal\")"
and (OCaml) "!(fun '_ -> failwith \"fp32'_is'_subnormal\")"
and (Haskell) "!(\\ '_ -> error \"fp32'_is'_subnormal\")"
| constant "fp32_less" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => raise Fail \"fp32'_less\")"
and (OCaml) "!(fun '_ '_ -> failwith \"fp32'_less\")"
and (Haskell) "!(\\ '_ '_ -> error \"fp32'_less\")"
| constant "fp32_less_equal" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => raise Fail \"fp32'_less'_equal\")"
and (OCaml) "!(fun '_ '_ -> failwith \"fp32'_less'_equal\")"
and (Haskell) "!(\\ '_ '_ -> error \"fp32'_less'_equal\")"
| constant "fp32_mul" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp32'_mul\")"
and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp32'_mul\")"
and (Haskell) "!(\\ '_ '_ '_ -> error \"fp32'_mul\")"
| constant "fp32_mul_with_flags" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp32'_mul'_with'_flags\")"
and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp32'_mul'_with'_flags\")"
and (Haskell) "!(\\ '_ '_ '_ -> error \"fp32'_mul'_with'_flags\")"
| constant "fp32_mul_add" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => fn '_ => fn '_ => raise Fail \"fp32'_mul'_add\")"
and (OCaml) "!(fun '_ '_ '_ '_ -> failwith \"fp32'_mul'_add\")"
and (Haskell) "!(\\ '_ '_ '_ '_ -> error \"fp32'_mul'_add\")"
| constant "fp32_mul_add_with_flags" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => fn '_ => fn '_ => raise Fail \"fp32'_mul'_add'_with'_flags\")"
and (OCaml) "!(fun '_ '_ '_ '_ -> failwith \"fp32'_mul'_add'_with'_flags\")"
and (Haskell) "!(\\ '_ '_ '_ '_ -> error \"fp32'_mul'_add'_with'_flags\")"
| constant "fp32_mul_sub" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => fn '_ => fn '_ => raise Fail \"fp32'_mul'_sub\")"
and (OCaml) "!(fun '_ '_ '_ '_ -> failwith \"fp32'_mul'_sub\")"
and (Haskell) "!(\\ '_ '_ '_ '_ -> error \"fp32'_mul'_sub\")"
| constant "fp32_mul_sub_with_flags" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => fn '_ => fn '_ => raise Fail \"fp32'_mul'_sub'_with'_flags\")"
and (OCaml) "!(fun '_ '_ '_ '_ -> failwith \"fp32'_mul'_sub'_with'_flags\")"
and (Haskell) "!(\\ '_ '_ '_ '_ -> error \"fp32'_mul'_sub'_with'_flags\")"
| constant "fp32_neg_inf" \<rightharpoonup>
(SML) "!(raise Fail \"fp32'_neg'_inf\")"
and (OCaml) "!(failwith \"fp32'_neg'_inf\")"
and (Haskell) "!(error \"fp32'_neg'_inf\")"
| constant "fp32_neg_min" \<rightharpoonup>
(SML) "!(raise Fail \"fp32'_neg'_min\")"
and (OCaml) "!(failwith \"fp32'_neg'_min\")"
and (Haskell) "!(error \"fp32'_neg'_min\")"
| constant "fp32_neg_max" \<rightharpoonup>
(SML) "!(raise Fail \"fp32'_neg'_max\")"
and (OCaml) "!(failwith \"fp32'_neg'_max\")"
and (Haskell) "!(error \"fp32'_neg'_max\")"
| constant "fp32_neg_zero" \<rightharpoonup>
(SML) "!(raise Fail \"fp32'_neg'_zero\")"
and (OCaml) "!(failwith \"fp32'_neg'_zero\")"
and (Haskell) "!(error \"fp32'_neg'_zero\")"
| constant "fp32_negate" \<rightharpoonup>
(SML) "!(fn '_ => raise Fail \"fp32'_negate\")"
and (OCaml) "!(fun '_ -> failwith \"fp32'_negate\")"
and (Haskell) "!(\\ '_ -> error \"fp32'_negate\")"
| constant "fp32_pos_inf" \<rightharpoonup>
(SML) "!(raise Fail \"fp32'_pos'_inf\")"
and (OCaml) "!(failwith \"fp32'_pos'_inf\")"
and (Haskell) "!(error \"fp32'_pos'_inf\")"
| constant "fp32_pos_min" \<rightharpoonup>
(SML) "!(raise Fail \"fp32'_pos'_min\")"
and (OCaml) "!(failwith \"fp32'_pos'_min\")"
and (Haskell) "!(error \"fp32'_pos'_min\")"
| constant "fp32_pos_max" \<rightharpoonup>
(SML) "!(raise Fail \"fp32'_pos'_max\")"
and (OCaml) "!(failwith \"fp32'_pos'_max\")"
and (Haskell) "!(error \"fp32'_pos'_max\")"
| constant "fp32_pos_zero" \<rightharpoonup>
(SML) "!(raise Fail \"fp32'_pos'_zero\")"
and (OCaml) "!(failwith \"fp32'_pos'_zero\")"
and (Haskell) "!(error \"fp32'_pos'_zero\")"
| constant "fp32_snan" \<rightharpoonup>
(SML) "!(raise Fail \"fp32'_snan\")"
and (OCaml) "!(failwith \"fp32'_snan\")"
and (Haskell) "!(error \"fp32'_snan\")"
| constant "fp32_round_to_integral" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => raise Fail \"fp32'_round'_to'_integral\")"
and (OCaml) "!(fun '_ '_ -> failwith \"fp32'_round'_to'_integral\")"
and (Haskell) "!(\\ '_ '_ -> error \"fp32'_round'_to'_integral\")"
| constant "fp32_sqrt" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => raise Fail \"fp32'_sqrt\")"
and (OCaml) "!(fun '_ '_ -> failwith \"fp32'_sqrt\")"
and (Haskell) "!(\\ '_ '_ -> error \"fp32'_sqrt\")"
| constant "fp32_sqrt_with_flags" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => raise Fail \"fp32'_sqrt'_with'_flags\")"
and (OCaml) "!(fun '_ '_ -> failwith \"fp32'_sqrt'_with'_flags\")"
and (Haskell) "!(\\ '_ '_ -> error \"fp32'_sqrt'_with'_flags\")"
| constant "fp32_sub" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp32'_sub\")"
and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp32'_sub\")"
and (Haskell) "!(\\ '_ '_ '_ -> error \"fp32'_sub\")"
| constant "fp32_sub_with_flags" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp32'_sub'_with'_flags\")"
and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp32'_sub'_with'_flags\")"
and (Haskell) "!(\\ '_ '_ '_ -> error \"fp32'_sub'_with'_flags\")"
| constant "fp32_qnan" \<rightharpoonup>
(SML) "!(raise Fail \"fp32'_qnan\")"
and (OCaml) "!(failwith \"fp32'_qnan\")"
and (Haskell) "!(error \"fp32'_qnan\")"
| constant "fp32_to_int" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => raise Fail \"fp32'_to'_int\")"
and (OCaml) "!(fun '_ '_ -> failwith \"fp32'_to'_int\")"
and (Haskell) "!(\\ '_ '_ -> error \"fp32'_to'_int\")"
| constant "fp64_abs" \<rightharpoonup>
(SML) "!(fn '_ => raise Fail \"fp64'_abs\")"
and (OCaml) "!(fun '_ -> failwith \"fp64'_abs\")"
and (Haskell) "!(\\ '_ -> error \"fp64'_abs\")"
| constant "fp64_add" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp64'_add\")"
and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp64'_add\")"
and (Haskell) "!(\\ '_ '_ '_ -> error \"fp64'_add\")"
| constant "fp64_add_with_flags" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp64'_add'_with'_flags\")"
and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp64'_add'_with'_flags\")"
and (Haskell) "!(\\ '_ '_ '_ -> error \"fp64'_add'_with'_flags\")"
| constant "fp64_compare" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => raise Fail \"fp64'_compare\")"
and (OCaml) "!(fun '_ '_ -> failwith \"fp64'_compare\")"
and (Haskell) "!(\\ '_ '_ -> error \"fp64'_compare\")"
| constant "fp64_div" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp64'_div\")"
and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp64'_div\")"
and (Haskell) "!(\\ '_ '_ '_ -> error \"fp64'_div\")"
| constant "fp64_div_with_flags" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp64'_div'_with'_flags\")"
and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp64'_div'_with'_flags\")"
and (Haskell) "!(\\ '_ '_ '_ -> error \"fp64'_div'_with'_flags\")"
| constant "fp64_equal" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => raise Fail \"fp64'_equal\")"
and (OCaml) "!(fun '_ '_ -> failwith \"fp64'_equal\")"
and (Haskell) "!(\\ '_ '_ -> error \"fp64'_equal\")"
| constant "fp64_from_int" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => raise Fail \"fp64'_from'_int\")"
and (OCaml) "!(fun '_ '_ -> failwith \"fp64'_from'_int\")"
and (Haskell) "!(\\ '_ '_ -> error \"fp64'_from'_int\")"
| constant "fp64_greater" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => raise Fail \"fp64'_greater\")"
and (OCaml) "!(fun '_ '_ -> failwith \"fp64'_greater\")"
and (Haskell) "!(\\ '_ '_ -> error \"fp64'_greater\")"
| constant "fp64_greater_equal" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => raise Fail \"fp64'_greater'_equal\")"
and (OCaml) "!(fun '_ '_ -> failwith \"fp64'_greater'_equal\")"
and (Haskell) "!(\\ '_ '_ -> error \"fp64'_greater'_equal\")"
| constant "fp64_is_integral" \<rightharpoonup>
(SML) "!(fn '_ => raise Fail \"fp64'_is'_integral\")"
and (OCaml) "!(fun '_ -> failwith \"fp64'_is'_integral\")"
and (Haskell) "!(\\ '_ -> error \"fp64'_is'_integral\")"
| constant "fp64_is_finite" \<rightharpoonup>
(SML) "!(fn '_ => raise Fail \"fp64'_is'_finite\")"
and (OCaml) "!(fun '_ -> failwith \"fp64'_is'_finite\")"
and (Haskell) "!(\\ '_ -> error \"fp64'_is'_finite\")"
| constant "fp64_is_nan" \<rightharpoonup>
(SML) "!(fn '_ => raise Fail \"fp64'_is'_nan\")"
and (OCaml) "!(fun '_ -> failwith \"fp64'_is'_nan\")"
and (Haskell) "!(\\ '_ -> error \"fp64'_is'_nan\")"
| constant "fp64_is_normal" \<rightharpoonup>
(SML) "!(fn '_ => raise Fail \"fp64'_is'_normal\")"
and (OCaml) "!(fun '_ -> failwith \"fp64'_is'_normal\")"
and (Haskell) "!(\\ '_ -> error \"fp64'_is'_normal\")"
| constant "fp64_is_signalling_nan" \<rightharpoonup>
(SML) "!(fn '_ => raise Fail \"fp64'_is'_signalling'_nan\")"
and (OCaml) "!(fun '_ -> failwith \"fp64'_is'_signalling'_nan\")"
and (Haskell) "!(\\ '_ -> error \"fp64'_is'_signalling'_nan\")"
| constant "fp64_is_subnormal" \<rightharpoonup>
(SML) "!(fn '_ => raise Fail \"fp64'_is'_subnormal\")"
and (OCaml) "!(fun '_ -> failwith \"fp64'_is'_subnormal\")"
and (Haskell) "!(\\ '_ -> error \"fp64'_is'_subnormal\")"
| constant "fp64_less" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => raise Fail \"fp64'_less\")"
and (OCaml) "!(fun '_ '_ -> failwith \"fp64'_less\")"
and (Haskell) "!(\\ '_ '_ -> error \"fp64'_less\")"
| constant "fp64_less_equal" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => raise Fail \"fp64'_less'_equal\")"
and (OCaml) "!(fun '_ '_ -> failwith \"fp64'_less'_equal\")"
and (Haskell) "!(\\ '_ '_ -> error \"fp64'_less'_equal\")"
| constant "fp64_mul" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp64'_mul\")"
and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp64'_mul\")"
and (Haskell) "!(\\ '_ '_ '_ -> error \"fp64'_mul\")"
| constant "fp64_mul_with_flags" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp64'_mul'_with'_flags\")"
and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp64'_mul'_with'_flags\")"
and (Haskell) "!(\\ '_ '_ '_ -> error \"fp64'_mul'_with'_flags\")"
| constant "fp64_mul_add" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => fn '_ => fn '_ => raise Fail \"fp64'_mul'_add\")"
and (OCaml) "!(fun '_ '_ '_ '_ -> failwith \"fp64'_mul'_add\")"
and (Haskell) "!(\\ '_ '_ '_ '_ -> error \"fp64'_mul'_add\")"
| constant "fp64_mul_add_with_flags" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => fn '_ => fn '_ => raise Fail \"fp64'_mul'_add'_with'_flags\")"
and (OCaml) "!(fun '_ '_ '_ '_ -> failwith \"fp64'_mul'_add'_with'_flags\")"
and (Haskell) "!(\\ '_ '_ '_ '_ -> error \"fp64'_mul'_add'_with'_flags\")"
| constant "fp64_mul_sub" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => fn '_ => fn '_ => raise Fail \"fp64'_mul'_sub\")"
and (OCaml) "!(fun '_ '_ '_ '_ -> failwith \"fp64'_mul'_sub\")"
and (Haskell) "!(\\ '_ '_ '_ '_ -> error \"fp64'_mul'_sub\")"
| constant "fp64_mul_sub_with_flags" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => fn '_ => fn '_ => raise Fail \"fp64'_mul'_sub'_with'_flags\")"
and (OCaml) "!(fun '_ '_ '_ '_ -> failwith \"fp64'_mul'_sub'_with'_flags\")"
and (Haskell) "!(\\ '_ '_ '_ '_ -> error \"fp64'_mul'_sub'_with'_flags\")"
| constant "fp64_neg_inf" \<rightharpoonup>
(SML) "!(raise Fail \"fp64'_neg'_inf\")"
and (OCaml) "!(failwith \"fp64'_neg'_inf\")"
and (Haskell) "!(error \"fp64'_neg'_inf\")"
| constant "fp64_neg_min" \<rightharpoonup>
(SML) "!(raise Fail \"fp64'_neg'_min\")"
and (OCaml) "!(failwith \"fp64'_neg'_min\")"
and (Haskell) "!(error \"fp64'_neg'_min\")"
| constant "fp64_neg_max" \<rightharpoonup>
(SML) "!(raise Fail \"fp64'_neg'_max\")"
and (OCaml) "!(failwith \"fp64'_neg'_max\")"
and (Haskell) "!(error \"fp64'_neg'_max\")"
| constant "fp64_neg_zero" \<rightharpoonup>
(SML) "!(raise Fail \"fp64'_neg'_zero\")"
and (OCaml) "!(failwith \"fp64'_neg'_zero\")"
and (Haskell) "!(error \"fp64'_neg'_zero\")"
| constant "fp64_negate" \<rightharpoonup>
(SML) "!(fn '_ => raise Fail \"fp64'_negate\")"
and (OCaml) "!(fun '_ -> failwith \"fp64'_negate\")"
and (Haskell) "!(\\ '_ -> error \"fp64'_negate\")"
| constant "fp64_pos_inf" \<rightharpoonup>
(SML) "!(raise Fail \"fp64'_pos'_inf\")"
and (OCaml) "!(failwith \"fp64'_pos'_inf\")"
and (Haskell) "!(error \"fp64'_pos'_inf\")"
| constant "fp64_pos_min" \<rightharpoonup>
(SML) "!(raise Fail \"fp64'_pos'_min\")"
and (OCaml) "!(failwith \"fp64'_pos'_min\")"
and (Haskell) "!(error \"fp64'_pos'_min\")"
| constant "fp64_pos_max" \<rightharpoonup>
(SML) "!(raise Fail \"fp64'_pos'_max\")"
and (OCaml) "!(failwith \"fp64'_pos'_max\")"
and (Haskell) "!(error \"fp64'_pos'_max\")"
| constant "fp64_pos_zero" \<rightharpoonup>
(SML) "!(raise Fail \"fp64'_pos'_zero\")"
and (OCaml) "!(failwith \"fp64'_pos'_zero\")"
and (Haskell) "!(error \"fp64'_pos'_zero\")"
| constant "fp64_snan" \<rightharpoonup>
(SML) "!(raise Fail \"fp64'_snan\")"
and (OCaml) "!(failwith \"fp64'_snan\")"
and (Haskell) "!(error \"fp64'_snan\")"
| constant "fp64_round_to_integral" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => raise Fail \"fp64'_round'_to'_integral\")"
and (OCaml) "!(fun '_ '_ -> failwith \"fp64'_round'_to'_integral\")"
and (Haskell) "!(\\ '_ '_ -> error \"fp64'_round'_to'_integral\")"
| constant "fp64_sqrt" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => raise Fail \"fp64'_sqrt\")"
and (OCaml) "!(fun '_ '_ -> failwith \"fp64'_sqrt\")"
and (Haskell) "!(\\ '_ '_ -> error \"fp64'_sqrt\")"
| constant "fp64_sqrt_with_flags" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => raise Fail \"fp64'_sqrt'_with'_flags\")"
and (OCaml) "!(fun '_ '_ -> failwith \"fp64'_sqrt'_with'_flags\")"
and (Haskell) "!(\\ '_ '_ -> error \"fp64'_sqrt'_with'_flags\")"
| constant "fp64_qnan" \<rightharpoonup>
(SML) "!(raise Fail \"fp64'_qnan\")"
and (OCaml) "!(failwith \"fp64'_qnan\")"
and (Haskell) "!(error \"fp64'_qnan\")"
| constant "fp64_sub" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp64'_sub\")"
and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp64'_sub\")"
and (Haskell) "!(\\ '_ '_ '_ -> error \"fp64'_sub\")"
| constant "fp64_sub_with_flags" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => fn '_ => raise Fail \"fp64'_sub'_with'_flags\")"
and (OCaml) "!(fun '_ '_ '_ -> failwith \"fp64'_sub'_with'_flags\")"
and (Haskell) "!(\\ '_ '_ '_ -> error \"fp64'_sub'_with'_flags\")"
| constant "fp64_to_int" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => raise Fail \"fp64'_to'_int\")"
and (OCaml) "!(fun '_ '_ -> failwith \"fp64'_to'_int\")"
and (Haskell) "!(\\ '_ '_ -> error \"fp64'_to'_int\")"
| constant "fp32_to_fp64" \<rightharpoonup>
(SML) "!(fn '_ => raise Fail \"fp32'_to'_fp64\")"
and (OCaml) "!(fun '_ -> failwith \"fp32'_to'_fp64\")"
and (Haskell) "!(\\ '_ -> error \"fp32'_to'_fp64\")"
| constant "fp64_to_fp32" \<rightharpoonup>
(SML) "!(fn '_ => fn '_ => raise Fail \"fp64'_to'_fp32\")"
and (OCaml) "!(fun '_ '_ -> failwith \"fp64'_to'_fp32\")"
and (Haskell) "!(\\ '_ '_ -> error \"fp64'_to'_fp32\")"
end
|
(* Property from Productive Use of Failure in Inductive Proof,
Andrew Ireland and Alan Bundy, JAR 1996.
This Isabelle theory is produced using the TIP tool offered at the following website:
https://github.com/tip-org/tools
This file was originally provided as part of TIP benchmark at the following website:
https://github.com/tip-org/benchmarks
Yutaka Nagashima at CIIRC, CTU changed the TIP output theory file slightly
to make it compatible with Isabelle2017.*)
theory TIP_prop_33
imports "../../Test_Base"
begin
datatype Nat = Z | S "Nat"
definition(*fun*) one :: "Nat" where
"one = S Z"
fun t2 :: "Nat => Nat => Nat" where
"t2 (Z) y = y"
| "t2 (S z) y = S (t2 z y)"
fun t22 :: "Nat => Nat => Nat" where
"t22 (Z) y = Z"
| "t22 (S z) y = t2 y (t22 z y)"
fun fac :: "Nat => Nat" where
"fac (Z) = S Z"
| "fac (S y) = t22 (S y) (fac y)"
fun qfac :: "Nat => Nat => Nat" where
"qfac (Z) y = y"
| "qfac (S z) y = qfac z (t22 (S z) y)"
theorem property0 :
"((fac x) = (qfac x one))"
oops
end
|
C> \ingroup selci
C> @{
subroutine selci_wthcon(iflcon, title, multi, nelec, issss,
$ norbs,
& nnsmax, nci, noconf, nintpo, nbitpi, nbpsy, isym, nsym,
& inttyp,nsneed)
*
* $Id$
*
character*80 title
dimension nbpsy(8), isym(255), nsneed(3)
c
c write header of the ciconf file
c
write(iflcon) title, multi, nelec, issss, norbs, nnsmax,
& nci, noconf, nintpo, nbitpi, nbpsy, isym, nsym, inttyp,
& nsneed
c write(6,*) ' in wthcon '
c write(6,*) ' title, multi, nelec, issss, norbs, nnsmax, nci,',
c & 'noconf, nintpo, nbitpi '
c write(6,*) title
c write(6,*) multi, nelec, issss, norbs, nnsmax, nci, noconf,
c & nintpo, nbitpi, nbpsy, isym, inttyp, nsneed
end
C> @}
|
# Probabilistic Programming and Bayesian Methods for Hackers Chapter 1
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://drive.google.com/file/d/1r0JJuIQ9Uujpy8L941oMpJVrdQ6JLtSk/view?usp=sharing">Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/CamDavidsonPilon/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers/blob/13ec2c5a49406d6986b6b5f2468d6fa0b8b5d5ac/Chapter1_Introduction/Ch1_Introduction_TFP.ipynb">View source on GitHub</a>
</td>
</table>
<br>
<br>
<br>
Original content ([this Jupyter notebook](https://nbviewer.jupyter.org/github/CamDavidsonPilon/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers/blob/master/Chapter1_Introduction/Ch1_Introduction_PyMC2.ipynb)) created by Cam Davidson-Pilon ([`@Cmrn_DP`](https://twitter.com/Cmrn_DP))
Ported to [Tensorflow Probability](https://www.tensorflow.org/probability/) by Matthew McAteer ([`@MatthewMcAteer0`](https://twitter.com/MatthewMcAteer0)), with help from the TFP team at Google ([`[email protected]`](mailto:[email protected])).
Welcome to Bayesian Methods for Hackers. The full Github repository is available at [github/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers](https://github.com/CamDavidsonPilon/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers). The other chapters can be found on the project's [homepage](https://camdavidsonpilon.github.io/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers/). We hope you enjoy the book, and we encourage any contributions!
---
- [Dependencies & Prerequisites](#scrollTo=YcJ8nEDVH30J)
- [The Philosophy of Bayesian Inference](#scrollTo=dXqjzSnXRRr3)
- [The Bayesian state of mind](#scrollTo=YO2eSwZQRRqv)
- [Bayesian Inference in Practice](#scrollTo=FXUBMaYsRWvl)
- [Are frequentist methods incorrect then?](#scrollTo=rACyvZBVdqB9)
- [Our Bayesian framework](#scrollTo=TTUDkI8peKw6)
- [Example: Mandatory coin-flip example](#scrollTo=DkB3Ou8UjW-F)
- [Example: Bug, or just sweet, unintended feature?](#scrollTo=5UKnxit-mevN)
- [Probability Distributions](#scrollTo=2zNt6157C0Cr)
- [Discrete Case](#scrollTo=xG03a_sgDRlc)
- [Continuous Case](#scrollTo=ipS19FlBEmqK)
- [But what is $\lambda \;$?](#scrollTo=_1fhqQhAFLkk)
- [Example: Inferring behaviour from text-message data](#scrollTo=JrRddMMfHHKJ)
- [Introducing our first hammer: Tensorflow Probability](#scrollTo=mCz2BozPcYNy)
- [specify the joint log-density](#scrollTo=gYVjgZQ3hOw-)
- [Specify the posterior sampler](#scrollTo=KnyDyY8Tjyiy)
- [Execute the TF graph to sample from the posterior](#scrollTo=N1mb2NDUkJLU)
- [Plot the Results](#scrollTo=vIxEqx9qkhWr)
- [Interpretation](#scrollTo=FfiTXgF80sDA)
- [Exercises](#scrollTo=cgCrDy8M3IZT)
- [References](#scrollTo=nDdph0r1ABCn)
### Dependencies & Prerequisites
```
#@title Tensorflow Probability Installation (make sure to run this cell) { display-mode: "form" }
TFP_Installation = "Stable TFP" #@param ["Most Recent TFP", "Stable TFP", "Stable TFP-GPU", "Most Recent TFP-GPU", "TFP Already Installed"]
if TFP_Installation == "Stable TFP":
!pip3 install -q --upgrade tensorflow-probability
print("Up-to-date, stable TFP version installed")
elif TFP_Installation == "Most Recent TFP":
!pip3 install -q tfp-nightly
print("Most recent TFP version installed")
elif TFP_Installation == "Stable TFP-GPU":
!pip3 install -q --upgrade tensorflow-probability-gpu
print("Up-to-date, stable TFP-GPU version installed")
print("(make sure GPU is properly configured)")
elif TFP_Installation == "Most Recent TFP-GPU":
!pip3 install -q tfp-nightly-gpu
print("Most recent TFP-GPU version installed")
print("(make sure GPU is properly configured)")
elif TFP_Installation == "TFP Already Installed":
print("TFP already instaled in this environment")
pass
else:
print("Installation Error: Please select a viable TFP installation option.")
```
Up-to-date, stable TFP version installed
```
#@title Imports and Global Variables (make sure to run this cell) { display-mode: "form" }
from __future__ import absolute_import, division, print_function
warning_status = "ignore" #@param ["ignore", "always", "module", "once", "default", "error"]
import warnings
warnings.filterwarnings(warning_status)
with warnings.catch_warnings():
warnings.filterwarnings(warning_status, category=DeprecationWarning)
warnings.filterwarnings(warning_status, category=UserWarning)
import numpy as np
import os
matplotlib_style = 'fivethirtyeight' #@param ['fivethirtyeight', 'bmh', 'ggplot', 'seaborn', 'default', 'Solarize_Light2', 'classic', 'dark_background', 'seaborn-colorblind', 'seaborn-notebook']
import matplotlib.pyplot as plt; plt.style.use(matplotlib_style)
import matplotlib.axes as axes;
from matplotlib.patches import Ellipse
%matplotlib inline
import seaborn as sns; sns.set_context('notebook')
from IPython.core.pylabtools import figsize
notebook_screen_res = 'png' #@param ['retina', 'png', 'jpeg', 'svg', 'pdf']
%config InlineBackend.figure_format = notebook_screen_res
import tensorflow as tf
tfe = tf.contrib.eager
# Eager Execution
use_tf_eager = False #@param {type:"boolean"}
# Use try/except so we can easily re-execute the whole notebook.
if use_tf_eager:
try:
tf.enable_eager_execution()
except:
pass
import tensorflow_probability as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
def evaluate(tensors):
"""Evaluates Tensor or EagerTensor to Numpy `ndarray`s.
Args:
tensors: Object of `Tensor` or EagerTensor`s; can be `list`, `tuple`,
`namedtuple` or combinations thereof.
Returns:
ndarrays: Object with same structure as `tensors` except with `Tensor` or
`EagerTensor`s replaced by Numpy `ndarray`s.
"""
if tf.executing_eagerly():
return tf.contrib.framework.nest.pack_sequence_as(
tensors,
[t.numpy() if tf.contrib.framework.is_tensor(t) else t
for t in tf.contrib.framework.nest.flatten(tensors)])
return sess.run(tensors)
class _TFColor(object):
"""Enum of colors used in TF docs."""
red = '#F15854'
blue = '#5DA5DA'
orange = '#FAA43A'
green = '#60BD68'
pink = '#F17CB0'
brown = '#B2912F'
purple = '#B276B2'
yellow = '#DECF3F'
gray = '#4D4D4D'
def __getitem__(self, i):
return [
self.red,
self.orange,
self.green,
self.blue,
self.pink,
self.brown,
self.purple,
self.yellow,
self.gray,
][i % 9]
TFColor = _TFColor()
def session_options(enable_gpu_ram_resizing=True, enable_xla=True):
"""
Allowing the notebook to make use of GPUs if they're available.
XLA (Accelerated Linear Algebra) is a domain-specific compiler for linear
algebra that optimizes TensorFlow computations.
"""
config = tf.ConfigProto()
config.log_device_placement = True
if enable_gpu_ram_resizing:
# `allow_growth=True` makes it possible to connect multiple colabs to your
# GPU. Otherwise the colab malloc's all GPU ram.
config.gpu_options.allow_growth = True
if enable_xla:
# Enable on XLA. https://www.tensorflow.org/performance/xla/.
config.graph_options.optimizer_options.global_jit_level = (
tf.OptimizerOptions.ON_1)
return config
def reset_sess(config=None):
"""
Convenience function to create the TF graph & session or reset them.
"""
if config is None:
config = session_options()
global sess
tf.reset_default_graph()
try:
sess.close()
except:
pass
sess = tf.InteractiveSession(config=config)
reset_sess()
```
## The Philosophy of Bayesian Inference
>You are a skilled programmer, but bugs still slip into your code. After a particularly difficult implementation of an algorithm, you decide to test your code on a trivial example. It passes. You test the code on a harder problem. It passes once again. And it passes the next, even more difficult, test too! You are starting to believe that there may be no bugs in this code...
If you think this way, then congratulations, you already are thinking Bayesian! Bayesian inference is simply updating your beliefs after considering new evidence. A Bayesian can rarely be certain about a result, but he or she can be very confident. Just like in the example above, we can never be 100% sure that our code is bug-free unless we test it on every possible problem; something rarely possible in practice. Instead, we can test it on a large number of problems, and if it succeeds we can feel more confident about our code, but still not certain. Bayesian inference works identically: we update our beliefs about an outcome; rarely can we be absolutely sure unless we rule out all other alternatives.
## The Bayesian state of mind
Bayesian inference differs from more traditional statistical inference by preserving uncertainty. At first, this sounds like a bad statistical technique. Isn't statistics all about deriving certainty from randomness? To reconcile this, we need to start thinking like Bayesians.
The Bayesian world-view interprets probability as a measure of believability in an event, that is, how confident we are in an event occurring. In fact, we will see in a moment that this is the natural interpretation of probability.
For this to be clearer, we consider an alternative interpretation of probability: frequentists, adherents of the more classical version of statistics, assume that probability is the long-run frequency of events (hence the bestowed title). For example, the probability of plane accidents under a frequentist philosophy is interpreted as the long-term frequency of plane accidents. This makes logical sense for many probabilities of events, but becomes more difficult to understand when events have no long-term frequency of occurrences. Consider: we often assign probabilities to outcomes of presidential elections, but the election itself only happens once! Frequentists get around this by invoking alternative realities and saying that, across all these realities, the frequency of occurrences defines the probability.
Bayesians, on the other hand, have a more intuitive approach. Bayesians interpret a probability as a measure of belief, or confidence, in an event occurring. Simply put, a probability is a summary of an opinion. An individual who assigns a belief of 0 to an event has no confidence that the event will occur; conversely, assigning a belief of 1 implies that the individual is absolutely certain of an event occurring. Beliefs between 0 and 1 allow for weightings of other outcomes. This definition agrees with the probability of a plane accident example, for having observed the frequency of plane accidents, an individual's belief should be equal to that frequency, excluding any outside information. Similarly, under this definition of probability being equal to beliefs, it is meaningful to speak about probabilities (beliefs) of presidential election outcomes: how confident are you that candidate A will win?
Notice in the paragraph above, I assigned the belief (probability) measure to an individual, not to Nature. This is very interesting, as this definition leaves room for conflicting beliefs between individuals. Again, this is appropriate for what naturally occurs: different individuals have different beliefs of events occurring, because they possess different information about the world. The existence of different beliefs does not imply that anyone is wrong. Consider the following examples demonstrating the relationship between individual beliefs and probabilities:
* I flip a coin, and we both guess the result. We would both agree, assuming the coin is fair, that the probability of Heads is 1/2. Assume, then, that I peek at the coin. Now I know for certain what the result is: I assign probability 1.0 to either Heads or Tails (whichever it is). Now what is your belief that the coin is Heads? My knowledge of the outcome has not changed the coin's results. Thus we assign different probabilities to the result.
* Your code either has a bug in it or not, but we do not know for certain which is true, though we have a belief about the presence or absence of a bug.
* A medical patient is exhibiting symptoms *x*, *y* and *z*. There are a number of diseases that could be causing all of them, but only a single disease is present. A doctor has beliefs about which disease, but a second doctor may have slightly different beliefs.
This philosophy of treating beliefs as probability is natural to humans. We employ it constantly as we interact with the world and only see partial truths, but gather evidence to form beliefs. Alternatively, you have to be trained to think like a frequentist.
To align ourselves with traditional probability notation, we denote our belief about event $A$ as $P(A)$. We call this quantity the prior probability.
John Maynard Keynes, a great economist and thinker, said "When the facts change, I change my mind. What do you do, sir?" This quote reflects the way a Bayesian updates his or her beliefs after seeing evidence. Even, and especially, if the evidence is counter to what was initially believed, the evidence cannot be ignored. We denote our updated belief as $P(A|X)$, interpreted as the probability of $A$ given the evidence $X$. We call the updated belief the posterior probability so as to contrast it with the prior probability. For example, consider the posterior probabilities (read: posterior beliefs) of the above examples, after observing some evidence $X$:
1. $P(A)$: the coin has a 50 percent chance of being Heads. $P(A|X)$: You look at the coin, observe a Heads has landed, denote this information $X$, and trivially assign probability 1.0 to Heads and 0.0 to Tails.
2. $P(A)$: This big, complex code likely has a bug in it. $P(A|X)$: The code passed all $X$ tests; there still might be a bug, but its presence is less likely now.
3. $P(A)$: The patient could have any number of diseases. $P(A|X)$: Performing a blood test generated evidence $X$, ruling out some of the possible diseases from consideration.
It's clear that in each example we did not completely discard the prior belief after seeing new evidence $X$, but we re-weighted the prior to incorporate the new evidence (i.e. we put more weight, or confidence, on some beliefs versus others).
By introducing prior uncertainty about events, we are already admitting that any guess we make is potentially very wrong. After observing data, evidence, or other information, we update our beliefs, and our guess becomes less wrong. This is the alternative side of the prediction coin, where typically we try to be more right.
## Bayesian Inference in Practice
If frequentist and Bayesian inference were programming functions, with inputs being statistical problems, then the two would be different in what they return to the user. The frequentist inference function would return a number, representing an estimate (typically a summary statistic like the sample average etc.), whereas the Bayesian function would return probabilities.
For example, in our debugging problem above, calling the frequentist function with the argument "My code passed all $X$ tests; is my code bug-free?" would return a YES. On the other hand, asking our Bayesian function "Often my code has bugs. My code passed all $X$ tests; is my code bug-free?" would return something very different: probabilities of YES and NO. The function might return:
>YES, with probability 0.8; NO, with probability 0.2
This is very different from the answer the frequentist function returned. Notice that the Bayesian function accepted an additional argument: "Often my code has bugs". This parameter is the prior. By including the prior parameter, we are telling the Bayesian function to include our belief about the situation. Technically this parameter in the Bayesian function is optional, but we will see excluding it has its own consequences.
### Incorporating evidence
As we acquire more and more instances of evidence, our prior belief is washed out by the new evidence. This is to be expected. For example, if your prior belief is something ridiculous, like "I expect the sun to explode today", and each day you are proved wrong, you would hope that any inference would correct you, or at least align your beliefs better. Bayesian inference will correct this belief.
Denote $N$ as the number of instances of evidence we possess. As we gather an infinite amount of evidence, say as $N \to \infty$, our Bayesian results (often) align with frequentist results. Hence for large $N$, statistical inference is more or less objective. On the other hand, for small $N$, inference is much more unstable: frequentist estimates have more variance and larger confidence intervals. This is where Bayesian analysis excels. By introducing a prior, and returning probabilities (instead of a scalar estimate), we preserve the uncertainty that reflects the instability of statistical inference of a small-$N$ dataset.
One may think that for large $N$, one can be indifferent between the two techniques since they offer similar inference, and might lean towards the computationally-simpler, frequentist methods. An individual in this position should consider the following quote by Andrew Gelman (2005)[1], before making such a decision:
> Sample sizes are never large. If $N$ is too small to get a sufficiently-precise estimate, you need to get more data (or make more assumptions). But once $N$ is "large enough," you can start subdividing the data to learn more (for example, in a public opinion poll, once you have a good estimate for the entire country, you can estimate among men and women, northerners and southerners, different age groups, etc.). $N$ is never enough because if it were "enough" you'd already be on to the next problem for which you need more data.
## Are frequentist methods incorrect then?
No.
Frequentist methods are still useful or state-of-the-art in many areas. Tools such as least squares linear regression, LASSO regression, and expectation-maximization algorithms are all powerful and fast. Bayesian methods complement these techniques by solving problems that these approaches cannot, or by illuminating the underlying system with more flexible modeling.
### A note on *Big Data*
Paradoxically, big data's predictive analytic problems are actually solved by relatively simple algorithms [2][4]. Thus we can argue that big data's prediction difficulty does not lie in the algorithm used, but instead on the computational difficulties of storage and execution on big data. (One should also consider Gelman's quote from above and ask "Do I really have big data?")
The much more difficult analytic problems involve medium data and, especially troublesome, really small data. Using a similar argument as Gelman's above, if big data problems are big enough to be readily solved, then we should be more interested in the not-quite-big enough datasets.
## Our Bayesian framework
We are interested in beliefs, which can be interpreted as probabilities by thinking Bayesian. We have a prior belief in event $A$, beliefs formed by previous information, e.g., our prior belief about bugs being in our code before performing tests.
Secondly, we observe our evidence. To continue our buggy-code example: if our code passes $X$ tests, we want to update our belief to incorporate this. We call this new belief the posterior probability. Updating our belief is done via the following equation, known as Bayes' Theorem, after its discoverer Thomas Bayes:
$$ P(A|X) = \frac{P(X | A) P(A) }{P(X) } $$
$$ P(A|X) \propto{P(X | A) P(A) } $$
NOTE: ($\propto$ is "proportional to")
The above formula is not unique to Bayesian inference: it is a mathematical fact with uses outside Bayesian inference. Bayesian inference merely uses it to connect prior probabilities $P(A)$ with updated posterior probabilities $P(A|X)$.
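To make the proportionality concrete, here is a minimal sketch (plain NumPy, with made-up numbers rather than anything from this chapter): we score three candidate values of a coin's heads-probability against a single observed head, weight each likelihood by a uniform prior, and normalize to recover the posterior.
```python
import numpy as np

# A minimal sketch of Bayes' Theorem on a made-up grid of hypotheses:
# posterior is proportional to likelihood * prior, normalized by P(X).
hypotheses = np.array([0.25, 0.50, 0.75])  # candidate values of P(heads)
prior = np.array([1/3, 1/3, 1/3])          # uniform prior belief
likelihood = hypotheses                    # P(one observed head | hypothesis)
posterior = likelihood * prior
posterior /= posterior.sum()               # divide by P(X) so beliefs sum to 1
print(posterior)                           # [1/6, 1/3, 1/2]: belief shifts toward 0.75
```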
## Example: Mandatory coin-flip example
Every statistics text must contain a coin-flipping example, so I'll use it here to get it out of the way. Suppose, naively, that you are unsure about the probability of heads in a coin flip (spoiler alert: it's 50%). You believe there is some true underlying ratio, call it $p$, but have no prior opinion on what $p$ might be.
We begin to flip a coin, and record the observations: either H or T. This is our observed data. An interesting question to ask is how our inference changes as we observe more and more data. More specifically, what do our posterior probabilities look like when we have little data, versus when we have lots of data?
Below we plot a sequence of updating posterior probabilities as we observe increasing amounts of data (coin flips), while also demonstrating some of the best practices for evaluating tensors and plotting the data. First, the easy part: we define the values in our Tensorflow graph.
```
# Build graph.
probs_of_heads = tf.linspace(start=0., stop=1., num=100, name="linspace")
n_trials_ = [0, 1, 2, 3, 4, 5, 8, 15, 50, 500]
coin_flip_prior = tfp.distributions.Bernoulli(probs=0.5)
coin_flip_data = coin_flip_prior.sample(n_trials_[-1])
n_trials_unpacked = tf.unstack(tf.constant(n_trials_))
counted = [] # this will be the list of processed head count tensors
for k, N in enumerate(n_trials_):
result_tensor = tf.reduce_sum(coin_flip_data[:N])
counted.append(result_tensor)
headcounts = tf.stack(counted, 0)
observed_head_probs = [] # this will be the list of processed probability tensors
for k, N in enumerate(n_trials_):
result_tensor = tfp.distributions.Beta(
concentration1 = tf.to_float(1 + headcounts[k]),
concentration0 = tf.to_float(1 + n_trials_[k] - headcounts[k])).prob(probs_of_heads)
observed_head_probs.append(result_tensor)
observed_probs_heads = tf.stack(observed_head_probs, 0)
```
Next we move on to executing the graph. When it comes to calculations that need to be made frequently and repeatedly, this method of first defining and then executing graphs provides a handy speed boost. We can actually use a custom `evaluate()` function that allows us to evaluate tensors whether we are operating in TF Graph mode, or whether we have Eager mode active. The function looks like the following:
```python
def evaluate(tensors):
"""Evaluates Tensor or EagerTensor to Numpy `ndarray`s.
Args:
tensors: Object of `Tensor` or EagerTensor`s; can be `list`, `tuple`,
`namedtuple` or combinations thereof.
Returns:
ndarrays: Object with same structure as `tensors` except with `Tensor` or
`EagerTensor`s replaced by Numpy `ndarray`s.
"""
if tf.executing_eagerly():
return tf.contrib.framework.nest.pack_sequence_as(
tensors,
[t.numpy() if tf.contrib.framework.is_tensor(t) else t
for t in tf.contrib.framework.nest.flatten(tensors)])
return sess.run(tensors)
```
To graph the tensors, we need to convert them into numpy variables. One handy way of associating tensors with their corresponding numpy variables is to append an underscore to the numpy-like arrays. For example, if the input to `evaluate()` is `variable`, then we assign the evaluated value to `variable_`. Below we see an example of how we use both `evaluate()` and this new styling.
```
# Execute graph
[
n_trials_unpacked_,
coin_flip_data_,
probs_of_heads_,
headcounts_,
observed_probs_heads_,
] = evaluate([
n_trials_unpacked,
coin_flip_data,
probs_of_heads,
headcounts,
observed_probs_heads,
])
```
Finally, we move on to plotting our evaluated tensors in matplotlib.
```
# For the already prepared, I'm using Binomial's conj. prior.
plt.figure(figsize(16, 9))
for i in range(len(n_trials_)):
    sx = plt.subplot(len(n_trials_)//2, 2, i+1)
plt.xlabel("$p$, probability of heads") \
if i in [0, len(n_trials_)-1] else None
plt.setp(sx.get_yticklabels(), visible=False)
plt.plot(probs_of_heads_, observed_probs_heads_[i],
label="observe %d tosses,\n %d heads" % (n_trials_[i], headcounts_[i]))
plt.fill_between(probs_of_heads_, 0, observed_probs_heads_[i],
color=TFColor[3], alpha=0.4)
plt.vlines(0.5, 0, 4, color="k", linestyles="--", lw=1)
leg = plt.legend()
leg.get_frame().set_alpha(0.4)
plt.autoscale(tight=True)
plt.suptitle("Bayesian updating of posterior probabilities", y=1.02,
fontsize=14)
plt.tight_layout()
```
The posterior probabilities are represented by the curves, and our uncertainty is proportional to the width of the curve. As the plot above shows, as we start to observe data our posterior probabilities start to shift and move around. Eventually, as we observe more and more data (coin-flips), our probabilities will tighten closer and closer around the true value of $p=0.5$ (marked by a dashed line).
Notice that the plots are not always peaked at 0.5. There is no reason it should be: recall we assumed we did not have a prior opinion of what $p$ is. In fact, if we observe quite extreme data, say 8 flips and only 1 observed heads, our distribution would look very biased away from lumping around 0.5 (with no prior opinion, how confident would you feel betting on a fair coin after observing 7 tails and 1 head?). As more data accumulates, we would see more and more probability being assigned at $p=0.5$, though never all of it.
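As a quick numeric check of that intuition (a SciPy sketch, not part of the original notebook): with a uniform prior, observing 1 head and 7 tails gives the Beta(1 + 1, 1 + 7) posterior used by the plotting code above, and almost all of its mass sits below 0.5.
```python
from scipy import stats

# Uniform prior + (1 head, 7 tails) -> Beta(2, 8) posterior.
posterior = stats.beta(a=1 + 1, b=1 + 7)
print(posterior.mean())    # 0.2: the posterior centers well below 0.5
print(posterior.cdf(0.5))  # ~0.98: nearly all belief sits on p < 0.5
```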
The next example is a simple demonstration of the mathematics of Bayesian inference.
## Example: Bug, or just sweet, unintended feature?
Let $A$ denote the event that our code has no bugs in it. Let $X$ denote the event that the code passes all debugging tests. For now, we will leave the prior probability of no bugs as a variable, i.e. $P(A)=p$.
We are interested in $P(A|X)$, i.e. the probability of no bugs, given our debugging tests $X$. To use the formula above, we need to compute some quantities.
What is $P(X|A)$, i.e., the probability that the code passes $X$ tests given there are no bugs? Well, it is equal to 1, for a code with no bugs will pass all tests.
$P(X)$ is a little bit trickier: the event $X$ can be divided into two possibilities, event $X$ occurring even though our code indeed has bugs (denoted $\sim A$, spoken *not* $A$), or event $X$ occurring without bugs ($A$). $P(X)$ can be represented as:
$$ \begin{align*}
P(A|X) &= \frac{P(X | A) P(A) }{P(X) } \\
P(X) &= P(X \text{ and } A) + P(X \text{ and } \sim A) \\
&= P(X|A)P(A) + P(X | \sim A)P(\sim A) \\
&= P(X|A)p + P(X | \sim A)(1-p) \end{align*} $$
We have already computed $P(X|A)$ above. On the other hand, $P(X|\sim A)$ is subjective: our code can pass tests but still have a bug in it, though the probability there is a bug present is reduced. Note this is dependent on the number of tests performed, the degree of complication in the tests, etc. Let's be conservative and assign $P(X|\sim A)=0.5$. Then:
$$ \begin{align*}
P(A | X) &= \frac{1\cdot p}{ 1\cdot p +0.5 (1-p) } \\
&= \frac{ 2 p}{1+p} \end{align*} $$
This is the posterior probability. What does it look like as a function of our prior, $p \in [0,1]$?
```
# Defining our range of probabilities
p = tf.linspace(start=0., stop=1., num=50)
# Convert from TF to numpy.
[p_] = evaluate([p])
# Visualization.
plt.figure(figsize=(12.5, 6))
plt.plot(p_, 2*p_/(1+p_), color=TFColor[3], lw=3)
#plt.fill_between(p, 2*p/(1+p), alpha=.5, facecolor=["#A60628"])
plt.scatter(0.2, 2*(0.2)/1.2, s=140, c=TFColor[3])
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.xlabel("Prior, $P(A) = p$")
plt.ylabel("Posterior, $P(A|X)$, with $P(A) = p$")
plt.title("Are there bugs in my code?");
```
We can see the biggest gains if we observe the $X$ tests passed when the prior probability, $p$, is low. Let's settle on a specific value for the prior. I'm a strong programmer (I think), so I'm going to give myself a realistic prior of 0.20, that is, there is a 20% chance that I write code bug-free. To be more realistic, this prior should be a function of how complicated and large the code is, but let's pin it at 0.20. Then my updated belief that my code is bug-free is 0.33.
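The 0.33 figure is nothing more than the formula above evaluated at $p = 0.2$; a one-line check:
```python
p = 0.20
print(2 * p / (1 + p))  # 0.333...: passing the tests raised our belief from 0.20 to 1/3
```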
Recall that the prior is a probability: $p$ is the prior probability that there are no bugs, so $1-p$ is the prior probability that there are bugs.
Similarly, our posterior is also a probability, with $P(A|X)$ the probability there is no bug given we saw all tests pass, hence $1-P(A|X)$ is the probability there is a bug given all tests passed. What does our posterior probability look like? Below is a chart of both the prior and the posterior probabilities.
```
# Defining our priors and posteriors
prior = tf.constant([0.20, 0.80])
posterior = tf.constant([1./3, 2./3])
# Convert from TF to numpy.
[
prior_,
posterior_,
] = evaluate([
prior,
posterior,
])
# Our Simple Visualization
plt.figure(figsize=(12.5, 4))
colours = [TFColor[0], TFColor[3]]
plt.bar([0, .7], prior_, alpha=0.70, width=0.25,
color=colours[0], label="prior distribution",
lw="3", edgecolor=colours[0])
plt.bar([0+0.25, .7+0.25], posterior_, alpha=0.7,
width=0.25, color=colours[1],
label="posterior distribution",
lw="3", edgecolor=colours[1])
plt.xticks([0.20, .95], ["Bugs Absent", "Bugs Present"])
plt.title("Prior and Posterior probability of bugs present")
plt.ylabel("Probability")
plt.legend(loc="upper left");
```
Notice that after we observed $X$ occur, the probability of bugs being absent increased. By increasing the number of tests, we can approach confidence (probability 1) that there are no bugs present.
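To see this numerically, here is a small sketch (plain Python, under the assumption that each new round of tests is independent and keeps $P(X|A) = 1$ and $P(X|\sim A) = 0.5$): the posterior after each passing round becomes the prior for the next.
```python
p = 0.20  # prior belief that the code is bug-free
for test_round in range(1, 11):
    p = p / (p + 0.5 * (1 - p))    # Bayes update after one more passing round
    print(test_round, round(p, 4)) # climbs 0.3333, 0.5, 0.6667, ... toward 1
```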
This was a very simple example of Bayesian inference and Bayes rule. Unfortunately, the mathematics necessary to perform more complicated Bayesian inference only becomes more difficult, except for artificially constructed cases. We will later see that this type of mathematical analysis is actually unnecessary. First we must broaden our modeling tools. The next section deals with probability distributions. If you are already familiar, feel free to skip (or at least skim), but for the less familiar the next section is essential.
## Probability Distributions
Let's quickly recall what a probability distribution is: Let $Z$ be some random variable. Then associated with $Z$ is a probability distribution function that assigns probabilities to the different outcomes $Z$ can take. Graphically, a probability distribution is a curve where the probability of an outcome is proportional to the height of the curve. You can see examples in the first figure of this chapter.
We can divide random variables into three classifications:
* $Z$ is discrete: Discrete random variables may only assume values on a specified list. Things like populations, movie ratings, and number of votes are all discrete random variables. Discrete random variables become clearer when we contrast them with...
* $Z$ is continuous: Continuous random variables can take on arbitrarily exact values. For example, temperature, speed, time, and color are all modeled as continuous variables because you can progressively make the values more and more precise.
* $Z$ is mixed: Mixed random variables assign probabilities to both discrete and continuous random variables, i.e. it is a combination of the above two categories.
### Discrete Case
If $Z$ is discrete, then its distribution is called a *probability mass function*, which measures the probability $Z$ takes on the value $k$, denoted $P(Z=k)$. Note that the probability mass function completely describes the random variable $Z$, that is, if we know the mass function, we know how $Z$ should behave. There are popular probability mass functions that consistently appear: we will introduce them as needed, but let's introduce the first very useful probability mass function. We say $Z$ is *Poisson*-distributed if:
$$P(Z = k) =\frac{ \lambda^k e^{-\lambda} }{k!}, \; \; k=0,1,2, \dots $$
$\lambda$ is called a parameter of the distribution, and it controls the distribution's shape. For the Poisson distribution, $\lambda$ can be any positive number. By increasing $\lambda$, we add more probability to larger values, and conversely by decreasing $\lambda$ we add more probability to smaller values. One can describe $\lambda$ as the *intensity* of the Poisson distribution.
Unlike $\lambda$, which can be any positive number, the value $k$ in the above formula must be a non-negative integer, i.e., $k$ must take on values 0,1,2, and so on. This is very important, because if you wanted to model a population you could not make sense of populations with 4.25 or 5.612 members.
If a random variable $Z$ has a Poisson mass distribution, we denote this by writing
$$Z \sim \text{Poi}(\lambda) $$
One useful property of the Poisson distribution is that its expected value is equal to its parameter, i.e.:
$$E\large[ \;Z\; | \; \lambda \;\large] = \lambda $$
We will use this property often, so it's useful to remember. Below, we plot the probability mass distribution for different $\lambda$ values. The first thing to notice is that by increasing $\lambda$, we add more probability of larger values occurring. Second, notice that although the graph ends at 15, the distributions do not. They assign positive probability to every non-negative integer.
```
# Build graph.
grid_of_days = tf.range(start=0., limit=16.)
texts_per_day = tf.constant([1.5, 4.25])
text_count_probs = tfp.distributions.Poisson(
rate=texts_per_day[:, tf.newaxis]).prob(grid_of_days)
# Execute graph
[
grid_of_days_,
texts_per_day_,
text_count_probs_,
] = evaluate([
grid_of_days,
texts_per_day,
text_count_probs,
])
# Display results
plt.figure(figsize=(12.5, 4))
colours = [TFColor[0], TFColor[1]]
plt.bar(grid_of_days_,
text_count_probs_[0],
color=colours[0],
label="$\lambda = %.1f$" % texts_per_day_[0], alpha=0.60,
edgecolor=colours[0], lw="3")
plt.bar(grid_of_days_,
text_count_probs_[1],
color=colours[1],
label="$\lambda = %.1f$" % texts_per_day_[1], alpha=0.60,
edgecolor=colours[1], lw="3")
plt.xticks(grid_of_days_ + 0.4, grid_of_days_)
plt.legend()
plt.ylabel("probability of $k$")
plt.xlabel("$k$")
plt.title("Probability mass function of a Poisson random variable; differing "
"$\lambda$ values");
```
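As a sanity check of $E\large[ \;Z\; | \; \lambda \;\large] = \lambda$, independent of the TFP graph above, here is a plain-NumPy sketch:
```python
import numpy as np

# Sample means of Poisson draws should land close to the rate parameter.
rng = np.random.default_rng(0)
for lam in (1.5, 4.25):
    samples = rng.poisson(lam=lam, size=100_000)
    print(lam, samples.mean())
```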
### Continuous Case
Instead of a probability mass function, a continuous random variable has a *probability density function*. This might seem like unnecessary nomenclature, but the density function and the mass function are very different creatures. An example of a continuous random variable is one with *exponential density*. The density function for an exponential random variable looks like this:
$$f_Z(z | \lambda) = \lambda e^{-\lambda z }, \;\; z\ge 0$$
Like a Poisson random variable, an exponential random variable can take on only non-negative values. But unlike a Poisson variable, the exponential can take on *any* non-negative value, including non-integral values such as 4.25 or 5.612401. This property makes it a poor choice for count data, which must be an integer, but a great choice for time data, temperature data (measured in Kelvins, of course), or any other precise *and positive* variable. The graph below shows two probability density functions with different $\lambda$ values.
When a random variable $Z$ has an exponential distribution with parameter $\lambda$, we say *$Z$ is exponential* and write
$$Z \sim \text{Exp}(\lambda)$$
Given a specific $\lambda$, the expected value of an exponential random variable is equal to the inverse of $\lambda$, that is:
$$E[\; Z \;|\; \lambda \;] = \frac{1}{\lambda}$$
```
# Defining our Data and assumptions (use tf.linspace for continuous)
a = tf.range(start=0., limit=4., delta=0.04)
a = a[..., tf.newaxis]
lambdas = tf.constant([0.5, 1.])
# Now we use TFP to compute probabilities in a vectorized manner.
pdf_at_z = tfp.distributions.Exponential(rate=lambdas).prob(a)
# Convert from TF to numpy
[
a_,
lambdas_,
pdf_at_z_,
] = evaluate([
a,
lambdas,
pdf_at_z,
])
# Visualizing our results
plt.figure(figsize=(12.5, 4))
for i in range(lambdas_.size):
plt.plot(a_.T[0], pdf_at_z_.T[[i]][0],
lw=3, color=TFColor[i], label="$\lambda = %.1f$" % lambdas_[i])
plt.fill_between(a_.T[0], pdf_at_z_.T[[i]][0],
color=TFColor[i], alpha=.33)
plt.legend()
plt.ylabel("PDF at $z$")
plt.xlabel("$z$")
plt.ylim(0,1.2)
plt.title("Probability density function of an Exponential random variable;\
differing $\lambda$");
```
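And the matching sanity check for $E[\; Z \;|\; \lambda \;] = 1/\lambda$ (plain NumPy; note that NumPy parameterizes the exponential by the *scale* $1/\lambda$, not by the rate):
```python
import numpy as np

# Sample means of Exponential(lambda) draws should land close to 1/lambda.
rng = np.random.default_rng(0)
for lam in (0.5, 1.0):
    samples = rng.exponential(scale=1.0 / lam, size=100_000)
    print(lam, samples.mean(), 1.0 / lam)
```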
## But what is $\lambda \;$?
**This question is what motivates statistics**. In the real world, $\lambda$ is hidden from us. We see only $Z$, and must go backwards to try and determine $\lambda$. The problem is difficult because there is no one-to-one mapping from $Z$ to $\lambda$. Many different methods have been created to solve the problem of estimating $\lambda$, but since $\lambda$ is never actually observed, no one can say for certain which method is best!
Bayesian inference is concerned with *beliefs* about what $\lambda$ might be. Rather than try to guess $\lambda$ exactly, we can only talk about what $\lambda$ is likely to be by assigning a probability distribution to $\lambda$.
This might seem odd at first. After all, $\lambda$ is fixed; it is not (necessarily) random! How can we assign probabilities to values of a non-random variable? Ah, we have fallen for our old, frequentist way of thinking. Recall that under Bayesian philosophy, we *can* assign probabilities if we interpret them as beliefs. And it is entirely acceptable to have *beliefs* about the parameter $\lambda$.
#### Example: Inferring behaviour from text-message data
Let's try to model a more interesting example, one that concerns the rate at which a user sends and receives text messages:
> You are given a series of daily text-message counts from a user of your system. The data, plotted over time, appears in the chart below. You are curious to know if the user's text-messaging habits have changed over time, either gradually or suddenly. How can you model this? (This is in fact my own text-message data. Judge my popularity as you wish.)
```
# Defining our Data and assumptions
count_data = tf.constant([
13, 24, 8, 24, 7, 35, 14, 11, 15, 11, 22, 22, 11, 57,
11, 19, 29, 6, 19, 12, 22, 12, 18, 72, 32, 9, 7, 13,
19, 23, 27, 20, 6, 17, 13, 10, 14, 6, 16, 15, 7, 2,
15, 15, 19, 70, 49, 7, 53, 22, 21, 31, 19, 11, 18, 20,
12, 35, 17, 23, 17, 4, 2, 31, 30, 13, 27, 0, 39, 37,
5, 14, 13, 22,
], dtype=tf.float32)
count_data_mean = tf.reduce_mean(count_data)
n_count_data = tf.shape(count_data)
count_of_text_msgs = tf.range(n_count_data[0])
# Convert from TF to numpy.
[
count_data_,
count_data_mean_,
n_count_data_,
count_of_text_msgs_,
] = evaluate([
count_data,
count_data_mean,
n_count_data,
count_of_text_msgs,
])
# Visualizing the Results
plt.figure(figsize=(12.5, 4))
plt.bar(count_of_text_msgs_, count_data_, color="#5DA5DA")
plt.xlabel("Time (days)")
plt.ylabel("count of text-msgs received")
plt.title("Did the user's texting habits change over time?")
plt.xlim(0, n_count_data_[0]);
print("Count Data Mean: ", count_data_mean_)
```
Before we start modeling, see what you can figure out just by looking at the chart above. Would you say there was a change in behaviour during this time period?
How can we start to model this? Well, as we have conveniently already seen, a Poisson random variable is a very appropriate model for this type of *count* data. Denoting day $i$'s text-message count by $C_i$,
$$ C_i \sim \text{Poisson}(\lambda) $$
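If we were willing to assume a single, constant $\lambda$, its natural point estimate would be the sample mean, which is the maximum-likelihood estimate of a Poisson rate. A minimal sketch, assuming the `count_data_` array computed above:
```
# Under a single-rate Poisson model, the MLE of lambda is the sample mean.
lambda_hat = count_data_.mean()
print("single-lambda estimate:", lambda_hat)
```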
We are not sure what the value of the $\lambda$ parameter really is, however. Looking at the chart above, it appears that the rate might become higher late in the observation period, which is equivalent to saying that $\lambda$ increases at some point during the observations. (Recall that a higher value of $\lambda$ assigns more probability to larger outcomes. That is, there is a higher probability of many text messages having been sent on a given day.)
How can we represent this observation mathematically? Let's assume that on some day during the observation period (call it $\tau$), the parameter $\lambda$ suddenly jumps to a higher value. So we really have two $\lambda$ parameters: one for the period before $\tau$, and one for the rest of the observation period. In the literature, a sudden transition like this would be called a *switchpoint*:
$$\lambda =
\begin{cases} \lambda_1 & \text{if } t \lt \tau \cr
\lambda_2 & \text{if } t \ge \tau
\end{cases}
$$
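In code, this switchpoint is just a piecewise selection over days. A minimal NumPy sketch with hypothetical values (`tau`, `lambda_1`, and `lambda_2` here are placeholders, not inferred quantities):
```
# Piecewise rate: lambda_1 before day tau, lambda_2 from day tau onward.
import numpy as np

tau, lambda_1, lambda_2 = 45, 18.0, 23.0   # hypothetical values for illustration
days = np.arange(74)                       # one entry per observed day
lambda_per_day = np.where(days < tau, lambda_1, lambda_2)
```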
If, in reality, no sudden change occurred and indeed $\lambda_1 = \lambda_2$, then the posterior distributions of the two $\lambda$s should look about equal.
We are interested in inferring the unknown $\lambda$s. To use Bayesian inference, we need to assign prior probabilities to the different possible values of $\lambda$. What would be good prior probability distributions for $\lambda_1$ and $\lambda_2$? Recall that $\lambda$ can be any positive number. As we saw earlier, the *exponential* distribution provides a continuous density function for positive numbers, so it might be a good choice for modeling $\lambda_i$. But recall that the exponential distribution takes a parameter of its own, so we'll need to include that parameter in our model. Let's call that parameter $\alpha$.
\begin{align}
&\lambda_1 \sim \text{Exp}( \alpha ) \\\
&\lambda_2 \sim \text{Exp}( \alpha )
\end{align}
$\alpha$ is called a *hyper-parameter* or *parent variable*. In literal terms, it is a parameter that influences other parameters. Our initial guess at $\alpha$ does not influence the model too strongly, so we have some flexibility in our choice. A good rule of thumb is to set the exponential parameter equal to the inverse of the average of the count data. Since we're modeling $\lambda$ using an exponential distribution, we can use the expected value identity shown earlier to get:
$$\frac{1}{N}\sum_{i=0}^N \;C_i \approx E[\; \lambda \; |\; \alpha ] = \frac{1}{\alpha}$$
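Concretely, with the `count_data_` array defined earlier, the rule of thumb amounts to one line (a sketch):
```
# Set the hyper-parameter so that E[lambda | alpha] = 1/alpha matches the
# observed average daily count.
alpha = 1.0 / count_data_.mean()
print("alpha:", alpha)
```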
An alternative, and something I encourage the reader to try, would be to have two priors: one for each $\lambda_i$. Creating two exponential distributions with different $\alpha$ values reflects our prior belief that the rate changed at some point during the observations.
What about $\tau$? Because of the noisiness of the data, it's difficult to pick out a priori when $\tau$ might have occurred. Instead, we can assign a *uniform prior belief* to every possible day. This is equivalent to saying
\begin{align}
& \tau \sim \text{DiscreteUniform(1,74) }\\\\
& \Rightarrow P( \tau = k ) = \frac{1}{74}
\end{align}
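One way to express this prior with TFP's building blocks is a `Categorical` distribution with equal logits, which places mass $1/74$ on each day. This is a sketch for intuition only; the model below will instead use a continuous `Uniform` on $[0, 1)$ scaled by the number of days:
```
# A discrete-uniform prior over the 74 observed days.
rv_tau_discrete = tfp.distributions.Categorical(logits=tf.zeros(74))
# Each day k receives probability 1/74 ~= 0.0135, e.g.:
# evaluate(rv_tau_discrete.prob(45))
```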
So after all this, what does our overall prior distribution for the unknown variables look like? Frankly, *it doesn't matter*. What we should understand is that it's an ugly, complicated mess involving symbols only a mathematician could love. And things will only get uglier the more complicated our models become. Regardless, all we really care about is the posterior distribution.
We next turn to [TensorFlow Probability](https://tensorflow.org/probability), a Python library for performing Bayesian analysis that is undaunted by the mathematical monster we have created.
## Introducing our first hammer: TensorFlow Probability
TensorFlow Probability (TFP) is a Python library for programming Bayesian analysis. It is intended for data scientists, statisticians, machine learning practitioners, and scientists. Since it is built on the TensorFlow (TF) stack, it brings the runtime benefits of TF to Bayesian analysis: write once, run many (the ability to deploy your development model in production), and speedups via state-of-the-art hardware (GPUs and TPUs).
Since TFP is relatively new, its community is still actively developing documentation, especially examples that bridge the gap between beginner and hacker. One of this book's main goals is to help fill that gap, and also to demonstrate why TFP is so cool.
We will model the problem above using TFP. This type of programming is called *probabilistic programming*, an unfortunate misnomer that invokes ideas of randomly-generated code and has likely confused and frightened users away from this field. The code is not random; it is probabilistic in the sense that we create probability models using programming variables as the model's components. Model components are first-class primitives within the TFP framework.
B. Cronin [4] has a very motivating description of probabilistic programming:
> Another way of thinking about this: unlike a traditional program, which only runs in the forward direction, a probabilistic program is run in both the forward and backward direction. It runs forward to compute the consequences of the assumptions it contains about the world (i.e., the model space it represents), but it also runs backward from the data to constrain the possible explanations. In practice, many probabilistic programming systems will cleverly interleave these forward and backward operations to efficiently home in on the best explanations.
Because of the confusion engendered by the term *probabilistic programming*, I'll refrain from using it. Instead, I'll simply say *programming*, since that's what it really is.
TFP code is easy to read. The only novel thing should be the syntax. Simply remember that we are representing the model's components ($\tau, \lambda_1, \lambda_2$ ) as variables.
## Specify the joint log-density
We'll assume the data is a consequence of the following generative model:
$$\begin{align*}
\lambda_{1}^{(0)} &\sim \text{Exponential}(\text{rate}=\alpha) \\
\lambda_{2}^{(0)} &\sim \text{Exponential}(\text{rate}=\alpha) \\
\tau &\sim \text{Uniform}[\text{low}=0,\text{high}=1) \\
\text{for } i &= 1\ldots N: \\
\lambda_i &= \begin{cases} \lambda_{1}^{(0)}, & \tau > i/N \\ \lambda_{2}^{(0)}, & \text{otherwise}\end{cases}\\
X_i &\sim \text{Poisson}(\text{rate}=\lambda_i)
\end{align*}$$
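Before doing any inference, we can run this generative model *forward*: draw the latent variables from their priors and simulate a dataset. The sketch below is my own illustration (with an arbitrary $\alpha$), not part of the original notebook:
```
# Forward (ancestral) sampling from the generative model above.
tfd = tfp.distributions
N, alpha_sim = 74, 0.05                        # assumed values for illustration
lam1 = tfd.Exponential(rate=alpha_sim).sample()
lam2 = tfd.Exponential(rate=alpha_sim).sample()
tau_sim = tfd.Uniform(low=0., high=1.).sample()
# Day i uses lam1 while tau > i/N, and lam2 afterwards.
after = tf.cast(tau_sim <= tf.range(N, dtype=tf.float32) / N, tf.int32)
simulated_counts = tfd.Poisson(rate=tf.gather([lam1, lam2], after)).sample()
# evaluate(simulated_counts) yields one synthetic 74-day dataset.
```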
Happily, this model can be easily implemented using TF and TFP's distributions.
The code below creates a value `lambda_`, but we can really think of it as a random variable: the random variable $\lambda$ from above. The [gather](https://www.tensorflow.org/api_docs/python/tf/gather) function assigns `lambda_1` or `lambda_2` as the value of `lambda_`, depending on which side of `tau` we are on. The values of `lambda_` up until `tau` are `lambda_1`, and the values afterwards are `lambda_2`.
Note that because `lambda_1`, `lambda_2` and `tau` are random, `lambda_` will be random. We are **not** fixing any variables yet.
```
def joint_log_prob(count_data, lambda_1, lambda_2, tau):
    tfd = tfp.distributions
    # Rule-of-thumb hyper-parameter: alpha = 1 / mean of the count data.
    alpha = np.array(1. / count_data.mean(), np.float32)
    rv_lambda_1 = tfd.Exponential(rate=alpha)
    rv_lambda_2 = tfd.Exponential(rate=alpha)
    # tau is modeled as a fraction of the observation window: Uniform[0, 1).
    rv_tau = tfd.Uniform()
    # lambda_1 before the switchpoint, lambda_2 from the switchpoint onward.
    lambda_ = tf.gather(
        [lambda_1, lambda_2],
        indices=tf.to_int32(tau * count_data.size <= np.arange(count_data.size)))
    rv_observation = tfd.Poisson(rate=lambda_)
    return (
        rv_lambda_1.log_prob(lambda_1)
        + rv_lambda_2.log_prob(lambda_2)
        + rv_tau.log_prob(tau)
        + tf.reduce_sum(rv_observation.log_prob(count_data))
    )
```
Notice that the implementation is nearly a 1:1 translation of the mathematical model. The main difference is that once we've specified the probabilistic model, we return the sum of the log-probs.
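As a quick sanity check (my own, with placeholder parameter values), the function should return a finite scalar when evaluated at an arbitrary point in parameter space:
```
# Spot-check the joint log-density at an arbitrary parameter setting.
test_log_prob = joint_log_prob(count_data_, lambda_1=20., lambda_2=20., tau=0.5)
print(evaluate(test_log_prob))  # a finite (negative) scalar
```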
## Specify the posterior sampler
The code below will be explained in Chapter 3, but we show it here so you can see where our results come from. One can think of it as a *learning* step. The machinery being employed is called *Markov chain Monte Carlo* (MCMC), which we also delay explaining until Chapter 3. This technique returns thousands of random *samples* from the posterior distributions of $\lambda_1, \lambda_2$ and $\tau$. We can plot histograms of these samples to see what the posterior distributions look like. Below, we collect the samples (called *traces* in the MCMC literature) into histograms.
```
# Set the chain's start state.
initial_chain_state = [
tf.to_float(tf.reduce_mean(count_data)) * tf.ones([], dtype=tf.float32, name="init_lambda1"),
tf.to_float(tf.reduce_mean(count_data)) * tf.ones([], dtype=tf.float32, name="init_lambda2"),
0.5 * tf.ones([], dtype=tf.float32, name="init_tau"),
]
# HMC operates over an unconstrained space, so we pair each variable with a
# bijector that maps between that space and the variable's constrained support.
unconstraining_bijectors = [
    tfp.bijectors.Exp(),       # lambda_1 > 0; its inverse (log) unconstrains it.
    tfp.bijectors.Exp(),       # lambda_2 > 0; its inverse (log) unconstrains it.
    tfp.bijectors.Sigmoid(),   # tau lives in [0, 1]; its inverse (logit) unconstrains it.
]
# Redefinition of joint_log_prob using TF ops throughout (equivalent to the
# NumPy-flavoured version above).
def joint_log_prob(count_data, lambda_1, lambda_2, tau):
    tfd = tfp.distributions
    alpha = (1. / tf.reduce_mean(count_data))
    rv_lambda_1 = tfd.Exponential(rate=alpha)
    rv_lambda_2 = tfd.Exponential(rate=alpha)
    rv_tau = tfd.Uniform()
    lambda_ = tf.gather(
        [lambda_1, lambda_2],
        indices=tf.to_int32(tau * tf.to_float(tf.size(count_data)) <= tf.to_float(tf.range(tf.size(count_data)))))
    rv_observation = tfd.Poisson(rate=lambda_)
    return (
        rv_lambda_1.log_prob(lambda_1)
        + rv_lambda_2.log_prob(lambda_2)
        + rv_tau.log_prob(tau)
        + tf.reduce_sum(rv_observation.log_prob(count_data))
    )
# Define a closure over our joint_log_prob.
def unnormalized_log_posterior(lambda1, lambda2, tau):
    return joint_log_prob(count_data, lambda1, lambda2, tau)
# Initialize the step_size. (It will be automatically adapted.)
step_size = tf.get_variable(
    name='step_size',
    initializer=tf.constant(0.05, dtype=tf.float32),
    trainable=False)
# Sample from the chain.
[
    lambda_1_samples,
    lambda_2_samples,
    posterior_tau,
], kernel_results = tfp.mcmc.sample_chain(
    num_results=10000,
    num_burnin_steps=1000,
    current_state=initial_chain_state,
    kernel=tfp.mcmc.TransformedTransitionKernel(
        inner_kernel=tfp.mcmc.HamiltonianMonteCarlo(
            target_log_prob_fn=unnormalized_log_posterior,
            num_leapfrog_steps=2,
            step_size=step_size,
            step_size_update_fn=tfp.mcmc.make_simple_step_size_update_policy(),
            state_gradients_are_stopped=True),
        bijector=unconstraining_bijectors))
# Convert our samples to what the book's code expects.
tau_samples = tf.floor(posterior_tau * tf.to_float(tf.size(count_data)))
# tau_samples, lambda_1_samples, lambda_2_samples contain
# N samples from the corresponding posterior distribution
N = tf.shape(tau_samples)[0]
expected_texts_per_day = tf.zeros(n_count_data)
# Initialize any created variables.
init_g = tf.global_variables_initializer()
init_l = tf.local_variables_initializer()
```
## Executing the TF graph to sample from the posterior
```
evaluate(init_g)
evaluate(init_l)
[
lambda_1_samples_,
lambda_2_samples_,
tau_samples_,
kernel_results_,
N_,
expected_texts_per_day_,
] = evaluate([
lambda_1_samples,
lambda_2_samples,
tau_samples,
kernel_results,
N,
expected_texts_per_day,
])
print("acceptance rate: {}".format(
kernel_results_.inner_results.is_accepted.mean()))
print("final step size: {}".format(
kernel_results_.inner_results.extra.step_size_assign[-100:].mean()))
```
acceptance rate: 0.5887
final step size: 0.02644856460392475
## Plot the Results
```
plt.figure(figsize=(12.5, 15))
#histogram of the samples:
ax = plt.subplot(311)
ax.set_autoscaley_on(False)
plt.hist(lambda_1_samples_, histtype='stepfilled', bins=30, alpha=0.85,
label="posterior of $\lambda_1$", color=TFColor[0], normed=True)
plt.legend(loc="upper left")
plt.title(r"""Posterior distributions of the variables
$\lambda_1,\;\lambda_2,\;\tau$""")
plt.xlim([15, 30])
plt.xlabel("$\lambda_1$ value")
ax = plt.subplot(312)
ax.set_autoscaley_on(False)
plt.hist(lambda_2_samples_, histtype='stepfilled', bins=30, alpha=0.85,
label="posterior of $\lambda_2$", color=TFColor[6], normed=True)
plt.legend(loc="upper left")
plt.xlim([15, 30])
plt.xlabel("$\lambda_2$ value")
plt.subplot(313)
w = 1.0 / tau_samples_.shape[0] * np.ones_like(tau_samples_)
plt.hist(tau_samples_, bins=n_count_data_[0], alpha=1,
label=r"posterior of $\tau$",
color=TFColor[2], weights=w, rwidth=2.)
plt.xticks(np.arange(n_count_data_[0]))
plt.legend(loc="upper left")
plt.ylim([0, .75])
plt.xlim([35, len(count_data_)-20])
plt.xlabel(r"$\tau$ (in days)")
plt.ylabel("probability");
```
## Interpretation
Recall that Bayesian methodology returns a *distribution*. Hence we now have distributions to describe the unknown $\lambda$s and $\tau$. What have we gained? Immediately, we can see the uncertainty in our estimates: the wider the distribution, the less certain our posterior belief should be. We can also see what the plausible values for the parameters are: $\lambda_1$ is around 18 and $\lambda_2$ is around 23. The posterior distributions of the two $\lambda$s are clearly distinct, indicating that it is indeed likely that there was a change in the user's text-message behaviour.
What other observations can you make? If you look at the original data again, do these results seem reasonable?
Notice also that the posterior distributions for the $\lambda$s do not look like exponential distributions, even though our priors for these variables were exponential. In fact, the posterior distributions are not really of any form that we recognize from the original model. But that's OK! This is one of the benefits of taking a computational point of view. If we had instead done this analysis using mathematical approaches, we would have been stuck with an analytically intractable (and messy) distribution. Our use of a computational approach makes us indifferent to mathematical tractability.
Our analysis also returned a distribution for $\tau$. Its posterior distribution looks a little different from the other two because it is a discrete random variable, so it doesn't assign probabilities to intervals. We can see that near day 45, there was a 50% chance that the user's behaviour changed. Had no change occurred, or had the change been gradual over time, the posterior distribution of $\tau$ would have been more spread out, reflecting that many days were plausible candidates for $\tau$. By contrast, in the actual results we see that only three or four days make any sense as potential transition points.
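We can make that last claim precise by tabulating how much posterior mass each candidate day receives (a sketch over the samples already drawn):
```
# Posterior probability of each candidate switchpoint day.
import numpy as np

days, counts = np.unique(tau_samples_.astype(int), return_counts=True)
for day, n in zip(days, counts):
    print("day %d: %.3f" % (day, n / len(tau_samples_)))
```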
### Why would I want samples from the posterior, anyways?
We will deal with this question for the remainder of the book, and it is an understatement to say that it will lead us to some amazing results. For now, let's end this chapter with one more example.
We'll use the posterior samples to answer the following question: what is the expected number of texts at day $t, \; 0 \le t \le 74$? Recall that the expected value of a Poisson variable is equal to its parameter $\lambda$. Therefore, the question is equivalent to *what is the expected value of $\lambda$ at time $t$*?
In the code below, let $i$ index samples from the posterior distributions. Given a day $t$, we average over all possible $\lambda_i$ for that day $t$, using $\lambda_i = \lambda_{1,i}$ if $t \lt \tau_i$ (that is, if the behaviour change has not yet occurred), else we use $\lambda_i = \lambda_{2,i}$.
```
plt.figure(figsize=(12.5, 9))
for day in range(0, n_count_data_[0]):
    # ix is a boolean mask over the tau samples: True where the switchpoint
    # occurs *after* 'day', i.e. day is still in the lambda_1 regime.
    ix = day < tau_samples_
    # Each posterior sample corresponds to a value for tau. For each day, that
    # value of tau indicates whether we're "before" (in the lambda_1 "regime")
    # or "after" (in the lambda_2 "regime") the switchpoint. By taking the
    # posterior sample of lambda_1/2 accordingly, we can average over all
    # samples to get an expected value for lambda on that day. As explained,
    # the "message count" random variable is Poisson distributed, and
    # therefore lambda (the Poisson parameter) is the expected value of
    # "message count".
    expected_texts_per_day_[day] = (lambda_1_samples_[ix].sum()
                                    + lambda_2_samples_[~ix].sum()) / N_
plt.plot(range(n_count_data_[0]), expected_texts_per_day_, lw=4, color="#E24A33",
label="expected number of text-messages received")
plt.xlim(0, n_count_data_[0])
plt.xlabel("Day")
plt.ylabel("Expected # text-messages")
plt.title("Expected number of text-messages received")
plt.ylim(0, 60)
plt.bar(np.arange(len(count_data_)), count_data_, color="#5DA5DA", alpha=0.65,
label="observed texts per day")
plt.legend(loc="upper left");
```
Our analysis shows strong support for believing the user's behavior did change ($\lambda_1$ would have been close in value to $\lambda_2$ had this not been true), and that the change was sudden rather than gradual (as demonstrated by $\tau$'s strongly peaked posterior distribution). We can speculate what might have caused this: a cheaper text-message rate, a recent weather-to-text subscription, or perhaps a new relationship. (In fact, the 45th day corresponds to Christmas, and I moved away to Toronto the next month, leaving a girlfriend behind.)
## Exercises
1. Using `lambda_1_samples` and `lambda_2_samples`, what is the mean of the posterior distributions of $\lambda_1$ and $\lambda_2$?
```
#type your code here.
```
2. What is the expected percentage increase in text-message rates? `hint:` compute the mean of `(lambda_2_samples - lambda_1_samples) / lambda_1_samples`. Note that this quantity is very different from `(lambda_2_samples.mean() - lambda_1_samples.mean()) / lambda_1_samples.mean()`
```
#type your code here.
```
3. What is the mean of $\lambda_1$ **given** that we know $\tau$ is less than 45? That is, suppose we have been given new information that the change in behaviour occurred prior to day 45. What is the expected value of $\lambda_1$ now? (You do not need to redo the TFP part. Just consider all instances where `tau_samples < 45`.)
```
#type your code here.
```
## References
[1] Gelman, Andrew. [N is never large enough](http://andrewgelman.com/2005/07/31/n_is_never_larg). Blog post, 31 Jul 2005. Accessed 22 Jan 2013.
[2] Norvig, Peter. 2009. [The Unreasonable Effectiveness of Data](http://static.googleusercontent.com/media/research.google.com/en//pubs/archive/35179.pdf).
[3] Jimmy Lin and Alek Kolcz. Large-Scale Machine Learning at Twitter. Proceedings of the 2012 ACM SIGMOD International Conference on Management of Data (SIGMOD 2012), pages 793-804, May 2012, Scottsdale, Arizona.
[4] Cronin, Beau. "Why Probabilistic Programming Matters." Google+ post, 24 Mar 2013. <https://plus.google.com/u/0/107971134877020469960/posts/KpeRdJKR6Z1>.
[5] Introducing Tensorflow Probability ([on Medium](https://medium.com/tensorflow/introducing-tensorflow-probability-dca4c304e245))
[6] Tensorflow Probability Style Guide ([on Github](https://github.com/tensorflow/probability/blob/master/STYLE_GUIDE.md))
[7] How Not To Program the Tensorflow Graph ([on KDnuggets](https://www.kdnuggets.com/2017/05/how-not-program-tensorflow-graph.html))
[8] Tensorflow's Map Function ([link](https://www.tensorflow.org/api_docs/python/tf/map_fn))
[9] Visualizing Tensorflow graphs in Jupyter Notebooks ([link](https://blog.jakuba.net/2017/05/30/tensorflow-visualization.html))
[10] Tensorflow 1.10 Documentation ([link](https://www.tensorflow.org/api_docs/))
[11] Tensorflow Probability whitepaper ([link](https://arxiv.org/abs/1711.10604))
[12] TensorFlow | A Concise Handbook of TensorFlow ([link](https://tf.wiki/))
[13] TensorFlow | A Concise Handbook of TensorFlow, alternative URL ([link](https://github.com/snowkylin/Tensorflow-cn))
[14] Google Colab | Examples ([Overview of Colaboratory](https://colab.research.google.com/notebooks/basic_features_overview.ipynb)) ([Forms](https://colab.research.google.com/notebooks/forms.ipynb)) ([Charts in Colaboratory](https://colab.research.google.com/notebooks/charts.ipynb))
```
from IPython.core.display import HTML
def css_styling():
    styles = open("../styles/custom.css", "r").read()
    return HTML(styles)
css_styling()
```
|
{- Byzantine Fault Tolerant Consensus Verification in Agda, version 0.9.
Copyright (c) 2020 Oracle and/or its affiliates.
Licensed under the Universal Permissive License v 1.0 as shown at https://opensource.oracle.com/licenses/upl
-}
open import Category.Functor
open import Data.Maybe
open import Function
open import Level
open import Relation.Binary.PropositionalEquality
module Optics.Functorial where
Lens' : (F : Set → Set) → RawFunctor F → Set → Set → Set
Lens' F _ S A = (A → F A) → S → F S
data Lens (S A : Set) : Set₁ where
  lens : ((F : Set → Set)(rf : RawFunctor F) → Lens' F rf S A)
       → Lens S A
private
  cf : {A : Set} → RawFunctor {Level.zero} (const A)
  cf = record { _<$>_ = λ x x₁ → x₁ }
  if : RawFunctor {Level.zero} id
  if = record { _<$>_ = λ x x₁ → x x₁ }
-- We can make lenses relatively painlessly without requiring reflection
-- by providing getter and setter functions
mkLens' : ∀ {A B : Set}
        → (B → A)
        → (B → A → B)
        → Lens B A
mkLens' {A} {B} get set =
  lens (λ F rf f b → Category.Functor.RawFunctor._<$>_
                       {F = F} rf
                       {A = A}
                       {B = B}
                       (set b)
                       (f (get b)))
-- Getter:
-- this is typed as ^\.
_^∙_ : ∀{S A} → S → Lens S A → A
_^∙_ {_} {A} s (lens p) = p (const A) cf id s
-- Setter:
set : ∀{S A} → Lens S A → A → S → S
set (lens p) a s = p id if (const a) s
infixr 4 _∙~_
_∙~_ = set
-- _|>_ is renamed to _&_ by Util.Prelude
set? : ∀{S A} → Lens S (Maybe A) → A → S → S
set? l a s = s |> l ∙~ just a
infixr 4 _?~_
_?~_ = set?
-- Modifier:
over : ∀{S A} → Lens S A → (A → A) → S → S
over (lens p) f s = p id if f s
infixr 4 _%~_
_%~_ = over
-- Composition
infixr 30 _∙_
_∙_ : ∀{S A B} → Lens S A → Lens A B → Lens S B
(lens p) ∙ (lens q) = lens (λ F rf x x₁ → p F rf (q F rf x) x₁)
-- Relation between the same field of two states. This most general form allows us to specify a
-- Lens S A, a function A → B, and a relation between two B's, and holds iff the relation holds
-- between the values yielded by applying the Lens to two S's and then applying the function to
-- the results; more specific variants are provided below
_[_]L_f=_at_ : ∀ {ℓ} {S A B : Set} → S → (B → B → Set ℓ) → S → (A → B) → Lens S A → Set ℓ
s₁ [ _~_ ]L s₂ f= f at l = f (s₁ ^∙ l) ~ f (s₂ ^∙ l)
_[_]L_at_ : ∀ {ℓ} {S A} → S → (A → A → Set ℓ) → S → Lens S A → Set ℓ
s₁ [ _~_ ]L s₂ at l = _[_]L_f=_at_ s₁ _~_ s₂ id l
infix 4 _≡L_f=_at_
_≡L_f=_at_ : ∀ {S A B : Set} → (s₁ s₂ : S) → (A → B) → Lens S A → Set
s₁ ≡L s₂ f= f at l = _[_]L_f=_at_ s₁ _≡_ s₂ f l
infix 4 _≡L_at_
_≡L_at_ : ∀ {S A} → (s₁ s₂ : S) → Lens S A → Set
s₁ ≡L s₂ at l = _[_]L_f=_at_ s₁ _≡_ s₂ id l
|
subroutine psopareto(basnam)
!========================================================================================
!==== This subroutine executes basic PSO in Pareto estimation mode. ====
!==== by Adam Siade ====
!========================================================================================
!========================================================================================
use psodat
implicit none
! specifications:
!----------------------------------------------------------------------------------------
!----------------------------------------------------------------------------------------
! external routines for run management via YAMR
!----------------------------------------------------------------------------------------
external rmif_run
integer rmif_run
external rmif_get_num_failed_runs
integer rmif_get_num_failed_runs
external rmif_get_failed_run_ids
integer rmif_get_failed_run_ids
external rmif_get_num_total_runs
integer rmif_get_num_total_runs
external rmif_delete
integer rmif_delete
!----------------------------------------------------------------------------------------
! local PSO-main variables
!----------------------------------------------------------------------------------------
integer::err,irun,iiter,ipart,iparm,iparm1,iparm2,repred,iptout,iobs,ipto,irep,fail,&
inpar
double precision::alpha
character(len=100),intent(in)::basnam
!----------------------------------------------------------------------------------------
!----------------------------------------------------------------------------------------
err = 0
repred = 0
iptout = 1
fail = 0
alpha = 0.0d+00
!
! remove all repository output files from previous runs
!call system('rm ./*.rep')
!
! restart from previous run if requested
if (rstpso == 1) then
!
call readrst(basnam)
!
if (nrepact > 1) then
!
! manage repository and calculate fitness
if (repmode == 1) then
!
! use grid-based method described by Coello et al, 2004
call repgrid(repred)
!
else if (repmode == 2) then
!
! use loneliness method
call loneliness(repred,alpha)
!
end if
!
else if (nrepact == 1) then
!
do irep=1,npop+nrep
if (repindx(irep) == 1) fitness(irep) = 1.00d+00
end do
!
end if
!
! write initial conditions to record file
call listipt(basnam)
!
call listpto(-1,iptout,basnam,alpha)
!
else
!
! write initial conditions to record file
call listipt(basnam)
!
!-- generate random parameter sets and velocities, set pbest, and add corresponding runs to the queue
call random_seed()
!
inpar = 0
!
do ipart=1,npop
!
if (initp == 1 .and. ipart == 1) then
!
do iparm=1,npar
partval(ipart,iparm) = parval1(iparm)
end do
!
call unisamp(ipart,0)
!
else if (initp == 2) then
!
inpar = inpar + 1
!
do iparm1=1,npar
!
do iparm2=1,npar
!
if (trim(parnme(iparm1)) == trim(iprnme(iparm2))) then
partval(ipart,iparm1) = (iprval(iparm2,inpar) - offset(iparm1))/scale(iparm1)
exit
end if
!
if (iparm2 == npar) then
!
write(*,'(A,A/A)')'Parameter names in Pareto parameter file do not match',&
' in control file','-- stopping execution --'
stop
!
end if
!
end do
!
end do
!
call unisamp(ipart,0)
!
if (inpar == nitp) initp = 0
!
else
!
call unisamp(ipart,1)
!
end if
!
do iparm=1,npar
parval(iparm) = scale(iparm)*partval(ipart,iparm) + offset(iparm)
end do
!
! add model runs to the queue
call modelrm(1,irun,fail)
!
end do
!
!-- execute model runs
err = rmif_run()
!
!-- evaluate each initial objective value for each particle
do ipart=1,npop
!
modfail(ipart) = 0
!
! get model run results
call modelrm(0,ipart-1,fail)
!
modfail(ipart) = fail
!
if (fail == 0) then
!
call ptoobjeval(ipart)
!
! save observations for recording purposes
do iobs=1,nobs
mobssav(ipart,iobs) = mobsval(iobs)
end do
!
else
!
do ipto=1,nptogp
ptogp(ipart,ipto) = 1.00d+30
end do
!
if (nptocon > 0) violate(ipart) = 1.00d+30
!
end if
!
if (nptocon > 0) then
!
! since this is the initial condition, vioopt set directly to initial violate
vioopt(ipart) = violate(ipart)
!
end if
!
! since this is the initial condition, ptogpopts set directly to their objs
do ipto=1,nptogp
ptogpopt(ipart,ipto) = ptogp(ipart,ipto)
end do
!
end do
!
! check if maximum allowable model failures has been exceeded
fail = 0
!
do ipart=1,npop
fail = fail + modfail(ipart)
end do
!
if (fail > nforg) then
write(*,'(I0,A,I0)')fail,' failed model runs greater than limit set by user, ',nforg
write(*,'(A)')'-- stopping execution --'
stop
end if
!
! update pareto repository
call repcont(0,repred)
!
if (nrepact > 1) then
!
! manage repository and calculate fitness
if (repmode == 1) then
!
! use grid-based method described by Coello et al, 2004
call repgrid(repred)
!
else if (repmode == 2) then
!
! use loneliness method
call loneliness(repred,alpha)
!
end if
!
else if (nrepact == 1) then
!
do irep=1,npop+nrep
if (repindx(irep) == 1) fitness(irep) = 1.00d+00
end do
!
end if
!
! list output for pareto
call listpto(0,iptout,basnam,alpha)
!
!-- write restart data if requested
if (trim(rstfle) == 'restart') then
call wrparerst(basnam)
end if
!
! stop if noptmax = 0
if (noptmax == 0) then
!
err = rmif_delete()
!
call wrfinpareto(basnam)
!
stop
!
end if
!
end if
! begin the PSO iterative procedure
! ---------------------------------
do iiter=1,noptmax
!
!-- write message to terminal
call witmess(iiter)
!
!-- calculate velocities and update particle positions
call pertpareto(iiter)
!
!-- reinitialize run manager and make another set of runs
call initialrm(1)
!
!-- add model runs to the queue
do ipart=1,npop
!
do iparm=1,npar
parval(iparm) = scale(iparm)*partval(ipart,iparm) + offset(iparm)
end do
!
call modelrm(1,irun,fail)
!
end do
!
!-- execute model runs
err = rmif_run()
!
!-- evaluate objective value for each particle, find pbest, gbest and gindex
do ipart=1,npop
!
modfail(ipart) = 0
!
!---- get model run results
call modelrm(0,ipart-1,fail)
!
modfail(ipart) = fail
!
if (fail == 0) then
!
call ptoobjeval(ipart)
!
do iobs=1,nobs
mobssav(ipart,iobs) = mobsval(iobs)
end do
!
else
!
do ipto=1,nptogp
ptogp(ipart,ipto) = 1.00d+30
end do
!
if (nptocon > 0) violate(ipart) = 1.00d+30
!
end if
!
end do
!
! check if maximum allowable model failures has been exceeded
fail = 0
!
do ipart=1,npop
fail = fail + modfail(ipart)
end do
!
if (fail > nforg) then
write(*,'(I0,A,I0)')fail,' failed model runs greater than limit set by user, ',nforg
write(*,'(A)')'-- stopping execution --'
stop
end if
!
!-- update pareto repository
call repcont(iiter,repred)
!
if (nrepact > 1) then
!
! manage repository and calculate fitness
if (repmode == 1) then
!
! use grid-based method described by Coello et al, 2004
call repgrid(repred)
!
else if (repmode == 2) then
!
! use loneliness method
call loneliness(repred,alpha)
!
end if
!
else if (nrepact == 1) then
!
do irep=1,npop+nrep
if (repindx(irep) == 1) fitness(irep) = 1.00d+00
end do
!
end if
!
!-- set pbest
call pbestpareto(iiter)
!
!-- list output for pareto
call listpto(iiter,iptout,basnam,alpha)
!
!-- write restart data if requested
if (trim(rstfle) == 'restart') then
call wrparerst(basnam)
end if
!
end do
!
! end main loop of PSO iterative procedure
!-----------------------------------------
!
! write out final repository data
call wrfinpareto(basnam)
end subroutine psopareto